Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig                      |   59
-rw-r--r--  drivers/dma/Makefile                     |    5
-rw-r--r--  drivers/dma/acpi-dma.c                   |   17
-rw-r--r--  drivers/dma/at_hdmac.c                   |    1
-rw-r--r--  drivers/dma/cppi41.c                     |   20
-rw-r--r--  drivers/dma/dmaengine.c                  |   11
-rw-r--r--  drivers/dma/dmatest.c                    |    4
-rw-r--r--  drivers/dma/dw/core.c                    |   77
-rw-r--r--  drivers/dma/dw/pci.c                     |   32
-rw-r--r--  drivers/dma/dw/platform.c                |   18
-rw-r--r--  drivers/dma/dw/regs.h                    |    4
-rw-r--r--  drivers/dma/edma.c                       |  346
-rw-r--r--  drivers/dma/fsl-edma.c                   |  985
-rw-r--r--  drivers/dma/fsldma.c                     |  306
-rw-r--r--  drivers/dma/imx-dma.c                    |   13
-rw-r--r--  drivers/dma/imx-sdma.c                   |   25
-rw-r--r--  drivers/dma/ioat/dma.c                   |   52
-rw-r--r--  drivers/dma/ioat/dma.h                   |    1
-rw-r--r--  drivers/dma/ioat/dma_v2.c                |   11
-rw-r--r--  drivers/dma/ioat/dma_v3.c                |    3
-rw-r--r--  drivers/dma/mmp_pdma.c                   |  103
-rw-r--r--  drivers/dma/mmp_tdma.c                   |   50
-rw-r--r--  drivers/dma/mpc512x_dma.c                |  342
-rw-r--r--  drivers/dma/mv_xor.c                     |   32
-rw-r--r--  drivers/dma/omap-dma.c                   |  677
-rw-r--r--  drivers/dma/pch_dma.c                    |    7
-rw-r--r--  drivers/dma/ppc4xx/Makefile              |    2
-rw-r--r--  drivers/dma/ppc4xx/apm82181-adma.c       | 2201
-rw-r--r--  drivers/dma/ppc4xx/ppc460ex_4chan_dma.c  | 1110
-rw-r--r--  drivers/dma/ppc4xx/ppc460ex_4chan_dma.h  |  531
-rw-r--r--  drivers/dma/qcom_bam_dma.c               | 1111
-rw-r--r--  drivers/dma/s3c24xx-dma.c                |  115
-rw-r--r--  drivers/dma/sa11x0-dma.c                 |    4
-rw-r--r--  drivers/dma/sh/Kconfig                   |    8
-rw-r--r--  drivers/dma/sh/Makefile                  |    1
-rw-r--r--  drivers/dma/sh/rcar-audmapp.c            |  320
-rw-r--r--  drivers/dma/sh/rcar-hpbdma.c             |    1
-rw-r--r--  drivers/dma/sh/shdma-base.c              |  108
-rw-r--r--  drivers/dma/sh/shdma-of.c                |    3
-rw-r--r--  drivers/dma/sh/shdmac.c                  |   28
-rw-r--r--  drivers/dma/sh/sudmac.c                  |   11
-rw-r--r--  drivers/dma/sirf-dma.c                   |   23
-rw-r--r--  drivers/dma/ste_dma40.c                  |  186
-rw-r--r--  drivers/dma/xilinx/Makefile              |    1
-rw-r--r--  drivers/dma/xilinx/xilinx_vdma.c         | 1379
45 files changed, 9729 insertions, 615 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 9bed1a2a67a..d761ad3ba09 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -195,9 +195,26 @@ config AMCC_PPC440SPE_ADMA
help
Enable support for the AMCC PPC440SPe RAID engines.
+config AMCC_PPC460EX_460GT_4CHAN_DMA
+ tristate "AMCC PPC460EX PPC460GT PLB DMA support"
+ depends on 460EX || 460GT || APM821xx
+ select DMA_ENGINE
+ default y
+
+config APM82181_ADMA
+ tristate "APM82181 Asynchonous DMA support"
+ depends on APM821xx
+ select ASYNC_CORE
+ select ASYNC_TX_DMA
+ select DMA_ENGINE
+ select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
+ default y
+ ---help---
+ Enable support for the APM82181 Asynchronous DMA engines.
+
config TIMB_DMA
tristate "Timberdale FPGA DMA support"
- depends on MFD_TIMBERDALE || HAS_IOMEM
+ depends on MFD_TIMBERDALE
select DMA_ENGINE
help
Enable support for the Timberdale FPGA DMA engine.
@@ -234,7 +251,7 @@ config PL330_DMA
config PCH_DMA
tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA"
- depends on PCI && X86
+ depends on PCI && (X86_32 || COMPILE_TEST)
select DMA_ENGINE
help
Enable support for Intel EG20T PCH DMA engine.
@@ -269,7 +286,7 @@ config MXS_DMA
select DMA_ENGINE
help
Support the MXS DMA engine. This engine including APBH-DMA
- and APBX-DMA is integrated into Freescale i.MX23/28 chips.
+ and APBX-DMA is integrated into Freescale i.MX23/28/MX6Q/MX6DL chips.
config EP93XX_DMA
bool "Cirrus Logic EP93xx DMA support"
@@ -308,7 +325,7 @@ config DMA_OMAP
config DMA_BCM2835
tristate "BCM2835 DMA engine support"
- depends on (ARCH_BCM2835 || MACH_BCM2708)
+ depends on ARCH_BCM2835
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
@@ -346,9 +363,34 @@ config MOXART_DMA
tristate "MOXART DMA support"
depends on ARCH_MOXART
select DMA_ENGINE
+ select DMA_OF
select DMA_VIRTUAL_CHANNELS
help
Enable support for the MOXA ART SoC DMA controller.
+
+config FSL_EDMA
+ tristate "Freescale eDMA engine support"
+ depends on OF
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Support the Freescale eDMA engine with programmable channel
+ multiplexing capability for DMA request sources (slots).
+ This module can be found on Freescale Vybrid and LS-1 SoCs.
+
+config XILINX_VDMA
+ tristate "Xilinx AXI VDMA Engine"
+ depends on (ARCH_ZYNQ || MICROBLAZE)
+ select DMA_ENGINE
+ help
+ Enable support for Xilinx AXI VDMA Soft IP.
+
+ This engine provides high-bandwidth direct memory access
+ between memory and AXI4-Stream video type target
+ peripherals including peripherals which support AXI4-
+ Stream Video Protocol. It has two stream interfaces/
+ channels, Memory Mapped to Stream (MM2S) and Stream to
+ Memory Mapped (S2MM) for the data transfers.
config DMA_ENGINE
bool
@@ -400,4 +442,13 @@ config DMATEST
config DMA_ENGINE_RAID
bool
+config QCOM_BAM_DMA
+ tristate "QCOM BAM DMA support"
+ depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ ---help---
+ Enable support for the QCOM BAM DMA controller. This controller
+ provides DMA capabilities for a variety of on-chip devices.
+
endif
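The new tristate symbols above (FSL_EDMA, XILINX_VDMA, QCOM_BAM_DMA) gate whole objects through the Makefile, but C code can also test them directly. A minimal sketch, assuming nothing beyond <linux/kconfig.h>; board_has_extra_dma() is a hypothetical helper, not part of the patch:

#include <linux/kconfig.h>
#include <linux/types.h>

/* IS_ENABLED() evaluates to 1 for both =y and =m, so it works for tristates. */
static inline bool board_has_extra_dma(void)
{
	return IS_ENABLED(CONFIG_FSL_EDMA) || IS_ENABLED(CONFIG_QCOM_BAM_DMA);
}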
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index a029d0f4a1b..7acb4c437fa 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -21,7 +21,7 @@ obj-$(CONFIG_MX3_IPU) += ipu/
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
obj-$(CONFIG_SH_DMAE_BASE) += sh/
obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
-obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
+obj-y += ppc4xx/
obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
obj-$(CONFIG_IMX_DMA) += imx-dma.o
obj-$(CONFIG_MXS_DMA) += mxs-dma.o
@@ -44,3 +44,6 @@ obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
obj-$(CONFIG_TI_CPPI41) += cppi41.o
obj-$(CONFIG_K3_DMA) += k3dma.o
obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
+obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
+obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
+obj-y += xilinx/
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index 1e506afa33f..de361a156b3 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -13,6 +13,7 @@
*/
#include <linux/device.h>
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -265,7 +266,7 @@ EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_register);
*/
void devm_acpi_dma_controller_free(struct device *dev)
{
- WARN_ON(devres_destroy(dev, devm_acpi_dma_release, NULL, NULL));
+ WARN_ON(devres_release(dev, devm_acpi_dma_release, NULL, NULL));
}
EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free);
@@ -343,7 +344,7 @@ static int acpi_dma_parse_fixed_dma(struct acpi_resource *res, void *data)
* @index: index of FixedDMA descriptor for @dev
*
* Return:
- * Pointer to appropriate dma channel on success or NULL on error.
+ * Pointer to appropriate dma channel on success or an error pointer.
*/
struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
size_t index)
@@ -358,10 +359,10 @@ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
/* Check if the device was enumerated by ACPI */
if (!dev || !ACPI_HANDLE(dev))
- return NULL;
+ return ERR_PTR(-ENODEV);
if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev))
- return NULL;
+ return ERR_PTR(-ENODEV);
memset(&pdata, 0, sizeof(pdata));
pdata.index = index;
@@ -376,7 +377,7 @@ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
acpi_dev_free_resource_list(&resource_list);
if (dma_spec->slave_id < 0 || dma_spec->chan_id < 0)
- return NULL;
+ return ERR_PTR(-ENODEV);
mutex_lock(&acpi_dma_lock);
@@ -399,7 +400,7 @@ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
}
mutex_unlock(&acpi_dma_lock);
- return chan;
+ return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_index);
@@ -413,7 +414,7 @@ EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_index);
* the first FixedDMA descriptor is TX and second is RX.
*
* Return:
- * Pointer to appropriate dma channel on success or NULL on error.
+ * Pointer to appropriate dma channel on success or an error pointer.
*/
struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
const char *name)
@@ -425,7 +426,7 @@ struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
else if (!strcmp(name, "rx"))
index = 1;
else
- return NULL;
+ return ERR_PTR(-ENODEV);
return acpi_dma_request_slave_chan_by_index(dev, index);
}
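With acpi_dma_request_slave_chan_by_*() now returning error pointers instead of NULL, callers can tell a hard failure apart from a controller that simply is not up yet (-EPROBE_DEFER). A minimal consumer sketch; example_probe_dma() is a hypothetical client function:

#include <linux/dmaengine.h>
#include <linux/err.h>

static int example_probe_dma(struct device *dev)
{
	struct dma_chan *chan;

	/* May return ERR_PTR(-EPROBE_DEFER) if the controller is not ready. */
	chan = dma_request_slave_channel_reason(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* ... use the channel ... */
	dma_release_channel(chan);
	return 0;
}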
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index e2c04dc81e2..c13a3bb0f59 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1569,7 +1569,6 @@ static int at_dma_remove(struct platform_device *pdev)
/* Disable interrupts */
atc_disable_chan_irq(atdma, chan->chan_id);
- tasklet_disable(&atchan->tasklet);
tasklet_kill(&atchan->tasklet);
list_del(&chan->device_node);
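The dropped tasklet_disable() matters: tasklet_kill() waits for a scheduled tasklet to run to completion, which a disabled tasklet never does, so the old sequence could hang remove(). A teardown sketch under that assumption; example_channel_teardown() is hypothetical:

#include <linux/interrupt.h>

static void example_channel_teardown(struct tasklet_struct *t, int irq,
				     void *dev_id)
{
	/* Stop the interrupt source first so nothing reschedules the tasklet. */
	free_irq(irq, dev_id);
	/* Then wait for any pending run to finish; do NOT disable it first. */
	tasklet_kill(t);
}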
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index c18aebf7d5a..8f8b0b60887 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -86,6 +86,9 @@
#define USBSS_IRQ_PD_COMP (1 << 2)
+/* Packet Descriptor */
+#define PD2_ZERO_LENGTH (1 << 19)
+
struct cppi41_channel {
struct dma_chan chan;
struct dma_async_tx_descriptor txd;
@@ -307,7 +310,7 @@ static irqreturn_t cppi41_irq(int irq, void *data)
__iormb();
while (val) {
- u32 desc;
+ u32 desc, len;
q_num = __fls(val);
val &= ~(1 << q_num);
@@ -319,9 +322,13 @@ static irqreturn_t cppi41_irq(int irq, void *data)
q_num, desc);
continue;
}
- c->residue = pd_trans_len(c->desc->pd6) -
- pd_trans_len(c->desc->pd0);
+ if (c->desc->pd2 & PD2_ZERO_LENGTH)
+ len = 0;
+ else
+ len = pd_trans_len(c->desc->pd0);
+
+ c->residue = pd_trans_len(c->desc->pd6) - len;
dma_cookie_complete(&c->txd);
c->txd.callback(c->txd.callback_param);
}
@@ -620,12 +627,15 @@ static int cppi41_stop_chan(struct dma_chan *chan)
u32 desc_phys;
int ret;
+ desc_phys = lower_32_bits(c->desc_phys);
+ desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
+ if (!cdd->chan_busy[desc_num])
+ return 0;
+
ret = cppi41_tear_down_chan(c);
if (ret)
return ret;
- desc_phys = lower_32_bits(c->desc_phys);
- desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
WARN_ON(!cdd->chan_busy[desc_num]);
cdd->chan_busy[desc_num] = NULL;
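Two independent fixes in this file: the residue computation treats descriptors flagged PD2_ZERO_LENGTH as zero-byte transfers, and cppi41_stop_chan() now bails out early when the channel was never started, making terminate-all idempotent. A sketch of that early-return pattern with generic stand-in types (the real code indexes cdd->chan_busy[] by descriptor number):

struct chan_state {
	void *busy[32];		/* stand-in for cdd->chan_busy[] */
};

static int example_stop_chan(struct chan_state *s, unsigned int desc_num)
{
	if (!s->busy[desc_num])
		return 0;	/* never started: stopping is a no-op */

	/* ... tear down the hardware queue here ... */
	s->busy[desc_num] = NULL;
	return 0;
}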
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index ed610b49751..d5d30ed863c 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -627,18 +627,13 @@ EXPORT_SYMBOL_GPL(__dma_request_channel);
struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
const char *name)
{
- struct dma_chan *chan;
-
/* If device-tree is present get slave info from here */
if (dev->of_node)
return of_dma_request_slave_channel(dev->of_node, name);
/* If device was enumerated by ACPI get slave info from here */
- if (ACPI_HANDLE(dev)) {
- chan = acpi_dma_request_slave_chan_by_name(dev, name);
- if (chan)
- return chan;
- }
+ if (ACPI_HANDLE(dev))
+ return acpi_dma_request_slave_chan_by_name(dev, name);
return ERR_PTR(-ENODEV);
}
@@ -1014,6 +1009,7 @@ static void dmaengine_unmap(struct kref *kref)
dma_unmap_page(dev, unmap->addr[i], unmap->len,
DMA_BIDIRECTIONAL);
}
+ cnt = unmap->map_cnt;
mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}
@@ -1079,6 +1075,7 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
memset(unmap, 0, sizeof(*unmap));
kref_init(&unmap->kref);
unmap->dev = dev;
+ unmap->map_cnt = nr;
return unmap;
}
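The pairing above fixes a pool mismatch: the entry count is recorded in unmap->map_cnt at allocation and re-read at free time, so the object always returns to the mempool it came from. A simplified sketch of the invariant; pool_alloc_for()/pool_free_for() are hypothetical helpers standing in for __get_unmap_pool():

struct unmap_like {
	int map_cnt;	/* size class recorded at allocation time */
	/* ... addresses and lengths would follow ... */
};

static struct unmap_like *example_alloc(int nr)
{
	struct unmap_like *u = pool_alloc_for(nr);	/* hypothetical */

	if (u)
		u->map_cnt = nr;	/* remember the originating pool */
	return u;
}

static void example_free(struct unmap_like *u)
{
	/* Re-read the recorded count; transient counters may have changed. */
	pool_free_for(u->map_cnt, u);			/* hypothetical */
}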
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 05b6dea770a..e27cec25c59 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -340,7 +340,7 @@ static unsigned int min_odd(unsigned int x, unsigned int y)
static void result(const char *err, unsigned int n, unsigned int src_off,
unsigned int dst_off, unsigned int len, unsigned long data)
{
- pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)",
+ pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
current->comm, n, err, src_off, dst_off, len, data);
}
@@ -348,7 +348,7 @@ static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
unsigned int dst_off, unsigned int len,
unsigned long data)
{
- pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)",
+ pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
current->comm, n, err, src_off, dst_off, len, data);
}
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 13ac3f240e7..a27ded53ab4 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -33,8 +33,8 @@
* of which use ARM any more). See the "Databook" from Synopsys for
* information beyond what licensees probably provide.
*
- * The driver has currently been tested only with the Atmel AT32AP7000,
- * which does not support descriptor writeback.
+ * The driver has been tested with the Atmel AT32AP7000, which does not
+ * support descriptor writeback.
*/
static inline bool is_request_line_unset(struct dw_dma_chan *dwc)
@@ -1479,7 +1479,6 @@ static void dw_dma_off(struct dw_dma *dw)
int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
{
struct dw_dma *dw;
- size_t size;
bool autocfg;
unsigned int dw_params;
unsigned int nr_channels;
@@ -1487,6 +1486,20 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
int err;
int i;
+ dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
+ if (!dw)
+ return -ENOMEM;
+
+ dw->regs = chip->regs;
+ chip->dw = dw;
+
+ dw->clk = devm_clk_get(chip->dev, "hclk");
+ if (IS_ERR(dw->clk))
+ return PTR_ERR(dw->clk);
+ err = clk_prepare_enable(dw->clk);
+ if (err)
+ return err;
+
dw_params = dma_read_byaddr(chip->regs, DW_PARAMS);
autocfg = dw_params >> DW_PARAMS_EN & 0x1;
@@ -1494,33 +1507,31 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
if (!pdata && autocfg) {
pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
+ if (!pdata) {
+ err = -ENOMEM;
+ goto err_pdata;
+ }
/* Fill platform data with the default values */
pdata->is_private = true;
pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
- } else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
- return -EINVAL;
+ } else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
+ err = -EINVAL;
+ goto err_pdata;
+ }
if (autocfg)
nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
else
nr_channels = pdata->nr_channels;
- size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan);
- dw = devm_kzalloc(chip->dev, size, GFP_KERNEL);
- if (!dw)
- return -ENOMEM;
-
- dw->clk = devm_clk_get(chip->dev, "hclk");
- if (IS_ERR(dw->clk))
- return PTR_ERR(dw->clk);
- clk_prepare_enable(dw->clk);
-
- dw->regs = chip->regs;
- chip->dw = dw;
+ dw->chan = devm_kcalloc(chip->dev, nr_channels, sizeof(*dw->chan),
+ GFP_KERNEL);
+ if (!dw->chan) {
+ err = -ENOMEM;
+ goto err_pdata;
+ }
/* Get hardware configuration parameters */
if (autocfg) {
@@ -1545,21 +1556,22 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
/* Disable BLOCK interrupts as well */
channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
- err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt,
- IRQF_SHARED, "dw_dmac", dw);
- if (err)
- return err;
-
/* Create a pool of consistent memory blocks for hardware descriptors */
dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
sizeof(struct dw_desc), 4, 0);
if (!dw->desc_pool) {
dev_err(chip->dev, "No memory for descriptors dma pool\n");
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_pdata;
}
tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
+ err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
+ "dw_dmac", dw);
+ if (err)
+ goto err_pdata;
+
INIT_LIST_HEAD(&dw->dma.channels);
for (i = 0; i < nr_channels; i++) {
struct dw_dma_chan *dwc = &dw->chan[i];
@@ -1647,12 +1659,20 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
dma_writel(dw, CFG, DW_CFG_DMA_EN);
+ err = dma_async_device_register(&dw->dma);
+ if (err)
+ goto err_dma_register;
+
dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n",
nr_channels);
- dma_async_device_register(&dw->dma);
-
return 0;
+
+err_dma_register:
+ free_irq(chip->irq, dw);
+err_pdata:
+ clk_disable_unprepare(dw->clk);
+ return err;
}
EXPORT_SYMBOL_GPL(dw_dma_probe);
@@ -1664,6 +1684,7 @@ int dw_dma_remove(struct dw_dma_chip *chip)
dw_dma_off(dw);
dma_async_device_unregister(&dw->dma);
+ free_irq(chip->irq, dw);
tasklet_kill(&dw->tasklet);
list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
@@ -1672,6 +1693,8 @@ int dw_dma_remove(struct dw_dma_chip *chip)
channel_clear_bit(dw, CH_EN, dwc->mask);
}
+ clk_disable_unprepare(dw->clk);
+
return 0;
}
EXPORT_SYMBOL_GPL(dw_dma_remove);
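The reworked probe() acquires the clock and IRQ up front and unwinds them in reverse order on any later failure. A generic skeleton of that goto-unwind pattern, assuming a hypothetical example_chip with just a clock and an IRQ line:

#include <linux/clk.h>
#include <linux/interrupt.h>

struct example_chip {
	struct clk *clk;
	int irq;
};

static irqreturn_t example_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_register(struct example_chip *chip)
{
	return 0;	/* stands in for dma_async_device_register() */
}

static int example_probe(struct example_chip *chip)
{
	int err;

	err = clk_prepare_enable(chip->clk);
	if (err)
		return err;			/* nothing acquired yet */

	err = request_irq(chip->irq, example_isr, IRQF_SHARED,
			  "example", chip);
	if (err)
		goto err_clk;

	err = example_register(chip);
	if (err)
		goto err_irq;

	return 0;

err_irq:
	free_irq(chip->irq, chip);
err_clk:
	clk_disable_unprepare(chip->clk);
	return err;
}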
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index e89fc24b829..39e30c3c7a9 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -75,7 +75,31 @@ static void dw_pci_remove(struct pci_dev *pdev)
dev_warn(&pdev->dev, "can't remove device properly: %d\n", ret);
}
-static DEFINE_PCI_DEVICE_TABLE(dw_pci_id_table) = {
+#ifdef CONFIG_PM_SLEEP
+
+static int dw_pci_suspend_late(struct device *dev)
+{
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct dw_dma_chip *chip = pci_get_drvdata(pci);
+
+ return dw_dma_suspend(chip);
+};
+
+static int dw_pci_resume_early(struct device *dev)
+{
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct dw_dma_chip *chip = pci_get_drvdata(pci);
+
+ return dw_dma_resume(chip);
+};
+
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops dw_pci_dev_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_pci_suspend_late, dw_pci_resume_early)
+};
+
+static const struct pci_device_id dw_pci_id_table[] = {
/* Medfield */
{ PCI_VDEVICE(INTEL, 0x0827), (kernel_ulong_t)&dw_pci_pdata },
{ PCI_VDEVICE(INTEL, 0x0830), (kernel_ulong_t)&dw_pci_pdata },
@@ -83,6 +107,9 @@ static DEFINE_PCI_DEVICE_TABLE(dw_pci_id_table) = {
/* BayTrail */
{ PCI_VDEVICE(INTEL, 0x0f06), (kernel_ulong_t)&dw_pci_pdata },
{ PCI_VDEVICE(INTEL, 0x0f40), (kernel_ulong_t)&dw_pci_pdata },
+
+ /* Haswell */
+ { PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_pci_pdata },
{ }
};
MODULE_DEVICE_TABLE(pci, dw_pci_id_table);
@@ -92,6 +119,9 @@ static struct pci_driver dw_pci_driver = {
.id_table = dw_pci_id_table,
.probe = dw_pci_probe,
.remove = dw_pci_remove,
+ .driver = {
+ .pm = &dw_pci_dev_pm_ops,
+ },
};
module_pci_driver(dw_pci_driver);
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 453822cc4f9..c5b339af6be 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -256,7 +256,7 @@ MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
#ifdef CONFIG_PM_SLEEP
-static int dw_suspend_noirq(struct device *dev)
+static int dw_suspend_late(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct dw_dma_chip *chip = platform_get_drvdata(pdev);
@@ -264,7 +264,7 @@ static int dw_suspend_noirq(struct device *dev)
return dw_dma_suspend(chip);
}
-static int dw_resume_noirq(struct device *dev)
+static int dw_resume_early(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct dw_dma_chip *chip = platform_get_drvdata(pdev);
@@ -272,20 +272,10 @@ static int dw_resume_noirq(struct device *dev)
return dw_dma_resume(chip);
}
-#else /* !CONFIG_PM_SLEEP */
-
-#define dw_suspend_noirq NULL
-#define dw_resume_noirq NULL
-
-#endif /* !CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM_SLEEP */
static const struct dev_pm_ops dw_dev_pm_ops = {
- .suspend_noirq = dw_suspend_noirq,
- .resume_noirq = dw_resume_noirq,
- .freeze_noirq = dw_suspend_noirq,
- .thaw_noirq = dw_resume_noirq,
- .restore_noirq = dw_resume_noirq,
- .poweroff_noirq = dw_suspend_noirq,
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_suspend_late, dw_resume_early)
};
static struct platform_driver dw_driver = {
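Both dw/pci.c and dw/platform.c now use SET_LATE_SYSTEM_SLEEP_PM_OPS, which wires one late-suspend/early-resume pair into all the sleep transitions (including freeze/thaw and poweroff/restore) and expands to nothing without CONFIG_PM_SLEEP. A generic sketch of the same wiring; the example_* names are hypothetical:

#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int example_suspend_late(struct device *dev)
{
	/* quiesce the controller */
	return 0;
}

static int example_resume_early(struct device *dev)
{
	/* bring the controller back up */
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops example_pm_ops = {
	/* Fills suspend_late/resume_early plus freeze/thaw/poweroff/restore. */
	SET_LATE_SYSTEM_SLEEP_PM_OPS(example_suspend_late, example_resume_early)
};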
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index deb4274f80f..bb98d3e91e8 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -252,13 +252,13 @@ struct dw_dma {
struct tasklet_struct tasklet;
struct clk *clk;
+ /* channels */
+ struct dw_dma_chan *chan;
u8 all_chan_mask;
/* hardware configuration */
unsigned char nr_masters;
unsigned char data_width[4];
-
- struct dw_dma_chan chan[0];
};
static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
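Replacing the chan[0] tail with a pointer lets the channel array be sized after the autoconfiguration registers have been read, rather than at the single struct allocation. A sketch of the two allocation styles with generic types; devm_kcalloc() matches what the reworked dw_dma_probe() does:

#include <linux/device.h>
#include <linux/slab.h>

struct chan_like { int id; };

struct dev_tail {		/* old style: one allocation, count fixed up front */
	int n;
	struct chan_like chan[];
};

struct dev_split {		/* new style: array allocated separately */
	int n;
	struct chan_like *chan;
};

static struct dev_split *example_alloc(struct device *dev, int n)
{
	struct dev_split *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);

	if (!d)
		return NULL;
	/* n may only be known here, after probing the hardware. */
	d->chan = devm_kcalloc(dev, n, sizeof(*d->chan), GFP_KERNEL);
	if (!d->chan)
		return NULL;
	d->n = n;
	return d;
}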
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index cd8da451d19..d08c4dedef3 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -57,14 +57,48 @@
#define EDMA_MAX_SLOTS MAX_NR_SG
#define EDMA_DESCRIPTORS 16
+struct edma_pset {
+ u32 len;
+ dma_addr_t addr;
+ struct edmacc_param param;
+};
+
struct edma_desc {
struct virt_dma_desc vdesc;
struct list_head node;
+ enum dma_transfer_direction direction;
int cyclic;
int absync;
int pset_nr;
+ struct edma_chan *echan;
int processed;
- struct edmacc_param pset[0];
+
+ /*
+ * The following 4 elements are used for residue accounting.
+ *
+ * - processed_stat: the number of SG elements we have traversed
+ * so far to cover accounting. This is updated directly to processed
+ * during edma_callback and is always <= processed, because processed
+ * refers to the number of pending transfer (programmed to EDMA
+ * controller), where as processed_stat tracks number of transfers
+ * accounted for so far.
+ *
+ * - residue: The amount of bytes we have left to transfer for this desc
+ *
+ * - residue_stat: The residue in bytes of data we have covered
+ * so far for accounting. This is updated directly to residue
+ * during callbacks to keep it current.
+ *
+ * - sg_len: Tracks the length of the current intermediate transfer,
+ * this is required to update the residue during intermediate transfer
+ * completion callback.
+ */
+ int processed_stat;
+ u32 sg_len;
+ u32 residue;
+ u32 residue_stat;
+
+ struct edma_pset pset[0];
};
struct edma_cc;
@@ -136,12 +170,14 @@ static void edma_execute(struct edma_chan *echan)
/* Find out how many left */
left = edesc->pset_nr - edesc->processed;
nslots = min(MAX_NR_SG, left);
+ edesc->sg_len = 0;
/* Write descriptor PaRAM set(s) */
for (i = 0; i < nslots; i++) {
j = i + edesc->processed;
- edma_write_slot(echan->slot[i], &edesc->pset[j]);
- dev_dbg(echan->vchan.chan.device->dev,
+ edma_write_slot(echan->slot[i], &edesc->pset[j].param);
+ edesc->sg_len += edesc->pset[j].len;
+ dev_vdbg(echan->vchan.chan.device->dev,
"\n pset[%d]:\n"
" chnum\t%d\n"
" slot\t%d\n"
@@ -154,14 +190,14 @@ static void edma_execute(struct edma_chan *echan)
" cidx\t%08x\n"
" lkrld\t%08x\n",
j, echan->ch_num, echan->slot[i],
- edesc->pset[j].opt,
- edesc->pset[j].src,
- edesc->pset[j].dst,
- edesc->pset[j].a_b_cnt,
- edesc->pset[j].ccnt,
- edesc->pset[j].src_dst_bidx,
- edesc->pset[j].src_dst_cidx,
- edesc->pset[j].link_bcntrld);
+ edesc->pset[j].param.opt,
+ edesc->pset[j].param.src,
+ edesc->pset[j].param.dst,
+ edesc->pset[j].param.a_b_cnt,
+ edesc->pset[j].param.ccnt,
+ edesc->pset[j].param.src_dst_bidx,
+ edesc->pset[j].param.src_dst_cidx,
+ edesc->pset[j].param.link_bcntrld);
/* Link to the previous slot if not the last set */
if (i != (nslots - 1))
edma_link(echan->slot[i], echan->slot[i+1]);
@@ -182,11 +218,14 @@ static void edma_execute(struct edma_chan *echan)
echan->ecc->dummy_slot);
}
- edma_resume(echan->ch_num);
-
if (edesc->processed <= MAX_NR_SG) {
- dev_dbg(dev, "first transfer starting %d\n", echan->ch_num);
+ dev_dbg(dev, "first transfer starting on channel %d\n",
+ echan->ch_num);
edma_start(echan->ch_num);
+ } else {
+ dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
+ echan->ch_num, edesc->processed);
+ edma_resume(echan->ch_num);
}
/*
@@ -195,7 +234,7 @@ static void edma_execute(struct edma_chan *echan)
* MAX_NR_SG
*/
if (echan->missed) {
- dev_dbg(dev, "missed event in execute detected\n");
+ dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
edma_clean_channel(echan->ch_num);
edma_stop(echan->ch_num);
edma_start(echan->ch_num);
@@ -240,6 +279,26 @@ static int edma_slave_config(struct edma_chan *echan,
return 0;
}
+static int edma_dma_pause(struct edma_chan *echan)
+{
+ /* Pause/Resume only allowed with cyclic mode */
+ if (!echan->edesc->cyclic)
+ return -EINVAL;
+
+ edma_pause(echan->ch_num);
+ return 0;
+}
+
+static int edma_dma_resume(struct edma_chan *echan)
+{
+ /* Pause/Resume only allowed with cyclic mode */
+ if (!echan->edesc->cyclic)
+ return -EINVAL;
+
+ edma_resume(echan->ch_num);
+ return 0;
+}
+
static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
unsigned long arg)
{
@@ -255,6 +314,14 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
config = (struct dma_slave_config *)arg;
ret = edma_slave_config(echan, config);
break;
+ case DMA_PAUSE:
+ ret = edma_dma_pause(echan);
+ break;
+
+ case DMA_RESUME:
+ ret = edma_dma_resume(echan);
+ break;
+
default:
ret = -ENOSYS;
}
@@ -273,18 +340,23 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
* @dma_length: Total length of the DMA transfer
* @direction: Direction of the transfer
*/
-static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
+static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
enum dma_slave_buswidth dev_width, unsigned int dma_length,
enum dma_transfer_direction direction)
{
struct edma_chan *echan = to_edma_chan(chan);
struct device *dev = chan->device->dev;
+ struct edmacc_param *param = &epset->param;
int acnt, bcnt, ccnt, cidx;
int src_bidx, dst_bidx, src_cidx, dst_cidx;
int absync;
acnt = dev_width;
+
+ /* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
+ if (!burst)
+ burst = 1;
/*
* If the maxburst is equal to the fifo width, use
* A-synced transfers. This allows for large contiguous
@@ -335,41 +407,50 @@ static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
cidx = acnt * bcnt;
}
+ epset->len = dma_length;
+
if (direction == DMA_MEM_TO_DEV) {
src_bidx = acnt;
src_cidx = cidx;
dst_bidx = 0;
dst_cidx = 0;
+ epset->addr = src_addr;
} else if (direction == DMA_DEV_TO_MEM) {
src_bidx = 0;
src_cidx = 0;
dst_bidx = acnt;
dst_cidx = cidx;
+ epset->addr = dst_addr;
+ } else if (direction == DMA_MEM_TO_MEM) {
+ src_bidx = acnt;
+ src_cidx = cidx;
+ dst_bidx = acnt;
+ dst_cidx = cidx;
} else {
dev_err(dev, "%s: direction not implemented yet\n", __func__);
return -EINVAL;
}
- pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
+ param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
/* Configure A or AB synchronized transfers */
if (absync)
- pset->opt |= SYNCDIM;
+ param->opt |= SYNCDIM;
- pset->src = src_addr;
- pset->dst = dst_addr;
+ param->src = src_addr;
+ param->dst = dst_addr;
- pset->src_dst_bidx = (dst_bidx << 16) | src_bidx;
- pset->src_dst_cidx = (dst_cidx << 16) | src_cidx;
+ param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
+ param->src_dst_cidx = (dst_cidx << 16) | src_cidx;
- pset->a_b_cnt = bcnt << 16 | acnt;
- pset->ccnt = ccnt;
+ param->a_b_cnt = bcnt << 16 | acnt;
+ param->ccnt = ccnt;
/*
* Only time when (bcntrld) auto reload is required is for
* A-sync case, and in this case, a requirement of reload value
* of SZ_64K-1 only is assured. 'link' is initially set to NULL
* and then later will be populated by edma_execute.
*/
- pset->link_bcntrld = 0xffffffff;
+ param->link_bcntrld = 0xffffffff;
return absync;
}
@@ -399,23 +480,26 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
dev_width = echan->cfg.dst_addr_width;
burst = echan->cfg.dst_maxburst;
} else {
- dev_err(dev, "%s: bad direction?\n", __func__);
+ dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
return NULL;
}
if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
- dev_err(dev, "Undefined slave buswidth\n");
+ dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
return NULL;
}
edesc = kzalloc(sizeof(*edesc) + sg_len *
sizeof(edesc->pset[0]), GFP_ATOMIC);
if (!edesc) {
- dev_dbg(dev, "Failed to allocate a descriptor\n");
+ dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
return NULL;
}
edesc->pset_nr = sg_len;
+ edesc->residue = 0;
+ edesc->direction = direction;
+ edesc->echan = echan;
/* Allocate a PaRAM slot, if needed */
nslots = min_t(unsigned, MAX_NR_SG, sg_len);
@@ -427,7 +511,8 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
EDMA_SLOT_ANY);
if (echan->slot[i] < 0) {
kfree(edesc);
- dev_err(dev, "Failed to allocate slot\n");
+ dev_err(dev, "%s: Failed to allocate slot\n",
+ __func__);
return NULL;
}
}
@@ -450,16 +535,56 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
}
edesc->absync = ret;
+ edesc->residue += sg_dma_len(sg);
/* If this is the last in a current SG set of transactions,
enable interrupts so that next set is processed */
if (!((i+1) % MAX_NR_SG))
- edesc->pset[i].opt |= TCINTEN;
+ edesc->pset[i].param.opt |= TCINTEN;
/* If this is the last set, enable completion interrupt flag */
if (i == sg_len - 1)
- edesc->pset[i].opt |= TCINTEN;
+ edesc->pset[i].param.opt |= TCINTEN;
}
+ edesc->residue_stat = edesc->residue;
+
+ return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
+}
+
+struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
+ struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long tx_flags)
+{
+ int ret;
+ struct edma_desc *edesc;
+ struct device *dev = chan->device->dev;
+ struct edma_chan *echan = to_edma_chan(chan);
+
+ if (unlikely(!echan || !len))
+ return NULL;
+
+ edesc = kzalloc(sizeof(*edesc) + sizeof(edesc->pset[0]), GFP_ATOMIC);
+ if (!edesc) {
+ dev_dbg(dev, "Failed to allocate a descriptor\n");
+ return NULL;
+ }
+
+ edesc->pset_nr = 1;
+
+ ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
+ DMA_SLAVE_BUSWIDTH_4_BYTES, len, DMA_MEM_TO_MEM);
+ if (ret < 0)
+ return NULL;
+
+ edesc->absync = ret;
+
+ /*
+ * Enable intermediate transfer chaining to re-trigger channel
+ * on completion of every TR, and enable transfer-completion
+ * interrupt on completion of the whole transfer.
+ */
+ edesc->pset[0].param.opt |= ITCCHEN;
+ edesc->pset[0].param.opt |= TCINTEN;
return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
@@ -491,12 +616,12 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
dev_width = echan->cfg.dst_addr_width;
burst = echan->cfg.dst_maxburst;
} else {
- dev_err(dev, "%s: bad direction?\n", __func__);
+ dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
return NULL;
}
if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
- dev_err(dev, "Undefined slave buswidth\n");
+ dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
return NULL;
}
@@ -521,16 +646,18 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
edesc = kzalloc(sizeof(*edesc) + nslots *
sizeof(edesc->pset[0]), GFP_ATOMIC);
if (!edesc) {
- dev_dbg(dev, "Failed to allocate a descriptor\n");
+ dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
return NULL;
}
edesc->cyclic = 1;
edesc->pset_nr = nslots;
+ edesc->residue = edesc->residue_stat = buf_len;
+ edesc->direction = direction;
+ edesc->echan = echan;
- dev_dbg(dev, "%s: nslots=%d\n", __func__, nslots);
- dev_dbg(dev, "%s: period_len=%d\n", __func__, period_len);
- dev_dbg(dev, "%s: buf_len=%d\n", __func__, buf_len);
+ dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
+ __func__, echan->ch_num, nslots, period_len, buf_len);
for (i = 0; i < nslots; i++) {
/* Allocate a PaRAM slot, if needed */
@@ -539,7 +666,9 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
edma_alloc_slot(EDMA_CTLR(echan->ch_num),
EDMA_SLOT_ANY);
if (echan->slot[i] < 0) {
- dev_err(dev, "Failed to allocate slot\n");
+ kfree(edesc);
+ dev_err(dev, "%s: Failed to allocate slot\n",
+ __func__);
return NULL;
}
}
@@ -553,16 +682,18 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
dst_addr, burst, dev_width, period_len,
direction);
- if (ret < 0)
+ if (ret < 0) {
+ kfree(edesc);
return NULL;
+ }
if (direction == DMA_DEV_TO_MEM)
dst_addr += period_len;
else
src_addr += period_len;
- dev_dbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
- dev_dbg(dev,
+ dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
+ dev_vdbg(dev,
"\n pset[%d]:\n"
" chnum\t%d\n"
" slot\t%d\n"
@@ -575,14 +706,14 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
" cidx\t%08x\n"
" lkrld\t%08x\n",
i, echan->ch_num, echan->slot[i],
- edesc->pset[i].opt,
- edesc->pset[i].src,
- edesc->pset[i].dst,
- edesc->pset[i].a_b_cnt,
- edesc->pset[i].ccnt,
- edesc->pset[i].src_dst_bidx,
- edesc->pset[i].src_dst_cidx,
- edesc->pset[i].link_bcntrld);
+ edesc->pset[i].param.opt,
+ edesc->pset[i].param.src,
+ edesc->pset[i].param.dst,
+ edesc->pset[i].param.a_b_cnt,
+ edesc->pset[i].param.ccnt,
+ edesc->pset[i].param.src_dst_bidx,
+ edesc->pset[i].param.src_dst_cidx,
+ edesc->pset[i].param.link_bcntrld);
edesc->absync = ret;
@@ -590,7 +721,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
* Enable interrupts for every period because callback
* has to be called for every period.
*/
- edesc->pset[i].opt |= TCINTEN;
+ edesc->pset[i].param.opt |= TCINTEN;
}
return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
@@ -601,7 +732,6 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
struct edma_chan *echan = data;
struct device *dev = echan->vchan.chan.device->dev;
struct edma_desc *edesc;
- unsigned long flags;
struct edmacc_param p;
edesc = echan->edesc;
@@ -612,27 +742,34 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
switch (ch_status) {
case EDMA_DMA_COMPLETE:
- spin_lock_irqsave(&echan->vchan.lock, flags);
+ spin_lock(&echan->vchan.lock);
if (edesc) {
if (edesc->cyclic) {
vchan_cyclic_callback(&edesc->vdesc);
} else if (edesc->processed == edesc->pset_nr) {
dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
+ edesc->residue = 0;
edma_stop(echan->ch_num);
vchan_cookie_complete(&edesc->vdesc);
edma_execute(echan);
} else {
dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num);
+
+ /* Update statistics for tx_status */
+ edesc->residue -= edesc->sg_len;
+ edesc->residue_stat = edesc->residue;
+ edesc->processed_stat = edesc->processed;
+
edma_execute(echan);
}
}
- spin_unlock_irqrestore(&echan->vchan.lock, flags);
+ spin_unlock(&echan->vchan.lock);
break;
case EDMA_DMA_CC_ERROR:
- spin_lock_irqsave(&echan->vchan.lock, flags);
+ spin_lock(&echan->vchan.lock);
edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p);
@@ -663,7 +800,7 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
edma_trigger_channel(echan->ch_num);
}
- spin_unlock_irqrestore(&echan->vchan.lock, flags);
+ spin_unlock(&echan->vchan.lock);
break;
default:
@@ -699,7 +836,7 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
echan->alloced = true;
echan->slot[0] = echan->ch_num;
- dev_dbg(dev, "allocated channel for %u:%u\n",
+ dev_dbg(dev, "allocated channel %d for %u:%u\n", echan->ch_num,
EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));
return 0;
@@ -751,23 +888,52 @@ static void edma_issue_pending(struct dma_chan *chan)
spin_unlock_irqrestore(&echan->vchan.lock, flags);
}
-static size_t edma_desc_size(struct edma_desc *edesc)
+static u32 edma_residue(struct edma_desc *edesc)
{
+ bool dst = edesc->direction == DMA_DEV_TO_MEM;
+ struct edma_pset *pset = edesc->pset;
+ dma_addr_t done, pos;
int i;
- size_t size;
-
- if (edesc->absync)
- for (size = i = 0; i < edesc->pset_nr; i++)
- size += (edesc->pset[i].a_b_cnt & 0xffff) *
- (edesc->pset[i].a_b_cnt >> 16) *
- edesc->pset[i].ccnt;
- else
- size = (edesc->pset[0].a_b_cnt & 0xffff) *
- (edesc->pset[0].a_b_cnt >> 16) +
- (edesc->pset[0].a_b_cnt & 0xffff) *
- (SZ_64K - 1) * edesc->pset[0].ccnt;
-
- return size;
+
+ /*
+ * We always read the dst/src position from the first RamPar
+ * pset. That's the one which is active now.
+ */
+ pos = edma_get_position(edesc->echan->slot[0], dst);
+
+ /*
+ * Cyclic is simple. Just subtract pset[0].addr from pos.
+ *
+ * We never update edesc->residue in the cyclic case, so we
+ * can tell the remaining room to the end of the circular
+ * buffer.
+ */
+ if (edesc->cyclic) {
+ done = pos - pset->addr;
+ edesc->residue_stat = edesc->residue - done;
+ return edesc->residue_stat;
+ }
+
+ /*
+ * For SG operation we catch up with the last processed
+ * status.
+ */
+ pset += edesc->processed_stat;
+
+ for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) {
+ /*
+ * If we are inside this pset address range, we know
+ * this is the active one. Get the current delta and
+ * stop walking the psets.
+ */
+ if (pos >= pset->addr && pos < pset->addr + pset->len)
+ return edesc->residue_stat - (pos - pset->addr);
+
+ /* Otherwise mark it done and update residue_stat. */
+ edesc->processed_stat++;
+ edesc->residue_stat -= pset->len;
+ }
+ return edesc->residue_stat;
}
/* Check request completion status */
@@ -785,13 +951,10 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
return ret;
spin_lock_irqsave(&echan->vchan.lock, flags);
- vdesc = vchan_find_desc(&echan->vchan, cookie);
- if (vdesc) {
- txstate->residue = edma_desc_size(to_edma_desc(&vdesc->tx));
- } else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
- struct edma_desc *edesc = echan->edesc;
- txstate->residue = edma_desc_size(edesc);
- }
+ if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
+ txstate->residue = edma_residue(echan->edesc);
+ else if ((vdesc = vchan_find_desc(&echan->vchan, cookie)))
+ txstate->residue = to_edma_desc(&vdesc->tx)->residue;
spin_unlock_irqrestore(&echan->vchan.lock, flags);
return ret;
@@ -817,18 +980,43 @@ static void __init edma_chan_init(struct edma_cc *ecc,
}
}
+#define EDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
+static int edma_dma_device_slave_caps(struct dma_chan *dchan,
+ struct dma_slave_caps *caps)
+{
+ caps->src_addr_widths = EDMA_DMA_BUSWIDTHS;
+ caps->dstn_addr_widths = EDMA_DMA_BUSWIDTHS;
+ caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ caps->cmd_pause = true;
+ caps->cmd_terminate = true;
+ caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+
+ return 0;
+}
+
static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
struct device *dev)
{
dma->device_prep_slave_sg = edma_prep_slave_sg;
dma->device_prep_dma_cyclic = edma_prep_dma_cyclic;
+ dma->device_prep_dma_memcpy = edma_prep_dma_memcpy;
dma->device_alloc_chan_resources = edma_alloc_chan_resources;
dma->device_free_chan_resources = edma_free_chan_resources;
dma->device_issue_pending = edma_issue_pending;
dma->device_tx_status = edma_tx_status;
dma->device_control = edma_control;
+ dma->device_slave_caps = edma_dma_device_slave_caps;
dma->dev = dev;
+ /*
+ * Code using DMA memcpy must make sure the transfer length is
+ * aligned to the dma->copy_align boundary.
+ */
+ dma->copy_align = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
INIT_LIST_HEAD(&dma->channels);
}
@@ -856,6 +1044,8 @@ static int edma_probe(struct platform_device *pdev)
dma_cap_zero(ecc->dma_slave.cap_mask);
dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);
+ dma_cap_set(DMA_CYCLIC, ecc->dma_slave.cap_mask);
+ dma_cap_set(DMA_MEMCPY, ecc->dma_slave.cap_mask);
edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev);
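The per-descriptor residue bookkeeping added above is what dmaengine_tx_status() ultimately reports to clients. A client-side sketch of reading it (generic dmaengine usage, not part of the patch):

#include <linux/dmaengine.h>

static size_t example_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_COMPLETE)
		return 0;

	return state.residue;	/* bytes not yet transferred */
}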
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
new file mode 100644
index 00000000000..b396a7fb53a
--- /dev/null
+++ b/drivers/dma/fsl-edma.c
@@ -0,0 +1,985 @@
+/*
+ * drivers/dma/fsl-edma.c
+ *
+ * Copyright 2013-2014 Freescale Semiconductor, Inc.
+ *
+ * Driver for the Freescale eDMA engine with flexible channel multiplexing
+ * capability for DMA request sources. The eDMA block can be found on some
+ * Vybrid and Layerscape SoCs.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+
+#include "virt-dma.h"
+
+#define EDMA_CR 0x00
+#define EDMA_ES 0x04
+#define EDMA_ERQ 0x0C
+#define EDMA_EEI 0x14
+#define EDMA_SERQ 0x1B
+#define EDMA_CERQ 0x1A
+#define EDMA_SEEI 0x19
+#define EDMA_CEEI 0x18
+#define EDMA_CINT 0x1F
+#define EDMA_CERR 0x1E
+#define EDMA_SSRT 0x1D
+#define EDMA_CDNE 0x1C
+#define EDMA_INTR 0x24
+#define EDMA_ERR 0x2C
+
+#define EDMA_TCD_SADDR(x) (0x1000 + 32 * (x))
+#define EDMA_TCD_SOFF(x) (0x1004 + 32 * (x))
+#define EDMA_TCD_ATTR(x) (0x1006 + 32 * (x))
+#define EDMA_TCD_NBYTES(x) (0x1008 + 32 * (x))
+#define EDMA_TCD_SLAST(x) (0x100C + 32 * (x))
+#define EDMA_TCD_DADDR(x) (0x1010 + 32 * (x))
+#define EDMA_TCD_DOFF(x) (0x1014 + 32 * (x))
+#define EDMA_TCD_CITER_ELINK(x) (0x1016 + 32 * (x))
+#define EDMA_TCD_CITER(x) (0x1016 + 32 * (x))
+#define EDMA_TCD_DLAST_SGA(x) (0x1018 + 32 * (x))
+#define EDMA_TCD_CSR(x) (0x101C + 32 * (x))
+#define EDMA_TCD_BITER_ELINK(x) (0x101E + 32 * (x))
+#define EDMA_TCD_BITER(x) (0x101E + 32 * (x))
+
+#define EDMA_CR_EDBG BIT(1)
+#define EDMA_CR_ERCA BIT(2)
+#define EDMA_CR_ERGA BIT(3)
+#define EDMA_CR_HOE BIT(4)
+#define EDMA_CR_HALT BIT(5)
+#define EDMA_CR_CLM BIT(6)
+#define EDMA_CR_EMLM BIT(7)
+#define EDMA_CR_ECX BIT(16)
+#define EDMA_CR_CX BIT(17)
+
+#define EDMA_SEEI_SEEI(x) ((x) & 0x1F)
+#define EDMA_CEEI_CEEI(x) ((x) & 0x1F)
+#define EDMA_CINT_CINT(x) ((x) & 0x1F)
+#define EDMA_CERR_CERR(x) ((x) & 0x1F)
+
+#define EDMA_TCD_ATTR_DSIZE(x) (((x) & 0x0007))
+#define EDMA_TCD_ATTR_DMOD(x) (((x) & 0x001F) << 3)
+#define EDMA_TCD_ATTR_SSIZE(x) (((x) & 0x0007) << 8)
+#define EDMA_TCD_ATTR_SMOD(x) (((x) & 0x001F) << 11)
+#define EDMA_TCD_ATTR_SSIZE_8BIT (0x0000)
+#define EDMA_TCD_ATTR_SSIZE_16BIT (0x0100)
+#define EDMA_TCD_ATTR_SSIZE_32BIT (0x0200)
+#define EDMA_TCD_ATTR_SSIZE_64BIT (0x0300)
+#define EDMA_TCD_ATTR_SSIZE_32BYTE (0x0500)
+#define EDMA_TCD_ATTR_DSIZE_8BIT (0x0000)
+#define EDMA_TCD_ATTR_DSIZE_16BIT (0x0001)
+#define EDMA_TCD_ATTR_DSIZE_32BIT (0x0002)
+#define EDMA_TCD_ATTR_DSIZE_64BIT (0x0003)
+#define EDMA_TCD_ATTR_DSIZE_32BYTE (0x0005)
+
+#define EDMA_TCD_SOFF_SOFF(x) (x)
+#define EDMA_TCD_NBYTES_NBYTES(x) (x)
+#define EDMA_TCD_SLAST_SLAST(x) (x)
+#define EDMA_TCD_DADDR_DADDR(x) (x)
+#define EDMA_TCD_CITER_CITER(x) ((x) & 0x7FFF)
+#define EDMA_TCD_DOFF_DOFF(x) (x)
+#define EDMA_TCD_DLAST_SGA_DLAST_SGA(x) (x)
+#define EDMA_TCD_BITER_BITER(x) ((x) & 0x7FFF)
+
+#define EDMA_TCD_CSR_START BIT(0)
+#define EDMA_TCD_CSR_INT_MAJOR BIT(1)
+#define EDMA_TCD_CSR_INT_HALF BIT(2)
+#define EDMA_TCD_CSR_D_REQ BIT(3)
+#define EDMA_TCD_CSR_E_SG BIT(4)
+#define EDMA_TCD_CSR_E_LINK BIT(5)
+#define EDMA_TCD_CSR_ACTIVE BIT(6)
+#define EDMA_TCD_CSR_DONE BIT(7)
+
+#define EDMAMUX_CHCFG_DIS 0x0
+#define EDMAMUX_CHCFG_ENBL 0x80
+#define EDMAMUX_CHCFG_SOURCE(n) ((n) & 0x3F)
+
+#define DMAMUX_NR 2
+
+#define FSL_EDMA_BUSWIDTHS BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
+
+struct fsl_edma_hw_tcd {
+ u32 saddr;
+ u16 soff;
+ u16 attr;
+ u32 nbytes;
+ u32 slast;
+ u32 daddr;
+ u16 doff;
+ u16 citer;
+ u32 dlast_sga;
+ u16 csr;
+ u16 biter;
+};
+
+struct fsl_edma_sw_tcd {
+ dma_addr_t ptcd;
+ struct fsl_edma_hw_tcd *vtcd;
+};
+
+struct fsl_edma_slave_config {
+ enum dma_transfer_direction dir;
+ enum dma_slave_buswidth addr_width;
+ u32 dev_addr;
+ u32 burst;
+ u32 attr;
+};
+
+struct fsl_edma_chan {
+ struct virt_dma_chan vchan;
+ enum dma_status status;
+ struct fsl_edma_engine *edma;
+ struct fsl_edma_desc *edesc;
+ struct fsl_edma_slave_config fsc;
+ struct dma_pool *tcd_pool;
+};
+
+struct fsl_edma_desc {
+ struct virt_dma_desc vdesc;
+ struct fsl_edma_chan *echan;
+ bool iscyclic;
+ unsigned int n_tcds;
+ struct fsl_edma_sw_tcd tcd[];
+};
+
+struct fsl_edma_engine {
+ struct dma_device dma_dev;
+ void __iomem *membase;
+ void __iomem *muxbase[DMAMUX_NR];
+ struct clk *muxclk[DMAMUX_NR];
+ struct mutex fsl_edma_mutex;
+ u32 n_chans;
+ int txirq;
+ int errirq;
+ bool big_endian;
+ struct fsl_edma_chan chans[];
+};
+
+/*
+ * R/W functions for big- or little-endian registers:
+ * the eDMA controller's endianness is independent of the CPU core's.
+ */
+
+static u16 edma_readw(struct fsl_edma_engine *edma, void __iomem *addr)
+{
+ if (edma->big_endian)
+ return ioread16be(addr);
+ else
+ return ioread16(addr);
+}
+
+static u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
+{
+ if (edma->big_endian)
+ return ioread32be(addr);
+ else
+ return ioread32(addr);
+}
+
+static void edma_writeb(struct fsl_edma_engine *edma, u8 val, void __iomem *addr)
+{
+ iowrite8(val, addr);
+}
+
+static void edma_writew(struct fsl_edma_engine *edma, u16 val, void __iomem *addr)
+{
+ if (edma->big_endian)
+ iowrite16be(val, addr);
+ else
+ iowrite16(val, addr);
+}
+
+static void edma_writel(struct fsl_edma_engine *edma, u32 val, void __iomem *addr)
+{
+ if (edma->big_endian)
+ iowrite32be(val, addr);
+ else
+ iowrite32(val, addr);
+}
+
+static struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct fsl_edma_chan, vchan.chan);
+}
+
+static struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct fsl_edma_desc, vdesc);
+}
+
+static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
+{
+ void __iomem *addr = fsl_chan->edma->membase;
+ u32 ch = fsl_chan->vchan.chan.chan_id;
+
+ edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), addr + EDMA_SEEI);
+ edma_writeb(fsl_chan->edma, ch, addr + EDMA_SERQ);
+}
+
+static void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
+{
+ void __iomem *addr = fsl_chan->edma->membase;
+ u32 ch = fsl_chan->vchan.chan.chan_id;
+
+ edma_writeb(fsl_chan->edma, ch, addr + EDMA_CERQ);
+ edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), addr + EDMA_CEEI);
+}
+
+static void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
+ unsigned int slot, bool enable)
+{
+ u32 ch = fsl_chan->vchan.chan.chan_id;
+ void __iomem *muxaddr = fsl_chan->edma->muxbase[ch / DMAMUX_NR];
+ unsigned chans_per_mux, ch_off;
+
+ chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
+ ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
+
+ if (enable)
+ edma_writeb(fsl_chan->edma,
+ EDMAMUX_CHCFG_ENBL | EDMAMUX_CHCFG_SOURCE(slot),
+ muxaddr + ch_off);
+ else
+ edma_writeb(fsl_chan->edma, EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
+}
+
+static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
+{
+ switch (addr_width) {
+ case 1:
+ return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
+ case 2:
+ return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
+ case 4:
+ return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
+ case 8:
+ return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
+ default:
+ return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
+ }
+}
+
+static void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
+{
+ struct fsl_edma_desc *fsl_desc;
+ int i;
+
+ fsl_desc = to_fsl_edma_desc(vdesc);
+ for (i = 0; i < fsl_desc->n_tcds; i++)
+ dma_pool_free(fsl_desc->echan->tcd_pool,
+ fsl_desc->tcd[i].vtcd,
+ fsl_desc->tcd[i].ptcd);
+ kfree(fsl_desc);
+}
+
+static int fsl_edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ struct dma_slave_config *cfg = (void *)arg;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ fsl_edma_disable_request(fsl_chan);
+ fsl_chan->edesc = NULL;
+ vchan_get_all_descriptors(&fsl_chan->vchan, &head);
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+ vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+ return 0;
+
+ case DMA_SLAVE_CONFIG:
+ fsl_chan->fsc.dir = cfg->direction;
+ if (cfg->direction == DMA_DEV_TO_MEM) {
+ fsl_chan->fsc.dev_addr = cfg->src_addr;
+ fsl_chan->fsc.addr_width = cfg->src_addr_width;
+ fsl_chan->fsc.burst = cfg->src_maxburst;
+ fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width);
+ } else if (cfg->direction == DMA_MEM_TO_DEV) {
+ fsl_chan->fsc.dev_addr = cfg->dst_addr;
+ fsl_chan->fsc.addr_width = cfg->dst_addr_width;
+ fsl_chan->fsc.burst = cfg->dst_maxburst;
+ fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width);
+ } else {
+ return -EINVAL;
+ }
+ return 0;
+
+ case DMA_PAUSE:
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ if (fsl_chan->edesc) {
+ fsl_edma_disable_request(fsl_chan);
+ fsl_chan->status = DMA_PAUSED;
+ }
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+ return 0;
+
+ case DMA_RESUME:
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ if (fsl_chan->edesc) {
+ fsl_edma_enable_request(fsl_chan);
+ fsl_chan->status = DMA_IN_PROGRESS;
+ }
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+ return 0;
+
+ default:
+ return -ENXIO;
+ }
+}
+
+static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
+ struct virt_dma_desc *vdesc, bool in_progress)
+{
+ struct fsl_edma_desc *edesc = fsl_chan->edesc;
+ void __iomem *addr = fsl_chan->edma->membase;
+ u32 ch = fsl_chan->vchan.chan.chan_id;
+ enum dma_transfer_direction dir = fsl_chan->fsc.dir;
+ dma_addr_t cur_addr, dma_addr;
+ size_t len, size;
+ int i;
+
+ /* calculate the total size in this desc */
+ for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
+ len += edma_readl(fsl_chan->edma, &(edesc->tcd[i].vtcd->nbytes))
+ * edma_readw(fsl_chan->edma, &(edesc->tcd[i].vtcd->biter));
+
+ if (!in_progress)
+ return len;
+
+ if (dir == DMA_MEM_TO_DEV)
+ cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_SADDR(ch));
+ else
+ cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_DADDR(ch));
+
+ /* figure out which TCDs are finished and calculate the residue */
+ for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
+ size = edma_readl(fsl_chan->edma, &(edesc->tcd[i].vtcd->nbytes))
+ * edma_readw(fsl_chan->edma, &(edesc->tcd[i].vtcd->biter));
+ if (dir == DMA_MEM_TO_DEV)
+ dma_addr = edma_readl(fsl_chan->edma,
+ &(edesc->tcd[i].vtcd->saddr));
+ else
+ dma_addr = edma_readl(fsl_chan->edma,
+ &(edesc->tcd[i].vtcd->daddr));
+
+ len -= size;
+ if (cur_addr > dma_addr && cur_addr < dma_addr + size) {
+ len += dma_addr + size - cur_addr;
+ break;
+ }
+ }
+
+ return len;
+}
+
+static enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ struct virt_dma_desc *vdesc;
+ enum dma_status status;
+ unsigned long flags;
+
+ status = dma_cookie_status(chan, cookie, txstate);
+ if (status == DMA_COMPLETE)
+ return status;
+
+ if (!txstate)
+ return fsl_chan->status;
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
+ if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
+ txstate->residue = fsl_edma_desc_residue(fsl_chan, vdesc, true);
+ else if (vdesc)
+ txstate->residue = fsl_edma_desc_residue(fsl_chan, vdesc, false);
+ else
+ txstate->residue = 0;
+
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+
+ return fsl_chan->status;
+}
+
+static void fsl_edma_set_tcd_params(struct fsl_edma_chan *fsl_chan,
+ u32 src, u32 dst, u16 attr, u16 soff, u32 nbytes,
+ u32 slast, u16 citer, u16 biter, u32 doff, u32 dlast_sga,
+ u16 csr)
+{
+ void __iomem *addr = fsl_chan->edma->membase;
+ u32 ch = fsl_chan->vchan.chan.chan_id;
+
+ /*
+ * TCD parameters have been swapped in fill_tcd_params(),
+ * so just write them to registers in the cpu endian here
+ */
+ writew(0, addr + EDMA_TCD_CSR(ch));
+ writel(src, addr + EDMA_TCD_SADDR(ch));
+ writel(dst, addr + EDMA_TCD_DADDR(ch));
+ writew(attr, addr + EDMA_TCD_ATTR(ch));
+ writew(soff, addr + EDMA_TCD_SOFF(ch));
+ writel(nbytes, addr + EDMA_TCD_NBYTES(ch));
+ writel(slast, addr + EDMA_TCD_SLAST(ch));
+ writew(citer, addr + EDMA_TCD_CITER(ch));
+ writew(biter, addr + EDMA_TCD_BITER(ch));
+ writew(doff, addr + EDMA_TCD_DOFF(ch));
+ writel(dlast_sga, addr + EDMA_TCD_DLAST_SGA(ch));
+ writew(csr, addr + EDMA_TCD_CSR(ch));
+}
+
+static void fill_tcd_params(struct fsl_edma_engine *edma,
+ struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
+ u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
+ u16 biter, u16 doff, u32 dlast_sga, bool major_int,
+ bool disable_req, bool enable_sg)
+{
+ u16 csr = 0;
+
+ /*
+ * eDMA hardware SGs require the TCD parameters stored in memory
+ * the same endian as the eDMA module so that they can be loaded
+ * automatically by the engine
+ */
+ edma_writel(edma, src, &(tcd->saddr));
+ edma_writel(edma, dst, &(tcd->daddr));
+ edma_writew(edma, attr, &(tcd->attr));
+ edma_writew(edma, EDMA_TCD_SOFF_SOFF(soff), &(tcd->soff));
+ edma_writel(edma, EDMA_TCD_NBYTES_NBYTES(nbytes), &(tcd->nbytes));
+ edma_writel(edma, EDMA_TCD_SLAST_SLAST(slast), &(tcd->slast));
+ edma_writew(edma, EDMA_TCD_CITER_CITER(citer), &(tcd->citer));
+ edma_writew(edma, EDMA_TCD_DOFF_DOFF(doff), &(tcd->doff));
+ edma_writel(edma, EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga), &(tcd->dlast_sga));
+ edma_writew(edma, EDMA_TCD_BITER_BITER(biter), &(tcd->biter));
+ if (major_int)
+ csr |= EDMA_TCD_CSR_INT_MAJOR;
+
+ if (disable_req)
+ csr |= EDMA_TCD_CSR_D_REQ;
+
+ if (enable_sg)
+ csr |= EDMA_TCD_CSR_E_SG;
+
+ edma_writew(edma, csr, &(tcd->csr));
+}
+
+static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
+ int sg_len)
+{
+ struct fsl_edma_desc *fsl_desc;
+ int i;
+
+ fsl_desc = kzalloc(sizeof(*fsl_desc) + sizeof(struct fsl_edma_sw_tcd) * sg_len,
+ GFP_NOWAIT);
+ if (!fsl_desc)
+ return NULL;
+
+ fsl_desc->echan = fsl_chan;
+ fsl_desc->n_tcds = sg_len;
+ for (i = 0; i < sg_len; i++) {
+ fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
+ GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
+ if (!fsl_desc->tcd[i].vtcd)
+ goto err;
+ }
+ return fsl_desc;
+
+err:
+ while (--i >= 0)
+ dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
+ fsl_desc->tcd[i].ptcd);
+ kfree(fsl_desc);
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ struct fsl_edma_desc *fsl_desc;
+ dma_addr_t dma_buf_next;
+ int sg_len, i;
+ u32 src_addr, dst_addr, last_sg, nbytes;
+ u16 soff, doff, iter;
+
+ if (!is_slave_direction(fsl_chan->fsc.dir))
+ return NULL;
+
+ sg_len = buf_len / period_len;
+ fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
+ if (!fsl_desc)
+ return NULL;
+ fsl_desc->iscyclic = true;
+
+ dma_buf_next = dma_addr;
+ nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
+ iter = period_len / nbytes;
+
+ for (i = 0; i < sg_len; i++) {
+ if (dma_buf_next >= dma_addr + buf_len)
+ dma_buf_next = dma_addr;
+
+ /* get next sg's physical address */
+ last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
+
+ if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
+ src_addr = dma_buf_next;
+ dst_addr = fsl_chan->fsc.dev_addr;
+ soff = fsl_chan->fsc.addr_width;
+ doff = 0;
+ } else {
+ src_addr = fsl_chan->fsc.dev_addr;
+ dst_addr = dma_buf_next;
+ soff = 0;
+ doff = fsl_chan->fsc.addr_width;
+ }
+
+ fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd, src_addr,
+ dst_addr, fsl_chan->fsc.attr, soff, nbytes, 0,
+ iter, iter, doff, last_sg, true, false, true);
+ dma_buf_next += period_len;
+ }
+
+ return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
+}
+
+static struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ struct fsl_edma_desc *fsl_desc;
+ struct scatterlist *sg;
+ u32 src_addr, dst_addr, last_sg, nbytes;
+ u16 soff, doff, iter;
+ int i;
+
+ if (!is_slave_direction(fsl_chan->fsc.dir))
+ return NULL;
+
+ fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
+ if (!fsl_desc)
+ return NULL;
+ fsl_desc->iscyclic = false;
+
+ nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
+ for_each_sg(sgl, sg, sg_len, i) {
+ /* get next sg's physical address */
+ last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
+
+ if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
+ src_addr = sg_dma_address(sg);
+ dst_addr = fsl_chan->fsc.dev_addr;
+ soff = fsl_chan->fsc.addr_width;
+ doff = 0;
+ } else {
+ src_addr = fsl_chan->fsc.dev_addr;
+ dst_addr = sg_dma_address(sg);
+ soff = 0;
+ doff = fsl_chan->fsc.addr_width;
+ }
+
+ iter = sg_dma_len(sg) / nbytes;
+ if (i < sg_len - 1) {
+ last_sg = fsl_desc->tcd[(i + 1)].ptcd;
+ fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd,
+ src_addr, dst_addr, fsl_chan->fsc.attr,
+ soff, nbytes, 0, iter, iter, doff, last_sg,
+ false, false, true);
+ } else {
+ last_sg = 0;
+ fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd,
+ src_addr, dst_addr, fsl_chan->fsc.attr,
+ soff, nbytes, 0, iter, iter, doff, last_sg,
+ true, true, false);
+ }
+ }
+
+ return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
+}
+
+static void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
+{
+ struct fsl_edma_hw_tcd *tcd;
+ struct virt_dma_desc *vdesc;
+
+ vdesc = vchan_next_desc(&fsl_chan->vchan);
+ if (!vdesc)
+ return;
+ fsl_chan->edesc = to_fsl_edma_desc(vdesc);
+ tcd = fsl_chan->edesc->tcd[0].vtcd;
+ fsl_edma_set_tcd_params(fsl_chan, tcd->saddr, tcd->daddr, tcd->attr,
+ tcd->soff, tcd->nbytes, tcd->slast, tcd->citer,
+ tcd->biter, tcd->doff, tcd->dlast_sga, tcd->csr);
+ fsl_edma_enable_request(fsl_chan);
+ fsl_chan->status = DMA_IN_PROGRESS;
+}
+
+static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
+{
+ struct fsl_edma_engine *fsl_edma = dev_id;
+ unsigned int intr, ch;
+ void __iomem *base_addr;
+ struct fsl_edma_chan *fsl_chan;
+
+ base_addr = fsl_edma->membase;
+
+ intr = edma_readl(fsl_edma, base_addr + EDMA_INTR);
+ if (!intr)
+ return IRQ_NONE;
+
+ for (ch = 0; ch < fsl_edma->n_chans; ch++) {
+ if (intr & (0x1 << ch)) {
+ edma_writeb(fsl_edma, EDMA_CINT_CINT(ch),
+ base_addr + EDMA_CINT);
+
+ fsl_chan = &fsl_edma->chans[ch];
+
+			spin_lock(&fsl_chan->vchan.lock);
+			if (!fsl_chan->edesc) {
+				/* terminate_all() raced with this interrupt */
+				spin_unlock(&fsl_chan->vchan.lock);
+				continue;
+			}
+
+			if (!fsl_chan->edesc->iscyclic) {
+ list_del(&fsl_chan->edesc->vdesc.node);
+ vchan_cookie_complete(&fsl_chan->edesc->vdesc);
+ fsl_chan->edesc = NULL;
+ fsl_chan->status = DMA_COMPLETE;
+ } else {
+ vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
+ }
+
+ if (!fsl_chan->edesc)
+ fsl_edma_xfer_desc(fsl_chan);
+
+ spin_unlock(&fsl_chan->vchan.lock);
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
+{
+ struct fsl_edma_engine *fsl_edma = dev_id;
+ unsigned int err, ch;
+
+ err = edma_readl(fsl_edma, fsl_edma->membase + EDMA_ERR);
+ if (!err)
+ return IRQ_NONE;
+
+ for (ch = 0; ch < fsl_edma->n_chans; ch++) {
+ if (err & (0x1 << ch)) {
+ fsl_edma_disable_request(&fsl_edma->chans[ch]);
+ edma_writeb(fsl_edma, EDMA_CERR_CERR(ch),
+ fsl_edma->membase + EDMA_CERR);
+ fsl_edma->chans[ch].status = DMA_ERROR;
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fsl_edma_irq_handler(int irq, void *dev_id)
+{
+ if (fsl_edma_tx_handler(irq, dev_id) == IRQ_HANDLED)
+ return IRQ_HANDLED;
+
+ return fsl_edma_err_handler(irq, dev_id);
+}
+
+static void fsl_edma_issue_pending(struct dma_chan *chan)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+
+ if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
+ fsl_edma_xfer_desc(fsl_chan);
+
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+}
+
+static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
+ struct dma_chan *chan, *_chan;
+
+ if (dma_spec->args_count != 2)
+ return NULL;
+
+ mutex_lock(&fsl_edma->fsl_edma_mutex);
+ list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) {
+ if (chan->client_count)
+ continue;
+ if ((chan->chan_id / DMAMUX_NR) == dma_spec->args[0]) {
+ chan = dma_get_slave_channel(chan);
+ if (chan) {
+ chan->device->privatecnt++;
+ fsl_edma_chan_mux(to_fsl_edma_chan(chan),
+ dma_spec->args[1], true);
+ mutex_unlock(&fsl_edma->fsl_edma_mutex);
+ return chan;
+ }
+ }
+ }
+ mutex_unlock(&fsl_edma->fsl_edma_mutex);
+ return NULL;
+}
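[Editor's note: a hedged sketch of the consumer side that this xlate resolves; the two specifier cells are the DMAMUX group and the request slot, and "rx" is an assumed dma-names entry.]

	struct dma_chan *chan = dma_request_slave_channel(dev, "rx");
	if (!chan)
		return -ENODEV;	/* args[0] = mux group, args[1] = request slot */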
+
+static int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+
+	fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
+				sizeof(struct fsl_edma_hw_tcd),
+				32, 0);
+	if (!fsl_chan->tcd_pool)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void fsl_edma_free_chan_resources(struct dma_chan *chan)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ fsl_edma_disable_request(fsl_chan);
+ fsl_edma_chan_mux(fsl_chan, 0, false);
+ fsl_chan->edesc = NULL;
+ vchan_get_all_descriptors(&fsl_chan->vchan, &head);
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+
+ vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+ dma_pool_destroy(fsl_chan->tcd_pool);
+ fsl_chan->tcd_pool = NULL;
+}
+
+static int fsl_dma_device_slave_caps(struct dma_chan *dchan,
+ struct dma_slave_caps *caps)
+{
+ caps->src_addr_widths = FSL_EDMA_BUSWIDTHS;
+ caps->dstn_addr_widths = FSL_EDMA_BUSWIDTHS;
+ caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ caps->cmd_pause = true;
+ caps->cmd_terminate = true;
+
+ return 0;
+}
+
+static int
+fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
+{
+ int ret;
+
+ fsl_edma->txirq = platform_get_irq_byname(pdev, "edma-tx");
+ if (fsl_edma->txirq < 0) {
+ dev_err(&pdev->dev, "Can't get edma-tx irq.\n");
+ return fsl_edma->txirq;
+ }
+
+ fsl_edma->errirq = platform_get_irq_byname(pdev, "edma-err");
+ if (fsl_edma->errirq < 0) {
+ dev_err(&pdev->dev, "Can't get edma-err irq.\n");
+ return fsl_edma->errirq;
+ }
+
+ if (fsl_edma->txirq == fsl_edma->errirq) {
+ ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
+ fsl_edma_irq_handler, 0, "eDMA", fsl_edma);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't register eDMA IRQ.\n");
+ return ret;
+ }
+ } else {
+ ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
+ fsl_edma_tx_handler, 0, "eDMA tx", fsl_edma);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't register eDMA tx IRQ.\n");
+ return ret;
+ }
+
+ ret = devm_request_irq(&pdev->dev, fsl_edma->errirq,
+ fsl_edma_err_handler, 0, "eDMA err", fsl_edma);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't register eDMA err IRQ.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int fsl_edma_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct fsl_edma_engine *fsl_edma;
+ struct fsl_edma_chan *fsl_chan;
+ struct resource *res;
+ int len, chans;
+ int ret, i;
+
+ ret = of_property_read_u32(np, "dma-channels", &chans);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't get dma-channels.\n");
+ return ret;
+ }
+
+ len = sizeof(*fsl_edma) + sizeof(*fsl_chan) * chans;
+ fsl_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+ if (!fsl_edma)
+ return -ENOMEM;
+
+ fsl_edma->n_chans = chans;
+ mutex_init(&fsl_edma->fsl_edma_mutex);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ fsl_edma->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fsl_edma->membase))
+ return PTR_ERR(fsl_edma->membase);
+
+ for (i = 0; i < DMAMUX_NR; i++) {
+ char clkname[32];
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
+ fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fsl_edma->muxbase[i]))
+ return PTR_ERR(fsl_edma->muxbase[i]);
+
+ sprintf(clkname, "dmamux%d", i);
+ fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname);
+ if (IS_ERR(fsl_edma->muxclk[i])) {
+ dev_err(&pdev->dev, "Missing DMAMUX block clock.\n");
+ return PTR_ERR(fsl_edma->muxclk[i]);
+ }
+
+ ret = clk_prepare_enable(fsl_edma->muxclk[i]);
+ if (ret) {
+			dev_err(&pdev->dev, "Failed to enable DMAMUX clock.\n");
+ return ret;
+ }
+	}
+
+ ret = fsl_edma_irq_init(pdev, fsl_edma);
+ if (ret)
+ return ret;
+
+ fsl_edma->big_endian = of_property_read_bool(np, "big-endian");
+
+ INIT_LIST_HEAD(&fsl_edma->dma_dev.channels);
+ for (i = 0; i < fsl_edma->n_chans; i++) {
+ struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];
+
+ fsl_chan->edma = fsl_edma;
+
+ fsl_chan->vchan.desc_free = fsl_edma_free_desc;
+ vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
+
+ edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i));
+ fsl_edma_chan_mux(fsl_chan, 0, false);
+ }
+
+ dma_cap_set(DMA_PRIVATE, fsl_edma->dma_dev.cap_mask);
+ dma_cap_set(DMA_SLAVE, fsl_edma->dma_dev.cap_mask);
+ dma_cap_set(DMA_CYCLIC, fsl_edma->dma_dev.cap_mask);
+
+ fsl_edma->dma_dev.dev = &pdev->dev;
+ fsl_edma->dma_dev.device_alloc_chan_resources
+ = fsl_edma_alloc_chan_resources;
+ fsl_edma->dma_dev.device_free_chan_resources
+ = fsl_edma_free_chan_resources;
+ fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
+ fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
+ fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic;
+ fsl_edma->dma_dev.device_control = fsl_edma_control;
+ fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;
+ fsl_edma->dma_dev.device_slave_caps = fsl_dma_device_slave_caps;
+
+ platform_set_drvdata(pdev, fsl_edma);
+
+ ret = dma_async_device_register(&fsl_edma->dma_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't register Freescale eDMA engine.\n");
+ return ret;
+ }
+
+ ret = of_dma_controller_register(np, fsl_edma_xlate, fsl_edma);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't register Freescale eDMA of_dma.\n");
+ dma_async_device_unregister(&fsl_edma->dma_dev);
+ return ret;
+ }
+
+ /* enable round robin arbitration */
+ edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, fsl_edma->membase + EDMA_CR);
+
+ return 0;
+}
+
+static int fsl_edma_remove(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev);
+ int i;
+
+ of_dma_controller_free(np);
+ dma_async_device_unregister(&fsl_edma->dma_dev);
+
+ for (i = 0; i < DMAMUX_NR; i++)
+ clk_disable_unprepare(fsl_edma->muxclk[i]);
+
+ return 0;
+}
+
+static const struct of_device_id fsl_edma_dt_ids[] = {
+ { .compatible = "fsl,vf610-edma", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
+
+static struct platform_driver fsl_edma_driver = {
+ .driver = {
+ .name = "fsl-edma",
+ .owner = THIS_MODULE,
+ .of_match_table = fsl_edma_dt_ids,
+ },
+ .probe = fsl_edma_probe,
+ .remove = fsl_edma_remove,
+};
+
+static int __init fsl_edma_init(void)
+{
+ return platform_driver_register(&fsl_edma_driver);
+}
+subsys_initcall(fsl_edma_init);
+
+static void __exit fsl_edma_exit(void)
+{
+ platform_driver_unregister(&fsl_edma_driver);
+}
+module_exit(fsl_edma_exit);
+
+MODULE_ALIAS("platform:fsl-edma");
+MODULE_DESCRIPTION("Freescale eDMA engine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index f157c6f76b3..e0fec68aed2 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -61,6 +61,16 @@ static u32 get_sr(struct fsldma_chan *chan)
return DMA_IN(chan, &chan->regs->sr, 32);
}
+static void set_mr(struct fsldma_chan *chan, u32 val)
+{
+ DMA_OUT(chan, &chan->regs->mr, val, 32);
+}
+
+static u32 get_mr(struct fsldma_chan *chan)
+{
+ return DMA_IN(chan, &chan->regs->mr, 32);
+}
+
static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
@@ -71,6 +81,11 @@ static dma_addr_t get_cdar(struct fsldma_chan *chan)
return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}
+static void set_bcr(struct fsldma_chan *chan, u32 val)
+{
+ DMA_OUT(chan, &chan->regs->bcr, val, 32);
+}
+
static u32 get_bcr(struct fsldma_chan *chan)
{
return DMA_IN(chan, &chan->regs->bcr, 32);
@@ -135,7 +150,7 @@ static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
static void dma_init(struct fsldma_chan *chan)
{
/* Reset the channel */
- DMA_OUT(chan, &chan->regs->mr, 0, 32);
+ set_mr(chan, 0);
switch (chan->feature & FSL_DMA_IP_MASK) {
case FSL_DMA_IP_85XX:
@@ -144,16 +159,15 @@ static void dma_init(struct fsldma_chan *chan)
* EOLNIE - End of links interrupt enable
* BWC - Bandwidth sharing among channels
*/
- DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
- | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE, 32);
+ set_mr(chan, FSL_DMA_MR_BWC | FSL_DMA_MR_EIE
+ | FSL_DMA_MR_EOLNIE);
break;
case FSL_DMA_IP_83XX:
/* Set the channel to below modes:
* EOTIE - End-of-transfer interrupt enable
* PRC_RM - PCI read multiple
*/
- DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
- | FSL_DMA_MR_PRC_RM, 32);
+ set_mr(chan, FSL_DMA_MR_EOTIE | FSL_DMA_MR_PRC_RM);
break;
}
}
@@ -175,10 +189,10 @@ static void dma_start(struct fsldma_chan *chan)
{
u32 mode;
- mode = DMA_IN(chan, &chan->regs->mr, 32);
+ mode = get_mr(chan);
if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
- DMA_OUT(chan, &chan->regs->bcr, 0, 32);
+ set_bcr(chan, 0);
mode |= FSL_DMA_MR_EMP_EN;
} else {
mode &= ~FSL_DMA_MR_EMP_EN;
@@ -191,7 +205,7 @@ static void dma_start(struct fsldma_chan *chan)
mode |= FSL_DMA_MR_CS;
}
- DMA_OUT(chan, &chan->regs->mr, mode, 32);
+ set_mr(chan, mode);
}
static void dma_halt(struct fsldma_chan *chan)
@@ -200,7 +214,7 @@ static void dma_halt(struct fsldma_chan *chan)
int i;
/* read the mode register */
- mode = DMA_IN(chan, &chan->regs->mr, 32);
+ mode = get_mr(chan);
/*
* The 85xx controller supports channel abort, which will stop
@@ -209,14 +223,14 @@ static void dma_halt(struct fsldma_chan *chan)
*/
if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
mode |= FSL_DMA_MR_CA;
- DMA_OUT(chan, &chan->regs->mr, mode, 32);
+ set_mr(chan, mode);
mode &= ~FSL_DMA_MR_CA;
}
/* stop the DMA controller */
mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN);
- DMA_OUT(chan, &chan->regs->mr, mode, 32);
+ set_mr(chan, mode);
/* wait for the DMA controller to become idle */
for (i = 0; i < 100; i++) {
@@ -245,7 +259,7 @@ static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
u32 mode;
- mode = DMA_IN(chan, &chan->regs->mr, 32);
+ mode = get_mr(chan);
switch (size) {
case 0:
@@ -259,7 +273,7 @@ static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
break;
}
- DMA_OUT(chan, &chan->regs->mr, mode, 32);
+ set_mr(chan, mode);
}
/**
@@ -277,7 +291,7 @@ static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
u32 mode;
- mode = DMA_IN(chan, &chan->regs->mr, 32);
+ mode = get_mr(chan);
switch (size) {
case 0:
@@ -291,7 +305,7 @@ static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
break;
}
- DMA_OUT(chan, &chan->regs->mr, mode, 32);
+ set_mr(chan, mode);
}
/**
@@ -312,10 +326,10 @@ static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
BUG_ON(size > 1024);
- mode = DMA_IN(chan, &chan->regs->mr, 32);
+ mode = get_mr(chan);
mode |= (__ilog2(size) << 24) & 0x0f000000;
- DMA_OUT(chan, &chan->regs->mr, mode, 32);
+ set_mr(chan, mode);
}
/**
@@ -404,6 +418,19 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
}
/**
+ * fsl_dma_free_descriptor - Free descriptor from channel's DMA pool.
+ * @chan : Freescale DMA channel
+ * @desc: descriptor to be freed
+ */
+static void fsl_dma_free_descriptor(struct fsldma_chan *chan,
+ struct fsl_desc_sw *desc)
+{
+ list_del(&desc->node);
+ chan_dbg(chan, "LD %p free\n", desc);
+ dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
+}
+
+/**
* fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
* @chan : Freescale DMA channel
*
@@ -426,14 +453,107 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
desc->async_tx.tx_submit = fsl_dma_tx_submit;
desc->async_tx.phys = pdesc;
-#ifdef FSL_DMA_LD_DEBUG
chan_dbg(chan, "LD %p allocated\n", desc);
-#endif
return desc;
}
/**
+ * fsl_chan_xfer_ld_queue - transfer any pending transactions
+ * @chan : Freescale DMA channel
+ *
+ * HARDWARE STATE: idle
+ * LOCKING: must hold chan->desc_lock
+ */
+static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
+{
+ struct fsl_desc_sw *desc;
+
+ /*
+ * If the list of pending descriptors is empty, then we
+ * don't need to do any work at all
+ */
+ if (list_empty(&chan->ld_pending)) {
+ chan_dbg(chan, "no pending LDs\n");
+ return;
+ }
+
+ /*
+ * The DMA controller is not idle, which means that the interrupt
+ * handler will start any queued transactions when it runs after
+ * this transaction finishes
+ */
+ if (!chan->idle) {
+ chan_dbg(chan, "DMA controller still busy\n");
+ return;
+ }
+
+ /*
+ * If there are some link descriptors which have not been
+ * transferred, we need to start the controller
+ */
+
+ /*
+ * Move all elements from the queue of pending transactions
+ * onto the list of running transactions
+ */
+ chan_dbg(chan, "idle, starting controller\n");
+ desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
+ list_splice_tail_init(&chan->ld_pending, &chan->ld_running);
+
+ /*
+ * The 85xx DMA controller doesn't clear the channel start bit
+ * automatically at the end of a transfer. Therefore we must clear
+ * it in software before starting the transfer.
+ */
+ if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
+ u32 mode;
+
+ mode = get_mr(chan);
+ mode &= ~FSL_DMA_MR_CS;
+ set_mr(chan, mode);
+ }
+
+ /*
+ * Program the descriptor's address into the DMA controller,
+ * then start the DMA transaction
+ */
+ set_cdar(chan, desc->async_tx.phys);
+ get_cdar(chan);
+
+ dma_start(chan);
+ chan->idle = false;
+}
+
+/**
+ * fsldma_cleanup_descriptor - cleanup and free a single link descriptor
+ * @chan: Freescale DMA channel
+ * @desc: descriptor to cleanup and free
+ *
+ * This function is used on a descriptor which has been executed by the DMA
+ * controller. It will run any callbacks, submit any dependencies, and then
+ * free the descriptor.
+ */
+static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
+ struct fsl_desc_sw *desc)
+{
+ struct dma_async_tx_descriptor *txd = &desc->async_tx;
+
+ /* Run the link descriptor callback function */
+ if (txd->callback) {
+ chan_dbg(chan, "LD %p callback\n", desc);
+ txd->callback(txd->callback_param);
+ }
+
+ /* Run any dependencies */
+ dma_run_dependencies(txd);
+
+ dma_descriptor_unmap(txd);
+ chan_dbg(chan, "LD %p free\n", desc);
+ dma_pool_free(chan->desc_pool, desc, txd->phys);
+}
+
+/**
* fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
* @chan : Freescale DMA channel
*
@@ -477,13 +597,8 @@ static void fsldma_free_desc_list(struct fsldma_chan *chan,
{
struct fsl_desc_sw *desc, *_desc;
- list_for_each_entry_safe(desc, _desc, list, node) {
- list_del(&desc->node);
-#ifdef FSL_DMA_LD_DEBUG
- chan_dbg(chan, "LD %p free\n", desc);
-#endif
- dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
- }
+ list_for_each_entry_safe(desc, _desc, list, node)
+ fsl_dma_free_descriptor(chan, desc);
}
static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
@@ -491,13 +606,8 @@ static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
{
struct fsl_desc_sw *desc, *_desc;
- list_for_each_entry_safe_reverse(desc, _desc, list, node) {
- list_del(&desc->node);
-#ifdef FSL_DMA_LD_DEBUG
- chan_dbg(chan, "LD %p free\n", desc);
-#endif
- dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
- }
+ list_for_each_entry_safe_reverse(desc, _desc, list, node)
+ fsl_dma_free_descriptor(chan, desc);
}
/**
@@ -520,35 +630,6 @@ static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
}
static struct dma_async_tx_descriptor *
-fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
-{
- struct fsldma_chan *chan;
- struct fsl_desc_sw *new;
-
- if (!dchan)
- return NULL;
-
- chan = to_fsl_chan(dchan);
-
- new = fsl_dma_alloc_descriptor(chan);
- if (!new) {
- chan_err(chan, "%s\n", msg_ld_oom);
- return NULL;
- }
-
- new->async_tx.cookie = -EBUSY;
- new->async_tx.flags = flags;
-
- /* Insert the link descriptor to the LD ring */
- list_add_tail(&new->node, &new->tx_list);
-
- /* Set End-of-link to the last link descriptor of new list */
- set_ld_eol(chan, new);
-
- return &new->async_tx;
-}
-
-static struct dma_async_tx_descriptor *
fsl_dma_prep_memcpy(struct dma_chan *dchan,
dma_addr_t dma_dst, dma_addr_t dma_src,
size_t len, unsigned long flags)
@@ -817,105 +898,6 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
}
/**
- * fsldma_cleanup_descriptor - cleanup and free a single link descriptor
- * @chan: Freescale DMA channel
- * @desc: descriptor to cleanup and free
- *
- * This function is used on a descriptor which has been executed by the DMA
- * controller. It will run any callbacks, submit any dependencies, and then
- * free the descriptor.
- */
-static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
- struct fsl_desc_sw *desc)
-{
- struct dma_async_tx_descriptor *txd = &desc->async_tx;
-
- /* Run the link descriptor callback function */
- if (txd->callback) {
-#ifdef FSL_DMA_LD_DEBUG
- chan_dbg(chan, "LD %p callback\n", desc);
-#endif
- txd->callback(txd->callback_param);
- }
-
- /* Run any dependencies */
- dma_run_dependencies(txd);
-
- dma_descriptor_unmap(txd);
-#ifdef FSL_DMA_LD_DEBUG
- chan_dbg(chan, "LD %p free\n", desc);
-#endif
- dma_pool_free(chan->desc_pool, desc, txd->phys);
-}
-
-/**
- * fsl_chan_xfer_ld_queue - transfer any pending transactions
- * @chan : Freescale DMA channel
- *
- * HARDWARE STATE: idle
- * LOCKING: must hold chan->desc_lock
- */
-static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
-{
- struct fsl_desc_sw *desc;
-
- /*
- * If the list of pending descriptors is empty, then we
- * don't need to do any work at all
- */
- if (list_empty(&chan->ld_pending)) {
- chan_dbg(chan, "no pending LDs\n");
- return;
- }
-
- /*
- * The DMA controller is not idle, which means that the interrupt
- * handler will start any queued transactions when it runs after
- * this transaction finishes
- */
- if (!chan->idle) {
- chan_dbg(chan, "DMA controller still busy\n");
- return;
- }
-
- /*
- * If there are some link descriptors which have not been
- * transferred, we need to start the controller
- */
-
- /*
- * Move all elements from the queue of pending transactions
- * onto the list of running transactions
- */
- chan_dbg(chan, "idle, starting controller\n");
- desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
- list_splice_tail_init(&chan->ld_pending, &chan->ld_running);
-
- /*
- * The 85xx DMA controller doesn't clear the channel start bit
- * automatically at the end of a transfer. Therefore we must clear
- * it in software before starting the transfer.
- */
- if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
- u32 mode;
-
- mode = DMA_IN(chan, &chan->regs->mr, 32);
- mode &= ~FSL_DMA_MR_CS;
- DMA_OUT(chan, &chan->regs->mr, mode, 32);
- }
-
- /*
- * Program the descriptor's address into the DMA controller,
- * then start the DMA transaction
- */
- set_cdar(chan, desc->async_tx.phys);
- get_cdar(chan);
-
- dma_start(chan);
- chan->idle = false;
-}
-
-/**
* fsl_dma_memcpy_issue_pending - Issue the DMA start command
* @chan : Freescale DMA channel
*/
@@ -1304,12 +1286,10 @@ static int fsldma_of_probe(struct platform_device *op)
fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);
dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
- dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
dma_cap_set(DMA_SG, fdev->common.cap_mask);
dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
- fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
fdev->common.device_tx_status = fsl_tx_status;
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 6f9ac2022ab..286660a12cc 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -422,12 +422,12 @@ static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
/* Tasklet error handler */
tasklet_schedule(&imxdma->channel[i].dma_tasklet);
- printk(KERN_WARNING
- "DMA timeout on channel %d -%s%s%s%s\n", i,
- errcode & IMX_DMA_ERR_BURST ? " burst" : "",
- errcode & IMX_DMA_ERR_REQUEST ? " request" : "",
- errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
- errcode & IMX_DMA_ERR_BUFFER ? " buffer" : "");
+ dev_warn(imxdma->dev,
+ "DMA timeout on channel %d -%s%s%s%s\n", i,
+ errcode & IMX_DMA_ERR_BURST ? " burst" : "",
+ errcode & IMX_DMA_ERR_REQUEST ? " request" : "",
+ errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
+ errcode & IMX_DMA_ERR_BUFFER ? " buffer" : "");
}
return IRQ_HANDLED;
}
@@ -1236,6 +1236,7 @@ static int imxdma_remove(struct platform_device *pdev)
static struct platform_driver imxdma_driver = {
.driver = {
.name = "imx-dma",
+ .owner = THIS_MODULE,
.of_match_table = imx_dma_of_dev_id,
},
.id_table = imx_dma_devtype,
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 4e7918339b1..14867e3ac8f 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -255,6 +255,7 @@ struct sdma_channel {
enum dma_slave_buswidth word_size;
unsigned int buf_tail;
unsigned int num_bd;
+ unsigned int period_len;
struct sdma_buffer_descriptor *bd;
dma_addr_t bd_phys;
unsigned int pc_from_device, pc_to_device;
@@ -449,6 +450,7 @@ static const struct of_device_id sdma_dt_ids[] = {
{ .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
{ .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
+ { .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdma_dt_ids);
@@ -592,6 +594,12 @@ static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
{
+ if (sdmac->desc.callback)
+ sdmac->desc.callback(sdmac->desc.callback_param);
+}
+
+static void sdma_update_channel_loop(struct sdma_channel *sdmac)
+{
struct sdma_buffer_descriptor *bd;
/*
@@ -606,15 +614,10 @@ static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
if (bd->mode.status & BD_RROR)
sdmac->status = DMA_ERROR;
- else
- sdmac->status = DMA_IN_PROGRESS;
bd->mode.status |= BD_DONE;
sdmac->buf_tail++;
sdmac->buf_tail %= sdmac->num_bd;
-
- if (sdmac->desc.callback)
- sdmac->desc.callback(sdmac->desc.callback_param);
}
}
@@ -670,6 +673,9 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
int channel = fls(stat) - 1;
struct sdma_channel *sdmac = &sdma->channel[channel];
+ if (sdmac->flags & IMX_DMA_SG_LOOP)
+ sdma_update_channel_loop(sdmac);
+
tasklet_schedule(&sdmac->tasklet);
__clear_bit(channel, &stat);
@@ -1130,6 +1136,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
sdmac->status = DMA_IN_PROGRESS;
sdmac->buf_tail = 0;
+ sdmac->period_len = period_len;
sdmac->flags |= IMX_DMA_SG_LOOP;
sdmac->direction = direction;
@@ -1226,9 +1233,15 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
struct dma_tx_state *txstate)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
+ u32 residue;
+
+ if (sdmac->flags & IMX_DMA_SG_LOOP)
+ residue = (sdmac->num_bd - sdmac->buf_tail) * sdmac->period_len;
+ else
+ residue = sdmac->chn_count - sdmac->chn_real_count;
dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
- sdmac->chn_count - sdmac->chn_real_count);
+ residue);
return sdmac->status;
}
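[Editor's note: as a worked example of the cyclic residue formula above, with num_bd == 4 buffer descriptors, period_len == 512 and buf_tail == 1, the reported residue is (4 - 1) * 512 = 1536 bytes still outstanding in the current buffer.]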
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 87529181efc..4e3549a1613 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -77,7 +77,8 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
chan = ioat_chan_by_index(instance, bit);
- tasklet_schedule(&chan->cleanup_task);
+ if (test_bit(IOAT_RUN, &chan->state))
+ tasklet_schedule(&chan->cleanup_task);
}
writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
@@ -93,7 +94,8 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
struct ioat_chan_common *chan = data;
- tasklet_schedule(&chan->cleanup_task);
+ if (test_bit(IOAT_RUN, &chan->state))
+ tasklet_schedule(&chan->cleanup_task);
return IRQ_HANDLED;
}
@@ -116,7 +118,6 @@ void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *c
chan->timer.function = device->timer_fn;
chan->timer.data = data;
tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
- tasklet_disable(&chan->cleanup_task);
}
/**
@@ -354,13 +355,49 @@ static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
writel(((u64) chan->completion_dma) >> 32,
chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
- tasklet_enable(&chan->cleanup_task);
+ set_bit(IOAT_RUN, &chan->state);
ioat1_dma_start_null_desc(ioat); /* give chain to dma device */
dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
__func__, ioat->desccount);
return ioat->desccount;
}
+void ioat_stop(struct ioat_chan_common *chan)
+{
+ struct ioatdma_device *device = chan->device;
+ struct pci_dev *pdev = device->pdev;
+ int chan_id = chan_num(chan);
+ struct msix_entry *msix;
+
+ /* 1/ stop irq from firing tasklets
+ * 2/ stop the tasklet from re-arming irqs
+ */
+ clear_bit(IOAT_RUN, &chan->state);
+
+ /* flush inflight interrupts */
+ switch (device->irq_mode) {
+ case IOAT_MSIX:
+ msix = &device->msix_entries[chan_id];
+ synchronize_irq(msix->vector);
+ break;
+ case IOAT_MSI:
+ case IOAT_INTX:
+ synchronize_irq(pdev->irq);
+ break;
+ default:
+ break;
+ }
+
+ /* flush inflight timers */
+ del_timer_sync(&chan->timer);
+
+ /* flush inflight tasklet runs */
+ tasklet_kill(&chan->cleanup_task);
+
+ /* final cleanup now that everything is quiesced and can't re-arm */
+ device->cleanup_fn((unsigned long) &chan->common);
+}
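[Editor's note: ioat_stop() instantiates a general teardown pattern worth spelling out; a hedged, driver-agnostic sketch follows, where my_chan, MY_RUN and my_chan_cleanup are illustrative names, not from this driver.]

	static void my_chan_stop(struct my_chan *c)
	{
		clear_bit(MY_RUN, &c->state);	/* 1) new irqs stop scheduling work */
		synchronize_irq(c->irq);	/* 2) wait out in-flight handlers */
		del_timer_sync(&c->timer);	/* 3) timers can no longer re-arm */
		tasklet_kill(&c->tasklet);	/* 4) last tasklet run has finished */
		my_chan_cleanup(c);		/* 5) nothing can race with us now */
	}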
+
/**
* ioat1_dma_free_chan_resources - release all the descriptors
* @chan: the channel to be cleaned
@@ -379,9 +416,7 @@ static void ioat1_dma_free_chan_resources(struct dma_chan *c)
if (ioat->desccount == 0)
return;
- tasklet_disable(&chan->cleanup_task);
- del_timer_sync(&chan->timer);
- ioat1_cleanup(ioat);
+ ioat_stop(chan);
/* Delay 100ms after reset to allow internal DMA logic to quiesce
* before removing DMA descriptor resources.
@@ -526,8 +561,11 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
static void ioat1_cleanup_event(unsigned long data)
{
struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
+ struct ioat_chan_common *chan = &ioat->base;
ioat1_cleanup(ioat);
+ if (!test_bit(IOAT_RUN, &chan->state))
+ return;
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 11fb877ddca..e982f00a984 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -356,6 +356,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *device);
int ioat_dma_setup_interrupts(struct ioatdma_device *device);
+void ioat_stop(struct ioat_chan_common *chan);
extern const struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 5d3affe7e97..8d1058085ee 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -190,8 +190,11 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
void ioat2_cleanup_event(unsigned long data)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
+ struct ioat_chan_common *chan = &ioat->base;
ioat2_cleanup(ioat);
+ if (!test_bit(IOAT_RUN, &chan->state))
+ return;
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
@@ -553,10 +556,10 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
ioat->issued = 0;
ioat->tail = 0;
ioat->alloc_order = order;
+ set_bit(IOAT_RUN, &chan->state);
spin_unlock_bh(&ioat->prep_lock);
spin_unlock_bh(&chan->cleanup_lock);
- tasklet_enable(&chan->cleanup_task);
ioat2_start_null_desc(ioat);
/* check that we got off the ground */
@@ -566,7 +569,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));
if (is_ioat_active(status) || is_ioat_idle(status)) {
- set_bit(IOAT_RUN, &chan->state);
return 1 << ioat->alloc_order;
} else {
u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
@@ -809,11 +811,8 @@ void ioat2_free_chan_resources(struct dma_chan *c)
if (!ioat->ring)
return;
- tasklet_disable(&chan->cleanup_task);
- del_timer_sync(&chan->timer);
- device->cleanup_fn((unsigned long) c);
+ ioat_stop(chan);
device->reset_hw(chan);
- clear_bit(IOAT_RUN, &chan->state);
spin_lock_bh(&chan->cleanup_lock);
spin_lock_bh(&ioat->prep_lock);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 820817e97e6..b9b38a1cf92 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -464,8 +464,11 @@ static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
static void ioat3_cleanup_event(unsigned long data)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
+ struct ioat_chan_common *chan = &ioat->base;
ioat3_cleanup(ioat);
+ if (!test_bit(IOAT_RUN, &chan->state))
+ return;
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index b439679f412..a7b186d536b 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -29,8 +29,8 @@
#define DALGN 0x00a0
#define DINT 0x00f0
#define DDADR 0x0200
-#define DSADR 0x0204
-#define DTADR 0x0208
+#define DSADR(n) (0x0204 + ((n) << 4))
+#define DTADR(n) (0x0208 + ((n) << 4))
#define DCMD 0x020c
#define DCSR_RUN BIT(31) /* Run Bit (read / write) */
@@ -277,7 +277,7 @@ static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
return;
/* clear the channel mapping in DRCMR */
- reg = DRCMR(pchan->phy->vchan->drcmr);
+ reg = DRCMR(pchan->drcmr);
writel(0, pchan->phy->base + reg);
spin_lock_irqsave(&pdev->phy_lock, flags);
@@ -748,11 +748,92 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
return 0;
}
+static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
+ dma_cookie_t cookie)
+{
+ struct mmp_pdma_desc_sw *sw;
+ u32 curr, residue = 0;
+ bool passed = false;
+ bool cyclic = chan->cyclic_first != NULL;
+
+ /*
+ * If the channel does not have a phy pointer anymore, it has already
+ * been completed. Therefore, its residue is 0.
+ */
+ if (!chan->phy)
+ return 0;
+
+ if (chan->dir == DMA_DEV_TO_MEM)
+ curr = readl(chan->phy->base + DTADR(chan->phy->idx));
+ else
+ curr = readl(chan->phy->base + DSADR(chan->phy->idx));
+
+ list_for_each_entry(sw, &chan->chain_running, node) {
+ u32 start, end, len;
+
+ if (chan->dir == DMA_DEV_TO_MEM)
+ start = sw->desc.dtadr;
+ else
+ start = sw->desc.dsadr;
+
+ len = sw->desc.dcmd & DCMD_LENGTH;
+ end = start + len;
+
+ /*
+		 * 'passed' is latched once we find the descriptor that
+		 * contains the current transfer pointer. All descriptors
+		 * that occur in the list _after_ that partially handled
+		 * descriptor are still to be processed and
+ * are hence added to the residual bytes counter.
+ */
+
+ if (passed) {
+ residue += len;
+ } else if (curr >= start && curr <= end) {
+ residue += end - curr;
+ passed = true;
+ }
+
+ /*
+ * Descriptors that have the ENDIRQEN bit set mark the end of a
+ * transaction chain, and the cookie assigned with it has been
+ * returned previously from mmp_pdma_tx_submit().
+ *
+ * In case we have multiple transactions in the running chain,
+ * and the cookie does not match the one the user asked us
+ * about, reset the state variables and start over.
+ *
+ * This logic does not apply to cyclic transactions, where all
+ * descriptors have the ENDIRQEN bit set, and for which we
+ * can't have multiple transactions on one channel anyway.
+ */
+ if (cyclic || !(sw->desc.dcmd & DCMD_ENDIRQEN))
+ continue;
+
+ if (sw->async_tx.cookie == cookie) {
+ return residue;
+ } else {
+ residue = 0;
+ passed = false;
+ }
+ }
+
+ /* We should only get here in case of cyclic transactions */
+ return residue;
+}
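[Editor's note: a hedged toy model of the walk above, with descriptors reduced to (start, len) pairs and the cyclic/multi-transaction cases left out, may make the latch easier to follow.]

	static unsigned int toy_residue(const u32 (*d)[2], int n, u32 curr)
	{
		unsigned int residue = 0;
		bool passed = false;
		int i;

		for (i = 0; i < n; i++) {
			u32 start = d[i][0], end = start + d[i][1];

			if (passed) {
				residue += d[i][1];	/* wholly unprocessed */
			} else if (curr >= start && curr <= end) {
				residue += end - curr;	/* partially done */
				passed = true;
			}
		}
		return residue;
	}

For two 0x100-byte descriptors at 0x1000 and 0x2000 with curr == 0x1080, this yields 0x80 + 0x100 = 0x180 outstanding bytes.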
+
static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
- return dma_cookie_status(dchan, cookie, txstate);
+ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+ enum dma_status ret;
+
+ ret = dma_cookie_status(dchan, cookie, txstate);
+ if (likely(ret != DMA_ERROR))
+ dma_set_residue(txstate, mmp_pdma_residue(chan, cookie));
+
+ return ret;
}
/**
@@ -858,8 +939,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
struct mmp_pdma_chan *chan;
int ret;
- chan = devm_kzalloc(pdev->dev, sizeof(struct mmp_pdma_chan),
- GFP_KERNEL);
+ chan = devm_kzalloc(pdev->dev, sizeof(*chan), GFP_KERNEL);
if (chan == NULL)
return -ENOMEM;
@@ -867,8 +947,8 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
phy->base = pdev->base;
if (irq) {
- ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler, 0,
- "pdma", phy);
+ ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler,
+ IRQF_SHARED, "pdma", phy);
if (ret) {
dev_err(pdev->dev, "channel request irq fail!\n");
return ret;
@@ -946,8 +1026,7 @@ static int mmp_pdma_probe(struct platform_device *op)
irq_num++;
}
- pdev->phy = devm_kcalloc(pdev->dev,
- dma_channels, sizeof(struct mmp_pdma_chan),
+ pdev->phy = devm_kcalloc(pdev->dev, dma_channels, sizeof(*pdev->phy),
GFP_KERNEL);
if (pdev->phy == NULL)
return -ENOMEM;
@@ -957,8 +1036,8 @@ static int mmp_pdma_probe(struct platform_device *op)
if (irq_num != dma_channels) {
/* all chan share one irq, demux inside */
irq = platform_get_irq(op, 0);
- ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler, 0,
- "pdma", pdev);
+ ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler,
+ IRQF_SHARED, "pdma", pdev);
if (ret)
return ret;
}
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 33f96aaa80c..724f7f4c972 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -22,6 +22,7 @@
#include <mach/regs-icu.h>
#include <linux/platform_data/dma-mmp_tdma.h>
#include <linux/of_device.h>
+#include <linux/of_dma.h>
#include "dmaengine.h"
@@ -541,6 +542,45 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
return 0;
}
+struct mmp_tdma_filter_param {
+ struct device_node *of_node;
+ unsigned int chan_id;
+};
+
+static bool mmp_tdma_filter_fn(struct dma_chan *chan, void *fn_param)
+{
+ struct mmp_tdma_filter_param *param = fn_param;
+ struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
+ struct dma_device *pdma_device = tdmac->chan.device;
+
+ if (pdma_device->dev->of_node != param->of_node)
+ return false;
+
+ if (chan->chan_id != param->chan_id)
+ return false;
+
+ return true;
+}
+
+static struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct mmp_tdma_device *tdev = ofdma->of_dma_data;
+ dma_cap_mask_t mask = tdev->device.cap_mask;
+ struct mmp_tdma_filter_param param;
+
+ if (dma_spec->args_count != 1)
+ return NULL;
+
+ param.of_node = ofdma->of_node;
+ param.chan_id = dma_spec->args[0];
+
+ if (param.chan_id >= TDMA_CHANNEL_NUM)
+ return NULL;
+
+ return dma_request_channel(mask, mmp_tdma_filter_fn, &param);
+}
+
static struct of_device_id mmp_tdma_dt_ids[] = {
{ .compatible = "marvell,adma-1.0", .data = (void *)MMP_AUD_TDMA},
{ .compatible = "marvell,pxa910-squ", .data = (void *)PXA910_SQU},
@@ -631,6 +671,16 @@ static int mmp_tdma_probe(struct platform_device *pdev)
return ret;
}
+ if (pdev->dev.of_node) {
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ mmp_tdma_xlate, tdev);
+ if (ret) {
+ dev_err(tdev->device.dev,
+ "failed to register controller\n");
+ dma_async_device_unregister(&tdev->device);
+ }
+ }
+
dev_info(tdev->device.dev, "initialized\n");
return 0;
}
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 448750da440..2ad43738ac8 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -2,6 +2,7 @@
* Copyright (C) Freescale Semicondutor, Inc. 2007, 2008.
* Copyright (C) Semihalf 2009
* Copyright (C) Ilya Yanok, Emcraft Systems 2010
+ * Copyright (C) Alexander Popov, Promcontroller 2014
*
* Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
* (defines, structures and comments) was taken from MPC5121 DMA driver
@@ -29,8 +30,18 @@
*/
/*
- * This is initial version of MPC5121 DMA driver. Only memory to memory
- * transfers are supported (tested using dmatest module).
+ * MPC512x and MPC8308 DMA driver. It supports
+ * memory to memory data transfers (tested using dmatest module) and
+ * data transfers between memory and peripheral I/O memory
+ * by means of slave scatter/gather with these limitations:
+ * - chunked transfers (described by s/g lists with more than one item)
+ * are refused as long as proper support for scatter/gather is missing;
+ * - transfers on MPC8308 always start from software as this SoC appears
+ * not to have external request lines for peripheral flow control;
+ * - only peripheral devices with 4-byte FIFO access register are supported;
+ * - minimal memory <-> I/O memory transfer chunk is 4 bytes and consequently
+ * source and destination addresses must be 4-byte aligned
+ * and transfer size must be aligned on (4 * maxburst) boundary;
*/
#include <linux/module.h>
@@ -52,9 +63,17 @@
#define MPC_DMA_DESCRIPTORS 64
/* Macro definitions */
-#define MPC_DMA_CHANNELS 64
#define MPC_DMA_TCD_OFFSET 0x1000
+/*
+ * Maximum channel counts for individual hardware variants
+ * and the maximum channel count over all supported controllers,
+ * used for data structure size
+ */
+#define MPC8308_DMACHAN_MAX 16
+#define MPC512x_DMACHAN_MAX 64
+#define MPC_DMA_CHANNELS 64
+
/* Arbitration mode of group and channel */
#define MPC_DMA_DMACR_EDCG (1 << 31)
#define MPC_DMA_DMACR_ERGA (1 << 3)
@@ -181,6 +200,7 @@ struct mpc_dma_desc {
dma_addr_t tcd_paddr;
int error;
struct list_head node;
+ int will_access_peripheral;
};
struct mpc_dma_chan {
@@ -193,6 +213,12 @@ struct mpc_dma_chan {
struct mpc_dma_tcd *tcd;
dma_addr_t tcd_paddr;
+ /* Settings for access to peripheral FIFO */
+ dma_addr_t src_per_paddr;
+ u32 src_tcd_nunits;
+ dma_addr_t dst_per_paddr;
+ u32 dst_tcd_nunits;
+
/* Lock for this structure */
spinlock_t lock;
};
@@ -243,8 +269,23 @@ static void mpc_dma_execute(struct mpc_dma_chan *mchan)
struct mpc_dma_desc *mdesc;
int cid = mchan->chan.chan_id;
- /* Move all queued descriptors to active list */
- list_splice_tail_init(&mchan->queued, &mchan->active);
+ while (!list_empty(&mchan->queued)) {
+ mdesc = list_first_entry(&mchan->queued,
+ struct mpc_dma_desc, node);
+ /*
+ * Grab either several mem-to-mem transfer descriptors
+ * or one peripheral transfer descriptor,
+ * don't mix mem-to-mem and peripheral transfer descriptors
+ * within the same 'active' list.
+ */
+ if (mdesc->will_access_peripheral) {
+ if (list_empty(&mchan->active))
+ list_move_tail(&mdesc->node, &mchan->active);
+ break;
+ } else {
+ list_move_tail(&mdesc->node, &mchan->active);
+ }
+ }
/* Chain descriptors into one transaction */
list_for_each_entry(mdesc, &mchan->active, node) {
@@ -270,7 +311,17 @@ static void mpc_dma_execute(struct mpc_dma_chan *mchan)
if (first != prev)
mdma->tcd[cid].e_sg = 1;
- out_8(&mdma->regs->dmassrt, cid);
+
+ if (mdma->is_mpc8308) {
+ /* MPC8308, no request lines, software initiated start */
+ out_8(&mdma->regs->dmassrt, cid);
+ } else if (first->will_access_peripheral) {
+ /* Peripherals involved, start by external request signal */
+ out_8(&mdma->regs->dmaserq, cid);
+ } else {
+ /* Memory to memory transfer, software initiated start */
+ out_8(&mdma->regs->dmassrt, cid);
+ }
}
/* Handle interrupt on one half of DMA controller (32 channels) */
@@ -588,6 +639,7 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
}
mdesc->error = 0;
+ mdesc->will_access_peripheral = 0;
tcd = mdesc->tcd;
/* Prepare Transfer Control Descriptor for this transaction */
@@ -635,6 +687,193 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
return &mdesc->desc;
}
+static struct dma_async_tx_descriptor *
+mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
+ struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
+ struct mpc_dma_desc *mdesc = NULL;
+ dma_addr_t per_paddr;
+ u32 tcd_nunits;
+ struct mpc_dma_tcd *tcd;
+ unsigned long iflags;
+ struct scatterlist *sg;
+ size_t len;
+ int iter, i;
+
+ /* Currently there is no proper support for scatter/gather */
+ if (sg_len != 1)
+ return NULL;
+
+ if (!is_slave_direction(direction))
+ return NULL;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ spin_lock_irqsave(&mchan->lock, iflags);
+
+		mdesc = list_first_entry_or_null(&mchan->free,
+						struct mpc_dma_desc, node);
+ if (!mdesc) {
+ spin_unlock_irqrestore(&mchan->lock, iflags);
+ /* Try to free completed descriptors */
+ mpc_dma_process_completed(mdma);
+ return NULL;
+ }
+
+ list_del(&mdesc->node);
+
+ if (direction == DMA_DEV_TO_MEM) {
+ per_paddr = mchan->src_per_paddr;
+ tcd_nunits = mchan->src_tcd_nunits;
+ } else {
+ per_paddr = mchan->dst_per_paddr;
+ tcd_nunits = mchan->dst_tcd_nunits;
+ }
+
+ spin_unlock_irqrestore(&mchan->lock, iflags);
+
+ if (per_paddr == 0 || tcd_nunits == 0)
+ goto err_prep;
+
+ mdesc->error = 0;
+ mdesc->will_access_peripheral = 1;
+
+ /* Prepare Transfer Control Descriptor for this transaction */
+ tcd = mdesc->tcd;
+
+ memset(tcd, 0, sizeof(struct mpc_dma_tcd));
+
+ if (!IS_ALIGNED(sg_dma_address(sg), 4))
+ goto err_prep;
+
+ if (direction == DMA_DEV_TO_MEM) {
+ tcd->saddr = per_paddr;
+ tcd->daddr = sg_dma_address(sg);
+ tcd->soff = 0;
+ tcd->doff = 4;
+ } else {
+ tcd->saddr = sg_dma_address(sg);
+ tcd->daddr = per_paddr;
+ tcd->soff = 4;
+ tcd->doff = 0;
+ }
+
+ tcd->ssize = MPC_DMA_TSIZE_4;
+ tcd->dsize = MPC_DMA_TSIZE_4;
+
+ len = sg_dma_len(sg);
+ tcd->nbytes = tcd_nunits * 4;
+ if (!IS_ALIGNED(len, tcd->nbytes))
+ goto err_prep;
+
+ iter = len / tcd->nbytes;
+ if (iter >= 1 << 15) {
+ /* len is too big */
+ goto err_prep;
+ }
+		/* the 9-bit biter/citer fields can't hold iter alone;
+		 * the linkch fields carry its high bits */
+ tcd->biter = iter & 0x1ff;
+ tcd->biter_linkch = iter >> 9;
+ tcd->citer = tcd->biter;
+ tcd->citer_linkch = tcd->biter_linkch;
+
+ tcd->e_sg = 0;
+ tcd->d_req = 1;
+
+ /* Place descriptor in prepared list */
+ spin_lock_irqsave(&mchan->lock, iflags);
+ list_add_tail(&mdesc->node, &mchan->prepared);
+ spin_unlock_irqrestore(&mchan->lock, iflags);
+ }
+
+ return &mdesc->desc;
+
+err_prep:
+ /* Put the descriptor back */
+ spin_lock_irqsave(&mchan->lock, iflags);
+ list_add_tail(&mdesc->node, &mchan->free);
+ spin_unlock_irqrestore(&mchan->lock, iflags);
+
+ return NULL;
+}
+
+static int mpc_dma_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct mpc_dma_chan *mchan;
+ struct mpc_dma *mdma;
+ struct dma_slave_config *cfg;
+ unsigned long flags;
+
+ mchan = dma_chan_to_mpc_dma_chan(chan);
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ /* Disable channel requests */
+ mdma = dma_chan_to_mpc_dma(chan);
+
+ spin_lock_irqsave(&mchan->lock, flags);
+
+ out_8(&mdma->regs->dmacerq, chan->chan_id);
+ list_splice_tail_init(&mchan->prepared, &mchan->free);
+ list_splice_tail_init(&mchan->queued, &mchan->free);
+ list_splice_tail_init(&mchan->active, &mchan->free);
+
+ spin_unlock_irqrestore(&mchan->lock, flags);
+
+ return 0;
+
+ case DMA_SLAVE_CONFIG:
+ /*
+ * Software constraints:
+ * - only transfers between a peripheral device and
+ * memory are supported;
+ * - only peripheral devices with 4-byte FIFO access register
+ * are supported;
+ * - minimal transfer chunk is 4 bytes and consequently
+ * source and destination addresses must be 4-byte aligned
+ * and transfer size must be aligned on (4 * maxburst)
+ * boundary;
+ * - during the transfer RAM address is being incremented by
+ * the size of minimal transfer chunk;
+ * - peripheral port's address is constant during the transfer.
+ */
+
+ cfg = (void *)arg;
+
+ if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
+ cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
+ !IS_ALIGNED(cfg->src_addr, 4) ||
+ !IS_ALIGNED(cfg->dst_addr, 4)) {
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&mchan->lock, flags);
+
+ mchan->src_per_paddr = cfg->src_addr;
+ mchan->src_tcd_nunits = cfg->src_maxburst;
+ mchan->dst_per_paddr = cfg->dst_addr;
+ mchan->dst_tcd_nunits = cfg->dst_maxburst;
+
+ /* Apply defaults */
+ if (mchan->src_tcd_nunits == 0)
+ mchan->src_tcd_nunits = 1;
+ if (mchan->dst_tcd_nunits == 0)
+ mchan->dst_tcd_nunits = 1;
+
+ spin_unlock_irqrestore(&mchan->lock, flags);
+
+ return 0;
+
+ default:
+ /* Unknown command */
+ break;
+ }
+
+ return -ENXIO;
+}
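[Editor's note: a hedged sketch of a dma_slave_config that satisfies the constraints listed in the DMA_SLAVE_CONFIG comment above (4-byte FIFO register, 4-byte aligned addresses, burst given in 4-byte units); fifo_phys and chan are assumed client-side names.]

	struct dma_slave_config cfg = {
		.src_addr       = fifo_phys,			/* 4-byte aligned */
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,	/* only width accepted */
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst   = 16,	/* transfer size must be a multiple of 4 * 16 */
	};
	int ret = dmaengine_slave_config(chan, &cfg);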
+
static int mpc_dma_probe(struct platform_device *op)
{
struct device_node *dn = op->dev.of_node;
@@ -649,13 +888,15 @@ static int mpc_dma_probe(struct platform_device *op)
mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
if (!mdma) {
dev_err(dev, "Memory exhausted!\n");
- return -ENOMEM;
+ retval = -ENOMEM;
+ goto err;
}
mdma->irq = irq_of_parse_and_map(dn, 0);
if (mdma->irq == NO_IRQ) {
dev_err(dev, "Error mapping IRQ!\n");
- return -EINVAL;
+ retval = -EINVAL;
+ goto err;
}
if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
@@ -663,14 +904,15 @@ static int mpc_dma_probe(struct platform_device *op)
mdma->irq2 = irq_of_parse_and_map(dn, 1);
if (mdma->irq2 == NO_IRQ) {
dev_err(dev, "Error mapping IRQ!\n");
- return -EINVAL;
+ retval = -EINVAL;
+ goto err_dispose1;
}
}
retval = of_address_to_resource(dn, 0, &res);
if (retval) {
dev_err(dev, "Error parsing memory region!\n");
- return retval;
+ goto err_dispose2;
}
regs_start = res.start;
@@ -678,31 +920,34 @@ static int mpc_dma_probe(struct platform_device *op)
if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
dev_err(dev, "Error requesting memory region!\n");
- return -EBUSY;
+ retval = -EBUSY;
+ goto err_dispose2;
}
mdma->regs = devm_ioremap(dev, regs_start, regs_size);
if (!mdma->regs) {
dev_err(dev, "Error mapping memory region!\n");
- return -ENOMEM;
+ retval = -ENOMEM;
+ goto err_dispose2;
}
mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
+ MPC_DMA_TCD_OFFSET);
- retval = devm_request_irq(dev, mdma->irq, &mpc_dma_irq, 0, DRV_NAME,
- mdma);
+ retval = request_irq(mdma->irq, &mpc_dma_irq, 0, DRV_NAME, mdma);
if (retval) {
dev_err(dev, "Error requesting IRQ!\n");
- return -EINVAL;
+ retval = -EINVAL;
+ goto err_dispose2;
}
if (mdma->is_mpc8308) {
- retval = devm_request_irq(dev, mdma->irq2, &mpc_dma_irq, 0,
- DRV_NAME, mdma);
+ retval = request_irq(mdma->irq2, &mpc_dma_irq, 0,
+ DRV_NAME, mdma);
if (retval) {
dev_err(dev, "Error requesting IRQ2!\n");
- return -EINVAL;
+ retval = -EINVAL;
+ goto err_free1;
}
}
@@ -710,18 +955,21 @@ static int mpc_dma_probe(struct platform_device *op)
dma = &mdma->dma;
dma->dev = dev;
- if (!mdma->is_mpc8308)
- dma->chancnt = MPC_DMA_CHANNELS;
+ if (mdma->is_mpc8308)
+ dma->chancnt = MPC8308_DMACHAN_MAX;
else
- dma->chancnt = 16; /* MPC8308 DMA has only 16 channels */
+ dma->chancnt = MPC512x_DMACHAN_MAX;
dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
dma->device_free_chan_resources = mpc_dma_free_chan_resources;
dma->device_issue_pending = mpc_dma_issue_pending;
dma->device_tx_status = mpc_dma_tx_status;
dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
+ dma->device_prep_slave_sg = mpc_dma_prep_slave_sg;
+ dma->device_control = mpc_dma_device_control;
INIT_LIST_HEAD(&dma->channels);
dma_cap_set(DMA_MEMCPY, dma->cap_mask);
+ dma_cap_set(DMA_SLAVE, dma->cap_mask);
for (i = 0; i < dma->chancnt; i++) {
mchan = &mdma->channels[i];
@@ -747,7 +995,19 @@ static int mpc_dma_probe(struct platform_device *op)
* - Round-robin group arbitration,
* - Round-robin channel arbitration.
*/
- if (!mdma->is_mpc8308) {
+ if (mdma->is_mpc8308) {
+ /* MPC8308 has 16 channels and lacks some registers */
+ out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);
+
+ /* enable snooping */
+ out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
+ /* Disable error interrupts */
+ out_be32(&mdma->regs->dmaeeil, 0);
+
+ /* Clear interrupts status */
+ out_be32(&mdma->regs->dmaintl, 0xFFFF);
+ out_be32(&mdma->regs->dmaerrl, 0xFFFF);
+ } else {
out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);
@@ -768,29 +1028,28 @@ static int mpc_dma_probe(struct platform_device *op)
/* Route interrupts to IPIC */
out_be32(&mdma->regs->dmaihsa, 0);
out_be32(&mdma->regs->dmailsa, 0);
- } else {
- /* MPC8308 has 16 channels and lacks some registers */
- out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);
-
- /* enable snooping */
- out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
- /* Disable error interrupts */
- out_be32(&mdma->regs->dmaeeil, 0);
-
- /* Clear interrupts status */
- out_be32(&mdma->regs->dmaintl, 0xFFFF);
- out_be32(&mdma->regs->dmaerrl, 0xFFFF);
}
/* Register DMA engine */
dev_set_drvdata(dev, mdma);
retval = dma_async_device_register(dma);
- if (retval) {
- devm_free_irq(dev, mdma->irq, mdma);
- irq_dispose_mapping(mdma->irq);
- }
+ if (retval)
+ goto err_free2;
return retval;
+
+err_free2:
+ if (mdma->is_mpc8308)
+ free_irq(mdma->irq2, mdma);
+err_free1:
+ free_irq(mdma->irq, mdma);
+err_dispose2:
+ if (mdma->is_mpc8308)
+ irq_dispose_mapping(mdma->irq2);
+err_dispose1:
+ irq_dispose_mapping(mdma->irq);
+err:
+ return retval;
}
static int mpc_dma_remove(struct platform_device *op)
@@ -799,7 +1058,11 @@ static int mpc_dma_remove(struct platform_device *op)
struct mpc_dma *mdma = dev_get_drvdata(dev);
dma_async_device_unregister(&mdma->dma);
- devm_free_irq(dev, mdma->irq, mdma);
+ if (mdma->is_mpc8308) {
+ free_irq(mdma->irq2, mdma);
+ irq_dispose_mapping(mdma->irq2);
+ }
+ free_irq(mdma->irq, mdma);
irq_dispose_mapping(mdma->irq);
return 0;
@@ -807,6 +1070,7 @@ static int mpc_dma_remove(struct platform_device *op)
static struct of_device_id mpc_dma_match[] = {
{ .compatible = "fsl,mpc5121-dma", },
+ { .compatible = "fsl,mpc8308-dma", },
{},
};
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 53fb0c8365b..394cbc5c93e 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -191,12 +191,10 @@ static void mv_set_mode(struct mv_xor_chan *chan,
static void mv_chan_activate(struct mv_xor_chan *chan)
{
- u32 activation;
-
dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
- activation = readl_relaxed(XOR_ACTIVATION(chan));
- activation |= 0x1;
- writel_relaxed(activation, XOR_ACTIVATION(chan));
+
+ /* writel ensures all descriptors are flushed before activation */
+ writel(BIT(0), XOR_ACTIVATION(chan));
}
static char mv_chan_is_busy(struct mv_xor_chan *chan)
@@ -497,8 +495,8 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
if (!mv_can_chain(grp_start))
goto submit_done;
- dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
- old_chain_tail->async_tx.phys);
+ dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
+ &old_chain_tail->async_tx.phys);
/* fix up the hardware chain */
mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
@@ -527,7 +525,8 @@ submit_done:
/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
- char *hw_desc;
+ void *virt_desc;
+ dma_addr_t dma_desc;
int idx;
struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
struct mv_xor_desc_slot *slot = NULL;
@@ -542,17 +541,16 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
" %d descriptor slots", idx);
break;
}
- hw_desc = (char *) mv_chan->dma_desc_pool_virt;
- slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];
+ virt_desc = mv_chan->dma_desc_pool_virt;
+ slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;
dma_async_tx_descriptor_init(&slot->async_tx, chan);
slot->async_tx.tx_submit = mv_xor_tx_submit;
INIT_LIST_HEAD(&slot->chain_node);
INIT_LIST_HEAD(&slot->slot_node);
INIT_LIST_HEAD(&slot->tx_list);
- hw_desc = (char *) mv_chan->dma_desc_pool;
- slot->async_tx.phys =
- (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
+ dma_desc = mv_chan->dma_desc_pool;
+ slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
slot->idx = idx++;
spin_lock_bh(&mv_chan->lock);
@@ -582,8 +580,8 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
int slot_cnt;
dev_dbg(mv_chan_to_devp(mv_chan),
- "%s dest: %x src %x len: %u flags: %ld\n",
- __func__, dest, src, len, flags);
+ "%s dest: %pad src %pad len: %u flags: %ld\n",
+ __func__, &dest, &src, len, flags);
if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
return NULL;
@@ -626,8 +624,8 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
dev_dbg(mv_chan_to_devp(mv_chan),
- "%s src_cnt: %d len: dest %x %u flags: %ld\n",
- __func__, src_cnt, len, dest, flags);
+ "%s src_cnt: %d len: %u dest %pad flags: %ld\n",
+ __func__, src_cnt, len, &dest, flags);
spin_lock_bh(&mv_chan->lock);
slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 362e7c49f2e..b19f04f4390 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -5,6 +5,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
@@ -26,11 +27,21 @@ struct omap_dmadev {
spinlock_t lock;
struct tasklet_struct task;
struct list_head pending;
+ void __iomem *base;
+ const struct omap_dma_reg *reg_map;
+ struct omap_system_dma_plat_info *plat;
+ bool legacy;
+ spinlock_t irq_lock;
+ uint32_t irq_enable_mask;
+ struct omap_chan *lch_map[32];
};
struct omap_chan {
struct virt_dma_chan vc;
struct list_head node;
+ void __iomem *channel_base;
+ const struct omap_dma_reg *reg_map;
+ uint32_t ccr;
struct dma_slave_config cfg;
unsigned dma_sig;
@@ -54,19 +65,93 @@ struct omap_desc {
dma_addr_t dev_addr;
int16_t fi; /* for OMAP_DMA_SYNC_PACKET */
- uint8_t es; /* OMAP_DMA_DATA_TYPE_xxx */
- uint8_t sync_mode; /* OMAP_DMA_SYNC_xxx */
- uint8_t sync_type; /* OMAP_DMA_xxx_SYNC* */
- uint8_t periph_port; /* Peripheral port */
+ uint8_t es; /* CSDP_DATA_TYPE_xxx */
+ uint32_t ccr; /* CCR value */
+ uint16_t clnk_ctrl; /* CLNK_CTRL value */
+ uint16_t cicr; /* CICR value */
+ uint32_t csdp; /* CSDP value */
unsigned sglen;
struct omap_sg sg[0];
};
+enum {
+ CCR_FS = BIT(5),
+ CCR_READ_PRIORITY = BIT(6),
+ CCR_ENABLE = BIT(7),
+ CCR_AUTO_INIT = BIT(8), /* OMAP1 only */
+ CCR_REPEAT = BIT(9), /* OMAP1 only */
+ CCR_OMAP31_DISABLE = BIT(10), /* OMAP1 only */
+ CCR_SUSPEND_SENSITIVE = BIT(8), /* OMAP2+ only */
+ CCR_RD_ACTIVE = BIT(9), /* OMAP2+ only */
+ CCR_WR_ACTIVE = BIT(10), /* OMAP2+ only */
+ CCR_SRC_AMODE_CONSTANT = 0 << 12,
+ CCR_SRC_AMODE_POSTINC = 1 << 12,
+ CCR_SRC_AMODE_SGLIDX = 2 << 12,
+ CCR_SRC_AMODE_DBLIDX = 3 << 12,
+ CCR_DST_AMODE_CONSTANT = 0 << 14,
+ CCR_DST_AMODE_POSTINC = 1 << 14,
+ CCR_DST_AMODE_SGLIDX = 2 << 14,
+ CCR_DST_AMODE_DBLIDX = 3 << 14,
+ CCR_CONSTANT_FILL = BIT(16),
+ CCR_TRANSPARENT_COPY = BIT(17),
+ CCR_BS = BIT(18),
+ CCR_SUPERVISOR = BIT(22),
+ CCR_PREFETCH = BIT(23),
+ CCR_TRIGGER_SRC = BIT(24),
+ CCR_BUFFERING_DISABLE = BIT(25),
+ CCR_WRITE_PRIORITY = BIT(26),
+ CCR_SYNC_ELEMENT = 0,
+ CCR_SYNC_FRAME = CCR_FS,
+ CCR_SYNC_BLOCK = CCR_BS,
+ CCR_SYNC_PACKET = CCR_BS | CCR_FS,
+
+ CSDP_DATA_TYPE_8 = 0,
+ CSDP_DATA_TYPE_16 = 1,
+ CSDP_DATA_TYPE_32 = 2,
+ CSDP_SRC_PORT_EMIFF = 0 << 2, /* OMAP1 only */
+ CSDP_SRC_PORT_EMIFS = 1 << 2, /* OMAP1 only */
+ CSDP_SRC_PORT_OCP_T1 = 2 << 2, /* OMAP1 only */
+ CSDP_SRC_PORT_TIPB = 3 << 2, /* OMAP1 only */
+ CSDP_SRC_PORT_OCP_T2 = 4 << 2, /* OMAP1 only */
+ CSDP_SRC_PORT_MPUI = 5 << 2, /* OMAP1 only */
+ CSDP_SRC_PACKED = BIT(6),
+ CSDP_SRC_BURST_1 = 0 << 7,
+ CSDP_SRC_BURST_16 = 1 << 7,
+ CSDP_SRC_BURST_32 = 2 << 7,
+ CSDP_SRC_BURST_64 = 3 << 7,
+ CSDP_DST_PORT_EMIFF = 0 << 9, /* OMAP1 only */
+ CSDP_DST_PORT_EMIFS = 1 << 9, /* OMAP1 only */
+ CSDP_DST_PORT_OCP_T1 = 2 << 9, /* OMAP1 only */
+ CSDP_DST_PORT_TIPB = 3 << 9, /* OMAP1 only */
+ CSDP_DST_PORT_OCP_T2 = 4 << 9, /* OMAP1 only */
+ CSDP_DST_PORT_MPUI = 5 << 9, /* OMAP1 only */
+ CSDP_DST_PACKED = BIT(13),
+ CSDP_DST_BURST_1 = 0 << 14,
+ CSDP_DST_BURST_16 = 1 << 14,
+ CSDP_DST_BURST_32 = 2 << 14,
+ CSDP_DST_BURST_64 = 3 << 14,
+
+ CICR_TOUT_IE = BIT(0), /* OMAP1 only */
+ CICR_DROP_IE = BIT(1),
+ CICR_HALF_IE = BIT(2),
+ CICR_FRAME_IE = BIT(3),
+ CICR_LAST_IE = BIT(4),
+ CICR_BLOCK_IE = BIT(5),
+ CICR_PKT_IE = BIT(7), /* OMAP2+ only */
+ CICR_TRANS_ERR_IE = BIT(8), /* OMAP2+ only */
+ CICR_SUPERVISOR_ERR_IE = BIT(10), /* OMAP2+ only */
+ CICR_MISALIGNED_ERR_IE = BIT(11), /* OMAP2+ only */
+ CICR_DRAIN_IE = BIT(12), /* OMAP2+ only */
+ CICR_SUPER_BLOCK_IE = BIT(14), /* OMAP2+ only */
+
+ CLNK_CTRL_ENABLE_LNK = BIT(15),
+};
+
static const unsigned es_bytes[] = {
- [OMAP_DMA_DATA_TYPE_S8] = 1,
- [OMAP_DMA_DATA_TYPE_S16] = 2,
- [OMAP_DMA_DATA_TYPE_S32] = 4,
+ [CSDP_DATA_TYPE_8] = 1,
+ [CSDP_DATA_TYPE_16] = 2,
+ [CSDP_DATA_TYPE_32] = 4,
};
static struct of_dma_filter_info omap_dma_info = {
@@ -93,28 +178,214 @@ static void omap_dma_desc_free(struct virt_dma_desc *vd)
kfree(container_of(vd, struct omap_desc, vd));
}
+static void omap_dma_write(uint32_t val, unsigned type, void __iomem *addr)
+{
+ switch (type) {
+ case OMAP_DMA_REG_16BIT:
+ writew_relaxed(val, addr);
+ break;
+ case OMAP_DMA_REG_2X16BIT:
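+		/* one 32-bit value split across two adjacent 16-bit registers */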
+ writew_relaxed(val, addr);
+ writew_relaxed(val >> 16, addr + 2);
+ break;
+ case OMAP_DMA_REG_32BIT:
+ writel_relaxed(val, addr);
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+static unsigned omap_dma_read(unsigned type, void __iomem *addr)
+{
+ unsigned val;
+
+ switch (type) {
+ case OMAP_DMA_REG_16BIT:
+ val = readw_relaxed(addr);
+ break;
+ case OMAP_DMA_REG_2X16BIT:
+ val = readw_relaxed(addr);
+ val |= readw_relaxed(addr + 2) << 16;
+ break;
+ case OMAP_DMA_REG_32BIT:
+ val = readl_relaxed(addr);
+ break;
+ default:
+ WARN_ON(1);
+ val = 0;
+ }
+
+ return val;
+}
+
+static void omap_dma_glbl_write(struct omap_dmadev *od, unsigned reg, unsigned val)
+{
+ const struct omap_dma_reg *r = od->reg_map + reg;
+
+ WARN_ON(r->stride);
+
+ omap_dma_write(val, r->type, od->base + r->offset);
+}
+
+static unsigned omap_dma_glbl_read(struct omap_dmadev *od, unsigned reg)
+{
+ const struct omap_dma_reg *r = od->reg_map + reg;
+
+ WARN_ON(r->stride);
+
+ return omap_dma_read(r->type, od->base + r->offset);
+}
+
+static void omap_dma_chan_write(struct omap_chan *c, unsigned reg, unsigned val)
+{
+ const struct omap_dma_reg *r = c->reg_map + reg;
+
+ omap_dma_write(val, r->type, c->channel_base + r->offset);
+}
+
+static unsigned omap_dma_chan_read(struct omap_chan *c, unsigned reg)
+{
+ const struct omap_dma_reg *r = c->reg_map + reg;
+
+ return omap_dma_read(r->type, c->channel_base + r->offset);
+}
+
+static void omap_dma_clear_csr(struct omap_chan *c)
+{
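+	/* OMAP1 clears the CSR on read; OMAP2+ uses write-1-to-clear */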
+ if (dma_omap1())
+ omap_dma_chan_read(c, CSR);
+ else
+ omap_dma_chan_write(c, CSR, ~0);
+}
+
+static unsigned omap_dma_get_csr(struct omap_chan *c)
+{
+ unsigned val = omap_dma_chan_read(c, CSR);
+
+ if (!dma_omap1())
+ omap_dma_chan_write(c, CSR, val);
+
+ return val;
+}
+
+static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c,
+ unsigned lch)
+{
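+	/* each logical channel is a fixed-stride window in the register space */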
+ c->channel_base = od->base + od->plat->channel_stride * lch;
+
+ od->lch_map[lch] = c;
+}
+
+static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
+{
+ struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
+
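+	/* clear the channel progress counter: CPC on OMAP15xx, CDAC elsewhere */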
+ if (__dma_omap15xx(od->plat->dma_attr))
+ omap_dma_chan_write(c, CPC, 0);
+ else
+ omap_dma_chan_write(c, CDAC, 0);
+
+ omap_dma_clear_csr(c);
+
+ /* Enable interrupts */
+ omap_dma_chan_write(c, CICR, d->cicr);
+
+ /* Enable channel */
+ omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
+}
+
+static void omap_dma_stop(struct omap_chan *c)
+{
+ struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
+ uint32_t val;
+
+ /* disable irq */
+ omap_dma_chan_write(c, CICR, 0);
+
+ omap_dma_clear_csr(c);
+
+ val = omap_dma_chan_read(c, CCR);
+ if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
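+		/*
+		 * Erratum i541 workaround: force the DMA module out of
+		 * standby (mid-idle disabled) while the channel FIFO
+		 * drains, then restore OCP_SYSCONFIG below.
+		 */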
+ uint32_t sysconfig;
+ unsigned i;
+
+ sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
+ val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
+ val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
+ omap_dma_glbl_write(od, OCP_SYSCONFIG, val);
+
+ val = omap_dma_chan_read(c, CCR);
+ val &= ~CCR_ENABLE;
+ omap_dma_chan_write(c, CCR, val);
+
+ /* Wait for sDMA FIFO to drain */
+ for (i = 0; ; i++) {
+ val = omap_dma_chan_read(c, CCR);
+ if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
+ break;
+
+ if (i > 100)
+ break;
+
+ udelay(5);
+ }
+
+ if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
+ dev_err(c->vc.chan.device->dev,
+ "DMA drain did not complete on lch %d\n",
+ c->dma_ch);
+
+ omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
+ } else {
+ val &= ~CCR_ENABLE;
+ omap_dma_chan_write(c, CCR, val);
+ }
+
+ mb();
+
+ if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
+ val = omap_dma_chan_read(c, CLNK_CTRL);
+
+ if (dma_omap1())
+ val |= 1 << 14; /* set the STOP_LNK bit */
+ else
+ val &= ~CLNK_CTRL_ENABLE_LNK;
+
+ omap_dma_chan_write(c, CLNK_CTRL, val);
+ }
+}
+
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
unsigned idx)
{
struct omap_sg *sg = d->sg + idx;
+ unsigned cxsa, cxei, cxfi;
- if (d->dir == DMA_DEV_TO_MEM)
- omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
- OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
- else
- omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
- OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
+ if (d->dir == DMA_DEV_TO_MEM) {
+ cxsa = CDSA;
+ cxei = CDEI;
+ cxfi = CDFI;
+ } else {
+ cxsa = CSSA;
+ cxei = CSEI;
+ cxfi = CSFI;
+ }
- omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn,
- d->sync_mode, c->dma_sig, d->sync_type);
+ omap_dma_chan_write(c, cxsa, sg->addr);
+ omap_dma_chan_write(c, cxei, 0);
+ omap_dma_chan_write(c, cxfi, 0);
+ omap_dma_chan_write(c, CEN, sg->en);
+ omap_dma_chan_write(c, CFN, sg->fn);
- omap_start_dma(c->dma_ch);
+ omap_dma_start(c, d);
}
static void omap_dma_start_desc(struct omap_chan *c)
{
struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
struct omap_desc *d;
+ unsigned cxsa, cxei, cxfi;
if (!vd) {
c->desc = NULL;
@@ -126,12 +397,32 @@ static void omap_dma_start_desc(struct omap_chan *c)
c->desc = d = to_omap_dma_desc(&vd->tx);
c->sgidx = 0;
- if (d->dir == DMA_DEV_TO_MEM)
- omap_set_dma_src_params(c->dma_ch, d->periph_port,
- OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
- else
- omap_set_dma_dest_params(c->dma_ch, d->periph_port,
- OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
+ /*
+ * This provides the necessary barrier to ensure data held in
+ * DMA coherent memory is visible to the DMA engine prior to
+ * the transfer starting.
+ */
+ mb();
+
+ omap_dma_chan_write(c, CCR, d->ccr);
+ if (dma_omap1())
+ omap_dma_chan_write(c, CCR2, d->ccr >> 16);
+
+ if (d->dir == DMA_DEV_TO_MEM) {
+ cxsa = CSSA;
+ cxei = CSEI;
+ cxfi = CSFI;
+ } else {
+ cxsa = CDSA;
+ cxei = CDEI;
+ cxfi = CDFI;
+ }
+
+ omap_dma_chan_write(c, cxsa, d->dev_addr);
+ omap_dma_chan_write(c, cxei, 0);
+ omap_dma_chan_write(c, cxfi, d->fi);
+ omap_dma_chan_write(c, CSDP, d->csdp);
+ omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl);
omap_dma_start_sg(c, d, 0);
}
@@ -186,24 +477,118 @@ static void omap_dma_sched(unsigned long data)
}
}
+static irqreturn_t omap_dma_irq(int irq, void *devid)
+{
+ struct omap_dmadev *od = devid;
+ unsigned status, channel;
+
+ spin_lock(&od->irq_lock);
+
+ status = omap_dma_glbl_read(od, IRQSTATUS_L1);
+ status &= od->irq_enable_mask;
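+	/* only handle channels whose interrupts are enabled on this line */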
+ if (status == 0) {
+ spin_unlock(&od->irq_lock);
+ return IRQ_NONE;
+ }
+
+ while ((channel = ffs(status)) != 0) {
+ unsigned mask, csr;
+ struct omap_chan *c;
+
+ channel -= 1;
+ mask = BIT(channel);
+ status &= ~mask;
+
+ c = od->lch_map[channel];
+ if (c == NULL) {
+ /* This should never happen */
+ dev_err(od->ddev.dev, "invalid channel %u\n", channel);
+ continue;
+ }
+
+ csr = omap_dma_get_csr(c);
+ omap_dma_glbl_write(od, IRQSTATUS_L1, mask);
+
+ omap_dma_callback(channel, csr, c);
+ }
+
+ spin_unlock(&od->irq_lock);
+
+ return IRQ_HANDLED;
+}
+
static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
+ struct omap_dmadev *od = to_omap_dma_dev(chan->device);
struct omap_chan *c = to_omap_dma_chan(chan);
+ int ret;
- dev_dbg(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);
+ if (od->legacy) {
+ ret = omap_request_dma(c->dma_sig, "DMA engine",
+ omap_dma_callback, c, &c->dma_ch);
+ } else {
+ ret = omap_request_dma(c->dma_sig, "DMA engine", NULL, NULL,
+ &c->dma_ch);
+ }
+
+ dev_dbg(od->ddev.dev, "allocating channel %u for %u\n",
+ c->dma_ch, c->dma_sig);
+
+ if (ret >= 0) {
+ omap_dma_assign(od, c, c->dma_ch);
+
+ if (!od->legacy) {
+ unsigned val;
+
+ spin_lock_irq(&od->irq_lock);
+ val = BIT(c->dma_ch);
+ omap_dma_glbl_write(od, IRQSTATUS_L1, val);
+ od->irq_enable_mask |= val;
+ omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
+
+ val = omap_dma_glbl_read(od, IRQENABLE_L0);
+ val &= ~BIT(c->dma_ch);
+ omap_dma_glbl_write(od, IRQENABLE_L0, val);
+ spin_unlock_irq(&od->irq_lock);
+ }
+ }
+
+ if (dma_omap1()) {
+ if (__dma_omap16xx(od->plat->dma_attr)) {
+ c->ccr = CCR_OMAP31_DISABLE;
+ /* Duplicate what plat-omap/dma.c does */
+ c->ccr |= c->dma_ch + 1;
+ } else {
+ c->ccr = c->dma_sig & 0x1f;
+ }
+ } else {
+ c->ccr = c->dma_sig & 0x1f;
+ c->ccr |= (c->dma_sig & ~0x1f) << 14;
+ }
+ if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
+ c->ccr |= CCR_BUFFERING_DISABLE;
- return omap_request_dma(c->dma_sig, "DMA engine",
- omap_dma_callback, c, &c->dma_ch);
+ return ret;
}
static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
+ struct omap_dmadev *od = to_omap_dma_dev(chan->device);
struct omap_chan *c = to_omap_dma_chan(chan);
+ if (!od->legacy) {
+ spin_lock_irq(&od->irq_lock);
+ od->irq_enable_mask &= ~BIT(c->dma_ch);
+ omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
+ spin_unlock_irq(&od->irq_lock);
+ }
+
+ c->channel_base = NULL;
+ od->lch_map[c->dma_ch] = NULL;
vchan_free_chan_resources(&c->vc);
omap_free_dma(c->dma_ch);
- dev_dbg(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
+ dev_dbg(od->ddev.dev, "freeing channel for %u\n", c->dma_sig);
}
static size_t omap_dma_sg_size(struct omap_sg *sg)
@@ -239,6 +624,74 @@ static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
return size;
}
+/*
+ * OMAP 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
+ * read before the DMA controller finished disabling the channel.
+ */
+static uint32_t omap_dma_chan_read_3_3(struct omap_chan *c, unsigned reg)
+{
+ struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
+ uint32_t val;
+
+ val = omap_dma_chan_read(c, reg);
+ if (val == 0 && od->plat->errata & DMA_ERRATA_3_3)
+ val = omap_dma_chan_read(c, reg);
+
+ return val;
+}
+
+static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c)
+{
+ struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
+ dma_addr_t addr, cdac;
+
+ if (__dma_omap15xx(od->plat->dma_attr)) {
+ addr = omap_dma_chan_read(c, CPC);
+ } else {
+ addr = omap_dma_chan_read_3_3(c, CSAC);
+ cdac = omap_dma_chan_read_3_3(c, CDAC);
+
+ /*
+ * CDAC == 0 indicates that the DMA transfer on the channel has
+ * not been started (no data has been transferred so far).
+ * Return the programmed source start address in this case.
+ */
+ if (cdac == 0)
+ addr = omap_dma_chan_read(c, CSSA);
+ }
+
+ if (dma_omap1())
+ addr |= omap_dma_chan_read(c, CSSA) & 0xffff0000;
+
+ return addr;
+}
+
+static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c)
+{
+ struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
+ dma_addr_t addr;
+
+ if (__dma_omap15xx(od->plat->dma_attr)) {
+ addr = omap_dma_chan_read(c, CPC);
+ } else {
+ addr = omap_dma_chan_read_3_3(c, CDAC);
+
+ /*
+ * CDAC == 0 indicates that the DMA transfer on the channel
+ * has not been started (no data has been transferred so
+ * far). Return the programmed destination start address in
+ * this case.
+ */
+ if (addr == 0)
+ addr = omap_dma_chan_read(c, CDSA);
+ }
+
+ if (dma_omap1())
+ addr |= omap_dma_chan_read(c, CDSA) & 0xffff0000;
+
+ return addr;
+}
+
static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
@@ -260,9 +713,9 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
dma_addr_t pos;
if (d->dir == DMA_MEM_TO_DEV)
- pos = omap_get_dma_src_pos(c->dma_ch);
+ pos = omap_dma_get_src_pos(c);
else if (d->dir == DMA_DEV_TO_MEM)
- pos = omap_get_dma_dst_pos(c->dma_ch);
+ pos = omap_dma_get_dst_pos(c);
else
pos = 0;
@@ -304,24 +757,23 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
+ struct omap_dmadev *od = to_omap_dma_dev(chan->device);
struct omap_chan *c = to_omap_dma_chan(chan);
enum dma_slave_buswidth dev_width;
struct scatterlist *sgent;
struct omap_desc *d;
dma_addr_t dev_addr;
- unsigned i, j = 0, es, en, frame_bytes, sync_type;
+ unsigned i, j = 0, es, en, frame_bytes;
u32 burst;
if (dir == DMA_DEV_TO_MEM) {
dev_addr = c->cfg.src_addr;
dev_width = c->cfg.src_addr_width;
burst = c->cfg.src_maxburst;
- sync_type = OMAP_DMA_SRC_SYNC;
} else if (dir == DMA_MEM_TO_DEV) {
dev_addr = c->cfg.dst_addr;
dev_width = c->cfg.dst_addr_width;
burst = c->cfg.dst_maxburst;
- sync_type = OMAP_DMA_DST_SYNC;
} else {
dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
return NULL;
@@ -330,13 +782,13 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
/* Bus width translates to the element size (ES) */
switch (dev_width) {
case DMA_SLAVE_BUSWIDTH_1_BYTE:
- es = OMAP_DMA_DATA_TYPE_S8;
+ es = CSDP_DATA_TYPE_8;
break;
case DMA_SLAVE_BUSWIDTH_2_BYTES:
- es = OMAP_DMA_DATA_TYPE_S16;
+ es = CSDP_DATA_TYPE_16;
break;
case DMA_SLAVE_BUSWIDTH_4_BYTES:
- es = OMAP_DMA_DATA_TYPE_S32;
+ es = CSDP_DATA_TYPE_32;
break;
default: /* not reached */
return NULL;
@@ -350,9 +802,31 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
d->dir = dir;
d->dev_addr = dev_addr;
d->es = es;
- d->sync_mode = OMAP_DMA_SYNC_FRAME;
- d->sync_type = sync_type;
- d->periph_port = OMAP_DMA_PORT_TIPB;
+
+ d->ccr = c->ccr | CCR_SYNC_FRAME;
+ if (dir == DMA_DEV_TO_MEM)
+ d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
+ else
+ d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;
+
+ d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
+ d->csdp = es;
+
+ if (dma_omap1()) {
+ d->cicr |= CICR_TOUT_IE;
+
+ if (dir == DMA_DEV_TO_MEM)
+ d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_TIPB;
+ else
+ d->csdp |= CSDP_DST_PORT_TIPB | CSDP_SRC_PORT_EMIFF;
+ } else {
+ if (dir == DMA_DEV_TO_MEM)
+ d->ccr |= CCR_TRIGGER_SRC;
+
+ d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
+ }
+ if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
+ d->clnk_ctrl = c->dma_ch;
/*
* Build our scatterlist entries: each contains the address,
@@ -382,23 +856,22 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
size_t period_len, enum dma_transfer_direction dir, unsigned long flags,
void *context)
{
+ struct omap_dmadev *od = to_omap_dma_dev(chan->device);
struct omap_chan *c = to_omap_dma_chan(chan);
enum dma_slave_buswidth dev_width;
struct omap_desc *d;
dma_addr_t dev_addr;
- unsigned es, sync_type;
+ unsigned es;
u32 burst;
if (dir == DMA_DEV_TO_MEM) {
dev_addr = c->cfg.src_addr;
dev_width = c->cfg.src_addr_width;
burst = c->cfg.src_maxburst;
- sync_type = OMAP_DMA_SRC_SYNC;
} else if (dir == DMA_MEM_TO_DEV) {
dev_addr = c->cfg.dst_addr;
dev_width = c->cfg.dst_addr_width;
burst = c->cfg.dst_maxburst;
- sync_type = OMAP_DMA_DST_SYNC;
} else {
dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
return NULL;
@@ -407,13 +880,13 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
/* Bus width translates to the element size (ES) */
switch (dev_width) {
case DMA_SLAVE_BUSWIDTH_1_BYTE:
- es = OMAP_DMA_DATA_TYPE_S8;
+ es = CSDP_DATA_TYPE_8;
break;
case DMA_SLAVE_BUSWIDTH_2_BYTES:
- es = OMAP_DMA_DATA_TYPE_S16;
+ es = CSDP_DATA_TYPE_16;
break;
case DMA_SLAVE_BUSWIDTH_4_BYTES:
- es = OMAP_DMA_DATA_TYPE_S32;
+ es = CSDP_DATA_TYPE_32;
break;
default: /* not reached */
return NULL;
@@ -428,32 +901,51 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
d->dev_addr = dev_addr;
d->fi = burst;
d->es = es;
- if (burst)
- d->sync_mode = OMAP_DMA_SYNC_PACKET;
- else
- d->sync_mode = OMAP_DMA_SYNC_ELEMENT;
- d->sync_type = sync_type;
- d->periph_port = OMAP_DMA_PORT_MPUI;
d->sg[0].addr = buf_addr;
d->sg[0].en = period_len / es_bytes[es];
d->sg[0].fn = buf_len / period_len;
d->sglen = 1;
- if (!c->cyclic) {
- c->cyclic = true;
- omap_dma_link_lch(c->dma_ch, c->dma_ch);
+ d->ccr = c->ccr;
+ if (dir == DMA_DEV_TO_MEM)
+ d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
+ else
+ d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;
- if (flags & DMA_PREP_INTERRUPT)
- omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ);
+ d->cicr = CICR_DROP_IE;
+ if (flags & DMA_PREP_INTERRUPT)
+ d->cicr |= CICR_FRAME_IE;
- omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ);
- }
+ d->csdp = es;
+
+ if (dma_omap1()) {
+ d->cicr |= CICR_TOUT_IE;
+
+ if (dir == DMA_DEV_TO_MEM)
+ d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_MPUI;
+ else
+ d->csdp |= CSDP_DST_PORT_MPUI | CSDP_SRC_PORT_EMIFF;
+ } else {
+ if (burst)
+ d->ccr |= CCR_SYNC_PACKET;
+ else
+ d->ccr |= CCR_SYNC_ELEMENT;
+
+ if (dir == DMA_DEV_TO_MEM)
+ d->ccr |= CCR_TRIGGER_SRC;
- if (dma_omap2plus()) {
- omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
- omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
+ d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
+
+ d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
}
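+	/* OMAP15xx lacks channel linking; use auto-init + repeat for cyclic */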
+ if (__dma_omap15xx(od->plat->dma_attr))
+ d->ccr |= CCR_AUTO_INIT | CCR_REPEAT;
+ else
+ d->clnk_ctrl = c->dma_ch | CLNK_CTRL_ENABLE_LNK;
+
+ c->cyclic = true;
+
return vchan_tx_prep(&c->vc, &d->vd, flags);
}
@@ -483,20 +975,19 @@ static int omap_dma_terminate_all(struct omap_chan *c)
/*
* Stop DMA activity: we assume the callback will not be called
- * after omap_stop_dma() returns (even if it does, it will see
+ * after omap_dma_stop() returns (even if it does, it will see
* c->desc is NULL and exit.)
*/
if (c->desc) {
c->desc = NULL;
/* Avoid stopping the dma twice */
if (!c->paused)
- omap_stop_dma(c->dma_ch);
+ omap_dma_stop(c);
}
if (c->cyclic) {
c->cyclic = false;
c->paused = false;
- omap_dma_unlink_lch(c->dma_ch, c->dma_ch);
}
vchan_get_all_descriptors(&c->vc, &head);
@@ -513,7 +1004,7 @@ static int omap_dma_pause(struct omap_chan *c)
return -EINVAL;
if (!c->paused) {
- omap_stop_dma(c->dma_ch);
+ omap_dma_stop(c);
c->paused = true;
}
@@ -527,7 +1018,7 @@ static int omap_dma_resume(struct omap_chan *c)
return -EINVAL;
if (c->paused) {
- omap_start_dma(c->dma_ch);
+ omap_dma_start(c, c->desc);
c->paused = false;
}
@@ -573,6 +1064,7 @@ static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
if (!c)
return -ENOMEM;
+ c->reg_map = od->reg_map;
c->dma_sig = dma_sig;
c->vc.desc_free = omap_dma_desc_free;
vchan_init(&c->vc, &od->ddev);
@@ -594,18 +1086,46 @@ static void omap_dma_free(struct omap_dmadev *od)
tasklet_kill(&c->vc.task);
kfree(c);
}
- kfree(od);
+}
+
+#define OMAP_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
+static int omap_dma_device_slave_caps(struct dma_chan *dchan,
+ struct dma_slave_caps *caps)
+{
+ caps->src_addr_widths = OMAP_DMA_BUSWIDTHS;
+ caps->dstn_addr_widths = OMAP_DMA_BUSWIDTHS;
+ caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ caps->cmd_pause = true;
+ caps->cmd_terminate = true;
+ caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+ return 0;
}
static int omap_dma_probe(struct platform_device *pdev)
{
struct omap_dmadev *od;
- int rc, i;
+ struct resource *res;
+ int rc, i, irq;
- od = kzalloc(sizeof(*od), GFP_KERNEL);
+ od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
if (!od)
return -ENOMEM;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ od->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(od->base))
+ return PTR_ERR(od->base);
+
+ od->plat = omap_get_plat_info();
+ if (!od->plat)
+ return -EPROBE_DEFER;
+
+ od->reg_map = od->plat->reg_map;
+
dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
@@ -615,10 +1135,12 @@ static int omap_dma_probe(struct platform_device *pdev)
od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
od->ddev.device_control = omap_dma_control;
+ od->ddev.device_slave_caps = omap_dma_device_slave_caps;
od->ddev.dev = &pdev->dev;
INIT_LIST_HEAD(&od->ddev.channels);
INIT_LIST_HEAD(&od->pending);
spin_lock_init(&od->lock);
+ spin_lock_init(&od->irq_lock);
tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);
@@ -630,6 +1152,21 @@ static int omap_dma_probe(struct platform_device *pdev)
}
}
+ irq = platform_get_irq(pdev, 1);
+ if (irq <= 0) {
+ dev_info(&pdev->dev, "failed to get L1 IRQ: %d\n", irq);
+ od->legacy = true;
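+		/* without an L1 interrupt, fall back to the legacy
+		 * omap_request_dma() callback path */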
+ } else {
+ /* Disable all interrupts */
+ od->irq_enable_mask = 0;
+ omap_dma_glbl_write(od, IRQENABLE_L1, 0);
+
+ rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
+ IRQF_SHARED, "omap-dma-engine", od);
+ if (rc)
+ return rc;
+ }
+
rc = dma_async_device_register(&od->ddev);
if (rc) {
pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
@@ -666,6 +1203,12 @@ static int omap_dma_remove(struct platform_device *pdev)
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&od->ddev);
+
+ if (!od->legacy) {
+ /* Disable all interrupts */
+ omap_dma_glbl_write(od, IRQENABLE_L0, 0);
+ }
+
omap_dma_free(od);
return 0;
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 61fdc54a3c8..9f9ca9fe5ce 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -21,6 +21,7 @@
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/pci.h>
+#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pch_dma.h>
@@ -964,16 +965,16 @@ static void pch_dma_remove(struct pci_dev *pdev)
if (pd) {
dma_async_device_unregister(&pd->dma);
+ free_irq(pdev->irq, pd);
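+		/* with the IRQ freed, the handler can no longer schedule tasklets */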
+
list_for_each_entry_safe(chan, _c, &pd->dma.channels,
device_node) {
pd_chan = to_pd_chan(chan);
- tasklet_disable(&pd_chan->tasklet);
tasklet_kill(&pd_chan->tasklet);
}
pci_pool_destroy(pd->pool);
- free_irq(pdev->irq, pd);
pci_iounmap(pdev, pd->membase);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -996,7 +997,7 @@ static void pch_dma_remove(struct pci_dev *pdev)
#define PCI_DEVICE_ID_ML7831_DMA1_8CH 0x8810
#define PCI_DEVICE_ID_ML7831_DMA2_4CH 0x8815
-DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
+const struct pci_device_id pch_dma_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */
diff --git a/drivers/dma/ppc4xx/Makefile b/drivers/dma/ppc4xx/Makefile
index b3d259b3e52..e7700985371 100644
--- a/drivers/dma/ppc4xx/Makefile
+++ b/drivers/dma/ppc4xx/Makefile
@@ -1 +1,3 @@
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += adma.o
+obj-$(CONFIG_AMCC_PPC460EX_460GT_4CHAN_DMA) += ppc460ex_4chan_dma.o
+obj-$(CONFIG_APM82181_ADMA) += apm82181-adma.o
diff --git a/drivers/dma/ppc4xx/apm82181-adma.c b/drivers/dma/ppc4xx/apm82181-adma.c
new file mode 100644
index 00000000000..c95e704b1d9
--- /dev/null
+++ b/drivers/dma/ppc4xx/apm82181-adma.c
@@ -0,0 +1,2201 @@
+/*
+ * Copyright(c) 2010 Applied Micro Circuits Corporation(AMCC). All rights reserved.
+ *
+ * Author: Tai Tri Nguyen <ttnguyen@appliedmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+
+/*
+ * This driver supports the asynchronous DMA copy and RAID engines available
+ * on the AppliedMicro APM82181 Processor.
+ * Based on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
+ * ADMA driver written by D.Williams.
+ */
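+/* define ADMA_DEBUG (by dropping the #undef below) to enable verbose tracing */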
+#define ADMA_DEBUG
+#undef ADMA_DEBUG
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/async_tx.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <asm/dcr.h>
+#include <asm/dcr-regs.h>
+#include <asm/apm82181-adma.h>
+#include "../dmaengine.h"
+
+#define PPC4XX_EDMA "apm82181-adma: "
+#ifdef ADMA_DEBUG
+#define DBG(string, args...) \
+	printk(PPC4XX_EDMA string, ##args)
+#define INFO DBG("<%s> -- line %d\n", __func__, __LINE__);
+#define ADMA_HEXDUMP(b, l) \
+ print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, \
+ 16, 1, (b), (l), false);
+#else
+#define DBG(string, args...) \
+	do { if (0) printk(KERN_INFO PPC4XX_EDMA string, ##args); } while (0)
+#define INFO DBG("");
+#define ADMA_HEXDUMP(b, l) \
+	do { if (0) print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, \
+		8, 1, (b), (l), false); } while (0)
+#endif
+
+#define MEM_HEXDUMP(b, l) \
+ print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, \
+ 16, 1, (b), (l), false);
+
+/* The list of channels exported by apm82181 ADMA */
+struct list_head ppc_adma_chan_list = LIST_HEAD_INIT(ppc_adma_chan_list);
+
+/* This flag is set when we want to refetch the XOR chain in the
+ * interrupt handler
+ */
+static u32 do_xor_refetch = 0;
+
+/* Pointers to the CDBs last submitted to DMA0/1/2/3 and XOR */
+static apm82181_desc_t *chan_last_sub[5];
+static apm82181_desc_t *chan_first_cdb[5];
+
+/* Pointer to last linked and submitted xor CB */
+static apm82181_desc_t *xor_last_linked = NULL;
+static apm82181_desc_t *xor_last_submit = NULL;
+
+/* the /proc interface is used here to verify the h/w RAID-5 capabilities */
+static struct proc_dir_entry *apm82181_proot;
+
+/* these are used by the enable & check routines */
+static u32 apm82181_xor_verified;
+static u32 apm82181_memcpy_verified[4];
+static apm82181_ch_t *apm82181_dma_tchan[5];
+static struct completion apm82181_r5_test_comp;
+
+static inline int apm82181_chan_is_busy(apm82181_ch_t *chan);
+#if 0
+static phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size)
+{
+ phys_addr_t page_4gb = 0;
+
+ return (page_4gb | addr);
+}
+#endif
+/**
+ * apm82181_adma_estimate - estimate the efficiency of processing
+ * the given operation on this channel. It's assumed that 'chan' is
+ * capable of processing the 'cap' type of operation.
+ * @chan: channel to use
+ * @cap: type of transaction
+ * @src_lst: array of source pointers
+ * @src_cnt: number of source operands
+ * @src_sz: size of each source operand
+ */
+int apm82181_adma_estimate (struct dma_chan *chan,
+ enum dma_transaction_type cap, struct page **src_lst,
+ int src_cnt, size_t src_sz)
+{
+ int ef = 1;
+
+ /* channel idleness increases the priority */
+ if (likely(ef) &&
+ !apm82181_chan_is_busy(to_apm82181_adma_chan(chan)))
+ ef++;
+ else {
+		if (chan->chan_id != APM82181_XOR_ID)
+ ef = -1;
+ }
+ return ef;
+}
+
+/******************************************************************************
+ * Command (Descriptor) Blocks low-level routines
+ ******************************************************************************/
+/**
+ * apm82181_desc_init_interrupt - initialize the descriptor for INTERRUPT
+ * pseudo operation
+ */
+static inline void apm82181_desc_init_interrupt (apm82181_desc_t *desc,
+ apm82181_ch_t *chan)
+{
+ xor_cb_t *p;
+
+ switch (chan->device->id) {
+ case APM82181_PDMA0_ID:
+ case APM82181_PDMA1_ID:
+ case APM82181_PDMA2_ID:
+ case APM82181_PDMA3_ID:
+ BUG();
+ break;
+ case APM82181_XOR_ID:
+ p = desc->hw_desc;
+ memset (desc->hw_desc, 0, sizeof(xor_cb_t));
+ /* NOP with Command Block Complete Enable */
+ p->cbc = XOR_CBCR_CBCE_BIT;
+ break;
+ default:
+ printk(KERN_ERR "Unsupported id %d in %s\n", chan->device->id,
+		       __func__);
+ break;
+ }
+}
+
+/**
+ * apm82181_desc_init_xor - initialize the descriptor for XOR operation
+ */
+static inline void apm82181_desc_init_xor(apm82181_desc_t *desc, int src_cnt,
+ unsigned long flags)
+{
+ xor_cb_t *hw_desc = desc->hw_desc;
+
+ memset (desc->hw_desc, 0, sizeof(xor_cb_t));
+ desc->hw_next = NULL;
+ desc->src_cnt = src_cnt;
+ desc->dst_cnt = 1;
+
+ hw_desc->cbc = XOR_CBCR_TGT_BIT | src_cnt;
+ if (flags & DMA_PREP_INTERRUPT)
+ /* Enable interrupt on complete */
+ hw_desc->cbc |= XOR_CBCR_CBCE_BIT;
+}
+
+/**
+ * apm82181_desc_init_memcpy - initialize the descriptor for MEMCPY operation
+ */
+static inline void apm82181_desc_init_memcpy(apm82181_desc_t *desc,
+ unsigned long flags)
+{
+ dma_cdb_t *hw_desc = desc->hw_desc;
+
+ memset(hw_desc, 0, sizeof(dma_cdb_t));
+ desc->hw_next = NULL;
+ desc->src_cnt = 1;
+ desc->dst_cnt = 1;
+
+ if (flags & DMA_PREP_INTERRUPT)
+ set_bit(APM82181_DESC_INT, &desc->flags);
+ else
+ clear_bit(APM82181_DESC_INT, &desc->flags);
+	/* DMA configuration for the transfer */
+	hw_desc->ctrl.tm = 2;  /* soft-initiated mem-to-mem mode */
+	hw_desc->ctrl.pw = 4;  /* transfer width: 16 bytes (128 bits) */
+	hw_desc->ctrl.ben = 1; /* buffer enable */
+	hw_desc->ctrl.sai = 1; /* increment source address */
+	hw_desc->ctrl.dai = 1; /* increment destination address */
+	hw_desc->ctrl.tce = 1; /* channel stops when TC is reached */
+	hw_desc->ctrl.cp = 3;  /* highest channel priority */
+	hw_desc->ctrl.sl = 0;  /* source is on the PLB */
+	hw_desc->ctrl.pl = 0;  /* destination is on the PLB */
+	hw_desc->cnt.tcie = 0; /* no terminal-count interrupt at init */
+	hw_desc->cnt.etie = 0; /* no end-of-transfer interrupt */
+	hw_desc->cnt.eie = 1;  /* enable error interrupt */
+	hw_desc->cnt.link = 0; /* do not link to the next CDB */
+	hw_desc->cnt.sgl = 0;
+	hw_desc->ctrl.ce = 1;  /* enable channel */
+	hw_desc->ctrl.cie = 1; /* enable channel interrupt */
+}
+
+/**
+ * apm82181_desc_init_memset - initialize the descriptor for MEMSET operation
+ */
+static inline void apm82181_desc_init_memset(apm82181_desc_t *desc, int value,
+ unsigned long flags)
+{
+	memset(desc->hw_desc, 0, sizeof(dma_cdb_t));
+ desc->hw_next = NULL;
+ desc->src_cnt = 1;
+ desc->dst_cnt = 1;
+
+ if (flags & DMA_PREP_INTERRUPT)
+ set_bit(APM82181_DESC_INT, &desc->flags);
+ else
+ clear_bit(APM82181_DESC_INT, &desc->flags);
+}
+
+/**
+ * apm82181_desc_set_src_addr - set source address into the descriptor
+ */
+static inline void apm82181_desc_set_src_addr( apm82181_desc_t *desc,
+ apm82181_ch_t *chan, int src_idx, dma_addr_t addr)
+{
+ dma_cdb_t *dma_hw_desc;
+ xor_cb_t *xor_hw_desc;
+
+ switch (chan->device->id) {
+ case APM82181_PDMA0_ID:
+ case APM82181_PDMA1_ID:
+ case APM82181_PDMA2_ID:
+ case APM82181_PDMA3_ID:
+ dma_hw_desc = desc->hw_desc;
+ dma_hw_desc->src_hi = (u32)(addr >> 32);
+ dma_hw_desc->src_lo = (u32)addr;
+ break;
+ case APM82181_XOR_ID:
+ xor_hw_desc = desc->hw_desc;
+		xor_hw_desc->ops[src_idx].h = (u32)(addr >> 32);
+ xor_hw_desc->ops[src_idx].l = (u32)addr;
+ break;
+ }
+}
+
+static void apm82181_adma_set_src(apm82181_desc_t *sw_desc,
+ dma_addr_t addr, int index)
+{
+ apm82181_ch_t *chan = to_apm82181_adma_chan(sw_desc->async_tx.chan);
+
+ sw_desc = sw_desc->group_head;
+
+ if (likely(sw_desc))
+ apm82181_desc_set_src_addr(sw_desc, chan, index, addr);
+}
+
+/**
+ * apm82181_desc_set_dest_addr - set destination address into the descriptor
+ */
+static inline void apm82181_desc_set_dest_addr(apm82181_desc_t *desc,
+ apm82181_ch_t *chan, dma_addr_t addr, u32 index)
+{
+ dma_cdb_t *dma_hw_desc;
+ xor_cb_t *xor_hw_desc;
+
+ switch (chan->device->id) {
+ case APM82181_PDMA0_ID:
+ case APM82181_PDMA1_ID:
+ case APM82181_PDMA2_ID:
+ case APM82181_PDMA3_ID:
+ dma_hw_desc = desc->hw_desc;
+ dma_hw_desc->dest_hi = (u32)(addr >> 32);
+ dma_hw_desc->dest_lo = (u32)addr;
+ break;
+ case APM82181_XOR_ID:
+ xor_hw_desc = desc->hw_desc;
+ xor_hw_desc->cbtah = (u32)(addr >> 32);
+ xor_hw_desc->cbtal |= (u32)addr;
+ break;
+ }
+}
+
+static int plbdma_get_transfer_width(dma_cdb_t *dma_hw_desc)
+{
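+	/* the PW field encodes the bus transfer width as a power of two bytes */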
+ switch (dma_hw_desc->ctrl.pw){
+ case 0:
+ return 1; /* unit: bytes */
+ case 1:
+ return 2;
+ case 2:
+ return 4;
+ case 3:
+ return 8;
+ case 4:
+ return 16;
+ }
+ return 0;
+}
+/**
+ * apm82181_desc_set_byte_count - set number of data bytes involved
+ * into the operation
+ */
+static inline void apm82181_desc_set_byte_count(apm82181_desc_t *desc,
+ apm82181_ch_t *chan, size_t byte_count)
+{
+ dma_cdb_t *dma_hw_desc;
+ xor_cb_t *xor_hw_desc;
+ int terminal_cnt, transfer_width = 0;
+
+ DBG("<%s> byte_count %08x\n", __func__,byte_count);
+ switch (chan->device->id){
+ case APM82181_PDMA0_ID:
+ case APM82181_PDMA1_ID:
+ case APM82181_PDMA2_ID:
+ case APM82181_PDMA3_ID:
+ dma_hw_desc = desc->hw_desc;
+ transfer_width = plbdma_get_transfer_width(dma_hw_desc);
+ terminal_cnt = byte_count/transfer_width;
+ dma_hw_desc->cnt.tc = terminal_cnt;
+ break;
+ case APM82181_XOR_ID:
+ xor_hw_desc = desc->hw_desc;
+ xor_hw_desc->cbbc = byte_count;
+ break;
+ }
+}
+
+/**
+ * apm82181_xor_set_link - set link address in xor CB
+ */
+static inline void apm82181_xor_set_link (apm82181_desc_t *prev_desc,
+ apm82181_desc_t *next_desc)
+{
+ xor_cb_t *xor_hw_desc = prev_desc->hw_desc;
+
+ if (unlikely(!next_desc || !(next_desc->phys))) {
+ printk(KERN_ERR "%s: next_desc=0x%p; next_desc->phys=0x%llx\n",
+ __func__, next_desc,
+ next_desc ? next_desc->phys : 0);
+ BUG();
+ }
+ DBG("<%s>:next_desc->phys %llx\n", __func__,next_desc->phys);
+ xor_hw_desc->cbs = 0;
+ xor_hw_desc->cblal = (u32)next_desc->phys;
+ xor_hw_desc->cblah = (u32)(next_desc->phys >> 32);
+ xor_hw_desc->cbc |= XOR_CBCR_LNK_BIT;
+}
+
+/**
+ * apm82181_desc_set_link - set the address of descriptor following this
+ * descriptor in chain
+ */
+static inline void apm82181_desc_set_link(apm82181_ch_t *chan,
+ apm82181_desc_t *prev_desc, apm82181_desc_t *next_desc)
+{
+ unsigned long flags;
+ apm82181_desc_t *tail = next_desc;
+
+ if (unlikely(!prev_desc || !next_desc ||
+ (prev_desc->hw_next && prev_desc->hw_next != next_desc))) {
+ /* If previous next is overwritten something is wrong.
+ * though we may refetch from append to initiate list
+ * processing; in this case - it's ok.
+ */
+ printk(KERN_ERR "%s: prev_desc=0x%p; next_desc=0x%p; "
+ "prev->hw_next=0x%p\n", __FUNCTION__, prev_desc,
+ next_desc, prev_desc ? prev_desc->hw_next : 0);
+ BUG();
+ }
+
+ local_irq_save(flags);
+
+ /* do s/w chaining both for DMA and XOR descriptors */
+ prev_desc->hw_next = next_desc;
+
+ switch (chan->device->id) {
+ case APM82181_PDMA0_ID:
+ case APM82181_PDMA1_ID:
+ case APM82181_PDMA2_ID:
+ case APM82181_PDMA3_ID:
+ break;
+ case APM82181_XOR_ID:
+ /* bind descriptor to the chain */
+ while (tail->hw_next)
+ tail = tail->hw_next;
+ xor_last_linked = tail;
+
+ if (prev_desc == xor_last_submit)
+ /* do not link to the last submitted CB */
+ break;
+ apm82181_xor_set_link (prev_desc, next_desc);
+ break;
+ default:
+ BUG();
+ }
+
+ local_irq_restore(flags);
+}
+
+/**
+ * apm82181_desc_get_src_addr - extract the source address from the descriptor
+ */
+static inline u32 apm82181_desc_get_src_addr(apm82181_desc_t *desc,
+ apm82181_ch_t *chan, int src_idx)
+{
+ dma_cdb_t *dma_hw_desc;
+
+ dma_hw_desc = desc->hw_desc;
+
+ switch (chan->device->id) {
+ case APM82181_PDMA0_ID:
+ case APM82181_PDMA1_ID:
+ case APM82181_PDMA2_ID:
+ case APM82181_PDMA3_ID:
+ break;
+ default:
+ return 0;
+ }
+ /* May have 0, 1, 2, or 3 sources */
+ return (dma_hw_desc->src_lo);
+}
+
+/**
+ * apm82181_desc_get_dest_addr - extract the destination address from the
+ * descriptor
+ */
+static inline u32 apm82181_desc_get_dest_addr(apm82181_desc_t *desc,
+ apm82181_ch_t *chan, int idx)
+{
+ dma_cdb_t *dma_hw_desc;
+
+ dma_hw_desc = desc->hw_desc;
+
+ switch (chan->device->id) {
+ case APM82181_PDMA0_ID:
+ case APM82181_PDMA1_ID:
+ case APM82181_PDMA2_ID:
+ case APM82181_PDMA3_ID:
+ break;
+ default:
+ return 0;
+ }
+
+ /* May have 0, 1, 2, or 3 sources */
+ return (dma_hw_desc->dest_lo);
+}
+
+/**
+ * apm82181_desc_get_byte_count - extract the byte count from the descriptor
+ */
+static inline u32 apm82181_desc_get_byte_count(apm82181_desc_t *desc,
+ apm82181_ch_t *chan)
+{
+ dma_cdb_t *dma_hw_desc;
+
+ dma_hw_desc = desc->hw_desc;
+
+	switch (chan->device->id) {
+	case APM82181_PDMA0_ID:
+	case APM82181_PDMA1_ID:
+	case APM82181_PDMA2_ID:
+	case APM82181_PDMA3_ID:
+		/*
+		 * the terminal count is stored in transfer-width units;
+		 * scale it back to bytes (the inverse of
+		 * apm82181_desc_set_byte_count())
+		 */
+		return dma_hw_desc->cnt.tc *
+			plbdma_get_transfer_width(dma_hw_desc);
+	default:
+		return 0;
+	}
+}
+
+
+/**
+ * apm82181_desc_get_link - get the address of the descriptor that
+ * follows this one
+ */
+static inline u32 apm82181_desc_get_link(apm82181_desc_t *desc,
+ apm82181_ch_t *chan)
+{
+ if (!desc->hw_next)
+ return 0;
+
+ return desc->hw_next->phys;
+}
+
+/**
+ * apm82181_desc_is_aligned - check alignment
+ */
+static inline int apm82181_desc_is_aligned(apm82181_desc_t *desc,
+ int num_slots)
+{
+ return (desc->idx & (num_slots - 1)) ? 0 : 1;
+}
+
+
+
+/******************************************************************************
+ * ADMA channel low-level routines
+ ******************************************************************************/
+
+static inline phys_addr_t apm82181_chan_get_current_descriptor(apm82181_ch_t *chan);
+static inline void apm82181_chan_append(apm82181_ch_t *chan);
+
+/*
+ * apm82181_adma_device_clear_eot_status - interrupt ack to XOR or DMA engine
+ */
+static inline void apm82181_adma_device_clear_eot_status (apm82181_ch_t *chan)
+{
+	u32 val;
+ int idx = chan->device->id;
+ volatile xor_regs_t *xor_reg;
+ INFO;
+ switch (idx) {
+ case APM82181_PDMA0_ID:
+ case APM82181_PDMA1_ID:
+ case APM82181_PDMA2_ID:
+ case APM82181_PDMA3_ID:
+ val = mfdcr(DCR_DMA2P40_SR);
+		if (val & DMA_SR_RI(idx)) {
+			printk(KERN_ERR "Error occurred, DMA%d status: 0x%x\n", idx, val);
+ }
+ /* TC reached int, write back to clear */
+ mtdcr(DCR_DMA2P40_SR, val);
+ break;
+ case APM82181_XOR_ID:
+ /* reset status bits to ack*/
+ xor_reg = chan->device->xor_base;
+
+ val = xor_reg->sr;
+ DBG("XOR engine status: 0x%08x\n", val);
+ xor_reg->sr = val;
+
+ if (val & (XOR_IE_ICBIE_BIT|XOR_IE_ICIE_BIT|XOR_IE_RPTIE_BIT)) {
+ if (val & XOR_IE_RPTIE_BIT) {
+ /* Read PLB Timeout Error.
+ * Try to resubmit the CB
+ */
+ INFO;
+ xor_reg->cblalr = xor_reg->ccbalr;
+ xor_reg->crsr |= XOR_CRSR_XAE_BIT;
+ } else
+ printk (KERN_ERR "XOR ERR 0x%x status\n", val);
+ break;
+ }
+
+ /* if the XORcore is idle, but there are unprocessed CBs
+ * then refetch the s/w chain here
+ */
+ if (!(xor_reg->sr & XOR_SR_XCP_BIT) && do_xor_refetch) {
+ apm82181_chan_append(chan);
+ }
+ break;
+ }
+}
+
+/*
+ * apm82181_chan_is_busy - get the channel status
+ */
+
+static inline int apm82181_chan_is_busy(apm82181_ch_t *chan)
+{
+ int busy = 0;
+ volatile xor_regs_t *xor_reg = chan->device->xor_base;
+
+ switch (chan->device->id) {
+ case APM82181_PDMA0_ID:
+ case APM82181_PDMA1_ID:
+ case APM82181_PDMA2_ID:
+ case APM82181_PDMA3_ID:
+ if(mfdcr(DCR_DMA2P40_SR) & DMA_SR_CB(chan->device->id))
+ busy = 1;
+ else
+ busy = 0;
+ break;
+ case APM82181_XOR_ID:
+ /* use the special status bit for the XORcore
+ */
+ busy = (xor_reg->sr & XOR_SR_XCP_BIT) ? 1 : 0;
+ break;
+ default:
+ BUG();
+ }
+
+ return busy;
+}
+
+/**
+ * apm82181_dma_put_desc - put PLB DMA 0/1/2/3 descriptor to FIFO
+ */
+static inline void apm82181_dma_put_desc(apm82181_ch_t *chan,
+ apm82181_desc_t *desc)
+{
+ dma_cdb_t *cdb = desc->hw_desc;
+ u32 sg_cmd = 0;
+
+ /* Enable TC interrupt */
+ if(test_bit(APM82181_DESC_INT, &desc->flags))
+ cdb->cnt.tcie = 1;
+ else
+ cdb->cnt.tcie = 0;
+ /* Not link to next cdb */
+ cdb->sg_hi = 0xffffffff;
+ cdb->sg_lo = 0xffffffff;
+
+ chan_last_sub[chan->device->id] = desc;
+
+ /* Update new cdb addr */
+ mtdcr(DCR_DMA2P40_SGHx(chan->device->id), (u32)(desc->phys >> 32));
+ mtdcr(DCR_DMA2P40_SGLx(chan->device->id), (u32)desc->phys);
+
+ INFO;
+ DBG("slot id: %d addr: %llx\n", desc->idx, desc->phys);
+ DBG("S/G addr H: %08x addr L: %08x\n",
+ mfdcr(DCR_DMA2P40_SGHx(chan->device->id)),
+ mfdcr(DCR_DMA2P40_SGLx(chan->device->id)));
+ ADMA_HEXDUMP(cdb, 96);
+ /* Enable S/G */
+ sg_cmd |= (DMA_SGC_SSG(chan->device->id) | DMA_SGC_EM_ALL);
+ sg_cmd |= DMA_SGC_SGL(chan->device->id, 0); /* S/G addr in PLB */
+
+ mtdcr(DCR_DMA2P40_SGC, sg_cmd);
+ DBG("S/G addr H: %08x addr L: %08x\n",
+ mfdcr(DCR_DMA2P40_SGHx(chan->device->id)),
+ mfdcr(DCR_DMA2P40_SGLx(chan->device->id)));
+	/* remember the last submitted CDB address for get_current_descriptor() */
+ chan->current_cdb_addr = desc->phys;
+
+}
+
+/**
+ * apm82181_chan_append - update the h/w chain in the channel
+ */
+static inline void apm82181_chan_append(apm82181_ch_t *chan)
+{
+ apm82181_desc_t *iter;
+ volatile xor_regs_t *xor_reg;
+ phys_addr_t cur_desc;
+ xor_cb_t *xcb;
+ unsigned long flags;
+ INFO;
+
+ local_irq_save(flags);
+
+ switch (chan->device->id) {
+ case APM82181_PDMA0_ID:
+ case APM82181_PDMA1_ID:
+ case APM82181_PDMA2_ID:
+ case APM82181_PDMA3_ID:
+ cur_desc = apm82181_chan_get_current_descriptor(chan);
+ DBG("current_desc %llx\n", cur_desc);
+ if (likely(cur_desc)) {
+ INFO;
+ iter = chan_last_sub[chan->device->id];
+ BUG_ON(!iter);
+ } else {
+ INFO;
+ /* first peer */
+ iter = chan_first_cdb[chan->device->id];
+ BUG_ON(!iter);
+ INFO;
+ apm82181_dma_put_desc(chan, iter);
+ chan->hw_chain_inited = 1;
+ }
+
+ /* is there something new to append */
+ if (!iter->hw_next)
+ break;
+
+ /* flush descriptors from the s/w queue to fifo */
+ list_for_each_entry_continue(iter, &chan->chain, chain_node) {
+ apm82181_dma_put_desc(chan, iter);
+ if (!iter->hw_next)
+ break;
+ }
+ break;
+ case APM82181_XOR_ID:
+ /* update h/w links and refetch */
+ if (!xor_last_submit->hw_next)
+ break;
+ xor_reg = chan->device->xor_base;
+		/* the last linked CDB has to generate an interrupt so
+		 * that we are able to append the next lists to h/w
+		 * regardless of the XOR engine state at the moment
+		 * these lists are appended
+		 */
+ xcb = xor_last_linked->hw_desc;
+ xcb->cbc |= XOR_CBCR_CBCE_BIT;
+
+ if (!(xor_reg->sr & XOR_SR_XCP_BIT)) {
+ /* XORcore is idle. Refetch now */
+ do_xor_refetch = 0;
+ apm82181_xor_set_link(xor_last_submit,
+ xor_last_submit->hw_next);
+
+ xor_last_submit = xor_last_linked;
+ xor_reg->crsr |= XOR_CRSR_RCBE_BIT | XOR_CRSR_64BA_BIT;
+ } else {
+ /* XORcore is running. Refetch later in the handler */
+ do_xor_refetch = 1;
+ }
+
+ break;
+ }
+
+ local_irq_restore(flags);
+}
+
+/**
+ * apm82181_chan_get_current_descriptor - get the currently executed descriptor
+ */
+static inline phys_addr_t apm82181_chan_get_current_descriptor(apm82181_ch_t *chan)
+{
+ phys_addr_t curr_cdb_addr;
+ volatile xor_regs_t *xor_reg;
+ int idx = chan->device->id;
+
+ if (unlikely(!chan->hw_chain_inited))
+ /* h/w descriptor chain is not initialized yet */
+ return 0;
+	switch (idx) {
+ case APM82181_PDMA0_ID:
+ case APM82181_PDMA1_ID:
+ case APM82181_PDMA2_ID:
+ case APM82181_PDMA3_ID:
+ curr_cdb_addr = chan->current_cdb_addr;
+ break;
+ case APM82181_XOR_ID:
+ xor_reg = chan->device->xor_base;
+		curr_cdb_addr = (phys_addr_t)xor_reg->ccbahr;
+ curr_cdb_addr = (curr_cdb_addr << 32) | xor_reg->ccbalr;
+ break;
+ default:
+ BUG();
+ }
+ return curr_cdb_addr;
+}
+
+
+/******************************************************************************
+ * ADMA device level
+ ******************************************************************************/
+
+static int apm82181_adma_alloc_chan_resources(struct dma_chan *chan);
+static dma_cookie_t apm82181_adma_tx_submit(
+ struct dma_async_tx_descriptor *tx);
+
+static void apm82181_adma_set_dest(
+ apm82181_desc_t *tx,
+ dma_addr_t addr, int index);
+
+/**
+ * apm82181_get_group_entry - get group entry with index idx
+ * @tdesc: is the last allocated slot in the group.
+ */
+static inline apm82181_desc_t *
+apm82181_get_group_entry(apm82181_desc_t *tdesc, u32 entry_idx)
+{
+ apm82181_desc_t *iter = tdesc->group_head;
+ int i = 0;
+
+	if (entry_idx >= (tdesc->src_cnt + tdesc->dst_cnt)) {
+		printk(KERN_ERR "%s: entry_idx %u, src_cnt %d, dst_cnt %d\n",
+		       __func__, entry_idx, tdesc->src_cnt, tdesc->dst_cnt);
+ BUG();
+ }
+ list_for_each_entry(iter, &tdesc->group_list, chain_node) {
+ if (i++ == entry_idx)
+ break;
+ }
+ return iter;
+}
+
+/**
+ * apm82181_adma_free_slots - flags descriptor slots for reuse
+ * @slot: Slot to free
+ * Caller must hold &apm82181_chan->lock while calling this function
+ */
+static void apm82181_adma_free_slots(apm82181_desc_t *slot,
+ apm82181_ch_t *chan)
+{
+ int stride = slot->slots_per_op;
+
+ while (stride--) {
+		/* async_tx_clear_ack() is intentionally not called here */
+ slot->slots_per_op = 0;
+ slot = list_entry(slot->slot_node.next,
+ apm82181_desc_t,
+ slot_node);
+ }
+}
+
+static void
+apm82181_adma_unmap(apm82181_ch_t *chan, apm82181_desc_t *desc)
+{
+	/*
+	 * nothing to unmap here: dma_unmap_page() is empty on this
+	 * platform, so per-descriptor unmapping is a no-op
+	 */
+}
+/**
+ * apm82181_adma_run_tx_complete_actions - call functions to be called
+ * upon complete
+ */
+static dma_cookie_t apm82181_adma_run_tx_complete_actions(
+ apm82181_desc_t *desc,
+ apm82181_ch_t *chan,
+ dma_cookie_t cookie)
+{
+ int i;
+ //enum dma_data_direction dir;
+ INFO;
+ BUG_ON(desc->async_tx.cookie < 0);
+ if (desc->async_tx.cookie > 0) {
+ cookie = desc->async_tx.cookie;
+ desc->async_tx.cookie = 0;
+
+ /* call the callback (must not sleep or submit new
+ * operations to this channel)
+ */
+ if (desc->async_tx.callback)
+ desc->async_tx.callback(
+ desc->async_tx.callback_param);
+
+ dma_descriptor_unmap(&desc->async_tx);
+ /* unmap dma addresses
+ * (unmap_single vs unmap_page?)
+ *
+ * actually, ppc's dma_unmap_page() functions are empty, so
+ * the following code is just for the sake of completeness
+ */
+ if (chan && chan->needs_unmap && desc->group_head &&
+ desc->unmap_len) {
+ apm82181_desc_t *unmap = desc->group_head;
+ /* assume 1 slot per op always */
+ u32 slot_count = unmap->slot_cnt;
+
+ /* Run through the group list and unmap addresses */
+ for (i = 0; i < slot_count; i++) {
+ BUG_ON(!unmap);
+ apm82181_adma_unmap(chan, unmap);
+ unmap = unmap->hw_next;
+ }
+ desc->group_head = NULL;
+ }
+ }
+
+ /* run dependent operations */
+ dma_run_dependencies(&desc->async_tx);
+
+ return cookie;
+}
+
+/**
+ * apm82181_adma_clean_slot - clean up CDB slot (if ack is set)
+ */
+static int apm82181_adma_clean_slot(apm82181_desc_t *desc,
+ apm82181_ch_t *chan)
+{
+ /* the client is allowed to attach dependent operations
+ * until 'ack' is set
+ */
+ if (!async_tx_test_ack(&desc->async_tx))
+ return 0;
+
+ /* leave the last descriptor in the chain
+ * so we can append to it
+ */
+ if (list_is_last(&desc->chain_node, &chan->chain) ||
+ desc->phys == apm82181_chan_get_current_descriptor(chan))
+ return 1;
+
+ dev_dbg(chan->device->common.dev, "\tfree slot %llx: %d stride: %d\n",
+ desc->phys, desc->idx, desc->slots_per_op);
+
+ list_del(&desc->chain_node);
+ apm82181_adma_free_slots(desc, chan);
+ return 0;
+}
+
+/**
+ * __apm82181_adma_slot_cleanup - this is the common clean-up routine
+ * which runs through the channel CDBs list until reach the descriptor
+ * currently processed. When routine determines that all CDBs of group
+ * are completed then corresponding callbacks (if any) are called and slots
+ * are freed.
+ */
+static void __apm82181_adma_slot_cleanup(apm82181_ch_t *chan)
+{
+ apm82181_desc_t *iter, *_iter, *group_start = NULL;
+ dma_cookie_t cookie = 0;
+ phys_addr_t current_desc = apm82181_chan_get_current_descriptor(chan);
+ int busy = apm82181_chan_is_busy(chan);
+ int seen_current = 0, slot_cnt = 0, slots_per_op = 0;
+
+ DBG("apm82181 adma%d: %s\n",
+	    chan->device->id, __func__);
+ DBG("current_desc %llx\n", current_desc);
+
+ if (!current_desc) {
+ /* There were no transactions yet, so
+ * nothing to clean
+ */
+ return;
+ }
+
+ /* free completed slots from the chain starting with
+ * the oldest descriptor
+ */
+ list_for_each_entry_safe(iter, _iter, &chan->chain,
+ chain_node) {
+ DBG(" cookie: %d slot: %d "
+ "busy: %d this_desc: %llx next_desc: %x cur: %llx ack: %d\n",
+ iter->async_tx.cookie, iter->idx, busy, iter->phys,
+ apm82181_desc_get_link(iter, chan), current_desc,
+ async_tx_test_ack(&iter->async_tx));
+ prefetch(_iter);
+ prefetch(&_iter->async_tx);
+
+ /* do not advance past the current descriptor loaded into the
+		 * hardware channel; subsequent descriptors are either in process
+ * or have not been submitted
+ */
+ if (seen_current)
+ break;
+
+ /* stop the search if we reach the current descriptor and the
+ * channel is busy, or if it appears that the current descriptor
+ * needs to be re-read (i.e. has been appended to)
+ */
+ if (iter->phys == current_desc) {
+ BUG_ON(seen_current++);
+ if (busy || apm82181_desc_get_link(iter, chan)) {
+ /* not all descriptors of the group have
+ * been completed; exit.
+ */
+ break;
+ }
+ }
+
+ /* detect the start of a group transaction */
+ if (!slot_cnt && !slots_per_op) {
+ slot_cnt = iter->slot_cnt;
+ slots_per_op = iter->slots_per_op;
+ if (slot_cnt <= slots_per_op) {
+ slot_cnt = 0;
+ slots_per_op = 0;
+ }
+ }
+
+ if (slot_cnt) {
+ if (!group_start)
+ group_start = iter;
+ slot_cnt -= slots_per_op;
+ }
+
+ /* all the members of a group are complete */
+ if (slots_per_op != 0 && slot_cnt == 0) {
+ apm82181_desc_t *grp_iter, *_grp_iter;
+ int end_of_chain = 0;
+
+ /* clean up the group */
+ slot_cnt = group_start->slot_cnt;
+ grp_iter = group_start;
+ list_for_each_entry_safe_from(grp_iter, _grp_iter,
+ &chan->chain, chain_node) {
+
+ cookie = apm82181_adma_run_tx_complete_actions(
+ grp_iter, chan, cookie);
+
+ slot_cnt -= slots_per_op;
+ end_of_chain = apm82181_adma_clean_slot(
+ grp_iter, chan);
+ if (end_of_chain && slot_cnt) {
+ /* Should wait for ZeroSum complete */
+ if (cookie > 0)
+ chan->common.completed_cookie = cookie;
+ return;
+ }
+
+ if (slot_cnt == 0 || end_of_chain)
+ break;
+ }
+
+ /* the group should be complete at this point */
+ BUG_ON(slot_cnt);
+
+ slots_per_op = 0;
+ group_start = NULL;
+ if (end_of_chain)
+ break;
+ else
+ continue;
+ } else if (slots_per_op) /* wait for group completion */
+ continue;
+
+ cookie = apm82181_adma_run_tx_complete_actions(iter, chan,
+ cookie);
+
+ if (apm82181_adma_clean_slot(iter, chan))
+ break;
+ }
+
+ BUG_ON(!seen_current);
+
+ if (cookie > 0) {
+ chan->common.completed_cookie = cookie;
+ DBG("completed cookie %d\n", cookie);
+ }
+
+}
+
+/**
+ * apm82181_adma_tasklet - clean up watch-dog initiator
+ */
+static void apm82181_adma_tasklet (unsigned long data)
+{
+ apm82181_ch_t *chan = (apm82181_ch_t *) data;
+ spin_lock(&chan->lock);
+ INFO;
+ __apm82181_adma_slot_cleanup(chan);
+ spin_unlock(&chan->lock);
+}
+
+/**
+ * apm82181_adma_slot_cleanup - clean up scheduled initiator
+ */
+static void apm82181_adma_slot_cleanup (apm82181_ch_t *chan)
+{
+ spin_lock_bh(&chan->lock);
+ __apm82181_adma_slot_cleanup(chan);
+ spin_unlock_bh(&chan->lock);
+}
+
+/**
+ * apm82181_adma_alloc_slots - allocate free slots (if any)
+ */
+static apm82181_desc_t *apm82181_adma_alloc_slots(
+ apm82181_ch_t *chan, int num_slots,
+ int slots_per_op)
+{
+ apm82181_desc_t *iter = NULL, *_iter, *alloc_start = NULL;
+	LIST_HEAD(chain);
+	int slots_found, retry = 0;
+
+ BUG_ON(!num_slots || !slots_per_op);
+	/* start the search from the last allocated descriptor; if a
+	 * contiguous allocation cannot be found, restart the search
+	 * from the beginning of the list
+ */
+retry:
+ slots_found = 0;
+ if (retry == 0)
+ iter = chan->last_used;
+ else
+ iter = list_entry(&chan->all_slots, apm82181_desc_t,
+ slot_node);
+ prefetch(iter);
+ DBG("---iter at %p idx %d\n ",iter,iter->idx);
+ list_for_each_entry_safe_continue(iter, _iter, &chan->all_slots,
+ slot_node) {
+ prefetch(_iter);
+ prefetch(&_iter->async_tx);
+ if (iter->slots_per_op) {
+ slots_found = 0;
+ continue;
+ }
+
+ /* start the allocation if the slot is correctly aligned */
+ if (!slots_found++)
+ alloc_start = iter;
+ if (slots_found == num_slots) {
+ apm82181_desc_t *alloc_tail = NULL;
+ apm82181_desc_t *last_used = NULL;
+ iter = alloc_start;
+ while (num_slots) {
+ int i;
+
+ /* pre-ack all but the last descriptor */
+ if (num_slots != slots_per_op) {
+ async_tx_ack(&iter->async_tx);
+ }
+ list_add_tail(&iter->chain_node, &chain);
+ alloc_tail = iter;
+ iter->async_tx.cookie = 0;
+ iter->hw_next = NULL;
+ iter->flags = 0;
+ iter->slot_cnt = num_slots;
+ for (i = 0; i < slots_per_op; i++) {
+ iter->slots_per_op = slots_per_op - i;
+ last_used = iter;
+ iter = list_entry(iter->slot_node.next,
+ apm82181_desc_t,
+ slot_node);
+ }
+ num_slots -= slots_per_op;
+ }
+ alloc_tail->group_head = alloc_start;
+ alloc_tail->async_tx.cookie = -EBUSY;
+ list_splice(&chain, &alloc_tail->group_list);
+ chan->last_used = last_used;
+ DBG("---slot allocated at %llx idx %d, hw_desc %p tx_ack %d\n",
+ alloc_tail->phys, alloc_tail->idx, alloc_tail->hw_desc,
+ async_tx_test_ack(&alloc_tail->async_tx));
+ return alloc_tail;
+ }
+ }
+ if (!retry++)
+ goto retry;
+#ifdef ADMA_DEBUG
+ static int empty_slot_cnt;
+ if(!(empty_slot_cnt%100))
+ printk(KERN_INFO"No empty slots trying to free some\n");
+ empty_slot_cnt++;
+#endif
+ /* try to free some slots if the allocation fails */
+ tasklet_schedule(&chan->irq_tasklet);
+ return NULL;
+}
+
+/**
+ * apm82181_chan_xor_slot_count - get the number of slots necessary for
+ * XOR operation
+ */
+static inline int apm82181_chan_xor_slot_count(size_t len, int src_cnt,
+ int *slots_per_op)
+{
+ int slot_cnt;
+
+ /* each XOR descriptor provides up to 16 source operands */
+ slot_cnt = *slots_per_op = (src_cnt + XOR_MAX_OPS - 1)/XOR_MAX_OPS;
+
+ if (likely(len <= APM82181_ADMA_XOR_MAX_BYTE_COUNT))
+ return slot_cnt;
+
+	printk(KERN_ERR "%s: len %zu > max %d !!\n",
+		__func__, len, APM82181_ADMA_XOR_MAX_BYTE_COUNT);
+ BUG();
+ return slot_cnt;
+}
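+
+/*
+ * Worked example (illustrative): with up to XOR_MAX_OPS = 16 source
+ * operands per descriptor, an XOR over 20 sources gives
+ * slot_cnt = slots_per_op = (20 + 15) / 16 = 2, i.e. the operation is
+ * split across two chained descriptors of at most 16 operands each.
+ */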
+
+/**
+ * apm82181_desc_init_null_xor - initialize the descriptor for NULL XOR
+ * pseudo operation
+ */
+static inline void apm82181_desc_init_null_xor(apm82181_desc_t *desc)
+{
+ memset (desc->hw_desc, 0, sizeof(xor_cb_t));
+ desc->hw_next = NULL;
+ desc->src_cnt = 0;
+ desc->dst_cnt = 1;
+}
+/**
+ * apm82181_chan_set_first_xor_descriptor - initialize the XORcore chain
+ */
+static inline void apm82181_chan_set_first_xor_descriptor(apm82181_ch_t *chan,
+ apm82181_desc_t *next_desc)
+{
+ volatile xor_regs_t *xor_reg;
+
+ xor_reg = chan->device->xor_base;
+
+	if (xor_reg->sr & XOR_SR_XCP_BIT)
+		printk(KERN_INFO "%s: warning: XORcore is running "
+			"while setting the first CDB!\n",
+			__func__);
+
+ xor_last_submit = xor_last_linked = next_desc;
+
+ xor_reg->crsr = XOR_CRSR_64BA_BIT;
+
+ xor_reg->cblalr = next_desc->phys;
+ xor_reg->cblahr = 0;
+ xor_reg->cbcr |= XOR_CBCR_LNK_BIT;
+
+ chan->hw_chain_inited = 1;
+}
+/**
+ * apm82181_chan_start_null_xor - initiate the first XOR operation (the DMA
+ * engines use FIFOs, as opposed to the chains used by XOR, so this is an
+ * XOR-specific operation)
+ */
+static void apm82181_chan_start_null_xor(apm82181_ch_t *chan)
+{
+ apm82181_desc_t *sw_desc, *group_start;
+ dma_cookie_t cookie;
+ int slot_cnt, slots_per_op;
+ volatile xor_regs_t *xor_reg = chan->device->xor_base;
+
+ dev_dbg(chan->device->common.dev,
+ "apm82181 adma%d: %s\n", chan->device->id, __func__);
+ INFO;
+ spin_lock_bh(&chan->lock);
+ slot_cnt = apm82181_chan_xor_slot_count(0, 2, &slots_per_op);
+ sw_desc = apm82181_adma_alloc_slots(chan, slot_cnt, slots_per_op);
+ if (sw_desc) {
+ INFO;
+ group_start = sw_desc->group_head;
+ list_splice_init(&sw_desc->group_list, &chan->chain);
+ async_tx_ack(&sw_desc->async_tx);
+ apm82181_desc_init_null_xor(group_start);
+ INFO;
+
+ cookie = chan->common.cookie;
+ cookie++;
+ if (cookie <= 1)
+ cookie = 2;
+
+ /* initialize the completed cookie to be less than
+ * the most recently used cookie
+ */
+ chan->common.completed_cookie = cookie - 1;
+ chan->common.cookie = sw_desc->async_tx.cookie = cookie;
+
+ /* channel should not be busy */
+ BUG_ON(apm82181_chan_is_busy(chan));
+
+ /* set the descriptor address */
+ apm82181_chan_set_first_xor_descriptor(chan, sw_desc);
+
+ /* run the descriptor */
+ xor_reg->crsr = XOR_CRSR_64BA_BIT | XOR_CRSR_XAE_BIT;
+ } else
+ printk(KERN_ERR "apm82181 adma%d"
+ " failed to allocate null descriptor\n",
+ chan->device->id);
+ spin_unlock_bh(&chan->lock);
+}
+
+/**
+ * apm82181_adma_alloc_chan_resources - allocate pools for CDB slots
+ */
+static int apm82181_adma_alloc_chan_resources(struct dma_chan *chan)
+{
+ apm82181_ch_t *apm82181_chan = to_apm82181_adma_chan(chan);
+ apm82181_desc_t *slot = NULL;
+ char *hw_desc;
+ int i, db_sz;
+ int init = apm82181_chan->slots_allocated ? 0 : 1;
+
+ chan->chan_id = apm82181_chan->device->id;
+
+ /* Allocate descriptor slots */
+ i = apm82181_chan->slots_allocated;
+ if (apm82181_chan->device->id != APM82181_XOR_ID)
+ db_sz = sizeof (dma_cdb_t);
+ else
+ db_sz = sizeof (xor_cb_t);
+
+ for (; i < (apm82181_chan->device->pool_size/db_sz); i++) {
+ slot = kzalloc(sizeof(apm82181_desc_t), GFP_KERNEL);
+ if (!slot) {
+			printk(KERN_INFO "APM82181 ADMA channel only initialized"
+				" %d descriptor slots\n", i--);
+ break;
+ }
+
+ hw_desc = (char *) apm82181_chan->device->dma_desc_pool_virt;
+ slot->hw_desc = (void *) &hw_desc[i * db_sz];
+ dma_async_tx_descriptor_init(&slot->async_tx, chan);
+ slot->async_tx.tx_submit = apm82181_adma_tx_submit;
+ INIT_LIST_HEAD(&slot->chain_node);
+ INIT_LIST_HEAD(&slot->slot_node);
+ INIT_LIST_HEAD(&slot->group_list);
+ slot->phys = apm82181_chan->device->dma_desc_pool + i * db_sz;
+ slot->idx = i;
+ spin_lock_bh(&apm82181_chan->lock);
+ apm82181_chan->slots_allocated++;
+ list_add_tail(&slot->slot_node, &apm82181_chan->all_slots);
+ spin_unlock_bh(&apm82181_chan->lock);
+ }
+
+ if (i && !apm82181_chan->last_used) {
+ apm82181_chan->last_used =
+ list_entry(apm82181_chan->all_slots.next,
+ apm82181_desc_t,
+ slot_node);
+ }
+
+	printk(KERN_INFO "apm82181 adma%d: allocated %d descriptor slots\n",
+ apm82181_chan->device->id, i);
+
+ /* initialize the channel and the chain with a null operation */
+	if (init) {
+		apm82181_chan->hw_chain_inited = 0;
+		switch (apm82181_chan->device->id) {
+		case APM82181_PDMA0_ID:
+			apm82181_dma_tchan[0] = apm82181_chan;
+			break;
+		case APM82181_PDMA1_ID:
+			apm82181_dma_tchan[1] = apm82181_chan;
+			break;
+		case APM82181_PDMA2_ID:
+			apm82181_dma_tchan[2] = apm82181_chan;
+			break;
+		case APM82181_PDMA3_ID:
+			apm82181_dma_tchan[3] = apm82181_chan;
+			break;
+		case APM82181_XOR_ID:
+			apm82181_dma_tchan[4] = apm82181_chan;
+			apm82181_chan_start_null_xor(apm82181_chan);
+			break;
+		default:
+			BUG();
+		}
+ apm82181_chan->needs_unmap = 1;
+ }
+
+ return (i > 0) ? i : -ENOMEM;
+}
+
+/**
+ * apm82181_desc_assign_cookie - assign a cookie
+ */
+static dma_cookie_t apm82181_desc_assign_cookie(apm82181_ch_t *chan,
+ apm82181_desc_t *desc)
+{
+ dma_cookie_t cookie = chan->common.cookie;
+ cookie++;
+ if (cookie < 0)
+ cookie = 1;
+ chan->common.cookie = desc->async_tx.cookie = cookie;
+ return cookie;
+}
+
+
+/**
+ * apm82181_adma_check_threshold - append CDBs to h/w chain if threshold
+ * has been achieved
+ */
+static void apm82181_adma_check_threshold(apm82181_ch_t *chan)
+{
+ dev_dbg(chan->device->common.dev, "apm82181 adma%d: pending: %d\n",
+ chan->device->id, chan->pending);
+ INFO;
+ if (chan->pending >= APM82181_ADMA_THRESHOLD) {
+ chan->pending = 0;
+ apm82181_chan_append(chan);
+ }
+}
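+
+/*
+ * In other words, submitted descriptors are batched: the hardware chain is
+ * appended to only once APM82181_ADMA_THRESHOLD operations have queued up,
+ * or when a client explicitly calls apm82181_adma_issue_pending() below.
+ */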
+
+/**
+ * apm82181_adma_tx_submit - submit new descriptor group to the channel
+ * (it's not necessary that descriptors will be submitted to the h/w
+ * chains too right now)
+ */
+static dma_cookie_t apm82181_adma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ apm82181_desc_t *sw_desc = tx_to_apm82181_adma_slot(tx);
+ apm82181_ch_t *chan = to_apm82181_adma_chan(tx->chan);
+ apm82181_desc_t *group_start, *old_chain_tail;
+ int slot_cnt;
+ int slots_per_op;
+ dma_cookie_t cookie;
+ group_start = sw_desc->group_head;
+ slot_cnt = group_start->slot_cnt;
+ slots_per_op = group_start->slots_per_op;
+ INFO;
+ spin_lock_bh(&chan->lock);
+ cookie = apm82181_desc_assign_cookie(chan, sw_desc);
+
+ if (unlikely(list_empty(&chan->chain))) {
+ /* first peer */
+ list_splice_init(&sw_desc->group_list, &chan->chain);
+ chan_first_cdb[chan->device->id] = group_start;
+ } else {
+ /* isn't first peer, bind CDBs to chain */
+ old_chain_tail = list_entry(chan->chain.prev,
+ apm82181_desc_t, chain_node);
+ list_splice_init(&sw_desc->group_list,
+ &old_chain_tail->chain_node);
+ /* fix up the hardware chain */
+ apm82181_desc_set_link(chan, old_chain_tail, group_start);
+ }
+
+ /* increment the pending count by the number of operations */
+ chan->pending += slot_cnt / slots_per_op;
+ apm82181_adma_check_threshold(chan);
+ spin_unlock_bh(&chan->lock);
+
+ DBG("apm82181 adma%d:cookie: %d slot: %d tx %p\n",
+ chan->device->id, sw_desc->async_tx.cookie, sw_desc->idx, sw_desc);
+ return cookie;
+}
+/**
+ * apm82181_adma_prep_dma_xor - prepare CDB for a XOR operation
+ */
+static struct dma_async_tx_descriptor *apm82181_adma_prep_dma_xor(
+ struct dma_chan *chan, dma_addr_t dma_dest,
+ dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
+ unsigned long flags)
+{
+ apm82181_ch_t *apm82181_chan = to_apm82181_adma_chan(chan);
+ apm82181_desc_t *sw_desc, *group_start;
+ int slot_cnt, slots_per_op;
+
+#ifdef ADMA_DEBUG
+ printk("\n%s(%d):\n\tsrc: ", __func__,
+ apm82181_chan->device->id);
+ for (slot_cnt=0; slot_cnt < src_cnt; slot_cnt++)
+ printk("0x%llx ", dma_src[slot_cnt]);
+ printk("\n\tdst: 0x%llx\n", dma_dest);
+#endif
+ if (unlikely(!len))
+ return NULL;
+ BUG_ON(unlikely(len > APM82181_ADMA_XOR_MAX_BYTE_COUNT));
+
+	dev_dbg(apm82181_chan->device->common.dev,
+		"apm82181 adma%d: %s src_cnt: %d len: %zu int_en: %d\n",
+		apm82181_chan->device->id, __func__, src_cnt, len,
+		flags & DMA_PREP_INTERRUPT ? 1 : 0);
+
+ spin_lock_bh(&apm82181_chan->lock);
+ slot_cnt = apm82181_chan_xor_slot_count(len, src_cnt, &slots_per_op);
+ sw_desc = apm82181_adma_alloc_slots(apm82181_chan, slot_cnt,
+ slots_per_op);
+ if (sw_desc) {
+ group_start = sw_desc->group_head;
+ apm82181_desc_init_xor(group_start, src_cnt, flags);
+ apm82181_adma_set_dest(group_start, dma_dest, 0);
+ while (src_cnt--)
+ apm82181_adma_set_src(group_start,
+ dma_src[src_cnt], src_cnt);
+ apm82181_desc_set_byte_count(group_start, apm82181_chan, len);
+ sw_desc->unmap_len = len;
+ sw_desc->async_tx.flags = flags;
+ }
+ spin_unlock_bh(&apm82181_chan->lock);
+
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
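+
+/*
+ * Illustrative client-side sketch (not part of this driver; the callback
+ * names are hypothetical). XOR descriptors prepared above are normally
+ * reached through the async_tx API rather than called directly, e.g.:
+ *
+ *	struct async_submit_ctl submit;
+ *
+ *	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,
+ *			  done_cb, done_arg, NULL);
+ *	async_xor(dest_page, src_pages, 0, src_cnt, PAGE_SIZE, &submit);
+ */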
+/**
+ * apm82181_adma_prep_dma_interrupt - prepare CDB for a pseudo DMA operation
+ */
+static struct dma_async_tx_descriptor *apm82181_adma_prep_dma_interrupt(
+ struct dma_chan *chan, unsigned long flags)
+{
+ apm82181_ch_t *apm82181_chan = to_apm82181_adma_chan(chan);
+ apm82181_desc_t *sw_desc, *group_start;
+ int slot_cnt, slots_per_op;
+
+ dev_dbg(apm82181_chan->device->common.dev,
+ "apm82181 adma%d: %s\n", apm82181_chan->device->id,
+ __FUNCTION__);
+ spin_lock_bh(&apm82181_chan->lock);
+ slot_cnt = slots_per_op = 1;
+ sw_desc = apm82181_adma_alloc_slots(apm82181_chan, slot_cnt,
+ slots_per_op);
+ if (sw_desc) {
+ group_start = sw_desc->group_head;
+ apm82181_desc_init_interrupt(group_start, apm82181_chan);
+ group_start->unmap_len = 0;
+ sw_desc->async_tx.flags = flags;
+ }
+ spin_unlock_bh(&apm82181_chan->lock);
+
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+/**
+ * apm82181_adma_prep_dma_memcpy - prepare CDB for a MEMCPY operation
+ */
+static struct dma_async_tx_descriptor *apm82181_adma_prep_dma_memcpy(
+ struct dma_chan *chan, dma_addr_t dma_dest,
+ dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+ apm82181_ch_t *apm82181_chan = to_apm82181_adma_chan(chan);
+ apm82181_desc_t *sw_desc, *group_start;
+ int slot_cnt, slots_per_op;
+ if (unlikely(!len))
+ return NULL;
+ BUG_ON(unlikely(len > APM82181_ADMA_DMA_MAX_BYTE_COUNT));
+
+ spin_lock_bh(&apm82181_chan->lock);
+
+	dev_dbg(apm82181_chan->device->common.dev,
+		"apm82181 adma%d: %s len: %zu int_en %d\n",
+		apm82181_chan->device->id, __func__, len,
+		flags & DMA_PREP_INTERRUPT ? 1 : 0);
+
+ slot_cnt = slots_per_op = 1;
+ sw_desc = apm82181_adma_alloc_slots(apm82181_chan, slot_cnt,
+ slots_per_op);
+ if (sw_desc) {
+ group_start = sw_desc->group_head;
+ flags |= DMA_PREP_INTERRUPT;
+ apm82181_desc_init_memcpy(group_start, flags);
+ apm82181_adma_set_dest(group_start, dma_dest, 0);
+ apm82181_adma_set_src(group_start, dma_src, 0);
+ apm82181_desc_set_byte_count(group_start, apm82181_chan, len);
+ sw_desc->unmap_len = len;
+ sw_desc->async_tx.flags = flags;
+ }
+ spin_unlock_bh(&apm82181_chan->lock);
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
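+
+/*
+ * Illustrative client-side sketch (callback names are hypothetical): a
+ * memcpy descriptor is typically obtained through the async_tx layer,
+ * which selects a DMA_MEMCPY-capable channel such as one of the four PLB
+ * DMA engines registered by this driver:
+ *
+ *	struct async_submit_ctl submit;
+ *
+ *	init_async_submit(&submit, 0, NULL, done_cb, done_arg, NULL);
+ *	async_memcpy(dest_page, src_page, 0, 0, PAGE_SIZE, &submit);
+ */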
+
+
+/**
+ * apm82181_adma_set_dest - set destination address into descriptor
+ */
+static void apm82181_adma_set_dest(apm82181_desc_t *sw_desc,
+ dma_addr_t addr, int index)
+{
+ apm82181_ch_t *chan = to_apm82181_adma_chan(sw_desc->async_tx.chan);
+ BUG_ON(index >= sw_desc->dst_cnt);
+
+ switch (chan->device->id) {
+ case APM82181_PDMA0_ID:
+ case APM82181_PDMA1_ID:
+ case APM82181_PDMA2_ID:
+ case APM82181_PDMA3_ID:
+		/* TODO: support transfer lengths >
+		 * APM82181_ADMA_DMA/XOR_MAX_BYTE_COUNT
+		 */
+		apm82181_desc_set_dest_addr(sw_desc->group_head,
+				chan, addr, index);
+ break;
+ case APM82181_XOR_ID:
+ sw_desc = apm82181_get_group_entry(sw_desc, index);
+ apm82181_desc_set_dest_addr(sw_desc, chan,
+ addr, index);
+ break;
+ default:
+ BUG();
+ }
+}
+
+
+/**
+ * apm82181_adma_free_chan_resources - free the resources allocated
+ */
+static void apm82181_adma_free_chan_resources(struct dma_chan *chan)
+{
+ apm82181_ch_t *apm82181_chan = to_apm82181_adma_chan(chan);
+ apm82181_desc_t *iter, *_iter;
+ int in_use_descs = 0;
+
+ apm82181_adma_slot_cleanup(apm82181_chan);
+
+ spin_lock_bh(&apm82181_chan->lock);
+ list_for_each_entry_safe(iter, _iter, &apm82181_chan->chain,
+ chain_node) {
+ in_use_descs++;
+ list_del(&iter->chain_node);
+ }
+ list_for_each_entry_safe_reverse(iter, _iter,
+ &apm82181_chan->all_slots, slot_node) {
+ list_del(&iter->slot_node);
+ kfree(iter);
+ apm82181_chan->slots_allocated--;
+ }
+ apm82181_chan->last_used = NULL;
+
+ dev_dbg(apm82181_chan->device->common.dev,
+ "apm82181 adma%d %s slots_allocated %d\n",
+ apm82181_chan->device->id,
+ __FUNCTION__, apm82181_chan->slots_allocated);
+ spin_unlock_bh(&apm82181_chan->lock);
+
+ /* one is ok since we left it on there on purpose */
+ if (in_use_descs > 1)
+ printk(KERN_ERR "GT: Freeing %d in use descriptors!\n",
+ in_use_descs - 1);
+}
+
+/**
+ * apm82181_adma_tx_status - poll the status of an ADMA transaction
+ * @chan: ADMA channel handle
+ * @cookie: ADMA transaction identifier
+ */
+static enum dma_status apm82181_adma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ apm82181_ch_t *apm82181_chan = to_apm82181_adma_chan(chan);
+ enum dma_status ret;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_COMPLETE)
+ return ret;
+
+ apm82181_adma_slot_cleanup(apm82181_chan);
+
+ return dma_cookie_status(chan, cookie, txstate);
+}
+
+/**
+ * apm82181_adma_eot_handler - end of transfer interrupt handler
+ */
+static irqreturn_t apm82181_adma_eot_handler(int irq, void *data)
+{
+ apm82181_ch_t *chan = data;
+
+ dev_dbg(chan->device->common.dev,
+ "apm82181 adma%d: %s\n", chan->device->id, __FUNCTION__);
+ INFO;
+ if(chan->device->id == APM82181_XOR_ID)
+ tasklet_schedule(&chan->irq_tasklet);
+ apm82181_adma_device_clear_eot_status(chan);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * apm82181_adma_err_handler - DMA error interrupt handler;
+ * does the same things as an EOT handler
+ */
+#if 0
+static irqreturn_t apm82181_adma_err_handler(int irq, void *data)
+{
+ apm82181_ch_t *chan = data;
+ dev_dbg(chan->device->common.dev,
+ "apm82181 adma%d: %s\n", chan->device->id, __FUNCTION__);
+ tasklet_schedule(&chan->irq_tasklet);
+ apm82181_adma_device_clear_eot_status(chan);
+
+ return IRQ_HANDLED;
+}
+#endif
+/**
+ * apm82181_test_callback - called when test operation has been done
+ */
+static void apm82181_test_callback (void *unused)
+{
+ complete(&apm82181_r5_test_comp);
+}
+
+/**
+ * apm82181_adma_issue_pending - flush all pending descriptors to h/w
+ */
+static void apm82181_adma_issue_pending(struct dma_chan *chan)
+{
+ apm82181_ch_t *apm82181_chan = to_apm82181_adma_chan(chan);
+
+ DBG("apm82181 adma%d: %s %d \n", apm82181_chan->device->id,
+ __FUNCTION__, apm82181_chan->pending);
+ if (apm82181_chan->pending) {
+ apm82181_chan->pending = 0;
+ apm82181_chan_append(apm82181_chan);
+ }
+}
+
+static inline void xor_hw_init (apm82181_dev_t *adev)
+{
+ volatile xor_regs_t *xor_reg = adev->xor_base;
+ /* Reset XOR */
+ xor_reg->crsr = XOR_CRSR_XASR_BIT;
+ xor_reg->crrr = XOR_CRSR_64BA_BIT;
+
+ /* enable XOR engine interrupts */
+ xor_reg->ier = XOR_IE_CBCIE_BIT |
+ XOR_IE_ICBIE_BIT | XOR_IE_ICIE_BIT | XOR_IE_RPTIE_BIT;
+}
+
+/*
+ * Per channel probe
+ */
+static int apm82181_dma_per_chan_probe(struct platform_device *ofdev)
+{
+ int ret = 0, irq;
+ const u32 *index, *dcr_regs, *pool_size;
+ apm82181_plb_dma_t *pdma;
+ apm82181_dev_t *adev;
+ apm82181_ch_t *chan;
+ struct device_node *np = ofdev->dev.of_node;
+ struct resource res;
+ int len;
+
+ INFO;
+ pdma = dev_get_drvdata(ofdev->dev.parent);
+ BUG_ON(!pdma);
+	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+	if (!adev) {
+		printk(KERN_ERR "No free memory for allocating dma channels\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+ adev->dev = &ofdev->dev;
+ index = of_get_property(np, "cell-index", NULL);
+ if(!index) {
+ printk(KERN_ERR "adma-channel: Device node %s has missing or invalid "
+ "cell-index property\n", np->full_name);
+ goto err;
+ }
+ adev->id = (int)*index;
+ /* The XOR engine/PLB DMA 4 channels have different resources/pool_sizes */
+ if (adev->id != APM82181_XOR_ID){
+		dcr_regs = of_get_property(np, "dcr-reg", &len);
+		if (!dcr_regs || (len != 2 * sizeof(u32))) {
+			printk(KERN_ERR "plb_dma channel%d: can't get DCR register base!\n",
+				adev->id);
+			goto err;
+		}
+ adev->dcr_base = dcr_regs[0];
+
+ pool_size = of_get_property(np, "pool_size", NULL);
+ if(!pool_size) {
+ printk(KERN_ERR "plb_dma channel%d: Device node %s has missing or "
+ "invalid pool_size property\n", adev->id, np->full_name);
+ goto err;
+ }
+ adev->pool_size = *pool_size;
+ } else {
+ if (of_address_to_resource(np, 0, &res)) {
+ printk(KERN_ERR "adma_xor channel%d %s: could not get resource address.\n",
+ adev->id,np->full_name);
+ goto err;
+ }
+
+ DBG("XOR resource start = %llx end = %llx\n", res.start, res.end);
+	adev->xor_base = ioremap(res.start, resource_size(&res));
+ if (!adev->xor_base){
+ printk(KERN_ERR "XOR engine registers memory mapping failed.\n");
+ goto err;
+ }
+ adev->pool_size = PAGE_SIZE << 1;
+ }
+
+ adev->pdma = pdma;
+ adev->ofdev = ofdev;
+	dev_set_drvdata(&ofdev->dev, adev);
+
+ switch (adev->id){
+ case APM82181_PDMA0_ID:
+ case APM82181_PDMA1_ID:
+ case APM82181_PDMA2_ID:
+ case APM82181_PDMA3_ID:
+ dma_cap_set(DMA_MEMCPY,adev->cap_mask);
+ break;
+ case APM82181_XOR_ID:
+ dma_cap_set(DMA_XOR,adev->cap_mask);
+ dma_cap_set(DMA_INTERRUPT,adev->cap_mask);
+ break;
+ default:
+ BUG();
+ }
+ /* XOR h/w configuration */
+ if(adev->id == APM82181_XOR_ID)
+ xor_hw_init(adev);
+ /* allocate coherent memory for hardware descriptors
+ * note: writecombine gives slightly better performance, but
+ * requires that we explicitly drain the write buffer
+ */
+	adev->dma_desc_pool_virt = dma_alloc_coherent(&ofdev->dev,
+			adev->pool_size, &adev->dma_desc_pool, GFP_KERNEL);
+	if (!adev->dma_desc_pool_virt) {
+ ret = -ENOMEM;
+ goto err_dma_alloc;
+ }
+
+ adev->common.cap_mask = adev->cap_mask;
+ INIT_LIST_HEAD(&adev->common.channels);
+ /* set base routines */
+ adev->common.device_alloc_chan_resources =
+ apm82181_adma_alloc_chan_resources;
+ adev->common.device_free_chan_resources =
+ apm82181_adma_free_chan_resources;
+ adev->common.device_tx_status = apm82181_adma_tx_status;
+ adev->common.device_issue_pending = apm82181_adma_issue_pending;
+ adev->common.dev = &ofdev->dev;
+
+ /* set prep routines based on capability */
+ if (dma_has_cap(DMA_MEMCPY, adev->common.cap_mask)) {
+ adev->common.device_prep_dma_memcpy =
+ apm82181_adma_prep_dma_memcpy;
+ }
+
+ if (dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask)) {
+ adev->common.device_prep_dma_interrupt =
+ apm82181_adma_prep_dma_interrupt;
+ }
+
+ if (dma_has_cap(DMA_XOR, adev->common.cap_mask)) {
+ adev->common.max_xor = XOR_MAX_OPS;
+ adev->common.device_prep_dma_xor =
+ apm82181_adma_prep_dma_xor;
+ }
+
+ /* create a channel */
+ if ((chan = kzalloc(sizeof(*chan), GFP_KERNEL)) == NULL) {
+ ret = -ENOMEM;
+ goto err_chan_alloc;
+ }
+ tasklet_init(&chan->irq_tasklet, apm82181_adma_tasklet,
+ (unsigned long)chan);
+
+	irq = irq_of_parse_and_map(np, 0);
+	if (irq) {
+		static const char * const chan_irq_names[] = {
+			"adma-chan0", "adma-chan1", "adma-chan2",
+			"adma-chan3", "adma-xor",
+		};
+
+		BUG_ON(adev->id < 0 || adev->id > 4);
+		ret = request_irq(irq, apm82181_adma_eot_handler,
+				IRQF_DISABLED, chan_irq_names[adev->id], chan);
+		if (ret) {
+			printk(KERN_ERR "Failed to request IRQ %d\n", irq);
+			ret = -EIO;
+			goto err_irq;
+		}
+	}
+
+ spin_lock_init(&chan->lock);
+ chan->device = adev;
+ INIT_LIST_HEAD(&chan->chain);
+ INIT_LIST_HEAD(&chan->all_slots);
+ chan->common.device = &adev->common;
+ list_add_tail(&chan->common.device_node, &adev->common.channels);
+ adev->common.chancnt++;
+
+	printk(KERN_INFO "AMCC(R) APM82181 ADMA Engine found [%d]: "
+ "( capabilities: %s%s%s%s%s%s)\n",
+ adev->id,
+ dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq_xor " : "",
+ dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " :
+ "",
+ dma_has_cap(DMA_XOR, adev->common.cap_mask) ? "xor " : "",
+ dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask) ? "xor_val " :
+ "",
+ dma_has_cap(DMA_MEMCPY, adev->common.cap_mask) ? "memcpy " : "",
+ dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask) ? "int " : "");
+ INFO;
+ ret = dma_async_device_register(&adev->common);
+ if (ret) {
+ dev_err(&ofdev->dev, "failed to register dma async device");
+ goto err_irq;
+ }
+ INFO;
+
+ goto out;
+err_irq:
+ kfree(chan);
+err_chan_alloc:
+ dma_free_coherent(&ofdev->dev, adev->pool_size,
+ adev->dma_desc_pool_virt, adev->dma_desc_pool);
+err_dma_alloc:
+ if (adev->xor_base)
+ iounmap(adev->xor_base);
+err:
+	if (!ret)
+		ret = -ENXIO;
+	kfree(adev);
+out:
+ return ret;
+}
+
+static struct of_device_id dma_4chan_match[] = {
+ {
+ .compatible = "amcc,apm82181-adma",
+ },
+ {},
+};
+
+static struct of_device_id dma_per_chan_match[] = {
+ {.compatible = "amcc,apm82181-dma-4channel",},
+ {.compatible = "amcc,xor",},
+ {},
+};
+/*
+ * apm82181_pdma_probe - probe the async device
+ */
+static int apm82181_pdma_probe(struct platform_device *ofdev)
+{
+ int ret = 0;
+ apm82181_plb_dma_t *pdma;
+
+ if ((pdma = kzalloc(sizeof(*pdma), GFP_KERNEL)) == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ pdma->dev = &ofdev->dev;
+ pdma->ofdev = ofdev;
+ printk(PPC4XX_EDMA "Probing AMCC APM82181 ADMA engines...\n");
+
+	dev_set_drvdata(&ofdev->dev, pdma);
+	of_platform_bus_probe(ofdev->dev.of_node, dma_per_chan_match, &ofdev->dev);
+
+out:
+ return ret;
+}
+
+/*
+ * apm82181_test_xor - test whether the XOR capability works correctly.
+ * For this we perform one DMA XOR operation with three sources
+ * and one destination.
+ */
+static int apm82181_test_xor (apm82181_ch_t *chan)
+{
+ apm82181_desc_t *sw_desc, *group_start;
+ struct page *pg_src[3], *pg_dest;
+ char *a;
+ dma_addr_t dma_src_addr[3];
+ dma_addr_t dma_dst_addr;
+ int rval = -EFAULT, i;
+ int len = PAGE_SIZE, src_cnt = 3;
+ int slot_cnt, slots_per_op;
+ INFO;
+	printk(KERN_INFO "ADMA channel %d XOR testing\n", chan->device->id);
+	for (i = 0; i < 3; i++) {
+		pg_src[i] = alloc_page(GFP_KERNEL);
+		if (!pg_src[i]) {
+			while (--i >= 0)
+				__free_page(pg_src[i]);
+			return -ENOMEM;
+		}
+	}
+	pg_dest = alloc_page(GFP_KERNEL);
+	if (!pg_dest) {
+		for (i = 0; i < 3; i++)
+			__free_page(pg_src[i]);
+		return -ENOMEM;
+	}
+	/* Fill the source pages with test patterns; the XOR of the three is zero */
+ memset(page_address(pg_src[0]), 0xDA, len);
+ memset(page_address(pg_src[1]), 0xDA, len);
+ memset(page_address(pg_src[2]), 0x00, len);
+ memset(page_address(pg_dest), 0xA5, len);
+ for(i = 0; i < 3; i++){
+ a = page_address(pg_src[i]);
+ printk("The virtual addr of src %d =%x\n",i, (unsigned int)a);
+ MEM_HEXDUMP(a,50);
+ }
+ a = page_address(pg_dest);
+ printk("The virtual addr of dest=%x\n", (unsigned int)a);
+ MEM_HEXDUMP(a,50);
+
+ for(i = 0; i < 3; i++){
+ dma_src_addr[i] = dma_map_page(chan->device->dev, pg_src[i], 0, len,
+ DMA_BIDIRECTIONAL);
+ }
+ dma_dst_addr = dma_map_page(chan->device->dev, pg_dest, 0, len,
+ DMA_BIDIRECTIONAL);
+ printk("dma_src_addr[0]: %llx; dma_src_addr[1]: %llx;\n "
+ "dma_src_addr[2]: %llx; dma_dst_addr %llx, len: %x\n", dma_src_addr[0],
+ dma_src_addr[1], dma_src_addr[2], dma_dst_addr, len);
+
+ spin_lock_bh(&chan->lock);
+ slot_cnt = apm82181_chan_xor_slot_count(len, src_cnt, &slots_per_op);
+ sw_desc = apm82181_adma_alloc_slots(chan, slot_cnt, slots_per_op);
+ if (sw_desc) {
+ group_start = sw_desc->group_head;
+ apm82181_desc_init_xor(group_start, src_cnt, DMA_PREP_INTERRUPT);
+ /* Setup addresses */
+ while (src_cnt--)
+ apm82181_adma_set_src(group_start,
+ dma_src_addr[src_cnt], src_cnt);
+ apm82181_adma_set_dest(group_start, dma_dst_addr, 0);
+ apm82181_desc_set_byte_count(group_start, chan, len);
+ sw_desc->unmap_len = PAGE_SIZE;
+ } else {
+ rval = -EFAULT;
+ spin_unlock_bh(&chan->lock);
+ goto exit;
+ }
+ spin_unlock_bh(&chan->lock);
+
+ printk("Submit CDB...\n");
+ MEM_HEXDUMP(sw_desc->hw_desc, 96);
+ async_tx_ack(&sw_desc->async_tx);
+ sw_desc->async_tx.callback = apm82181_test_callback;
+ sw_desc->async_tx.callback_param = NULL;
+
+ init_completion(&apm82181_r5_test_comp);
+ apm82181_adma_tx_submit(&sw_desc->async_tx);
+ apm82181_adma_issue_pending(&chan->common);
+	/* wait for a while so that the dma transaction finishes */
+	mdelay(100);
+	/* Now check that the destination page XORed to zero */
+	a = page_address(pg_dest);
+	/* XOR result at destination */
+	MEM_HEXDUMP(a, 50);
+	if ((*(u32 *)a) == 0x00000000 && memcmp(a, a + 4, PAGE_SIZE - 4) == 0) {
+		/* dest page XOR is correct as expected - RAID-5 enabled */
+		rval = 0;
+	} else {
+		/* RAID-5 was not enabled */
+		rval = -EINVAL;
+	}
+
+exit:
+ dma_unmap_page(chan->device->dev, dma_src_addr[0], PAGE_SIZE, DMA_BIDIRECTIONAL);
+ dma_unmap_page(chan->device->dev, dma_src_addr[1], PAGE_SIZE, DMA_BIDIRECTIONAL);
+ dma_unmap_page(chan->device->dev, dma_src_addr[2], PAGE_SIZE, DMA_BIDIRECTIONAL);
+ dma_unmap_page(chan->device->dev, dma_dst_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ __free_page(pg_src[0]);
+ __free_page(pg_src[1]);
+ __free_page(pg_src[2]);
+ __free_page(pg_dest);
+ return rval;
+}
+
+
+/*
+ * apm82181_test_dma - test whether the DMA memcpy capability works
+ * correctly. For this we perform one MEMCPY operation from a source
+ * page to a destination page and verify that their contents match.
+ */
+static int apm82181_test_dma (apm82181_ch_t *chan)
+{
+ apm82181_desc_t *sw_desc;
+ struct page *pg_src, *pg_dest;
+ char *a, *d;
+ dma_addr_t dma_src_addr;
+ dma_addr_t dma_dst_addr;
+ int rval = -EFAULT;
+ int len = PAGE_SIZE;
+
+	printk(KERN_INFO "PLB DMA channel %d memcpy testing\n", chan->device->id);
+	pg_src = alloc_page(GFP_KERNEL);
+	if (!pg_src)
+		return -ENOMEM;
+	pg_dest = alloc_page(GFP_KERNEL);
+	if (!pg_dest) {
+		__free_page(pg_src);
+		return -ENOMEM;
+	}
+	/* Fill the source and destination pages with distinct test patterns */
+ memset(page_address(pg_src), 0x77, len);
+ memset(page_address(pg_dest), 0xa5, len);
+ a = page_address(pg_src);
+ printk("The virtual addr of src =%x\n", (unsigned int)a);
+ MEM_HEXDUMP(a,50);
+ a = page_address(pg_dest);
+ printk("The virtual addr of dest=%x\n", (unsigned int)a);
+ MEM_HEXDUMP(a,50);
+ dma_src_addr = dma_map_page(chan->device->dev, pg_src, 0, len,
+ DMA_BIDIRECTIONAL);
+ dma_dst_addr = dma_map_page(chan->device->dev, pg_dest, 0, len,
+ DMA_BIDIRECTIONAL);
+ printk("dma_src_addr: %llx; dma_dst_addr %llx\n", dma_src_addr, dma_dst_addr);
+
+ spin_lock_bh(&chan->lock);
+ sw_desc = apm82181_adma_alloc_slots(chan, 1, 1);
+ if (sw_desc) {
+		/* 1 src, 1 dst, int_ena */
+		apm82181_desc_init_memcpy(sw_desc, DMA_PREP_INTERRUPT);
+		/* Setup addresses */
+ apm82181_adma_set_src(sw_desc, dma_src_addr, 0);
+ apm82181_adma_set_dest(sw_desc, dma_dst_addr, 0);
+ apm82181_desc_set_byte_count(sw_desc, chan, len);
+ sw_desc->unmap_len = PAGE_SIZE;
+ } else {
+ rval = -EFAULT;
+ spin_unlock_bh(&chan->lock);
+ goto exit;
+ }
+ spin_unlock_bh(&chan->lock);
+
+ printk("Submit CDB...\n");
+ MEM_HEXDUMP(sw_desc->hw_desc, 96);
+ async_tx_ack(&sw_desc->async_tx);
+ sw_desc->async_tx.callback = apm82181_test_callback;
+ sw_desc->async_tx.callback_param = NULL;
+
+ init_completion(&apm82181_r5_test_comp);
+ apm82181_adma_tx_submit(&sw_desc->async_tx);
+ apm82181_adma_issue_pending(&chan->common);
+	/* wait for a while so that the dma transaction finishes */
+	mdelay(100);
+
+ a = page_address(pg_src);
+ d = page_address(pg_dest);
+ if (!memcmp(a, d, len)) {
+ rval = 0;
+ } else {
+ rval = -EINVAL;
+ }
+
+ a = page_address(pg_src);
+ printk("\nAfter DMA done:");
+ printk("\nsrc %x value:\n", (unsigned int)a);
+ MEM_HEXDUMP(a,96);
+ a = page_address(pg_dest);
+ printk("\ndest%x value:\n", (unsigned int)a);
+ MEM_HEXDUMP(a,96);
+
+exit:
+	dma_unmap_page(chan->device->dev, dma_src_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+	dma_unmap_page(chan->device->dev, dma_dst_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+	__free_page(pg_src);
+	__free_page(pg_dest);
+	return rval;
+}
+
+static struct platform_driver apm82181_pdma_driver = {
+ .driver = {
+ .name = "apm82181_plb_dma",
+ .owner = THIS_MODULE,
+ .of_match_table = dma_4chan_match,
+ },
+ .probe = apm82181_pdma_probe,
+};
+struct platform_driver apm82181_dma_per_chan_driver = {
+ .driver = {
+ .name = "apm82181-dma-4channel",
+ .owner = THIS_MODULE,
+ .of_match_table = dma_per_chan_match,
+ },
+ .probe = apm82181_dma_per_chan_probe,
+};
+
+static int __init apm82181_adma_per_chan_init(void)
+{
+	return platform_driver_register(&apm82181_dma_per_chan_driver);
+}
+
+static int __init apm82181_adma_init(void)
+{
+	return platform_driver_register(&apm82181_pdma_driver);
+}
+
+#if 0
+static void __exit apm82181_adma_exit (void)
+{
+	platform_driver_unregister(&apm82181_pdma_driver);
+ return;
+}
+module_exit(apm82181_adma_exit);
+#endif
+
+module_init(apm82181_adma_per_chan_init);
+module_init(apm82181_adma_init);
+
+MODULE_AUTHOR("Tai Tri Nguyen <ttnguyen@appliedmicro.com>");
+MODULE_DESCRIPTION("APM82181 ADMA Engine Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/ppc4xx/ppc460ex_4chan_dma.c b/drivers/dma/ppc4xx/ppc460ex_4chan_dma.c
new file mode 100644
index 00000000000..821e279e0b7
--- /dev/null
+++ b/drivers/dma/ppc4xx/ppc460ex_4chan_dma.c
@@ -0,0 +1,1110 @@
+/*
+ * Copyright(c) 2008 Applied Micro Circuits Corporation(AMCC). All rights reserved.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/async_tx.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/uaccess.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <asm/dcr-regs.h>
+#include <asm/dcr.h>
+#include "ppc460ex_4chan_dma.h"
+
+
+
+#ifdef DEBUG_TEST
+#define dma_pr printk
+#else
+#define dma_pr
+#endif
+#define TEST_SIZE 12
+
+
+ppc460ex_plb_dma_dev_t *adev;
+
+
+
+int ppc460ex_get_dma_channel(void)
+{
+	int i;
+	unsigned int status = mfdcr(DCR_DMA2P40_SR);
+
+	/* return the first channel whose busy bit is clear */
+	for (i = 0; i < MAX_PPC460EX_DMA_CHANNELS; i++) {
+		if ((status & (1 << (20 + i))) == 0)
+			return i;
+	}
+	return -ENODEV;
+}
+
+
+int ppc460ex_get_dma_status(void)
+{
+	return mfdcr(DCR_DMA2P40_SR);
+}
+
+
+int ppc460ex_set_src_addr(int ch_id, phys_addr_t src_addr)
+{
+	if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+		printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+#ifdef PPC4xx_DMA_64BIT
+	mtdcr(DCR_DMA2P40_SAH0 + ch_id*8, src_addr >> 32);
+#endif
+	mtdcr(DCR_DMA2P40_SAL0 + ch_id*8, (u32)src_addr);
+
+	return DMA_STATUS_GOOD;
+}
+
+int ppc460ex_set_dst_addr(int ch_id, phys_addr_t dst_addr)
+{
+
+ if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+ printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+
+#ifdef PPC4xx_DMA_64BIT
+ mtdcr(DCR_DMA2P40_DAH0 + ch_id*8, dst_addr >> 32);
+#endif
+ mtdcr(DCR_DMA2P40_DAL0 + ch_id*8, (u32)dst_addr);
+
+ return DMA_STATUS_GOOD;
+}
+
+
+
+/*
+ * Sets the dma mode for single DMA transfers only.
+ * For scatter/gather transfers, the mode is passed to the
+ * alloc_dma_handle() function as one of the parameters.
+ *
+ * The mode is simply saved and used later. This allows
+ * the driver to call set_dma_mode() and set_dma_addr() in
+ * any order.
+ *
+ * Valid mode values are:
+ *
+ * DMA_MODE_READ peripheral to memory
+ * DMA_MODE_WRITE memory to peripheral
+ * DMA_MODE_MM memory to memory
+ * DMA_MODE_MM_DEVATSRC device-paced memory to memory, device at src
+ * DMA_MODE_MM_DEVATDST device-paced memory to memory, device at dst
+ */
+int ppc460ex_set_dma_mode(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id, unsigned int mode)
+{
+	ppc460ex_plb_dma_ch_t *dma_chan;
+
+	if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+		printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+	dma_chan = adev->chan[ch_id];
+	dma_chan->mode = mode;
+	return DMA_STATUS_GOOD;
+}
+
+
+
+
+/*
+ * Sets the DMA Count register. Note that 'count' is in bytes.
+ * However, the DMA Count register counts the number of "transfers",
+ * where each transfer is equal to the bus width. Thus, count
+ * MUST be a multiple of the bus width.
+ */
+void ppc460ex_set_dma_count(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id, unsigned int count)
+{
+ ppc460ex_plb_dma_ch_t *dma_chan = adev->chan[ch_id];
+
+	/* sanity-check that count is a multiple of the bus width */
+	{
+ int error = 0;
+ switch (dma_chan->pwidth) {
+ case PW_8:
+ break;
+ case PW_16:
+ if (count & 0x1)
+ error = 1;
+ break;
+ case PW_32:
+ if (count & 0x3)
+ error = 1;
+ break;
+ case PW_64:
+ if (count & 0x7)
+ error = 1;
+ break;
+
+ case PW_128:
+ if (count & 0xf)
+ error = 1;
+ break;
+ default:
+			printk(KERN_ERR "set_dma_count: invalid bus width: 0x%x\n",
+				dma_chan->pwidth);
+ return;
+ }
+		if (error)
+			printk(KERN_WARNING
+				"set_dma_count: count 0x%x is not a multiple of bus width %d\n",
+				count, dma_chan->pwidth);
+ }
+	count = count >> dma_chan->shift;
+	mtdcr(DCR_DMA2P40_CTC0 + (ch_id * 0x8), count);
+}
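+
+/*
+ * Worked example (illustrative): with a 32-bit peripheral width (PW_32,
+ * shift = 2), programming a 4096 byte transfer writes 4096 >> 2 = 1024
+ * transfers into the count register; per the check above, 4096 & 0x3
+ * must be zero for the count to be valid.
+ */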
+
+
+
+
+/*
+ * Enables the channel interrupt.
+ *
+ * If performing a scatter/gather transfer, this function
+ * MUST be called before calling alloc_dma_handle() and building
+ * the sgl list. Otherwise, interrupts will not be enabled, if
+ * they were previously disabled.
+ */
+int ppc460ex_enable_dma_interrupt(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id)
+{
+ unsigned int control;
+ ppc460ex_plb_dma_ch_t *dma_chan = adev->chan[ch_id];
+
+ if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+ printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+
+ dma_chan->int_enable = 1;
+
+
+	control = mfdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8));
+	control |= DMA_CIE_ENABLE; /* Channel Interrupt Enable */
+	mtdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8), control);
+
+	control = mfdcr(DCR_DMA2P40_CTC0 + (ch_id * 0x8));
+	control |= DMA_CTC_TCIE | DMA_CTC_ETIE | DMA_CTC_EIE;
+	mtdcr(DCR_DMA2P40_CTC0 + (ch_id * 0x8), control);
+
+	return DMA_STATUS_GOOD;
+}
+
+
+/*
+ * Disables the channel interrupt.
+ *
+ * If performing a scatter/gather transfer, this function
+ * MUST be called before calling alloc_dma_handle() and building
+ * the sgl list. Otherwise, interrupts will not be disabled, if
+ * they were previously enabled.
+ */
+int ppc460ex_disable_dma_interrupt(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id)
+{
+ unsigned int control;
+ ppc460ex_plb_dma_ch_t *dma_chan = adev->chan[ch_id];
+
+ if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+ printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+ dma_chan->int_enable = 0;
+ control = mfdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8));
+ control &= ~DMA_CIE_ENABLE; /* Channel Interrupt Enable */
+ mtdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8), control);
+
+ return DMA_STATUS_GOOD;
+}
+
+
+/*
+ * This function returns the channel configuration.
+ */
+int ppc460ex_get_channel_config(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id,
+ ppc460ex_plb_dma_ch_t *p_dma_ch)
+{
+ unsigned int polarity;
+ unsigned int control;
+ ppc460ex_plb_dma_ch_t *dma_chan = adev->chan[ch_id];
+
+ if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+ printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+
+ memcpy(p_dma_ch, dma_chan, sizeof(ppc460ex_plb_dma_ch_t));
+
+ polarity = mfdcr(DCR_DMA2P40_POL);
+
+ p_dma_ch->polarity = polarity & GET_DMA_POLARITY(ch_id);
+ control = mfdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8));
+
+ p_dma_ch->cp = GET_DMA_PRIORITY(control);
+ p_dma_ch->pwidth = GET_DMA_PW(control);
+ p_dma_ch->psc = GET_DMA_PSC(control);
+ p_dma_ch->pwc = GET_DMA_PWC(control);
+ p_dma_ch->phc = GET_DMA_PHC(control);
+ p_dma_ch->ce = GET_DMA_CE_ENABLE(control);
+ p_dma_ch->int_enable = GET_DMA_CIE_ENABLE(control);
+ p_dma_ch->shift = GET_DMA_PW(control);
+ p_dma_ch->pf = GET_DMA_PREFETCH(control);
+
+ return DMA_STATUS_GOOD;
+
+}
+
+/*
+ * Sets the priority for the DMA channel dmanr.
+ * Since this is setup by the hardware init function, this function
+ * can be used to dynamically change the priority of a channel.
+ *
+ * Acceptable priorities:
+ *
+ * PRIORITY_LOW
+ * PRIORITY_MID_LOW
+ * PRIORITY_MID_HIGH
+ * PRIORITY_HIGH
+ *
+ */
+int ppc460ex_set_channel_priority(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id,
+ unsigned int priority)
+{
+ unsigned int control;
+
+ if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+ printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+
+	if ((priority != PRIORITY_LOW) &&
+	    (priority != PRIORITY_MID_LOW) &&
+	    (priority != PRIORITY_MID_HIGH) && (priority != PRIORITY_HIGH)) {
+		printk(KERN_WARNING "%s: bad priority: 0x%x\n",
+			__FUNCTION__, priority);
+	}
+
+ control = mfdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8));
+ control |= SET_DMA_PRIORITY(priority);
+ mtdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8), control);
+
+ return DMA_STATUS_GOOD;
+}
+
+/*
+ * Returns the width of the peripheral attached to this channel. This assumes
+ * that someone who knows the hardware configuration, boot code or some other
+ * init code, already set the width.
+ *
+ * The return value is one of:
+ * PW_8
+ * PW_16
+ * PW_32
+ * PW_64
+ *
+ * The function returns DMA_STATUS_BAD_CHANNEL on error.
+ */
+unsigned int ppc460ex_get_peripheral_width(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id)
+{
+ unsigned int control;
+
+ if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+ printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+ control = mfdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8));
+ return (GET_DMA_PW(control));
+}
+
+/*
+ * Enables the burst on the channel (BTEN bit in the control/count register)
+ * Note:
+ * For scatter/gather dma, this function MUST be called before the
+ * ppc4xx_alloc_dma_handle() func as the chan count register is copied into the
+ * sgl list and used as each sgl element is added.
+ */
+int ppc460ex_enable_burst(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id)
+{
+ unsigned int ctc;
+ if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+ printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+
+ ctc = mfdcr(DCR_DMA2P40_CTC0 + (ch_id * 0x8)) | DMA_CTC_BTEN;
+ mtdcr(DCR_DMA2P40_CTC0 + (ch_id * 0x8), ctc);
+ return DMA_STATUS_GOOD;
+}
+
+
+/*
+ * Disables the burst on the channel (BTEN bit in the control/count register)
+ * Note:
+ * For scatter/gather dma, this function MUST be called before the
+ * ppc4xx_alloc_dma_handle() func as the chan count register is copied into the
+ * sgl list and used as each sgl element is added.
+ */
+int ppc460ex_disable_burst(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id)
+{
+ unsigned int ctc;
+ if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+ printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+
+ ctc = mfdcr(DCR_DMA2P40_CTC0 + (ch_id * 0x8)) &~ DMA_CTC_BTEN;
+ mtdcr(DCR_DMA2P40_CTC0 + (ch_id * 0x8), ctc);
+ return DMA_STATUS_GOOD;
+}
+
+
+/*
+ * Sets the burst size (number of peripheral widths) for the channel
+ * (BSIZ bits in the control/count register))
+ * must be one of:
+ * DMA_CTC_BSIZ_2
+ * DMA_CTC_BSIZ_4
+ * DMA_CTC_BSIZ_8
+ * DMA_CTC_BSIZ_16
+ * Note:
+ * For scatter/gather dma, this function MUST be called before the
+ * ppc4xx_alloc_dma_handle() func as the chan count register is copied into the
+ * sgl list and used as each sgl element is added.
+ */
+int ppc460ex_set_burst_size(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id,
+ unsigned int bsize)
+{
+ unsigned int ctc;
+ if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+ printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+
+ ctc = mfdcr(DCR_DMA2P40_CTC0 + (ch_id * 0x8)) &~ DMA_CTC_BSIZ_MSK;
+ ctc |= (bsize & DMA_CTC_BSIZ_MSK);
+ mtdcr(DCR_DMA2P40_CTC0 + (ch_id * 0x8), ctc);
+ return DMA_STATUS_GOOD;
+}
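+
+/*
+ * Illustrative configuration sketch (hypothetical values): to burst eight
+ * peripheral widths at a time on channel 0, a caller would do
+ *
+ *	ppc460ex_set_burst_size(adev, 0, DMA_CTC_BSIZ_8);
+ *	ppc460ex_enable_burst(adev, 0);
+ *
+ * before allocating any scatter/gather handle, per the notes above.
+ */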
+
+/*
+ * Returns the number of bytes left to be transferred.
+ * After a DMA transfer, this should return zero.
+ * Reading this while a DMA transfer is still in progress will return
+ * unpredictable results.
+ */
+int ppc460ex_get_dma_residue(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id)
+{
+ unsigned int count;
+ ppc460ex_plb_dma_ch_t *dma_chan = adev->chan[ch_id];
+
+ if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+ printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+
+	count = mfdcr(DCR_DMA2P40_CTC0 + (ch_id * 0x8));
+	count &= DMA_CTC_TC_MASK;
+
+	return count << dma_chan->shift;
+
+}
+
+
+/*
+ * Configures a DMA channel, including the peripheral bus width, if a
+ * peripheral is attached to the channel, the polarity of the DMAReq and
+ * DMAAck signals, etc. This information should really be setup by the boot
+ * code, since most likely the configuration won't change dynamically.
+ * If the kernel has to call this function, it's recommended that it's
+ * called from platform specific init code. The driver should not need to
+ * call this function.
+ */
+int ppc460ex_init_dma_channel(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id,
+ ppc460ex_plb_dma_ch_t *p_init)
+{
+ unsigned int polarity;
+ uint32_t control = 0;
+ ppc460ex_plb_dma_ch_t *dma_chan = adev->chan[ch_id];
+
+
+ DMA_MODE_READ = (unsigned long) DMA_TD; /* Peripheral to Memory */
+ DMA_MODE_WRITE = 0; /* Memory to Peripheral */
+
+ if (!p_init) {
+		printk(KERN_ERR "%s: NULL p_init\n", __FUNCTION__);
+ return DMA_STATUS_NULL_POINTER;
+ }
+
+ if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+ printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+#if DCR_DMA2P40_POL > 0
+ polarity = mfdcr(DCR_DMA2P40_POL);
+#else
+ polarity = 0;
+#endif
+
+ p_init->int_enable = 0;
+ p_init->buffer_enable = 1;
+ p_init->etd_output = 1;
+ p_init->tce_enable = 1;
+ p_init->pl = 0;
+ p_init->dai = 1;
+ p_init->sai = 1;
+ /* Duc Dang: make channel priority to 2, original is 3 */
+ p_init->cp = 2;
+ p_init->pwidth = PW_8;
+ p_init->psc = 0;
+ p_init->pwc = 0;
+ p_init->phc = 0;
+ p_init->pf = 1;
+
+
+ /* Setup the control register based on the values passed to
+ * us in p_init. Then, over-write the control register with this
+ * new value.
+ */
+	control = SET_DMA_CONTROL;
+
+	/* clear all polarity signals and then "or" in new signal levels */
+ polarity &= ~GET_DMA_POLARITY(ch_id);
+ polarity |= p_init->polarity;
+
+#if DCR_DMA2P40_POL > 0
+ mtdcr(DCR_DMA2P40_POL, polarity);
+#endif
+ mtdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8), control);
+
+ /*
+ * The peripheral width values written in the control register are:
+ * PW_8 0
+ * PW_16 1
+ * PW_32 2
+ * PW_64 3
+ * PW_128 4
+ *
+ * Since the DMA count register takes the number of "transfers",
+ * we need to divide the count sent to us in certain
+ * functions by the appropriate number. It so happens that our
+ * right shift value is equal to the peripheral width value.
+ */
+ dma_chan->shift = p_init->pwidth;
+ dma_chan->sai = p_init->sai;
+ dma_chan->dai = p_init->dai;
+ dma_chan->tce_enable = p_init->tce_enable;
+ dma_chan->mode = DMA_MODE_MM;
+ /*
+ * Save the control word for easy access.
+ */
+ dma_chan->control = control;
+ mtdcr(DCR_DMA2P40_SR, 0xffffffff);
+
+
+ return DMA_STATUS_GOOD;
+}
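+
+/*
+ * Illustrative call sequence (a sketch modelled on the self-test code
+ * further below; addresses and length are hypothetical):
+ *
+ *	ppc460ex_plb_dma_ch_t init = { .polarity = 0, .pwidth = PW_32,
+ *				       .sai = 1, .dai = 1, };
+ *
+ *	ppc460ex_init_dma_channel(adev, 0, &init);
+ *	ppc460ex_set_src_addr(0, dma_src);
+ *	ppc460ex_set_dst_addr(0, dma_dst);
+ *	ppc460ex_set_dma_mode(adev, 0, DMA_MODE_MM);
+ *	ppc460ex_set_dma_count(adev, 0, len);
+ *	ppc460ex_enable_dma(adev, 0);
+ */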
+
+
+int ppc460ex_enable_dma(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id)
+{
+ unsigned int control;
+ ppc460ex_plb_dma_ch_t *dma_chan = adev->chan[ch_id];
+ unsigned int status_bits[] = { DMA_CS0 | DMA_TS0 | DMA_CH0_ERR,
+ DMA_CS1 | DMA_TS1 | DMA_CH1_ERR};
+
+	if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+		printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+	if (dma_chan->in_use) {
+		printk(KERN_ERR "%s: channel %d in use\n", __FUNCTION__, ch_id);
+		return DMA_STATUS_CHANNEL_NOTFREE;
+	}
+
+#if 0
+ if (dma_chan->mode == DMA_MODE_READ) {
+ /* peripheral to memory */
+ ppc460ex_set_src_addr(ch_id, 0);
+ ppc460ex_set_dst_addr(ch_id, dma_chan->addr);
+ } else if (dma_chan->mode == DMA_MODE_WRITE) {
+ /* memory to peripheral */
+ ppc460ex_set_src_addr(ch_id, dma_chan->addr);
+ ppc460ex_set_dst_addr(ch_id, 0);
+ }
+#endif
+ /* for other xfer modes, the addresses are already set */
+ control = mfdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8));
+ control &= ~(DMA_TM_MASK | DMA_TD); /* clear all mode bits */
+ if (dma_chan->mode == DMA_MODE_MM) {
+ /* software initiated memory to memory */
+ control |= DMA_ETD_OUTPUT | DMA_TCE_ENABLE;
+ control |= DMA_MODE_MM;
+ if (dma_chan->dai) {
+ control |= DMA_DAI;
+ }
+ if (dma_chan->sai) {
+ control |= DMA_SAI;
+ }
+ }
+
+ mtdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8), control);
+ /*
+ * Clear the CS, TS, RI bits for the channel from DMASR. This
+ * has been observed to happen correctly only after the mode and
+ * ETD/DCE bits in DMACRx are set above. Must do this before
+ * enabling the channel.
+ */
+ mtdcr(DCR_DMA2P40_SR, status_bits[ch_id]);
+ /*
+ * For device-paced transfers, Terminal Count Enable apparently
+ * must be on, and this must be turned on after the mode, etc.
+ * bits are cleared above (at least on Redwood-6).
+ */
+
+ if ((dma_chan->mode == DMA_MODE_MM_DEVATDST) ||
+ (dma_chan->mode == DMA_MODE_MM_DEVATSRC))
+ control |= DMA_TCE_ENABLE;
+
+ /*
+ * Now enable the channel.
+ */
+
+ control |= (dma_chan->mode | DMA_CE_ENABLE);
+ control |= DMA_BEN;
+
+ mtdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8), control);
+ dma_chan->in_use = 1;
+ return 0;
+
+}
+
+
+void
+ppc460ex_disable_dma(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id)
+{
+ unsigned int control;
+ ppc460ex_plb_dma_ch_t *dma_chan = adev->chan[ch_id];
+
+	if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+		printk(KERN_ERR "disable_dma: bad channel: %d\n", ch_id);
+		return;
+	}
+
+	if (!dma_chan->in_use) {
+		printk(KERN_ERR "disable_dma: channel %d not in use\n", ch_id);
+		return;
+	}
+
+ control = mfdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8));
+ control &= ~DMA_CE_ENABLE;
+ mtdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8), control);
+
+ dma_chan->in_use = 0;
+}
+
+
+
+
+/*
+ * Clears the channel status bits
+ */
+int ppc460ex_clear_dma_status(unsigned int ch_id)
+{
+	if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+		printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+	mtdcr(DCR_DMA2P40_SR,
+	      ((u32)DMA_CH0_ERR | (u32)DMA_CS0 | (u32)DMA_TS0) >> ch_id);
+	return DMA_STATUS_GOOD;
+}
+
+
+/**
+ * ppc460ex_4chan_dma_eot_handler - end of transfer interrupt handler
+ */
+irqreturn_t ppc460ex_4chan_dma_eot_handler(int irq, void *data)
+{
+	unsigned int data_read = 0;
+	unsigned int try_cnt = 0;
+
+	do {
+		/* poll until the terminal count (TC) status bit is set */
+		data_read = mfdcr(DCR_DMA2P40_SR);
+		if (data_read & 0x00800000)	/* error bit set */
+			printk(KERN_ERR "test FAIL\n");
+	} while (((data_read & 0x80000000) != 0x80000000) && ++try_cnt <= 10);
+
+	data_read = mfdcr(DCR_DMA2P40_SR);
+	while (data_read & 0x00000800) {	/* wait while channel is busy */
+		data_read = mfdcr(DCR_DMA2P40_SR);
+		printk("%s: status for busy 0x%08x\n", __FUNCTION__, data_read);
+	}
+	mtdcr(DCR_DMA2P40_SR, 0xffffffff);
+
+	return IRQ_HANDLED;
+}
+
+
+
+static struct of_device_id dma_per_chan_match[] = {
+ {
+ .compatible = "amcc,dma-4channel",
+ },
+ {},
+};
+
+
+
+
+#if 0
+/*** test code ***/
+static int ppc460ex_dma_memcpy_self_test(ppc460ex_plb_dma_dev_t *device, unsigned int dma_ch_id)
+{
+ ppc460ex_plb_dma_ch_t p_init;
+ int res = 0, i;
+ unsigned int control;
+ phys_addr_t *src;
+ phys_addr_t *dest;
+
+ phys_addr_t *gap;
+
+ phys_addr_t dma_dest, dma_src;
+
+ src = kzalloc(TEST_SIZE, GFP_KERNEL);
+ if (!src)
+ return -ENOMEM;
+ gap = kzalloc(200, GFP_KERNEL);
+ if (!gap)
+ return -ENOMEM;
+
+
+
+ dest = kzalloc(TEST_SIZE, GFP_KERNEL);
+ if (!dest) {
+ kfree(src);
+ return -ENOMEM;
+ }
+
+ printk("src = 0x%08x\n", (unsigned int)src);
+ printk("gap = 0x%08x\n", (unsigned int)gap);
+ printk("dest = 0x%08x\n", (unsigned int)dest);
+
+ /* Fill in src buffer */
+ for (i = 0; i < TEST_SIZE; i++)
+ ((u8*)src)[i] = (u8)i;
+
+ printk("dump src\n");
+ DMA_HEXDUMP(src, TEST_SIZE);
+ DMA_HEXDUMP(dest, TEST_SIZE);
+#if 1
+ dma_src = dma_map_single(p_init.device->dev, src, TEST_SIZE,
+ DMA_TO_DEVICE);
+ dma_dest = dma_map_single(p_init.device->dev, dest, TEST_SIZE,
+ DMA_FROM_DEVICE);
+#endif
+ printk("%s:channel = %d chan 0x%08x\n", __FUNCTION__, device->chan[dma_ch_id]->chan_id,
+ (unsigned int)(device->chan));
+
+ p_init.polarity = 0;
+ p_init.pwidth = PW_32;
+ p_init.in_use = 0;
+ p_init.sai = 1;
+ p_init.dai = 1;
+ res = ppc460ex_init_dma_channel(device, dma_ch_id, &p_init);
+
+ if (res) {
+ printk("%32s: init_dma_channel return %d\n",
+ __FUNCTION__, res);
+ }
+ ppc460ex_clear_dma_status(dma_ch_id);
+
+ ppc460ex_set_src_addr(dma_ch_id, dma_src);
+ ppc460ex_set_dst_addr(dma_ch_id, dma_dest);
+
+ ppc460ex_set_dma_mode(device, dma_ch_id, DMA_MODE_MM);
+ ppc460ex_set_dma_count(device, dma_ch_id, TEST_SIZE);
+
+ res = ppc460ex_enable_dma_interrupt(device, dma_ch_id);
+ if (res) {
+ printk("%32s: en/disable_dma_interrupt\n",
+ __FUNCTION__);
+ }
+
+
+ if (dma_ch_id == 0)
+ control = mfdcr(DCR_DMA2P40_CR0);
+ else if (dma_ch_id == 1)
+ control = mfdcr(DCR_DMA2P40_CR1);
+
+
+ control &= ~(SET_DMA_BEN(1));
+ control &= ~(SET_DMA_PSC(3));
+ control &= ~(SET_DMA_PWC(0x3f));
+ control &= ~(SET_DMA_PHC(0x7));
+ control &= ~(SET_DMA_PL(1));
+
+
+
+ if (dma_ch_id == 0)
+ mtdcr(DCR_DMA2P40_CR0, control);
+ else if (dma_ch_id == 1)
+ mtdcr(DCR_DMA2P40_CR1, control);
+
+
+ ppc460ex_enable_dma(device, dma_ch_id);
+
+
+ if (memcmp(src, dest, TEST_SIZE)) {
+ printk("Self-test copy failed compare, disabling\n");
+ res = -ENODEV;
+ goto out;
+ }
+
+
+ return 0;
+
+ out: kfree(src);
+ kfree(dest);
+ return res;
+
+}
+
+
+
+static int test1(void)
+{
+ void *src, *dest;
+ void *src1, *dest1;
+ int i;
+ unsigned int chan;
+
+ src = kzalloc(TEST_SIZE, GFP_KERNEL);
+ if (!src)
+ return -ENOMEM;
+
+ dest = kzalloc(TEST_SIZE, GFP_KERNEL);
+ if (!dest) {
+ kfree(src);
+ return -ENOMEM;
+ }
+
+ src1 = kzalloc(TEST_SIZE, GFP_KERNEL);
+ if (!src1)
+ return -ENOMEM;
+
+ dest1 = kzalloc(TEST_SIZE, GFP_KERNEL);
+ if (!dest1) {
+ kfree(src1);
+ return -ENOMEM;
+ }
+
+ /* Fill in src buffer */
+ for (i = 0; i < TEST_SIZE; i++)
+ ((u8*)src)[i] = (u8)i;
+
+ /* Fill in src buffer */
+ for (i = 0; i < TEST_SIZE; i++)
+ ((u8*)src1)[i] = (u8)0xaa;
+
+#ifdef DEBUG_TEST
+ DMA_HEXDUMP(src, TEST_SIZE);
+ DMA_HEXDUMP(dest, TEST_SIZE);
+ DMA_HEXDUMP(src1, TEST_SIZE);
+ DMA_HEXDUMP(dest1, TEST_SIZE);
+#endif
+ chan = ppc460ex_get_dma_channel();
+
+#ifdef ENABLE_SGL
+ test_sgdma_memcpy(src, dest, src1, dest1, TEST_SIZE, chan);
+#endif
+ test_dma_memcpy(src, dest, TEST_SIZE, chan);
+
+
+ out: kfree(src);
+ kfree(dest);
+ kfree(src1);
+ kfree(dest1);
+
+ return 0;
+
+}
+#endif
+
+
+
+/*******************************************************************************
+ * Per Channel Probe Routine
+ *******************************************************************************
+ */
+int ppc460ex_dma_per_chan_probe(struct platform_device *ofdev)
+{
+	int ret = 0;
+	ppc460ex_plb_dma_ch_t *new_chan;
+	int err;
+
+ adev = dev_get_drvdata(ofdev->dev.parent);
+ BUG_ON(!adev);
+ /* create a device */
+	new_chan = kzalloc(sizeof(*new_chan), GFP_KERNEL);
+	if (!new_chan) {
+		printk(KERN_ERR "No free memory for allocating dma channels\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	err = of_address_to_resource(ofdev->dev.of_node, 0, &new_chan->reg);
+	if (err) {
+		printk(KERN_ERR "%s: can't get 'reg' property\n", __FUNCTION__);
+		ret = err;
+		goto err;
+	}
+ new_chan->device = adev;
+	new_chan->reg_base = ioremap(new_chan->reg.start,
+				     resource_size(&new_chan->reg));
+	printk("PPC460ex PLB DMA engine @0x%02X_%08X size %d\n",
+	       (u32)(new_chan->reg.start >> 32),
+	       (u32)new_chan->reg.start,
+	       (u32)resource_size(&new_chan->reg));
+
+	/* channel registers are 8 bytes apart, starting at offset 0x100 */
+	new_chan->chan_id = ((new_chan->reg.start - 0x100) & 0xfff) >> 3;
+	printk("new_chan->chan_id 0x%x\n", new_chan->chan_id);
+	adev->chan[new_chan->chan_id] = new_chan;
+
+ return 0;
+
+err:
+	kfree(new_chan);
+	return ret;
+
+}
+
+int ppc460ex_dma_4chan_probe(struct platform_device *ofdev)
+{
+	int ret = 0, irq = 0;
+ ppc460ex_plb_dma_ch_t *chan = NULL;
+ struct device_node *np = ofdev->dev.of_node;
+
+ /* create a device */
+ if ((adev = kzalloc(sizeof(*adev), GFP_KERNEL)) == NULL) {
+ ret = -ENOMEM;
+ goto err_adev_alloc;
+ }
+ adev->dev = &ofdev->dev;
+#if !defined(CONFIG_APM821xx)
+	ret = of_address_to_resource(np, 0, &adev->reg);
+	if (ret)
+		printk(KERN_ERR "Can't get %s property 'reg'\n", np->full_name);
+#endif
+ printk(KERN_INFO"Probing AMCC DMA driver\n");
+#if !defined(CONFIG_APM821xx)
+ adev->reg_base = ioremap(adev->reg.start, adev->reg.end - adev->reg.start + 1);
+#endif
+
+	irq = of_irq_to_resource(np, 0, NULL);
+	if (irq > 0) {
+ ret = request_irq(irq, ppc460ex_4chan_dma_eot_handler,
+ IRQF_DISABLED, "Peripheral DMA0-1", chan);
+ if (ret) {
+ ret = -EIO;
+ goto err_irq;
+ }
+	} else {
+		ret = -ENXIO;
+		goto err_irq;
+	}
+
+#if !defined(CONFIG_APM821xx)
+	printk("PPC4xx PLB DMA engine @0x%02X_%08X size %d IRQ %d\n",
+	       (u32)(adev->reg.start >> 32),
+	       (u32)adev->reg.start,
+	       (u32)(adev->reg.end - adev->reg.start + 1),
+	       irq);
+#else
+	printk("PPC4xx PLB DMA engine IRQ %d\n", irq);
+#endif
+	dev_set_drvdata(&ofdev->dev, adev);
+	of_platform_bus_probe(np, dma_per_chan_match, &ofdev->dev);
+
+	return 0;
+
+
+err_irq:
+	kfree(adev);
+err_adev_alloc:
+	return ret;
+}
+
+
+static struct of_device_id dma_4chan_match[] = {
+ {
+ .compatible = "amcc,dma",
+ },
+ {},
+};
+
+struct platform_driver ppc460ex_dma_4chan_driver = {
+ .driver = {
+ .name = "plb_dma",
+ .owner = THIS_MODULE,
+ .of_match_table = dma_4chan_match,
+ },
+ .probe = ppc460ex_dma_4chan_probe,
+};
+
+struct platform_driver ppc460ex_dma_per_chan_driver = {
+ .driver = {
+ .name = "dma-4channel",
+ .owner = THIS_MODULE,
+ .of_match_table = dma_per_chan_match,
+ },
+ .probe = ppc460ex_dma_per_chan_probe,
+};
+
+
+static int __init mod_init(void)
+{
+	return platform_driver_register(&ppc460ex_dma_4chan_driver);
+}
+
+static void __exit mod_exit(void)
+{
+ platform_driver_unregister(&ppc460ex_dma_4chan_driver);
+}
+
+static int __init ppc460ex_dma_per_chan_init(void)
+{
+	return platform_driver_register(&ppc460ex_dma_per_chan_driver);
+}
+
+static void __exit ppc460ex_dma_per_chan_exit(void)
+{
+ platform_driver_unregister(&ppc460ex_dma_per_chan_driver);
+}
+
+subsys_initcall(ppc460ex_dma_per_chan_init);
+subsys_initcall(mod_init);
+
+/*
+ * Both drivers are registered from subsys_initcall and are not expected
+ * to be unloaded, so no module_exit() is wired up.
+ */
+
+MODULE_DESCRIPTION("AMCC PPC460EX 4 channel Engine Driver");
+MODULE_LICENSE("GPL");
+
+EXPORT_SYMBOL_GPL(ppc460ex_get_dma_status);
+EXPORT_SYMBOL_GPL(ppc460ex_set_src_addr);
+EXPORT_SYMBOL_GPL(ppc460ex_set_dst_addr);
+EXPORT_SYMBOL_GPL(ppc460ex_set_dma_mode);
+EXPORT_SYMBOL_GPL(ppc460ex_set_dma_count);
+EXPORT_SYMBOL_GPL(ppc460ex_enable_dma_interrupt);
+EXPORT_SYMBOL_GPL(ppc460ex_init_dma_channel);
+EXPORT_SYMBOL_GPL(ppc460ex_enable_dma);
+EXPORT_SYMBOL_GPL(ppc460ex_disable_dma);
+EXPORT_SYMBOL_GPL(ppc460ex_clear_dma_status);
+EXPORT_SYMBOL_GPL(ppc460ex_get_dma_residue);
+EXPORT_SYMBOL_GPL(ppc460ex_disable_dma_interrupt);
+EXPORT_SYMBOL_GPL(ppc460ex_get_channel_config);
+EXPORT_SYMBOL_GPL(ppc460ex_set_channel_priority);
+EXPORT_SYMBOL_GPL(ppc460ex_get_peripheral_width);
+EXPORT_SYMBOL_GPL(ppc460ex_enable_burst);
+EXPORT_SYMBOL_GPL(ppc460ex_disable_burst);
+EXPORT_SYMBOL_GPL(ppc460ex_set_burst_size);
+
+/************************************************************************/
diff --git a/drivers/dma/ppc4xx/ppc460ex_4chan_dma.h b/drivers/dma/ppc4xx/ppc460ex_4chan_dma.h
new file mode 100644
index 00000000000..c9448f34de4
--- /dev/null
+++ b/drivers/dma/ppc4xx/ppc460ex_4chan_dma.h
@@ -0,0 +1,531 @@
+#ifndef PPC460EX_4CHAN_DMA_H
+#define PPC460EX_4CHAN_DMA_H
+
+#include <linux/types.h>
+
+#define DMA_HEXDUMP(b, l) \
+ print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, \
+ 16, 1, (b), (l), false);
+
+
+#define MAX_PPC460EX_DMA_CHANNELS 4
+
+
+#define DCR_DMA0_BASE 0x200
+#define DCR_DMA1_BASE 0x208
+#define DCR_DMA2_BASE 0x210
+#define DCR_DMA3_BASE 0x218
+#define DCR_DMASR_BASE 0x220
+
+/* DMA Registers */
+#define DCR_DMA2P40_CR0 (DCR_DMA0_BASE + 0x0) /* DMA Channel Control 0 */
+#define DCR_DMA2P40_CTC0 (DCR_DMA0_BASE + 0x1) /* DMA Count 0 */
+#define DCR_DMA2P40_SAH0 (DCR_DMA0_BASE + 0x2) /* DMA Src Addr High 0 */
+#define DCR_DMA2P40_SAL0 (DCR_DMA0_BASE + 0x3) /* DMA Src Addr Low 0 */
+#define DCR_DMA2P40_DAH0 (DCR_DMA0_BASE + 0x4) /* DMA Dest Addr High 0 */
+#define DCR_DMA2P40_DAL0 (DCR_DMA0_BASE + 0x5) /* DMA Dest Addr Low 0 */
+#define DCR_DMA2P40_SGH0 (DCR_DMA0_BASE + 0x6) /* DMA SG Desc Addr High 0 */
+#define DCR_DMA2P40_SGL0 (DCR_DMA0_BASE + 0x7) /* DMA SG Desc Addr Low 0 */
+
+#define DCR_DMA2P40_CR1 (DCR_DMA1_BASE + 0x0) /* DMA Channel Control 1 */
+#define DCR_DMA2P40_CTC1 (DCR_DMA1_BASE + 0x1) /* DMA Count 1 */
+#define DCR_DMA2P40_SAH1 (DCR_DMA1_BASE + 0x2) /* DMA Src Addr High 1 */
+#define DCR_DMA2P40_SAL1 (DCR_DMA1_BASE + 0x3) /* DMA Src Addr Low 1 */
+#define DCR_DMA2P40_DAH1 (DCR_DMA1_BASE + 0x4) /* DMA Dest Addr High 1 */
+#define DCR_DMA2P40_DAL1 (DCR_DMA1_BASE + 0x5) /* DMA Dest Addr Low 1 */
+#define DCR_DMA2P40_SGH1 (DCR_DMA1_BASE + 0x6) /* DMA SG Desc Addr High 1 */
+#define DCR_DMA2P40_SGL1 (DCR_DMA1_BASE + 0x7) /* DMA SG Desc Addr Low 1 */
+
+#define DCR_DMA2P40_CR2 (DCR_DMA2_BASE + 0x0) /* DMA Channel Control 2 */
+#define DCR_DMA2P40_CTC2 (DCR_DMA2_BASE + 0x1) /* DMA Count 2 */
+#define DCR_DMA2P40_SAH2 (DCR_DMA2_BASE + 0x2) /* DMA Src Addr High 2 */
+#define DCR_DMA2P40_SAL2 (DCR_DMA2_BASE + 0x3) /* DMA Src Addr Low 2 */
+#define DCR_DMA2P40_DAH2 (DCR_DMA2_BASE + 0x4) /* DMA Dest Addr High 2 */
+#define DCR_DMA2P40_DAL2 (DCR_DMA2_BASE + 0x5) /* DMA Dest Addr Low 2 */
+#define DCR_DMA2P40_SGH2 (DCR_DMA2_BASE + 0x6) /* DMA SG Desc Addr High 2 */
+#define DCR_DMA2P40_SGL2 (DCR_DMA2_BASE + 0x7) /* DMA SG Desc Addr Low 2 */
+
+#define DCR_DMA2P40_CR3 (DCR_DMA3_BASE + 0x0) /* DMA Channel Control 3 */
+#define DCR_DMA2P40_CTC3 (DCR_DMA3_BASE + 0x1) /* DMA Count 3 */
+#define DCR_DMA2P40_SAH3 (DCR_DMA3_BASE + 0x2) /* DMA Src Addr High 3 */
+#define DCR_DMA2P40_SAL3 (DCR_DMA3_BASE + 0x3) /* DMA Src Addr Low 3 */
+#define DCR_DMA2P40_DAH3 (DCR_DMA3_BASE + 0x4) /* DMA Dest Addr High 3 */
+#define DCR_DMA2P40_DAL3 (DCR_DMA3_BASE + 0x5) /* DMA Dest Addr Low 3 */
+#define DCR_DMA2P40_SGH3 (DCR_DMA3_BASE + 0x6) /* DMA SG Desc Addr High 3 */
+#define DCR_DMA2P40_SGL3 (DCR_DMA3_BASE + 0x7) /* DMA SG Desc Addr Low 3 */
+
+#define DCR_DMA2P40_SR (DCR_DMASR_BASE + 0x0) /* DMA Status Register */
+#define DCR_DMA2P40_SGC (DCR_DMASR_BASE + 0x3) /* DMA Scatter/Gather Command */
+#define DCR_DMA2P40_SLP (DCR_DMASR_BASE + 0x5) /* DMA Sleep Register */
+#define DCR_DMA2P40_POL (DCR_DMASR_BASE + 0x6) /* DMA Polarity Register */
+
+
+
+/*
+ * Function return status codes
+ * These values are used to indicate whether or not the function
+ * call was successful, or a bad/invalid parameter was passed.
+ */
+#define DMA_STATUS_GOOD 0
+#define DMA_STATUS_BAD_CHANNEL 1
+#define DMA_STATUS_BAD_HANDLE 2
+#define DMA_STATUS_BAD_MODE 3
+#define DMA_STATUS_NULL_POINTER 4
+#define DMA_STATUS_OUT_OF_MEMORY 5
+#define DMA_STATUS_SGL_LIST_EMPTY 6
+#define DMA_STATUS_GENERAL_ERROR 7
+#define DMA_STATUS_CHANNEL_NOTFREE 8
+
+#define DMA_CHANNEL_BUSY 0x80000000
+
+/*
+ * These indicate status as returned from the DMA Status Register.
+ */
+#define DMA_STATUS_NO_ERROR 0
+#define DMA_STATUS_CS 1 /* Count Status */
+#define DMA_STATUS_TS 2 /* Transfer Status */
+#define DMA_STATUS_DMA_ERROR 3 /* DMA Error Occurred */
+#define DMA_STATUS_DMA_BUSY 4 /* The channel is busy */
+
+/*
+ * DMA Channel Control Registers
+ */
+#ifdef CONFIG_44x
+#define PPC4xx_DMA_64BIT
+#define DMA_CR_OFFSET 1
+#else
+#define DMA_CR_OFFSET 0
+#endif
+
+#define DMA_CE_ENABLE (1<<31) /* DMA Channel Enable */
+#define SET_DMA_CE_ENABLE(x) (((x)&0x1)<<31)
+#define GET_DMA_CE_ENABLE(x) (((x)&DMA_CE_ENABLE)>>31)
+
+#define DMA_CIE_ENABLE (1<<30) /* DMA Channel Interrupt Enable */
+#define SET_DMA_CIE_ENABLE(x) (((x)&0x1)<<30)
+#define GET_DMA_CIE_ENABLE(x) (((x)&DMA_CIE_ENABLE)>>30)
+
+#define DMA_TD (1<<29)
+#define SET_DMA_TD(x) (((x)&0x1)<<29)
+#define GET_DMA_TD(x) (((x)&DMA_TD)>>29)
+
+#define DMA_PL (1<<28) /* Peripheral Location */
+#define SET_DMA_PL(x) (((x)&0x1)<<28)
+#define GET_DMA_PL(x) (((x)&DMA_PL)>>28)
+
+#define EXTERNAL_PERIPHERAL 0
+#define INTERNAL_PERIPHERAL 1
+
+#define SET_DMA_PW(x) (((x)&0x7)<<(26-DMA_CR_OFFSET)) /* Peripheral Width */
+#define DMA_PW_MASK SET_DMA_PW(7)
+#define PW_8 0
+#define PW_16 1
+#define PW_32 2
+#define PW_64 3
+#define PW_128 4
+
+
+#define GET_DMA_PW(x) (((x)&DMA_PW_MASK)>>(26-DMA_CR_OFFSET))
+
+#define DMA_DAI (1<<(25-DMA_CR_OFFSET)) /* Destination Address Increment */
+#define SET_DMA_DAI(x) (((x)&0x1)<<(25-DMA_CR_OFFSET))
+
+#define DMA_SAI (1<<(24-DMA_CR_OFFSET)) /* Source Address Increment */
+#define SET_DMA_SAI(x) (((x)&0x1)<<(24-DMA_CR_OFFSET))
+
+#define DMA_BEN (1<<(23-DMA_CR_OFFSET)) /* Buffer Enable */
+#define SET_DMA_BEN(x) (((x)&0x1)<<(23-DMA_CR_OFFSET))
+
+#define SET_DMA_TM(x) (((x)&0x3)<<(21-DMA_CR_OFFSET)) /* Transfer Mode */
+#define DMA_TM_MASK SET_DMA_TM(3)
+#define TM_PERIPHERAL 0 /* Peripheral */
+#define TM_RESERVED 1 /* Reserved */
+#define TM_S_MM 2 /* Memory to Memory */
+#define TM_D_MM 3 /* Device Paced Memory to Memory */
+#define GET_DMA_TM(x) (((x)&DMA_TM_MASK)>>(21-DMA_CR_OFFSET))
+
+#define SET_DMA_PSC(x) (((x)&0x3)<<(19-DMA_CR_OFFSET)) /* Peripheral Setup Cycles */
+#define DMA_PSC_MASK SET_DMA_PSC(3)
+#define GET_DMA_PSC(x) (((x)&DMA_PSC_MASK)>>(19-DMA_CR_OFFSET))
+
+#define SET_DMA_PWC(x) (((x)&0x3F)<<(13-DMA_CR_OFFSET)) /* Peripheral Wait Cycles */
+#define DMA_PWC_MASK SET_DMA_PWC(0x3F)
+#define GET_DMA_PWC(x) (((x)&DMA_PWC_MASK)>>(13-DMA_CR_OFFSET))
+
+#define SET_DMA_PHC(x) (((x)&0x7)<<(10-DMA_CR_OFFSET)) /* Peripheral Hold Cycles */
+#define DMA_PHC_MASK SET_DMA_PHC(0x7)
+#define GET_DMA_PHC(x) (((x)&DMA_PHC_MASK)>>(10-DMA_CR_OFFSET))
+
+#define DMA_ETD_OUTPUT (1<<(9-DMA_CR_OFFSET)) /* EOT pin is a TC output */
+#define SET_DMA_ETD(x) (((x)&0x1)<<(9-DMA_CR_OFFSET))
+
+#define DMA_TCE_ENABLE (1<<(8-DMA_CR_OFFSET))
+#define SET_DMA_TCE(x) (((x)&0x1)<<(8-DMA_CR_OFFSET))
+
+#define DMA_DEC (1<<(2)) /* Address Decrement */
+#define SET_DMA_DEC(x) (((x)&0x1)<<2)
+#define GET_DMA_DEC(x) (((x)&DMA_DEC)>>2)
+
+
+/*
+ * Transfer Modes
+ * These modes are defined in a way that makes it possible to
+ * simply "or" in the value in the control register.
+ */
+
+#define DMA_MODE_MM (SET_DMA_TM(TM_S_MM)) /* memory to memory */
+
+ /* Device-paced memory to memory, */
+ /* device is at source address */
+#define DMA_MODE_MM_DEVATSRC (DMA_TD | SET_DMA_TM(TM_D_MM))
+
+ /* Device-paced memory to memory, */
+ /* device is at destination address */
+#define DMA_MODE_MM_DEVATDST (SET_DMA_TM(TM_D_MM))
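+
+/*
+ * For example (illustrative sketch only, not taken from the driver code):
+ * a device-paced transfer with the device at the source address could
+ * build its control word as
+ *
+ *	control = DMA_CE_ENABLE | SET_DMA_PW(PW_32) | DMA_MODE_MM_DEVATSRC;
+ *
+ * and the mode can be read back out of it with GET_DMA_TM(control).
+ */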
+
+#define SGL_LIST_SIZE 16384
+#define DMA_PPC4xx_SIZE SGL_LIST_SIZE
+
+#define SET_DMA_PRIORITY(x) (((x)&0x3)<<(6-DMA_CR_OFFSET)) /* DMA Channel Priority */
+#define DMA_PRIORITY_MASK SET_DMA_PRIORITY(3)
+#define PRIORITY_LOW 0
+#define PRIORITY_MID_LOW 1
+#define PRIORITY_MID_HIGH 2
+#define PRIORITY_HIGH 3
+#define GET_DMA_PRIORITY(x) (((x)&DMA_PRIORITY_MASK)>>(6-DMA_CR_OFFSET))
+
+
+#define SET_DMA_PREFETCH(x) (((x)&0x3)<<(4-DMA_CR_OFFSET)) /* Memory Read Prefetch */
+#define DMA_PREFETCH_MASK SET_DMA_PREFETCH(3)
+#define PREFETCH_1 0 /* Prefetch 1 Double Word */
+#define PREFETCH_2 1
+#define PREFETCH_4 2
+#define GET_DMA_PREFETCH(x) (((x)&DMA_PREFETCH_MASK)>>(4-DMA_CR_OFFSET))
+
+#define DMA_PCE (1<<(3-DMA_CR_OFFSET)) /* Parity Check Enable */
+#define SET_DMA_PCE(x) (((x)&0x1)<<(3-DMA_CR_OFFSET))
+#define GET_DMA_PCE(x) (((x)&DMA_PCE)>>(3-DMA_CR_OFFSET))
+
+/*
+ * DMA Polarity Configuration Register
+ */
+#define DMAReq_ActiveLow(chan) (1<<(31-(chan*3)))
+#define DMAAck_ActiveLow(chan) (1<<(30-(chan*3)))
+#define EOT_ActiveLow(chan) (1<<(29-(chan*3))) /* End of Transfer */
+
+/*
+ * DMA Sleep Mode Register
+ */
+#define SLEEP_MODE_ENABLE (1<<21)
+
+/*
+ * DMA Status Register
+ */
+#define DMA_CS0 (1<<31) /* Terminal Count has been reached */
+#define DMA_CS1 (1<<30)
+#define DMA_CS2 (1<<29)
+#define DMA_CS3 (1<<28)
+
+#define DMA_TS0 (1<<27) /* End of Transfer has been requested */
+#define DMA_TS1 (1<<26)
+#define DMA_TS2 (1<<25)
+#define DMA_TS3 (1<<24)
+
+#define DMA_CH0_ERR		(1<<23)	/* DMA Channel 0 Error */
+#define DMA_CH1_ERR (1<<22)
+#define DMA_CH2_ERR (1<<21)
+#define DMA_CH3_ERR (1<<20)
+
+#define DMA_IN_DMA_REQ0 (1<<19) /* Internal DMA Request is pending */
+#define DMA_IN_DMA_REQ1 (1<<18)
+#define DMA_IN_DMA_REQ2 (1<<17)
+#define DMA_IN_DMA_REQ3 (1<<16)
+
+#define DMA_EXT_DMA_REQ0 (1<<15) /* External DMA Request is pending */
+#define DMA_EXT_DMA_REQ1 (1<<14)
+#define DMA_EXT_DMA_REQ2 (1<<13)
+#define DMA_EXT_DMA_REQ3 (1<<12)
+
+#define DMA_CH0_BUSY (1<<11) /* DMA Channel 0 Busy */
+#define DMA_CH1_BUSY (1<<10)
+#define DMA_CH2_BUSY (1<<9)
+#define DMA_CH3_BUSY (1<<8)
+
+#define DMA_SG0 (1<<7) /* DMA Channel 0 Scatter/Gather in progress */
+#define DMA_SG1 (1<<6)
+#define DMA_SG2 (1<<5)
+#define DMA_SG3 (1<<4)
+
+/* DMA Channel Count Register */
+#define DMA_CTC_TCIE (1<<29) /* Terminal Count Interrupt Enable */
+#define DMA_CTC_ETIE		(1<<28)	/* EOT Interrupt Enable */
+#define DMA_CTC_EIE (1<<27) /* Error Interrupt Enable */
+#define DMA_CTC_BTEN (1<<23) /* Burst Enable/Disable bit */
+#define DMA_CTC_BSIZ_MSK (3<<21) /* Mask of the Burst size bits */
+#define DMA_CTC_BSIZ_2 (0)
+#define DMA_CTC_BSIZ_4 (1<<21)
+#define DMA_CTC_BSIZ_8 (2<<21)
+#define DMA_CTC_BSIZ_16 (3<<21)
+#define DMA_CTC_TC_MASK 0xFFFFF
+
+/*
+ * DMA SG Command Register
+ */
+#define SSG_ENABLE(chan) (1<<(31-chan)) /* Start Scatter Gather */
+#define SSG_MASK_ENABLE(chan) (1<<(15-chan)) /* Enable writing to SSG0 bit */
+
+
+/*
+ * DMA Scatter/Gather Descriptor Bit fields
+ */
+#define SG_LINK (1<<31) /* Link */
+#define SG_TCI_ENABLE (1<<29) /* Enable Terminal Count Interrupt */
+#define SG_ETI_ENABLE (1<<28) /* Enable End of Transfer Interrupt */
+#define SG_ERI_ENABLE (1<<27) /* Enable Error Interrupt */
+#define SG_COUNT_MASK 0xFFFF /* Count Field */
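+
+/*
+ * Hypothetical sketch (not taken from the driver) of how these bits could
+ * combine in a descriptor's control_count word, assuming the count shares
+ * that word as SG_COUNT_MASK suggests: link to a successor and raise a
+ * terminal count interrupt after nbytes:
+ *
+ *	sgl->control_count = SG_LINK | SG_TCI_ENABLE |
+ *			     (nbytes & SG_COUNT_MASK);
+ */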
+
+#define SET_DMA_CONTROL \
+ (SET_DMA_CIE_ENABLE(p_init->int_enable) | /* interrupt enable */ \
+ SET_DMA_BEN(p_init->buffer_enable) | /* buffer enable */\
+ SET_DMA_ETD(p_init->etd_output) | /* end of transfer pin */ \
+ SET_DMA_TCE(p_init->tce_enable) | /* terminal count enable */ \
+ SET_DMA_PL(p_init->pl) | /* peripheral location */ \
+ SET_DMA_DAI(p_init->dai) | /* dest addr increment */ \
+ SET_DMA_SAI(p_init->sai) | /* src addr increment */ \
+ SET_DMA_PRIORITY(p_init->cp) | /* channel priority */ \
+ SET_DMA_PW(p_init->pwidth) | /* peripheral/bus width */ \
+ SET_DMA_PSC(p_init->psc) | /* peripheral setup cycles */ \
+ SET_DMA_PWC(p_init->pwc) | /* peripheral wait cycles */ \
+ SET_DMA_PHC(p_init->phc) | /* peripheral hold cycles */ \
+ SET_DMA_PREFETCH(p_init->pf) /* read prefetch */)
+
+#define GET_DMA_POLARITY(chan) (DMAReq_ActiveLow(chan) | DMAAck_ActiveLow(chan) | EOT_ActiveLow(chan))
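+
+/*
+ * Note that SET_DMA_CONTROL is not a hygienic macro: it expands against a
+ * channel pointer named p_init in the enclosing scope. A hypothetical use:
+ *
+ *	ppc460ex_plb_dma_ch_t *p_init = adev->chan[ch_id];
+ *	u32 control = SET_DMA_CONTROL;
+ */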
+
+
+/**
+ * struct ppc460ex_plb_dma_device - internal representation of a DMA device
+ * @reg_base: remapped base of the register block
+ * @dev: generic device backing this DMA device
+ * @reg: resource for the register block
+ * @id: HW DMA device selector
+ * @chan: the channels hanging off this device
+ * @queue: wait queue for blocking users
+ */
+typedef struct ppc460ex_plb_dma_device {
+	void __iomem *reg_base;
+	struct device *dev;
+	struct resource reg;		/* resource for the register block */
+	int id;
+	struct ppc460ex_plb_dma_chan *chan[MAX_PPC460EX_DMA_CHANNELS];
+	wait_queue_head_t queue;
+} ppc460ex_plb_dma_dev_t;
+
+typedef uint32_t sgl_handle_t;
+/**
+ * struct ppc460ex_plb_dma_chan - internal representation of a DMA channel
+ * @reg_base: remapped base of the channel's register block
+ * @device: parent device
+ * @cleanup_watchdog: timer used to catch stuck transfers
+ * @reg: resource for the channel's register block
+ * @chan_id: hardware channel index
+ * @irq_tasklet: bottom half for completion processing
+ * @phandle: scatter/gather list handle
+ * @in_use: set while the channel is claimed, clear when it is available
+ */
+typedef struct ppc460ex_plb_dma_chan {
+ void __iomem *reg_base;
+ struct ppc460ex_plb_dma_device *device;
+ struct timer_list cleanup_watchdog;
+ struct resource reg; /* Resource for register */
+ unsigned int chan_id;
+ struct tasklet_struct irq_tasklet;
+ sgl_handle_t *phandle;
+ unsigned short in_use; /* set when channel is being used, clr when
+ * available.
+ */
+ /*
+ * Valid polarity settings:
+ * DMAReq_ActiveLow(n)
+ * DMAAck_ActiveLow(n)
+ * EOT_ActiveLow(n)
+ *
+ * n is 0 to max dma chans
+ */
+ unsigned int polarity;
+
+ char buffer_enable; /* Boolean: buffer enable */
+ char tce_enable; /* Boolean: terminal count enable */
+ char etd_output; /* Boolean: eot pin is a tc output */
+ char pce; /* Boolean: parity check enable */
+
+ /*
+ * Peripheral location:
+ * INTERNAL_PERIPHERAL (UART0 on the 405GP)
+ * EXTERNAL_PERIPHERAL
+ */
+ char pl; /* internal/external peripheral */
+
+ /*
+ * Valid pwidth settings:
+ * PW_8
+ * PW_16
+ * PW_32
+ * PW_64
+ */
+ unsigned int pwidth;
+
+ char dai; /* Boolean: dst address increment */
+ char sai; /* Boolean: src address increment */
+
+ /*
+ * Valid psc settings: 0-3
+ */
+ unsigned int psc; /* Peripheral Setup Cycles */
+
+ /*
+ * Valid pwc settings:
+ * 0-63
+ */
+ unsigned int pwc; /* Peripheral Wait Cycles */
+
+ /*
+ * Valid phc settings:
+ * 0-7
+ */
+ unsigned int phc; /* Peripheral Hold Cycles */
+
+ /*
+ * Valid cp (channel priority) settings:
+ * PRIORITY_LOW
+ * PRIORITY_MID_LOW
+ * PRIORITY_MID_HIGH
+ * PRIORITY_HIGH
+ */
+ unsigned int cp; /* channel priority */
+
+ /*
+ * Valid pf (memory read prefetch) settings:
+ *
+ * PREFETCH_1
+ * PREFETCH_2
+ * PREFETCH_4
+ */
+ unsigned int pf; /* memory read prefetch */
+
+ /*
+ * Boolean: channel interrupt enable
+ * NOTE: for sgl transfers, only the last descriptor will be setup to
+ * interrupt.
+ */
+ char int_enable;
+
+ char shift; /* easy access to byte_count shift, based on */
+ /* the width of the channel */
+
+ uint32_t control; /* channel control word */
+
+	/* These variables are used only in single DMA transfers */
+ unsigned int mode; /* transfer mode */
+ phys_addr_t addr;
+ char ce; /* channel enable */
+ char int_on_final_sg;/* for scatter/gather - only interrupt on last sg */
+
+} ppc460ex_plb_dma_ch_t;
+
+/*
+ * PPC44x DMA implementations have a slightly different
+ * descriptor layout. Probably moved about due to the
+ * change to 64-bit addresses and link pointer. I don't
+ * know why they didn't just leave control_count after
+ * the dst_addr.
+ */
+#ifdef PPC4xx_DMA_64BIT
+typedef struct {
+ uint32_t control;
+ uint32_t control_count;
+ phys_addr_t src_addr;
+ phys_addr_t dst_addr;
+ phys_addr_t next;
+} ppc_sgl_t;
+#else
+typedef struct {
+ uint32_t control;
+ phys_addr_t src_addr;
+ phys_addr_t dst_addr;
+ uint32_t control_count;
+ uint32_t next;
+} ppc_sgl_t;
+#endif
+
+
+
+typedef struct {
+ unsigned int ch_id;
+	uint32_t control;	/* channel ctrl word; loaded from each descriptor */
+ uint32_t sgl_control; /* LK, TCI, ETI, and ERI bits in sgl descriptor */
+ dma_addr_t dma_addr; /* dma (physical) address of this list */
+	dma_addr_t dummy;	/* dummy pad to force quad-word alignment */
+ ppc_sgl_t *phead;
+ dma_addr_t phead_dma;
+ ppc_sgl_t *ptail;
+ dma_addr_t ptail_dma;
+} sgl_list_info_t;
+
+typedef struct {
+ phys_addr_t *src_addr;
+ phys_addr_t *dst_addr;
+ phys_addr_t dma_src_addr;
+ phys_addr_t dma_dst_addr;
+} pci_alloc_desc_t;
+
+#define PPC460EX_DMA_SGXFR_COMPLETE(id) (!((1 << (11-id)) & mfdcr(DCR_DMA2P40_SR)))
+#define PPC460EX_DMA_CHAN_BUSY(id) ( (1 << (11-id)) & mfdcr(DCR_DMA2P40_SR) )
+#define DMA_STATUS(id) (mfdcr(DCR_DMA2P40_SR))
+#define CLEAR_DMA_STATUS(id) (mtdcr(DCR_DMA2P40_SR, 0xFFFFFFFF))
+#define PPC460EX_DMA_SGSTAT_FREE(id) (!((1 << (7-id)) & mfdcr(DCR_DMA2P40_SR)) )
+#define PPC460EX_DMA_TC_REACHED(id) ( (1 << (31-id)) & mfdcr(DCR_DMA2P40_SR) )
+#define PPC460EX_DMA_CHAN_XFR_COMPLETE(id) ( (!PPC460EX_DMA_CHAN_BUSY(id)) && (PPC460EX_DMA_TC_REACHED(id)) )
+#define PPC460EX_DMA_CHAN_SGXFR_COMPLETE(id) ( (!PPC460EX_DMA_CHAN_BUSY(id)) && PPC460EX_DMA_SGSTAT_FREE(id) )
+#define PPC460EX_DMA_SG_IN_PROGRESS(id) ( (1 << (7-id)) | (1 << (11-id)) )
+#define PPC460EX_DMA_SG_OP_COMPLETE(id) ( (PPC460EX_DMA_SG_IN_PROGRESS(id) & DMA_STATUS(id) ) == 0)
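+
+/*
+ * Hypothetical usage sketch: with channel interrupts disabled, completion
+ * of a plain (non-SG) transfer on channel 0 could be busy-waited as
+ *
+ *	while (!PPC460EX_DMA_CHAN_XFR_COMPLETE(0))
+ *		cpu_relax();
+ *	CLEAR_DMA_STATUS(0);
+ */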
+
+extern ppc460ex_plb_dma_dev_t *adev;
+int ppc460ex_init_dma_channel(ppc460ex_plb_dma_dev_t *adev,
+ unsigned int ch_id,
+ ppc460ex_plb_dma_ch_t *p_init);
+
+int ppc460ex_set_src_addr(int ch_id, phys_addr_t src_addr);
+
+int ppc460ex_set_dst_addr(int ch_id, phys_addr_t dst_addr);
+
+int ppc460ex_set_dma_mode(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id, unsigned int mode);
+
+void ppc460ex_set_dma_count(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id, unsigned int count);
+
+int ppc460ex_enable_dma_interrupt(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id);
+
+int ppc460ex_enable_dma(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id);
+
+int ppc460ex_get_dma_channel(void);
+
+void ppc460ex_disable_dma(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id);
+
+int ppc460ex_clear_dma_status(unsigned int ch_id);
+
+#endif /* PPC460EX_4CHAN_DMA_H */
diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom_bam_dma.c
new file mode 100644
index 00000000000..82c923146e4
--- /dev/null
+++ b/drivers/dma/qcom_bam_dma.c
@@ -0,0 +1,1111 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * QCOM BAM DMA engine driver
+ *
+ * QCOM BAM DMA blocks are distributed amongst a number of the on-chip
+ * peripherals on the MSM 8x74. The configuration of each channel depends on
+ * the way it is hard wired to that specific peripheral. The peripheral
+ * device tree entries specify the configuration of each channel.
+ *
+ * The DMA controller requires the use of external memory for storage of the
+ * hardware descriptors for each channel. The descriptor FIFO is accessed as a
+ * circular buffer and operations are managed according to the offset within the
+ * FIFO. After pipe/channel reset, all of the pipe registers and internal state
+ * are back to defaults.
+ *
+ * During DMA operations, we write descriptors to the FIFO, being careful to
+ * handle wrapping and then write the last FIFO offset to that channel's
+ * P_EVNT_REG register to kick off the transaction. The P_SW_OFSTS register
+ * indicates the current FIFO offset that is being processed, so there is some
+ * indication of where the hardware is currently working.
+ */
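+
+/*
+ * In code terms, the kick-off described above reduces to a sketch like the
+ * following (names as used by bam_start_dma() further down; nr_written is a
+ * stand-in for the number of descriptors just written, and the modulo keeps
+ * the offset inside the ring):
+ *
+ *	bchan->tail = (bchan->tail + nr_written) % MAX_DESCRIPTORS;
+ *	writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
+ *		       bdev->regs + BAM_P_EVNT_REG(bchan->id));
+ */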
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+struct bam_desc_hw {
+ u32 addr; /* Buffer physical address */
+ u16 size; /* Buffer size in bytes */
+ u16 flags;
+};
+
+#define DESC_FLAG_INT BIT(15)
+#define DESC_FLAG_EOT BIT(14)
+#define DESC_FLAG_EOB BIT(13)
+
+struct bam_async_desc {
+ struct virt_dma_desc vd;
+
+ u32 num_desc;
+ u32 xfer_len;
+ struct bam_desc_hw *curr_desc;
+
+ enum dma_transfer_direction dir;
+ size_t length;
+ struct bam_desc_hw desc[0];
+};
+
+#define BAM_CTRL 0x0000
+#define BAM_REVISION 0x0004
+#define BAM_SW_REVISION 0x0080
+#define BAM_NUM_PIPES 0x003C
+#define BAM_TIMER 0x0040
+#define BAM_TIMER_CTRL 0x0044
+#define BAM_DESC_CNT_TRSHLD 0x0008
+#define BAM_IRQ_SRCS 0x000C
+#define BAM_IRQ_SRCS_MSK 0x0010
+#define BAM_IRQ_SRCS_UNMASKED 0x0030
+#define BAM_IRQ_STTS 0x0014
+#define BAM_IRQ_CLR 0x0018
+#define BAM_IRQ_EN 0x001C
+#define BAM_CNFG_BITS 0x007C
+#define BAM_IRQ_SRCS_EE(ee) (0x0800 + ((ee) * 0x80))
+#define BAM_IRQ_SRCS_MSK_EE(ee) (0x0804 + ((ee) * 0x80))
+#define BAM_P_CTRL(pipe) (0x1000 + ((pipe) * 0x1000))
+#define BAM_P_RST(pipe) (0x1004 + ((pipe) * 0x1000))
+#define BAM_P_HALT(pipe) (0x1008 + ((pipe) * 0x1000))
+#define BAM_P_IRQ_STTS(pipe) (0x1010 + ((pipe) * 0x1000))
+#define BAM_P_IRQ_CLR(pipe) (0x1014 + ((pipe) * 0x1000))
+#define BAM_P_IRQ_EN(pipe) (0x1018 + ((pipe) * 0x1000))
+#define BAM_P_EVNT_DEST_ADDR(pipe) (0x182C + ((pipe) * 0x1000))
+#define BAM_P_EVNT_REG(pipe) (0x1818 + ((pipe) * 0x1000))
+#define BAM_P_SW_OFSTS(pipe) (0x1800 + ((pipe) * 0x1000))
+#define BAM_P_DATA_FIFO_ADDR(pipe) (0x1824 + ((pipe) * 0x1000))
+#define BAM_P_DESC_FIFO_ADDR(pipe) (0x181C + ((pipe) * 0x1000))
+#define BAM_P_EVNT_TRSHLD(pipe) (0x1828 + ((pipe) * 0x1000))
+#define BAM_P_FIFO_SIZES(pipe) (0x1820 + ((pipe) * 0x1000))
+
+/* BAM CTRL */
+#define BAM_SW_RST BIT(0)
+#define BAM_EN BIT(1)
+#define BAM_EN_ACCUM BIT(4)
+#define BAM_TESTBUS_SEL_SHIFT 5
+#define BAM_TESTBUS_SEL_MASK 0x3F
+#define BAM_DESC_CACHE_SEL_SHIFT 13
+#define BAM_DESC_CACHE_SEL_MASK 0x3
+#define BAM_CACHED_DESC_STORE BIT(15)
+#define IBC_DISABLE BIT(16)
+
+/* BAM REVISION */
+#define REVISION_SHIFT 0
+#define REVISION_MASK 0xFF
+#define NUM_EES_SHIFT 8
+#define NUM_EES_MASK 0xF
+#define CE_BUFFER_SIZE BIT(13)
+#define AXI_ACTIVE BIT(14)
+#define USE_VMIDMT BIT(15)
+#define SECURED BIT(16)
+#define BAM_HAS_NO_BYPASS BIT(17)
+#define HIGH_FREQUENCY_BAM BIT(18)
+#define INACTIV_TMRS_EXST BIT(19)
+#define NUM_INACTIV_TMRS BIT(20)
+#define DESC_CACHE_DEPTH_SHIFT 21
+#define DESC_CACHE_DEPTH_1 (0 << DESC_CACHE_DEPTH_SHIFT)
+#define DESC_CACHE_DEPTH_2 (1 << DESC_CACHE_DEPTH_SHIFT)
+#define DESC_CACHE_DEPTH_3 (2 << DESC_CACHE_DEPTH_SHIFT)
+#define DESC_CACHE_DEPTH_4 (3 << DESC_CACHE_DEPTH_SHIFT)
+#define CMD_DESC_EN BIT(23)
+#define INACTIV_TMR_BASE_SHIFT 24
+#define INACTIV_TMR_BASE_MASK 0xFF
+
+/* BAM NUM PIPES */
+#define BAM_NUM_PIPES_SHIFT 0
+#define BAM_NUM_PIPES_MASK 0xFF
+#define PERIPH_NON_PIPE_GRP_SHIFT 16
+#define PERIPH_NON_PIP_GRP_MASK 0xFF
+#define BAM_NON_PIPE_GRP_SHIFT 24
+#define BAM_NON_PIPE_GRP_MASK 0xFF
+
+/* BAM CNFG BITS */
+#define BAM_PIPE_CNFG BIT(2)
+#define BAM_FULL_PIPE BIT(11)
+#define BAM_NO_EXT_P_RST BIT(12)
+#define BAM_IBC_DISABLE BIT(13)
+#define BAM_SB_CLK_REQ BIT(14)
+#define BAM_PSM_CSW_REQ BIT(15)
+#define BAM_PSM_P_RES BIT(16)
+#define BAM_AU_P_RES BIT(17)
+#define BAM_SI_P_RES BIT(18)
+#define BAM_WB_P_RES BIT(19)
+#define BAM_WB_BLK_CSW BIT(20)
+#define BAM_WB_CSW_ACK_IDL BIT(21)
+#define BAM_WB_RETR_SVPNT BIT(22)
+#define BAM_WB_DSC_AVL_P_RST BIT(23)
+#define BAM_REG_P_EN BIT(24)
+#define BAM_PSM_P_HD_DATA BIT(25)
+#define BAM_AU_ACCUMED BIT(26)
+#define BAM_CMD_ENABLE BIT(27)
+
+#define BAM_CNFG_BITS_DEFAULT (BAM_PIPE_CNFG | \
+ BAM_NO_EXT_P_RST | \
+ BAM_IBC_DISABLE | \
+ BAM_SB_CLK_REQ | \
+ BAM_PSM_CSW_REQ | \
+ BAM_PSM_P_RES | \
+ BAM_AU_P_RES | \
+ BAM_SI_P_RES | \
+ BAM_WB_P_RES | \
+ BAM_WB_BLK_CSW | \
+ BAM_WB_CSW_ACK_IDL | \
+ BAM_WB_RETR_SVPNT | \
+ BAM_WB_DSC_AVL_P_RST | \
+ BAM_REG_P_EN | \
+ BAM_PSM_P_HD_DATA | \
+ BAM_AU_ACCUMED | \
+ BAM_CMD_ENABLE)
+
+/* PIPE CTRL */
+#define P_EN BIT(1)
+#define P_DIRECTION BIT(3)
+#define P_SYS_STRM BIT(4)
+#define P_SYS_MODE BIT(5)
+#define P_AUTO_EOB BIT(6)
+#define P_AUTO_EOB_SEL_SHIFT 7
+#define P_AUTO_EOB_SEL_512 (0 << P_AUTO_EOB_SEL_SHIFT)
+#define P_AUTO_EOB_SEL_256 (1 << P_AUTO_EOB_SEL_SHIFT)
+#define P_AUTO_EOB_SEL_128 (2 << P_AUTO_EOB_SEL_SHIFT)
+#define P_AUTO_EOB_SEL_64 (3 << P_AUTO_EOB_SEL_SHIFT)
+#define P_PREFETCH_LIMIT_SHIFT 9
+#define P_PREFETCH_LIMIT_32 (0 << P_PREFETCH_LIMIT_SHIFT)
+#define P_PREFETCH_LIMIT_16 (1 << P_PREFETCH_LIMIT_SHIFT)
+#define P_PREFETCH_LIMIT_4 (2 << P_PREFETCH_LIMIT_SHIFT)
+#define P_WRITE_NWD BIT(11)
+#define P_LOCK_GROUP_SHIFT 16
+#define P_LOCK_GROUP_MASK 0x1F
+
+/* BAM_DESC_CNT_TRSHLD */
+#define CNT_TRSHLD 0xffff
+#define DEFAULT_CNT_THRSHLD 0x4
+
+/* BAM_IRQ_SRCS */
+#define BAM_IRQ BIT(31)
+#define P_IRQ 0x7fffffff
+
+/* BAM_IRQ_SRCS_MSK */
+#define BAM_IRQ_MSK BAM_IRQ
+#define P_IRQ_MSK P_IRQ
+
+/* BAM_IRQ_STTS */
+#define BAM_TIMER_IRQ BIT(4)
+#define BAM_EMPTY_IRQ BIT(3)
+#define BAM_ERROR_IRQ BIT(2)
+#define BAM_HRESP_ERR_IRQ BIT(1)
+
+/* BAM_IRQ_CLR */
+#define BAM_TIMER_CLR BIT(4)
+#define BAM_EMPTY_CLR BIT(3)
+#define BAM_ERROR_CLR BIT(2)
+#define BAM_HRESP_ERR_CLR BIT(1)
+
+/* BAM_IRQ_EN */
+#define BAM_TIMER_EN BIT(4)
+#define BAM_EMPTY_EN BIT(3)
+#define BAM_ERROR_EN BIT(2)
+#define BAM_HRESP_ERR_EN BIT(1)
+
+/* BAM_P_IRQ_EN */
+#define P_PRCSD_DESC_EN BIT(0)
+#define P_TIMER_EN BIT(1)
+#define P_WAKE_EN BIT(2)
+#define P_OUT_OF_DESC_EN BIT(3)
+#define P_ERR_EN BIT(4)
+#define P_TRNSFR_END_EN BIT(5)
+#define P_DEFAULT_IRQS_EN (P_PRCSD_DESC_EN | P_ERR_EN | P_TRNSFR_END_EN)
+
+/* BAM_P_SW_OFSTS */
+#define P_SW_OFSTS_MASK 0xffff
+
+#define BAM_DESC_FIFO_SIZE SZ_32K
+#define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1)
+#define BAM_MAX_DATA_SIZE (SZ_32K - 8)
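+
+/*
+ * The "- 1" in MAX_DESCRIPTORS holds back one descriptor's worth of space
+ * so the FIFO base can be realigned to sizeof(struct bam_desc_hw) after
+ * allocation; see the ALIGN() in bam_chan_init_hw() below.
+ */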
+
+struct bam_chan {
+ struct virt_dma_chan vc;
+
+ struct bam_device *bdev;
+
+ /* configuration from device tree */
+ u32 id;
+
+ struct bam_async_desc *curr_txd; /* current running dma */
+
+ /* runtime configuration */
+ struct dma_slave_config slave;
+
+ /* fifo storage */
+ struct bam_desc_hw *fifo_virt;
+ dma_addr_t fifo_phys;
+
+ /* fifo markers */
+ unsigned short head; /* start of active descriptor entries */
+ unsigned short tail; /* end of active descriptor entries */
+
+ unsigned int initialized; /* is the channel hw initialized? */
+ unsigned int paused; /* is the channel paused? */
+ unsigned int reconfigure; /* new slave config? */
+
+ struct list_head node;
+};
+
+static inline struct bam_chan *to_bam_chan(struct dma_chan *common)
+{
+ return container_of(common, struct bam_chan, vc.chan);
+}
+
+struct bam_device {
+ void __iomem *regs;
+ struct device *dev;
+ struct dma_device common;
+ struct device_dma_parameters dma_parms;
+ struct bam_chan *channels;
+ u32 num_channels;
+
+ /* execution environment ID, from DT */
+ u32 ee;
+
+ struct clk *bamclk;
+ int irq;
+
+ /* dma start transaction tasklet */
+ struct tasklet_struct task;
+};
+
+/**
+ * bam_reset_channel - Reset individual BAM DMA channel
+ * @bchan: bam channel
+ *
+ * This function resets a specific BAM channel
+ */
+static void bam_reset_channel(struct bam_chan *bchan)
+{
+ struct bam_device *bdev = bchan->bdev;
+
+ lockdep_assert_held(&bchan->vc.lock);
+
+ /* reset channel */
+ writel_relaxed(1, bdev->regs + BAM_P_RST(bchan->id));
+ writel_relaxed(0, bdev->regs + BAM_P_RST(bchan->id));
+
+ /* don't allow cpu to reorder BAM register accesses done after this */
+ wmb();
+
+ /* make sure hw is initialized when channel is used the first time */
+ bchan->initialized = 0;
+}
+
+/**
+ * bam_chan_init_hw - Initialize channel hardware
+ * @bchan: bam channel
+ * @dir: DMA transfer direction
+ *
+ * This function resets and initializes the BAM channel
+ */
+static void bam_chan_init_hw(struct bam_chan *bchan,
+ enum dma_transfer_direction dir)
+{
+ struct bam_device *bdev = bchan->bdev;
+ u32 val;
+
+ /* Reset the channel to clear internal state of the FIFO */
+ bam_reset_channel(bchan);
+
+ /*
+ * write out 8 byte aligned address. We have enough space for this
+ * because we allocated 1 more descriptor (8 bytes) than we can use
+ */
+ writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)),
+ bdev->regs + BAM_P_DESC_FIFO_ADDR(bchan->id));
+ writel_relaxed(BAM_DESC_FIFO_SIZE, bdev->regs +
+ BAM_P_FIFO_SIZES(bchan->id));
+
+ /* enable the per pipe interrupts, enable EOT, ERR, and INT irqs */
+ writel_relaxed(P_DEFAULT_IRQS_EN, bdev->regs + BAM_P_IRQ_EN(bchan->id));
+
+ /* unmask the specific pipe and EE combo */
+ val = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+ val |= BIT(bchan->id);
+ writel_relaxed(val, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+
+ /* don't allow cpu to reorder the channel enable done below */
+ wmb();
+
+ /* set fixed direction and mode, then enable channel */
+ val = P_EN | P_SYS_MODE;
+ if (dir == DMA_DEV_TO_MEM)
+ val |= P_DIRECTION;
+
+ writel_relaxed(val, bdev->regs + BAM_P_CTRL(bchan->id));
+
+ bchan->initialized = 1;
+
+ /* init FIFO pointers */
+ bchan->head = 0;
+ bchan->tail = 0;
+}
+
+/**
+ * bam_alloc_chan - Allocate channel resources for DMA channel.
+ * @chan: specified channel
+ *
+ * This function allocates the FIFO descriptor memory
+ */
+static int bam_alloc_chan(struct dma_chan *chan)
+{
+ struct bam_chan *bchan = to_bam_chan(chan);
+ struct bam_device *bdev = bchan->bdev;
+
+ if (bchan->fifo_virt)
+ return 0;
+
+ /* allocate FIFO descriptor space, but only if necessary */
+ bchan->fifo_virt = dma_alloc_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
+ &bchan->fifo_phys, GFP_KERNEL);
+
+ if (!bchan->fifo_virt) {
+ dev_err(bdev->dev, "Failed to allocate desc fifo\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * bam_free_chan - Frees dma resources associated with specific channel
+ * @chan: specified channel
+ *
+ * Free the allocated fifo descriptor memory and channel resources
+ *
+ */
+static void bam_free_chan(struct dma_chan *chan)
+{
+ struct bam_chan *bchan = to_bam_chan(chan);
+ struct bam_device *bdev = bchan->bdev;
+ u32 val;
+ unsigned long flags;
+
+ vchan_free_chan_resources(to_virt_chan(chan));
+
+ if (bchan->curr_txd) {
+ dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
+ return;
+ }
+
+ spin_lock_irqsave(&bchan->vc.lock, flags);
+ bam_reset_channel(bchan);
+ spin_unlock_irqrestore(&bchan->vc.lock, flags);
+
+ dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
+ bchan->fifo_phys);
+ bchan->fifo_virt = NULL;
+
+ /* mask irq for pipe/channel */
+ val = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+ val &= ~BIT(bchan->id);
+ writel_relaxed(val, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+
+ /* disable irq */
+ writel_relaxed(0, bdev->regs + BAM_P_IRQ_EN(bchan->id));
+}
+
+/**
+ * bam_slave_config - set slave configuration for channel
+ * @bchan: bam dma channel
+ * @cfg: slave configuration
+ *
+ * Sets slave configuration for channel
+ *
+ */
+static void bam_slave_config(struct bam_chan *bchan,
+ struct dma_slave_config *cfg)
+{
+ memcpy(&bchan->slave, cfg, sizeof(*cfg));
+ bchan->reconfigure = 1;
+}
+
+/**
+ * bam_prep_slave_sg - Prep slave sg transaction
+ *
+ * @chan: dma channel
+ * @sgl: scatter gather list
+ * @sg_len: length of sg
+ * @direction: DMA transfer direction
+ * @flags: DMA flags
+ * @context: transfer context (unused)
+ */
+static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
+ struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction direction, unsigned long flags,
+ void *context)
+{
+ struct bam_chan *bchan = to_bam_chan(chan);
+ struct bam_device *bdev = bchan->bdev;
+ struct bam_async_desc *async_desc;
+ struct scatterlist *sg;
+ u32 i;
+ struct bam_desc_hw *desc;
+ unsigned int num_alloc = 0;
+
+
+ if (!is_slave_direction(direction)) {
+ dev_err(bdev->dev, "invalid dma direction\n");
+ return NULL;
+ }
+
+ /* calculate number of required entries */
+ for_each_sg(sgl, sg, sg_len, i)
+ num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_MAX_DATA_SIZE);
+
+	/* allocate enough room to accommodate the number of entries */
+ async_desc = kzalloc(sizeof(*async_desc) +
+ (num_alloc * sizeof(struct bam_desc_hw)), GFP_NOWAIT);
+
+ if (!async_desc)
+ goto err_out;
+
+ async_desc->num_desc = num_alloc;
+ async_desc->curr_desc = async_desc->desc;
+ async_desc->dir = direction;
+
+ /* fill in temporary descriptors */
+ desc = async_desc->desc;
+ for_each_sg(sgl, sg, sg_len, i) {
+ unsigned int remainder = sg_dma_len(sg);
+ unsigned int curr_offset = 0;
+
+ do {
+ desc->addr = sg_dma_address(sg) + curr_offset;
+
+ if (remainder > BAM_MAX_DATA_SIZE) {
+ desc->size = BAM_MAX_DATA_SIZE;
+ remainder -= BAM_MAX_DATA_SIZE;
+ curr_offset += BAM_MAX_DATA_SIZE;
+ } else {
+ desc->size = remainder;
+ remainder = 0;
+ }
+
+ async_desc->length += desc->size;
+ desc++;
+ } while (remainder > 0);
+ }
+
+ return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
+
+err_out:
+ kfree(async_desc);
+ return NULL;
+}
+
+/**
+ * bam_dma_terminate_all - terminate all transactions on a channel
+ * @bchan: bam dma channel
+ *
+ * Dequeues and frees all transactions
+ * No callbacks are done
+ *
+ */
+static void bam_dma_terminate_all(struct bam_chan *bchan)
+{
+ unsigned long flag;
+ LIST_HEAD(head);
+
+ /* remove all transactions, including active transaction */
+ spin_lock_irqsave(&bchan->vc.lock, flag);
+ if (bchan->curr_txd) {
+ list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued);
+ bchan->curr_txd = NULL;
+ }
+
+ vchan_get_all_descriptors(&bchan->vc, &head);
+ spin_unlock_irqrestore(&bchan->vc.lock, flag);
+
+ vchan_dma_desc_free_list(&bchan->vc, &head);
+}
+
+/**
+ * bam_control - DMA device control
+ * @chan: dma channel
+ * @cmd: control cmd
+ * @arg: cmd argument
+ *
+ * Perform DMA control command
+ *
+ */
+static int bam_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct bam_chan *bchan = to_bam_chan(chan);
+ struct bam_device *bdev = bchan->bdev;
+ int ret = 0;
+ unsigned long flag;
+
+ switch (cmd) {
+ case DMA_PAUSE:
+ spin_lock_irqsave(&bchan->vc.lock, flag);
+ writel_relaxed(1, bdev->regs + BAM_P_HALT(bchan->id));
+ bchan->paused = 1;
+ spin_unlock_irqrestore(&bchan->vc.lock, flag);
+ break;
+
+ case DMA_RESUME:
+ spin_lock_irqsave(&bchan->vc.lock, flag);
+ writel_relaxed(0, bdev->regs + BAM_P_HALT(bchan->id));
+ bchan->paused = 0;
+ spin_unlock_irqrestore(&bchan->vc.lock, flag);
+ break;
+
+ case DMA_TERMINATE_ALL:
+ bam_dma_terminate_all(bchan);
+ break;
+
+ case DMA_SLAVE_CONFIG:
+ spin_lock_irqsave(&bchan->vc.lock, flag);
+ bam_slave_config(bchan, (struct dma_slave_config *)arg);
+ spin_unlock_irqrestore(&bchan->vc.lock, flag);
+ break;
+
+ default:
+ ret = -ENXIO;
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * process_channel_irqs - processes the channel interrupts
+ * @bdev: bam controller
+ *
+ * This function processes the channel interrupts
+ *
+ */
+static u32 process_channel_irqs(struct bam_device *bdev)
+{
+ u32 i, srcs, pipe_stts;
+ unsigned long flags;
+ struct bam_async_desc *async_desc;
+
+ srcs = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_EE(bdev->ee));
+
+ /* return early if no pipe/channel interrupts are present */
+ if (!(srcs & P_IRQ))
+ return srcs;
+
+ for (i = 0; i < bdev->num_channels; i++) {
+ struct bam_chan *bchan = &bdev->channels[i];
+
+ if (!(srcs & BIT(i)))
+ continue;
+
+ /* clear pipe irq */
+ pipe_stts = readl_relaxed(bdev->regs +
+ BAM_P_IRQ_STTS(i));
+
+ writel_relaxed(pipe_stts, bdev->regs +
+ BAM_P_IRQ_CLR(i));
+
+ spin_lock_irqsave(&bchan->vc.lock, flags);
+ async_desc = bchan->curr_txd;
+
+ if (async_desc) {
+ async_desc->num_desc -= async_desc->xfer_len;
+ async_desc->curr_desc += async_desc->xfer_len;
+ bchan->curr_txd = NULL;
+
+ /* manage FIFO */
+ bchan->head += async_desc->xfer_len;
+ bchan->head %= MAX_DESCRIPTORS;
+
+ /*
+ * if complete, process cookie. Otherwise
+ * push back to front of desc_issued so that
+ * it gets restarted by the tasklet
+ */
+ if (!async_desc->num_desc)
+ vchan_cookie_complete(&async_desc->vd);
+ else
+ list_add(&async_desc->vd.node,
+ &bchan->vc.desc_issued);
+ }
+
+ spin_unlock_irqrestore(&bchan->vc.lock, flags);
+ }
+
+ return srcs;
+}
+
+/**
+ * bam_dma_irq - irq handler for bam controller
+ * @irq: IRQ of interrupt
+ * @data: callback data
+ *
+ * IRQ handler for the bam controller
+ */
+static irqreturn_t bam_dma_irq(int irq, void *data)
+{
+ struct bam_device *bdev = data;
+ u32 clr_mask = 0, srcs = 0;
+
+ srcs |= process_channel_irqs(bdev);
+
+ /* kick off tasklet to start next dma transfer */
+ if (srcs & P_IRQ)
+ tasklet_schedule(&bdev->task);
+
+ if (srcs & BAM_IRQ)
+ clr_mask = readl_relaxed(bdev->regs + BAM_IRQ_STTS);
+
+ /* don't allow reorder of the various accesses to the BAM registers */
+ mb();
+
+ writel_relaxed(clr_mask, bdev->regs + BAM_IRQ_CLR);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * bam_tx_status - returns status of transaction
+ * @chan: dma channel
+ * @cookie: transaction cookie
+ * @txstate: DMA transaction state
+ *
+ * Return status of dma transaction
+ */
+static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct bam_chan *bchan = to_bam_chan(chan);
+ struct virt_dma_desc *vd;
+ int ret;
+ size_t residue = 0;
+ unsigned int i;
+ unsigned long flags;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_COMPLETE)
+ return ret;
+
+ if (!txstate)
+ return bchan->paused ? DMA_PAUSED : ret;
+
+ spin_lock_irqsave(&bchan->vc.lock, flags);
+ vd = vchan_find_desc(&bchan->vc, cookie);
+ if (vd)
+ residue = container_of(vd, struct bam_async_desc, vd)->length;
+ else if (bchan->curr_txd && bchan->curr_txd->vd.tx.cookie == cookie)
+ for (i = 0; i < bchan->curr_txd->num_desc; i++)
+ residue += bchan->curr_txd->curr_desc[i].size;
+
+ spin_unlock_irqrestore(&bchan->vc.lock, flags);
+
+ dma_set_residue(txstate, residue);
+
+ if (ret == DMA_IN_PROGRESS && bchan->paused)
+ ret = DMA_PAUSED;
+
+ return ret;
+}
+
+/**
+ * bam_apply_new_config - apply a new slave configuration to the channel
+ * @bchan: bam dma channel
+ * @dir: DMA direction
+ */
+static void bam_apply_new_config(struct bam_chan *bchan,
+ enum dma_transfer_direction dir)
+{
+ struct bam_device *bdev = bchan->bdev;
+ u32 maxburst;
+
+ if (dir == DMA_DEV_TO_MEM)
+ maxburst = bchan->slave.src_maxburst;
+ else
+ maxburst = bchan->slave.dst_maxburst;
+
+ writel_relaxed(maxburst, bdev->regs + BAM_DESC_CNT_TRSHLD);
+
+ bchan->reconfigure = 0;
+}
+
+/**
+ * bam_start_dma - start next transaction
+ * @bchan: bam dma channel
+ */
+static void bam_start_dma(struct bam_chan *bchan)
+{
+ struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc);
+ struct bam_device *bdev = bchan->bdev;
+ struct bam_async_desc *async_desc;
+ struct bam_desc_hw *desc;
+ struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
+ sizeof(struct bam_desc_hw));
+
+ lockdep_assert_held(&bchan->vc.lock);
+
+ if (!vd)
+ return;
+
+ list_del(&vd->node);
+
+ async_desc = container_of(vd, struct bam_async_desc, vd);
+ bchan->curr_txd = async_desc;
+
+ /* on first use, initialize the channel hardware */
+ if (!bchan->initialized)
+ bam_chan_init_hw(bchan, async_desc->dir);
+
+ /* apply new slave config changes, if necessary */
+ if (bchan->reconfigure)
+ bam_apply_new_config(bchan, async_desc->dir);
+
+ desc = bchan->curr_txd->curr_desc;
+
+ if (async_desc->num_desc > MAX_DESCRIPTORS)
+ async_desc->xfer_len = MAX_DESCRIPTORS;
+ else
+ async_desc->xfer_len = async_desc->num_desc;
+
+ /* set INT on last descriptor */
+ desc[async_desc->xfer_len - 1].flags |= DESC_FLAG_INT;
+
+ if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
+ u32 partial = MAX_DESCRIPTORS - bchan->tail;
+
+ memcpy(&fifo[bchan->tail], desc,
+ partial * sizeof(struct bam_desc_hw));
+ memcpy(fifo, &desc[partial], (async_desc->xfer_len - partial) *
+ sizeof(struct bam_desc_hw));
+ } else {
+ memcpy(&fifo[bchan->tail], desc,
+ async_desc->xfer_len * sizeof(struct bam_desc_hw));
+ }
+
+ bchan->tail += async_desc->xfer_len;
+ bchan->tail %= MAX_DESCRIPTORS;
+
+ /* ensure descriptor writes and dma start not reordered */
+ wmb();
+ writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
+ bdev->regs + BAM_P_EVNT_REG(bchan->id));
+}
+
+/**
+ * dma_tasklet - DMA IRQ tasklet
+ * @data: tasklet argument (bam controller structure)
+ *
+ * Walks the channels and kicks off the next queued transaction on each
+ * idle one
+ */
+static void dma_tasklet(unsigned long data)
+{
+ struct bam_device *bdev = (struct bam_device *)data;
+ struct bam_chan *bchan;
+ unsigned long flags;
+ unsigned int i;
+
+ /* go through the channels and kick off transactions */
+ for (i = 0; i < bdev->num_channels; i++) {
+ bchan = &bdev->channels[i];
+ spin_lock_irqsave(&bchan->vc.lock, flags);
+
+ if (!list_empty(&bchan->vc.desc_issued) && !bchan->curr_txd)
+ bam_start_dma(bchan);
+ spin_unlock_irqrestore(&bchan->vc.lock, flags);
+ }
+}
+
+/**
+ * bam_issue_pending - starts pending transactions
+ * @chan: dma channel
+ *
+ * Starts the next queued transaction directly if the channel is idle
+ */
+static void bam_issue_pending(struct dma_chan *chan)
+{
+ struct bam_chan *bchan = to_bam_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bchan->vc.lock, flags);
+
+ /* if work pending and idle, start a transaction */
+ if (vchan_issue_pending(&bchan->vc) && !bchan->curr_txd)
+ bam_start_dma(bchan);
+
+ spin_unlock_irqrestore(&bchan->vc.lock, flags);
+}
+
+/**
+ * bam_dma_free_desc - free descriptor memory
+ * @vd: virtual descriptor
+ *
+ */
+static void bam_dma_free_desc(struct virt_dma_desc *vd)
+{
+ struct bam_async_desc *async_desc = container_of(vd,
+ struct bam_async_desc, vd);
+
+ kfree(async_desc);
+}
+
+static struct dma_chan *bam_dma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *of)
+{
+ struct bam_device *bdev = container_of(of->of_dma_data,
+ struct bam_device, common);
+ unsigned int request;
+
+ if (dma_spec->args_count != 1)
+ return NULL;
+
+ request = dma_spec->args[0];
+ if (request >= bdev->num_channels)
+ return NULL;
+
+ return dma_get_slave_channel(&(bdev->channels[request].vc.chan));
+}
+
+/**
+ * bam_init - initialization helper for global bam registers
+ * @bdev: bam device
+ *
+ * Initialization helper for global bam registers
+ */
+static int bam_init(struct bam_device *bdev)
+{
+ u32 val;
+
+ /* read revision and configuration information */
+ val = readl_relaxed(bdev->regs + BAM_REVISION) >> NUM_EES_SHIFT;
+ val &= NUM_EES_MASK;
+
+ /* check that configured EE is within range */
+ if (bdev->ee >= val)
+ return -EINVAL;
+
+ val = readl_relaxed(bdev->regs + BAM_NUM_PIPES);
+ bdev->num_channels = val & BAM_NUM_PIPES_MASK;
+
+ /* s/w reset bam */
+ /* after reset all pipes are disabled and idle */
+ val = readl_relaxed(bdev->regs + BAM_CTRL);
+ val |= BAM_SW_RST;
+ writel_relaxed(val, bdev->regs + BAM_CTRL);
+ val &= ~BAM_SW_RST;
+ writel_relaxed(val, bdev->regs + BAM_CTRL);
+
+ /* make sure previous stores are visible before enabling BAM */
+ wmb();
+
+ /* enable bam */
+ val |= BAM_EN;
+ writel_relaxed(val, bdev->regs + BAM_CTRL);
+
+	/* set descriptor threshold, start with 4 bytes */
+ writel_relaxed(DEFAULT_CNT_THRSHLD, bdev->regs + BAM_DESC_CNT_TRSHLD);
+
+ /* Enable default set of h/w workarounds, ie all except BAM_FULL_PIPE */
+ writel_relaxed(BAM_CNFG_BITS_DEFAULT, bdev->regs + BAM_CNFG_BITS);
+
+ /* enable irqs for errors */
+ writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN,
+ bdev->regs + BAM_IRQ_EN);
+
+ /* unmask global bam interrupt */
+ writel_relaxed(BAM_IRQ_MSK, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+
+ return 0;
+}
+
+static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
+ u32 index)
+{
+ bchan->id = index;
+ bchan->bdev = bdev;
+
+ vchan_init(&bchan->vc, &bdev->common);
+ bchan->vc.desc_free = bam_dma_free_desc;
+}
+
+static int bam_dma_probe(struct platform_device *pdev)
+{
+ struct bam_device *bdev;
+ struct resource *iores;
+ int ret, i;
+
+ bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL);
+ if (!bdev)
+ return -ENOMEM;
+
+ bdev->dev = &pdev->dev;
+
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ bdev->regs = devm_ioremap_resource(&pdev->dev, iores);
+ if (IS_ERR(bdev->regs))
+ return PTR_ERR(bdev->regs);
+
+ bdev->irq = platform_get_irq(pdev, 0);
+ if (bdev->irq < 0)
+ return bdev->irq;
+
+ ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &bdev->ee);
+ if (ret) {
+ dev_err(bdev->dev, "Execution environment unspecified\n");
+ return ret;
+ }
+
+ bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
+ if (IS_ERR(bdev->bamclk))
+ return PTR_ERR(bdev->bamclk);
+
+ ret = clk_prepare_enable(bdev->bamclk);
+ if (ret) {
+ dev_err(bdev->dev, "failed to prepare/enable clock\n");
+ return ret;
+ }
+
+ ret = bam_init(bdev);
+ if (ret)
+ goto err_disable_clk;
+
+ tasklet_init(&bdev->task, dma_tasklet, (unsigned long)bdev);
+
+ bdev->channels = devm_kcalloc(bdev->dev, bdev->num_channels,
+ sizeof(*bdev->channels), GFP_KERNEL);
+
+ if (!bdev->channels) {
+ ret = -ENOMEM;
+ goto err_disable_clk;
+ }
+
+ /* allocate and initialize channels */
+ INIT_LIST_HEAD(&bdev->common.channels);
+
+ for (i = 0; i < bdev->num_channels; i++)
+ bam_channel_init(bdev, &bdev->channels[i], i);
+
+ ret = devm_request_irq(bdev->dev, bdev->irq, bam_dma_irq,
+ IRQF_TRIGGER_HIGH, "bam_dma", bdev);
+ if (ret)
+ goto err_disable_clk;
+
+ /* set max dma segment size */
+ bdev->common.dev = bdev->dev;
+ bdev->common.dev->dma_parms = &bdev->dma_parms;
+ ret = dma_set_max_seg_size(bdev->common.dev, BAM_MAX_DATA_SIZE);
+ if (ret) {
+ dev_err(bdev->dev, "cannot set maximum segment size\n");
+ goto err_disable_clk;
+ }
+
+ platform_set_drvdata(pdev, bdev);
+
+ /* set capabilities */
+ dma_cap_zero(bdev->common.cap_mask);
+ dma_cap_set(DMA_SLAVE, bdev->common.cap_mask);
+
+ /* initialize dmaengine apis */
+ bdev->common.device_alloc_chan_resources = bam_alloc_chan;
+ bdev->common.device_free_chan_resources = bam_free_chan;
+ bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
+ bdev->common.device_control = bam_control;
+ bdev->common.device_issue_pending = bam_issue_pending;
+ bdev->common.device_tx_status = bam_tx_status;
+ bdev->common.dev = bdev->dev;
+
+ ret = dma_async_device_register(&bdev->common);
+ if (ret) {
+ dev_err(bdev->dev, "failed to register dma async device\n");
+ goto err_disable_clk;
+ }
+
+ ret = of_dma_controller_register(pdev->dev.of_node, bam_dma_xlate,
+ &bdev->common);
+ if (ret)
+ goto err_unregister_dma;
+
+ return 0;
+
+err_unregister_dma:
+ dma_async_device_unregister(&bdev->common);
+err_disable_clk:
+ clk_disable_unprepare(bdev->bamclk);
+ return ret;
+}
+
+static int bam_dma_remove(struct platform_device *pdev)
+{
+ struct bam_device *bdev = platform_get_drvdata(pdev);
+ u32 i;
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&bdev->common);
+
+ /* mask all interrupts for this execution environment */
+ writel_relaxed(0, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+
+ devm_free_irq(bdev->dev, bdev->irq, bdev);
+
+ for (i = 0; i < bdev->num_channels; i++) {
+ bam_dma_terminate_all(&bdev->channels[i]);
+ tasklet_kill(&bdev->channels[i].vc.task);
+
+ dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
+ bdev->channels[i].fifo_virt,
+ bdev->channels[i].fifo_phys);
+ }
+
+ tasklet_kill(&bdev->task);
+
+ clk_disable_unprepare(bdev->bamclk);
+
+ return 0;
+}
+
+static const struct of_device_id bam_of_match[] = {
+ { .compatible = "qcom,bam-v1.4.0", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, bam_of_match);
+
+static struct platform_driver bam_dma_driver = {
+ .probe = bam_dma_probe,
+ .remove = bam_dma_remove,
+ .driver = {
+ .name = "bam-dma-engine",
+ .owner = THIS_MODULE,
+ .of_match_table = bam_of_match,
+ },
+};
+
+module_platform_driver(bam_dma_driver);
+
+MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
+MODULE_DESCRIPTION("QCOM BAM DMA engine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 4eddedb6eb7..012520c9fd7 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -164,6 +164,7 @@ struct s3c24xx_sg {
* @disrcc: value for source control register
* @didstc: value for destination control register
* @dcon: base value for dcon register
+ * @cyclic: indicate cyclic transfer
*/
struct s3c24xx_txd {
struct virt_dma_desc vd;
@@ -173,6 +174,7 @@ struct s3c24xx_txd {
u32 disrcc;
u32 didstc;
u32 dcon;
+ bool cyclic;
};
struct s3c24xx_dma_chan;
@@ -192,7 +194,7 @@ struct s3c24xx_dma_phy {
unsigned int id;
bool valid;
void __iomem *base;
- unsigned int irq;
+ int irq;
struct clk *clk;
spinlock_t lock;
struct s3c24xx_dma_chan *serving;
@@ -669,8 +671,10 @@ static irqreturn_t s3c24xx_dma_irq(int irq, void *data)
/* when more sg's are in this txd, start the next one */
if (!list_is_last(txd->at, &txd->dsg_list)) {
txd->at = txd->at->next;
+ if (txd->cyclic)
+ vchan_cyclic_callback(&txd->vd);
s3c24xx_dma_start_next_sg(s3cchan, txd);
- } else {
+ } else if (!txd->cyclic) {
s3cchan->at = NULL;
vchan_cookie_complete(&txd->vd);
@@ -682,6 +686,12 @@ static irqreturn_t s3c24xx_dma_irq(int irq, void *data)
s3c24xx_dma_start_next_txd(s3cchan);
else
s3c24xx_dma_phy_free(s3cchan);
+ } else {
+ vchan_cyclic_callback(&txd->vd);
+
+ /* Cyclic: reset at beginning */
+ txd->at = txd->dsg_list.next;
+ s3c24xx_dma_start_next_sg(s3cchan, txd);
}
}
spin_unlock(&s3cchan->vc.lock);
@@ -877,6 +887,104 @@ static struct dma_async_tx_descriptor *s3c24xx_dma_prep_memcpy(
return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
}
+static struct dma_async_tx_descriptor *s3c24xx_dma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
+ enum dma_transfer_direction direction, unsigned long flags,
+ void *context)
+{
+ struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
+ struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+ const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
+ struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
+ struct s3c24xx_txd *txd;
+ struct s3c24xx_sg *dsg;
+ unsigned sg_len;
+ dma_addr_t slave_addr;
+ u32 hwcfg = 0;
+ int i;
+
+ dev_dbg(&s3cdma->pdev->dev,
+ "prepare cyclic transaction of %zu bytes with period %zu from %s\n",
+ size, period, s3cchan->name);
+
+ if (!is_slave_direction(direction)) {
+ dev_err(&s3cdma->pdev->dev,
+ "direction %d unsupported\n", direction);
+ return NULL;
+ }
+
+ txd = s3c24xx_dma_get_txd();
+ if (!txd)
+ return NULL;
+
+	txd->cyclic = true;
+
+ if (cdata->handshake)
+ txd->dcon |= S3C24XX_DCON_HANDSHAKE;
+
+ switch (cdata->bus) {
+ case S3C24XX_DMA_APB:
+ txd->dcon |= S3C24XX_DCON_SYNC_PCLK;
+ hwcfg |= S3C24XX_DISRCC_LOC_APB;
+ break;
+ case S3C24XX_DMA_AHB:
+ txd->dcon |= S3C24XX_DCON_SYNC_HCLK;
+ hwcfg |= S3C24XX_DISRCC_LOC_AHB;
+ break;
+ }
+
+	/*
+	 * Always assume our peripheral destination is a fixed
+	 * address in memory.
+	 */
+ hwcfg |= S3C24XX_DISRCC_INC_FIXED;
+
+ /*
+ * Individual dma operations are requested by the slave,
+ * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE).
+ */
+ txd->dcon |= S3C24XX_DCON_SERV_SINGLE;
+
+ if (direction == DMA_MEM_TO_DEV) {
+ txd->disrcc = S3C24XX_DISRCC_LOC_AHB |
+ S3C24XX_DISRCC_INC_INCREMENT;
+ txd->didstc = hwcfg;
+ slave_addr = s3cchan->cfg.dst_addr;
+ txd->width = s3cchan->cfg.dst_addr_width;
+ } else {
+ txd->disrcc = hwcfg;
+ txd->didstc = S3C24XX_DIDSTC_LOC_AHB |
+ S3C24XX_DIDSTC_INC_INCREMENT;
+ slave_addr = s3cchan->cfg.src_addr;
+ txd->width = s3cchan->cfg.src_addr_width;
+ }
+
+ sg_len = size / period;
+
+ for (i = 0; i < sg_len; i++) {
+ dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
+ if (!dsg) {
+ s3c24xx_dma_free_txd(txd);
+ return NULL;
+ }
+ list_add_tail(&dsg->node, &txd->dsg_list);
+
+ dsg->len = period;
+ /* Check last period length */
+ if (i == sg_len - 1)
+ dsg->len = size - period * i;
+ if (direction == DMA_MEM_TO_DEV) {
+ dsg->src_addr = addr + period * i;
+ dsg->dst_addr = slave_addr;
+ } else { /* DMA_DEV_TO_MEM */
+ dsg->src_addr = slave_addr;
+ dsg->dst_addr = addr + period * i;
+ }
+ }
+
+ return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
+}
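+
+/*
+ * Client-side usage sketch for the cyclic support above (hypothetical and
+ * for illustration only; error handling omitted, period_done is a made-up
+ * callback):
+ *
+ *	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
+ *					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
+ *	desc->callback = period_done;	// invoked once per elapsed period
+ *	dmaengine_submit(desc);
+ *	dma_async_issue_pending(chan);
+ */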
+
static struct dma_async_tx_descriptor *s3c24xx_dma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
@@ -961,7 +1069,6 @@ static struct dma_async_tx_descriptor *s3c24xx_dma_prep_slave_sg(
dsg->src_addr = slave_addr;
dsg->dst_addr = sg_dma_address(sg);
}
- break;
}
return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
@@ -1198,6 +1305,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
/* Initialize slave engine for SoC internal dedicated peripherals */
dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask);
+ dma_cap_set(DMA_CYCLIC, s3cdma->slave.cap_mask);
dma_cap_set(DMA_PRIVATE, s3cdma->slave.cap_mask);
s3cdma->slave.dev = &pdev->dev;
s3cdma->slave.device_alloc_chan_resources =
@@ -1207,6 +1315,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
s3cdma->slave.device_tx_status = s3c24xx_dma_tx_status;
s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending;
s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg;
+ s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic;
s3cdma->slave.device_control = s3c24xx_dma_control;
/* Register as many memcpy channels as there are physical channels */
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index ab26d46bbe1..5ebdfbc1051 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -113,11 +113,9 @@ struct sa11x0_dma_phy {
struct sa11x0_dma_desc *txd_load;
unsigned sg_done;
struct sa11x0_dma_desc *txd_done;
-#ifdef CONFIG_PM_SLEEP
u32 dbs[2];
u32 dbt[2];
u32 dcsr;
-#endif
};
struct sa11x0_dma_dev {
@@ -984,7 +982,6 @@ static int sa11x0_dma_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int sa11x0_dma_suspend(struct device *dev)
{
struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
@@ -1054,7 +1051,6 @@ static int sa11x0_dma_resume(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops sa11x0_dma_pm_ops = {
.suspend_noirq = sa11x0_dma_suspend,
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index dadd9e010c0..0f719816c91 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -4,7 +4,7 @@
config SH_DMAE_BASE
bool "Renesas SuperH DMA Engine support"
- depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE)
+ depends on (SUPERH && SH_DMA) || ARCH_SHMOBILE || COMPILE_TEST
depends on !SH_DMA_API
default y
select DMA_ENGINE
@@ -29,6 +29,12 @@ config RCAR_HPB_DMAE
help
Enable support for the Renesas R-Car series DMA controllers.
+config RCAR_AUDMAC_PP
+ tristate "Renesas R-Car Audio DMAC Peripheral Peripheral support"
+ depends on SH_DMAE_BASE
+ help
+ Enable support for the Renesas R-Car Audio DMAC Peripheral Peripheral controllers.
+
config SHDMA_R8A73A4
def_bool y
depends on ARCH_R8A73A4 && SH_DMAE != n
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index e856af23b78..1ce88b28cfc 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -7,3 +7,4 @@ endif
shdma-objs := $(shdma-y)
obj-$(CONFIG_SUDMAC) += sudmac.o
obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
+obj-$(CONFIG_RCAR_AUDMAC_PP) += rcar-audmapp.o
diff --git a/drivers/dma/sh/rcar-audmapp.c b/drivers/dma/sh/rcar-audmapp.c
new file mode 100644
index 00000000000..2de77289a2e
--- /dev/null
+++ b/drivers/dma/sh/rcar-audmapp.c
@@ -0,0 +1,320 @@
+/*
+ * This is for Renesas R-Car Audio-DMAC-peri-peri.
+ *
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ * Copyright (C) 2014 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * based on the drivers/dma/sh/shdma.c
+ *
+ * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
+ * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_data/dma-rcar-audmapp.h>
+#include <linux/platform_device.h>
+#include <linux/shdma-base.h>
+
+/*
+ * DMA register
+ */
+#define PDMASAR 0x00
+#define PDMADAR 0x04
+#define PDMACHCR 0x0c
+
+/* PDMACHCR */
+#define PDMACHCR_DE (1 << 0)
+
+#define AUDMAPP_MAX_CHANNELS 29
+
+/* Default MEMCPY transfer size = 2^2 = 4 bytes */
+#define LOG2_DEFAULT_XFER_SIZE 2
+#define AUDMAPP_SLAVE_NUMBER 256
+#define AUDMAPP_LEN_MAX (16 * 1024 * 1024)
+
+struct audmapp_chan {
+ struct shdma_chan shdma_chan;
+ struct audmapp_slave_config *config;
+ void __iomem *base;
+};
+
+struct audmapp_device {
+ struct shdma_dev shdma_dev;
+ struct audmapp_pdata *pdata;
+ struct device *dev;
+ void __iomem *chan_reg;
+};
+
+#define to_chan(chan) container_of(chan, struct audmapp_chan, shdma_chan)
+#define to_dev(chan) container_of(chan->shdma_chan.dma_chan.device, \
+ struct audmapp_device, shdma_dev.dma_dev)
+
+static void audmapp_write(struct audmapp_chan *auchan, u32 data, u32 reg)
+{
+ struct audmapp_device *audev = to_dev(auchan);
+ struct device *dev = audev->dev;
+
+ dev_dbg(dev, "w %p : %08x\n", auchan->base + reg, data);
+
+ iowrite32(data, auchan->base + reg);
+}
+
+static u32 audmapp_read(struct audmapp_chan *auchan, u32 reg)
+{
+ return ioread32(auchan->base + reg);
+}
+
+static void audmapp_halt(struct shdma_chan *schan)
+{
+ struct audmapp_chan *auchan = to_chan(schan);
+ int i;
+
+ audmapp_write(auchan, 0, PDMACHCR);
+
+ for (i = 0; i < 1024; i++) {
+ if (audmapp_read(auchan, PDMACHCR) == 0)
+ return;
+ udelay(1);
+ }
+}
+
+static void audmapp_start_xfer(struct shdma_chan *schan,
+ struct shdma_desc *sdecs)
+{
+ struct audmapp_chan *auchan = to_chan(schan);
+ struct audmapp_device *audev = to_dev(auchan);
+ struct audmapp_slave_config *cfg = auchan->config;
+ struct device *dev = audev->dev;
+ u32 chcr = cfg->chcr | PDMACHCR_DE;
+
+ dev_dbg(dev, "src/dst/chcr = %pad/%pad/%x\n",
+ &cfg->src, &cfg->dst, cfg->chcr);
+
+ audmapp_write(auchan, cfg->src, PDMASAR);
+ audmapp_write(auchan, cfg->dst, PDMADAR);
+ audmapp_write(auchan, chcr, PDMACHCR);
+}
+
+static struct audmapp_slave_config *
+audmapp_find_slave(struct audmapp_chan *auchan, int slave_id)
+{
+ struct audmapp_device *audev = to_dev(auchan);
+ struct audmapp_pdata *pdata = audev->pdata;
+ struct audmapp_slave_config *cfg;
+ int i;
+
+ if (slave_id >= AUDMAPP_SLAVE_NUMBER)
+ return NULL;
+
+ for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
+ if (cfg->slave_id == slave_id)
+ return cfg;
+
+ return NULL;
+}
+
+static int audmapp_set_slave(struct shdma_chan *schan, int slave_id,
+ dma_addr_t slave_addr, bool try)
+{
+ struct audmapp_chan *auchan = to_chan(schan);
+ struct audmapp_slave_config *cfg =
+ audmapp_find_slave(auchan, slave_id);
+
+ if (!cfg)
+ return -ENODEV;
+ if (try)
+ return 0;
+
+ auchan->config = cfg;
+
+ return 0;
+}
+
+static int audmapp_desc_setup(struct shdma_chan *schan,
+ struct shdma_desc *sdecs,
+ dma_addr_t src, dma_addr_t dst, size_t *len)
+{
+ struct audmapp_chan *auchan = to_chan(schan);
+ struct audmapp_slave_config *cfg = auchan->config;
+
+ if (!cfg)
+ return -ENODEV;
+
+ if (*len > (size_t)AUDMAPP_LEN_MAX)
+ *len = (size_t)AUDMAPP_LEN_MAX;
+
+ return 0;
+}
+
+static void audmapp_setup_xfer(struct shdma_chan *schan,
+ int slave_id)
+{
+}
+
+static dma_addr_t audmapp_slave_addr(struct shdma_chan *schan)
+{
+ return 0; /* always fixed address */
+}
+
+static bool audmapp_channel_busy(struct shdma_chan *schan)
+{
+ struct audmapp_chan *auchan = to_chan(schan);
+ u32 chcr = audmapp_read(auchan, PDMACHCR);
+
+ return chcr & ~PDMACHCR_DE;
+}
+
+static bool audmapp_desc_completed(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ return true;
+}
+
+static struct shdma_desc *audmapp_embedded_desc(void *buf, int i)
+{
+ return &((struct shdma_desc *)buf)[i];
+}
+
+static const struct shdma_ops audmapp_shdma_ops = {
+ .halt_channel = audmapp_halt,
+ .desc_setup = audmapp_desc_setup,
+ .set_slave = audmapp_set_slave,
+ .start_xfer = audmapp_start_xfer,
+ .embedded_desc = audmapp_embedded_desc,
+ .setup_xfer = audmapp_setup_xfer,
+ .slave_addr = audmapp_slave_addr,
+ .channel_busy = audmapp_channel_busy,
+ .desc_completed = audmapp_desc_completed,
+};
+
+static int audmapp_chan_probe(struct platform_device *pdev,
+ struct audmapp_device *audev, int id)
+{
+ struct shdma_dev *sdev = &audev->shdma_dev;
+ struct audmapp_chan *auchan;
+ struct shdma_chan *schan;
+ struct device *dev = audev->dev;
+
+ auchan = devm_kzalloc(dev, sizeof(*auchan), GFP_KERNEL);
+ if (!auchan)
+ return -ENOMEM;
+
+ schan = &auchan->shdma_chan;
+ schan->max_xfer_len = AUDMAPP_LEN_MAX;
+
+ shdma_chan_probe(sdev, schan, id);
+
+ auchan->base = audev->chan_reg + 0x20 + (0x10 * id);
+ dev_dbg(dev, "%02d : %p / %p\n", id, auchan->base, audev->chan_reg);
+
+ return 0;
+}
+
+static void audmapp_chan_remove(struct audmapp_device *audev)
+{
+ struct dma_device *dma_dev = &audev->shdma_dev.dma_dev;
+ struct shdma_chan *schan;
+ int i;
+
+ shdma_for_each_chan(schan, &audev->shdma_dev, i) {
+ BUG_ON(!schan);
+ shdma_chan_remove(schan);
+ }
+ dma_dev->chancnt = 0;
+}
+
+static int audmapp_probe(struct platform_device *pdev)
+{
+ struct audmapp_pdata *pdata = pdev->dev.platform_data;
+ struct audmapp_device *audev;
+ struct shdma_dev *sdev;
+ struct dma_device *dma_dev;
+ struct resource *res;
+ int err, i;
+
+ if (!pdata)
+ return -ENODEV;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ audev = devm_kzalloc(&pdev->dev, sizeof(*audev), GFP_KERNEL);
+ if (!audev)
+ return -ENOMEM;
+
+ audev->dev = &pdev->dev;
+ audev->pdata = pdata;
+ audev->chan_reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(audev->chan_reg))
+ return PTR_ERR(audev->chan_reg);
+
+ sdev = &audev->shdma_dev;
+ sdev->ops = &audmapp_shdma_ops;
+ sdev->desc_size = sizeof(struct shdma_desc);
+
+ dma_dev = &sdev->dma_dev;
+ dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;
+ dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+
+ err = shdma_init(&pdev->dev, sdev, AUDMAPP_MAX_CHANNELS);
+ if (err < 0)
+ return err;
+
+ platform_set_drvdata(pdev, audev);
+
+ /* Create DMA Channel */
+ for (i = 0; i < AUDMAPP_MAX_CHANNELS; i++) {
+ err = audmapp_chan_probe(pdev, audev, i);
+ if (err)
+ goto chan_probe_err;
+ }
+
+ err = dma_async_device_register(dma_dev);
+ if (err < 0)
+ goto chan_probe_err;
+
+ return err;
+
+chan_probe_err:
+ audmapp_chan_remove(audev);
+ shdma_cleanup(sdev);
+
+ return err;
+}
+
+static int audmapp_remove(struct platform_device *pdev)
+{
+ struct audmapp_device *audev = platform_get_drvdata(pdev);
+ struct dma_device *dma_dev = &audev->shdma_dev.dma_dev;
+
+ dma_async_device_unregister(dma_dev);
+
+ audmapp_chan_remove(audev);
+ shdma_cleanup(&audev->shdma_dev);
+
+ return 0;
+}
+
+static struct platform_driver audmapp_driver = {
+ .probe = audmapp_probe,
+ .remove = audmapp_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "rcar-audmapp-engine",
+ },
+};
+module_platform_driver(audmapp_driver);
+
+MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
+MODULE_DESCRIPTION("Renesas R-Car Audio DMAC peri-peri driver");
+MODULE_LICENSE("GPL");
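
The driver is configured purely through platform data: audmapp_find_slave() matches the requested slave_id against the audmapp_slave_config table, and audmapp_start_xfer() programs PDMASAR/PDMADAR/PDMACHCR straight from the matched entry. A board-side sketch; the slave_id, FIFO addresses, and CHCR value below are illustrative placeholders, not datasheet values:

	/* illustrative routing table, not taken from a real datasheet */
	static struct audmapp_slave_config audmapp_slaves[] = {
		{
			.slave_id	= 0x2d,		/* hypothetical SSI0 -> SRC1 route */
			.src		= 0xec241008,	/* example source FIFO */
			.dst		= 0xec100714,	/* example destination FIFO */
			.chcr		= 0x10000700,	/* example transfer config */
		},
	};

	static struct audmapp_pdata audmapp_pdata = {
		.slave		= audmapp_slaves,
		.slave_num	= ARRAY_SIZE(audmapp_slaves),
	};
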
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c
index 3083d901a41..b212d9471ab 100644
--- a/drivers/dma/sh/rcar-hpbdma.c
+++ b/drivers/dma/sh/rcar-hpbdma.c
@@ -18,6 +18,7 @@
#include <linux/dmaengine.h>
#include <linux/delay.h>
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 2e7b394def8..b35007e21e6 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -73,8 +73,7 @@ static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
struct shdma_desc *chunk, *c, *desc =
- container_of(tx, struct shdma_desc, async_tx),
- *last = desc;
+ container_of(tx, struct shdma_desc, async_tx);
struct shdma_chan *schan = to_shdma_chan(tx->chan);
dma_async_tx_callback callback = tx->callback;
dma_cookie_t cookie;
@@ -98,19 +97,20 @@ static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
&chunk->node == &schan->ld_free))
break;
chunk->mark = DESC_SUBMITTED;
- /* Callback goes to the last chunk */
- chunk->async_tx.callback = NULL;
+ if (chunk->chunks == 1) {
+ chunk->async_tx.callback = callback;
+ chunk->async_tx.callback_param = tx->callback_param;
+ } else {
+ /* Callback goes to the last chunk */
+ chunk->async_tx.callback = NULL;
+ }
chunk->cookie = cookie;
list_move_tail(&chunk->node, &schan->ld_queue);
- last = chunk;
dev_dbg(schan->dev, "submit #%d@%p on %d\n",
- tx->cookie, &last->async_tx, schan->id);
+ tx->cookie, &chunk->async_tx, schan->id);
}
- last->async_tx.callback = callback;
- last->async_tx.callback_param = tx->callback_param;
-
if (power_up) {
int ret;
schan->pm_state = SHDMA_PM_BUSY;
@@ -227,7 +227,7 @@ bool shdma_chan_filter(struct dma_chan *chan, void *arg)
struct shdma_chan *schan = to_shdma_chan(chan);
struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
const struct shdma_ops *ops = sdev->ops;
- int match = (int)arg;
+ int match = (long)arg;
int ret;
if (match < 0)
@@ -304,6 +304,7 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
dma_async_tx_callback callback = NULL;
void *param = NULL;
unsigned long flags;
+ LIST_HEAD(cyclic_list);
spin_lock_irqsave(&schan->chan_lock, flags);
list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
@@ -369,10 +370,16 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
if (((desc->mark == DESC_COMPLETED ||
desc->mark == DESC_WAITING) &&
async_tx_test_ack(&desc->async_tx)) || all) {
- /* Remove from ld_queue list */
- desc->mark = DESC_IDLE;
- list_move(&desc->node, &schan->ld_free);
+ if (all || !desc->cyclic) {
+ /* Remove from ld_queue list */
+ desc->mark = DESC_IDLE;
+ list_move(&desc->node, &schan->ld_free);
+ } else {
+ /* reuse as cyclic */
+ desc->mark = DESC_SUBMITTED;
+ list_move_tail(&desc->node, &cyclic_list);
+ }
if (list_empty(&schan->ld_queue)) {
dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
@@ -389,6 +396,8 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
*/
schan->dma_chan.completed_cookie = schan->dma_chan.cookie;
+ list_splice_tail(&cyclic_list, &schan->ld_queue);
+
spin_unlock_irqrestore(&schan->chan_lock, flags);
if (callback)
@@ -491,8 +500,8 @@ static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
}
dev_dbg(schan->dev,
- "chaining (%u/%u)@%x -> %x with %p, cookie %d\n",
- copy_size, *len, *src, *dst, &new->async_tx,
+ "chaining (%zu/%zu)@%pad -> %pad with %p, cookie %d\n",
+ copy_size, *len, src, dst, &new->async_tx,
new->async_tx.cookie);
new->mark = DESC_PREPARED;
@@ -521,7 +530,7 @@ static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
*/
static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
- enum dma_transfer_direction direction, unsigned long flags)
+ enum dma_transfer_direction direction, unsigned long flags, bool cyclic)
{
struct scatterlist *sg;
struct shdma_desc *first = NULL, *new = NULL /* compiler... */;
@@ -555,8 +564,8 @@ static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
goto err_get_desc;
do {
- dev_dbg(schan->dev, "Add SG #%d@%p[%d], dma %llx\n",
- i, sg, len, (unsigned long long)sg_addr);
+ dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %pad\n",
+ i, sg, len, &sg_addr);
if (direction == DMA_DEV_TO_MEM)
new = shdma_add_desc(schan, flags,
@@ -569,7 +578,11 @@ static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
if (!new)
goto err_get_desc;
- new->chunks = chunks--;
+ new->cyclic = cyclic;
+ if (cyclic)
+ new->chunks = 1;
+ else
+ new->chunks = chunks--;
list_add_tail(&new->node, &tx_list);
} while (len);
}
@@ -612,7 +625,8 @@ static struct dma_async_tx_descriptor *shdma_prep_memcpy(
sg_dma_address(&sg) = dma_src;
sg_dma_len(&sg) = len;
- return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, flags);
+ return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
+ flags, false);
}
static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
@@ -640,7 +654,58 @@ static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
slave_addr = ops->slave_addr(schan);
return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
- direction, flags);
+ direction, flags, false);
+}
+
+#define SHDMA_MAX_SG_LEN 32
+
+static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct shdma_chan *schan;
+ struct shdma_dev *sdev;
+ const struct shdma_ops *ops;
+ unsigned int sg_len;
+ int slave_id;
+ dma_addr_t slave_addr;
+ struct scatterlist sgl[SHDMA_MAX_SG_LEN];
+ int i;
+
+ /* check the channel before any dereference via container_of() */
+ if (!chan)
+ return NULL;
+
+ schan = to_shdma_chan(chan);
+ sdev = to_shdma_dev(schan->dma_chan.device);
+ ops = sdev->ops;
+ sg_len = buf_len / period_len;
+ slave_id = schan->slave_id;
+
+ BUG_ON(!schan->desc_num);
+
+ if (sg_len > SHDMA_MAX_SG_LEN) {
+ dev_err(schan->dev, "sg length %d exceeds limit %d\n",
+ sg_len, SHDMA_MAX_SG_LEN);
+ return NULL;
+ }
+
+ /* Someone calling slave DMA on a generic channel? */
+ if (slave_id < 0 || (buf_len < period_len)) {
+ dev_warn(schan->dev,
+ "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
+ __func__, buf_len, period_len, slave_id);
+ return NULL;
+ }
+
+ slave_addr = ops->slave_addr(schan);
+
+ sg_init_table(sgl, sg_len);
+ for (i = 0; i < sg_len; i++) {
+ dma_addr_t src = buf_addr + (period_len * i);
+
+ sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
+ offset_in_page(src));
+ sg_dma_address(&sgl[i]) = src;
+ sg_dma_len(&sgl[i]) = period_len;
+ }
+
+ return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
+ direction, flags, true);
}
static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
@@ -915,6 +980,7 @@ int shdma_init(struct device *dev, struct shdma_dev *sdev,
/* Compulsory for DMA_SLAVE fields */
dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
+ dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic;
dma_dev->device_control = shdma_control;
dma_dev->dev = dev;
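
shdma_prep_dma_cyclic() turns each period into one scatterlist entry (at most SHDMA_MAX_SG_LEN, i.e. 32), flags every descriptor as a single chunk so the completion callback fires once per period, and __ld_cleanup() re-queues finished cyclic descriptors instead of freeing them. A capture-side sketch, with `chan`, `buf` and `capture_period_elapsed`/`ctx` assumed to be set up by the caller:

	struct dma_async_tx_descriptor *desc;
	size_t period_len = 2048;
	size_t buf_len = 32 * period_len;	/* at most SHDMA_MAX_SG_LEN periods */

	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc)
		return -EBUSY;
	/* each period is a single chunk, so this runs once per period */
	desc->callback = capture_period_elapsed;
	desc->callback_param = ctx;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
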
diff --git a/drivers/dma/sh/shdma-of.c b/drivers/dma/sh/shdma-of.c
index 06473a05fe4..b4ff9d3e56d 100644
--- a/drivers/dma/sh/shdma-of.c
+++ b/drivers/dma/sh/shdma-of.c
@@ -33,7 +33,8 @@ static struct dma_chan *shdma_of_xlate(struct of_phandle_args *dma_spec,
/* Only slave DMA channels can be allocated via DT */
dma_cap_set(DMA_SLAVE, mask);
- chan = dma_request_channel(mask, shdma_chan_filter, (void *)id);
+ chan = dma_request_channel(mask, shdma_chan_filter,
+ (void *)(uintptr_t)id);
if (chan)
to_shdma_chan(chan)->hw_req = id;
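
This xlate fix, together with the matching `(long)arg` change in shdma_chan_filter(), exists because an `int` smuggled through a `void *` must round-trip via a pointer-sized integer; a direct `(void *)id` / `(int)arg` pair truncates or warns on LP64 targets. In isolation:

	int id = 42;
	void *arg = (void *)(uintptr_t)id;	/* widen first, then convert */
	int back = (int)(uintptr_t)arg;		/* == 42 on all supported ABIs */
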
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 0d765c0e21e..146d5df926d 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -18,21 +18,22 @@
*
*/
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kdebug.h>
#include <linux/module.h>
+#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/dmaengine.h>
-#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/rculist.h>
#include <linux/sh_dma.h>
-#include <linux/notifier.h>
-#include <linux/kdebug.h>
+#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/rculist.h>
#include "../dmaengine.h"
#include "shdma.h"
@@ -443,6 +444,7 @@ static bool sh_dmae_reset(struct sh_dmae_device *shdev)
return ret;
}
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
struct sh_dmae_device *shdev = data;
@@ -453,6 +455,7 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
sh_dmae_reset(shdev);
return IRQ_HANDLED;
}
+#endif
static bool sh_dmae_desc_completed(struct shdma_chan *schan,
struct shdma_desc *sdesc)
@@ -637,7 +640,7 @@ static int sh_dmae_resume(struct device *dev)
#define sh_dmae_resume NULL
#endif
-const struct dev_pm_ops sh_dmae_pm = {
+static const struct dev_pm_ops sh_dmae_pm = {
.suspend = sh_dmae_suspend,
.resume = sh_dmae_resume,
.runtime_suspend = sh_dmae_runtime_suspend,
@@ -685,9 +688,12 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
static int sh_dmae_probe(struct platform_device *pdev)
{
const struct sh_dmae_pdata *pdata;
- unsigned long irqflags = 0,
- chan_flag[SH_DMAE_MAX_CHANNELS] = {};
- int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
+ unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {};
+ int chan_irq[SH_DMAE_MAX_CHANNELS];
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM)
+ unsigned long irqflags = 0;
+ int errirq;
+#endif
int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
struct sh_dmae_device *shdev;
struct dma_device *dma_dev;
diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c
index c7e9cdff070..3ce10390989 100644
--- a/drivers/dma/sh/sudmac.c
+++ b/drivers/dma/sh/sudmac.c
@@ -14,12 +14,13 @@
* published by the Free Software Foundation.
*/
+#include <linux/dmaengine.h>
+#include <linux/err.h>
#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/dmaengine.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include <linux/sudmac.h>
struct sudmac_chan {
@@ -178,8 +179,8 @@ static int sudmac_desc_setup(struct shdma_chan *schan,
struct sudmac_chan *sc = to_chan(schan);
struct sudmac_desc *sd = to_desc(sdesc);
- dev_dbg(sc->shdma_chan.dev, "%s: src=%x, dst=%x, len=%d\n",
- __func__, src, dst, *len);
+ dev_dbg(sc->shdma_chan.dev, "%s: src=%pad, dst=%pad, len=%zu\n",
+ __func__, &src, &dst, *len);
if (*len > schan->max_xfer_len)
*len = schan->max_xfer_len;
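
The format-string fixes above rely on the kernel's `%pad` specifier, which takes a *pointer* to a dma_addr_t and prints it at the correct width whether dma_addr_t is 32 or 64 bits; printing a 64-bit dma_addr_t with `%x` corrupts the vararg list. A standalone fragment showing the convention:

	dma_addr_t src = 0x12345678;
	size_t len = 4096;

	/* pass &src, not src: %pad dereferences a dma_addr_t pointer */
	dev_dbg(dev, "src=%pad, len=%zu\n", &src, len);
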
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index d4d3a3109b1..03f7820fa33 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -18,6 +18,7 @@
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
+#include <linux/of_dma.h>
#include <linux/sirfsoc_dma.h>
#include "dmaengine.h"
@@ -659,6 +660,18 @@ static int sirfsoc_dma_device_slave_caps(struct dma_chan *dchan,
return 0;
}
+static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct sirfsoc_dma *sdma = ofdma->of_dma_data;
+ unsigned int request = dma_spec->args[0];
+
+ if (request >= SIRFSOC_DMA_CHANNELS)
+ return NULL;
+
+ return dma_get_slave_channel(&sdma->channels[request].chan);
+}
+
static int sirfsoc_dma_probe(struct platform_device *op)
{
struct device_node *dn = op->dev.of_node;
@@ -764,11 +777,20 @@ static int sirfsoc_dma_probe(struct platform_device *op)
if (ret)
goto free_irq;
+ /* Device-tree DMA controller registration */
+ ret = of_dma_controller_register(dn, of_dma_sirfsoc_xlate, sdma);
+ if (ret) {
+ dev_err(dev, "failed to register DMA controller\n");
+ goto unreg_dma_dev;
+ }
+
pm_runtime_enable(&op->dev);
dev_info(dev, "initialized SIRFSOC DMAC driver\n");
return 0;
+unreg_dma_dev:
+ dma_async_device_unregister(dma);
free_irq:
free_irq(sdma->irq, sdma);
irq_dispose:
@@ -781,6 +803,7 @@ static int sirfsoc_dma_remove(struct platform_device *op)
struct device *dev = &op->dev;
struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+ of_dma_controller_free(op->dev.of_node);
dma_async_device_unregister(&sdma->dma);
free_irq(sdma->irq, sdma);
irq_dispose_mapping(sdma->irq);
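
Once of_dma_controller_register() has installed the xlate callback, consumers no longer need a filter function; a channel is looked up from the device tree by name. A consumer-side sketch, assuming the client node carries something like `dmas = <&dmac 12>; dma-names = "rx";`:

	struct dma_chan *chan;

	chan = dma_request_slave_channel(&pdev->dev, "rx");
	if (!chan)
		return -EPROBE_DEFER;	/* the controller may not be up yet */
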
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 00a2de957b2..c7984459ede 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -556,7 +556,6 @@ struct d40_gen_dmac {
* later
* @reg_val_backup_chan: Backup data for standard channel parameter registers.
* @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
- * @initialized: true if the dma has been initialized
* @gen_dmac: the struct for generic registers values to represent u8500/8540
* DMA controller
*/
@@ -594,7 +593,6 @@ struct d40_base {
u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX];
u32 *reg_val_backup_chan;
u16 gcc_pwr_off_mask;
- bool initialized;
struct d40_gen_dmac gen_dmac;
};
@@ -1056,62 +1054,6 @@ static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
return len;
}
-
-#ifdef CONFIG_PM
-static void dma40_backup(void __iomem *baseaddr, u32 *backup,
- u32 *regaddr, int num, bool save)
-{
- int i;
-
- for (i = 0; i < num; i++) {
- void __iomem *addr = baseaddr + regaddr[i];
-
- if (save)
- backup[i] = readl_relaxed(addr);
- else
- writel_relaxed(backup[i], addr);
- }
-}
-
-static void d40_save_restore_registers(struct d40_base *base, bool save)
-{
- int i;
-
- /* Save/Restore channel specific registers */
- for (i = 0; i < base->num_phy_chans; i++) {
- void __iomem *addr;
- int idx;
-
- if (base->phy_res[i].reserved)
- continue;
-
- addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
- idx = i * ARRAY_SIZE(d40_backup_regs_chan);
-
- dma40_backup(addr, &base->reg_val_backup_chan[idx],
- d40_backup_regs_chan,
- ARRAY_SIZE(d40_backup_regs_chan),
- save);
- }
-
- /* Save/Restore global registers */
- dma40_backup(base->virtbase, base->reg_val_backup,
- d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
- save);
-
- /* Save/Restore registers only existing on dma40 v3 and later */
- if (base->gen_dmac.backup)
- dma40_backup(base->virtbase, base->reg_val_backup_v4,
- base->gen_dmac.backup,
- base->gen_dmac.backup_size,
- save);
-}
-#else
-static void d40_save_restore_registers(struct d40_base *base, bool save)
-{
-}
-#endif
-
static int __d40_execute_command_phy(struct d40_chan *d40c,
enum d40_command command)
{
@@ -1495,8 +1437,8 @@ static int d40_pause(struct d40_chan *d40c)
if (!d40c->busy)
return 0;
- pm_runtime_get_sync(d40c->base->dev);
spin_lock_irqsave(&d40c->lock, flags);
+ pm_runtime_get_sync(d40c->base->dev);
res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
@@ -1641,6 +1583,7 @@ static void dma_tasklet(unsigned long data)
struct d40_chan *d40c = (struct d40_chan *) data;
struct d40_desc *d40d;
unsigned long flags;
+ bool callback_active;
dma_async_tx_callback callback;
void *callback_param;
@@ -1668,6 +1611,7 @@ static void dma_tasklet(unsigned long data)
}
/* Callback to client */
+ callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
callback = d40d->txd.callback;
callback_param = d40d->txd.callback_param;
@@ -1690,7 +1634,7 @@ static void dma_tasklet(unsigned long data)
spin_unlock_irqrestore(&d40c->lock, flags);
- if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
+ if (callback_active && callback)
callback(callback_param);
return;
@@ -2996,18 +2940,88 @@ failure1:
}
/* Suspend resume functionality */
-#ifdef CONFIG_PM
-static int dma40_pm_suspend(struct device *dev)
+#ifdef CONFIG_PM_SLEEP
+static int dma40_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct d40_base *base = platform_get_drvdata(pdev);
- int ret = 0;
+ int ret;
+
+ ret = pm_runtime_force_suspend(dev);
+ if (ret)
+ return ret;
if (base->lcpa_regulator)
ret = regulator_disable(base->lcpa_regulator);
return ret;
}
+static int dma40_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct d40_base *base = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ if (base->lcpa_regulator) {
+ ret = regulator_enable(base->lcpa_regulator);
+ if (ret)
+ return ret;
+ }
+
+ return pm_runtime_force_resume(dev);
+}
+#endif
+
+#ifdef CONFIG_PM
+static void dma40_backup(void __iomem *baseaddr, u32 *backup,
+ u32 *regaddr, int num, bool save)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ void __iomem *addr = baseaddr + regaddr[i];
+
+ if (save)
+ backup[i] = readl_relaxed(addr);
+ else
+ writel_relaxed(backup[i], addr);
+ }
+}
+
+static void d40_save_restore_registers(struct d40_base *base, bool save)
+{
+ int i;
+
+ /* Save/Restore channel specific registers */
+ for (i = 0; i < base->num_phy_chans; i++) {
+ void __iomem *addr;
+ int idx;
+
+ if (base->phy_res[i].reserved)
+ continue;
+
+ addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
+ idx = i * ARRAY_SIZE(d40_backup_regs_chan);
+
+ dma40_backup(addr, &base->reg_val_backup_chan[idx],
+ d40_backup_regs_chan,
+ ARRAY_SIZE(d40_backup_regs_chan),
+ save);
+ }
+
+ /* Save/Restore global registers */
+ dma40_backup(base->virtbase, base->reg_val_backup,
+ d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
+ save);
+
+ /* Save/Restore registers only existing on dma40 v3 and later */
+ if (base->gen_dmac.backup)
+ dma40_backup(base->virtbase, base->reg_val_backup_v4,
+ base->gen_dmac.backup,
+ base->gen_dmac.backup_size,
+ save);
+}
+
static int dma40_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -3028,36 +3042,20 @@ static int dma40_runtime_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct d40_base *base = platform_get_drvdata(pdev);
- if (base->initialized)
- d40_save_restore_registers(base, false);
+ d40_save_restore_registers(base, false);
writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
base->virtbase + D40_DREG_GCC);
return 0;
}
-
-static int dma40_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct d40_base *base = platform_get_drvdata(pdev);
- int ret = 0;
-
- if (base->lcpa_regulator)
- ret = regulator_enable(base->lcpa_regulator);
-
- return ret;
-}
+#endif
static const struct dev_pm_ops dma40_pm_ops = {
- .suspend = dma40_pm_suspend,
- .runtime_suspend = dma40_runtime_suspend,
- .runtime_resume = dma40_runtime_resume,
- .resume = dma40_resume,
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(dma40_suspend, dma40_resume)
+ SET_PM_RUNTIME_PM_OPS(dma40_runtime_suspend,
+ dma40_runtime_resume,
+ NULL)
};
-#define DMA40_PM_OPS (&dma40_pm_ops)
-#else
-#define DMA40_PM_OPS NULL
-#endif
/* Initialization functions. */
@@ -3643,12 +3641,6 @@ static int __init d40_probe(struct platform_device *pdev)
goto failure;
}
- pm_runtime_irq_safe(base->dev);
- pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
- pm_runtime_use_autosuspend(base->dev);
- pm_runtime_enable(base->dev);
- pm_runtime_resume(base->dev);
-
if (base->plat_data->use_esram_lcla) {
base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
@@ -3669,7 +3661,15 @@ static int __init d40_probe(struct platform_device *pdev)
}
}
- base->initialized = true;
+ writel_relaxed(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
+
+ pm_runtime_irq_safe(base->dev);
+ pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(base->dev);
+ pm_runtime_mark_last_busy(base->dev);
+ pm_runtime_set_active(base->dev);
+ pm_runtime_enable(base->dev);
+
ret = d40_dmaengine_init(base, num_reserved_chans);
if (ret)
goto failure;
@@ -3752,7 +3752,7 @@ static struct platform_driver d40_driver = {
.driver = {
.owner = THIS_MODULE,
.name = D40_NAME,
- .pm = DMA40_PM_OPS,
+ .pm = &dma40_pm_ops,
.of_match_table = d40_match,
},
};
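
The reworked PM ops route system sleep through the runtime-PM callbacks, so the register save/restore in dma40_runtime_suspend()/dma40_runtime_resume() is exercised on both paths and the old `initialized` flag becomes unnecessary. A generic sketch of the pattern, with `foo_*` as placeholder names:

	static int foo_runtime_suspend(struct device *dev)
	{
		/* save controller registers, gate clocks, ... */
		return 0;
	}

	static int foo_runtime_resume(struct device *dev)
	{
		/* restore controller registers, ungate clocks, ... */
		return 0;
	}

	static int foo_suspend(struct device *dev)
	{
		return pm_runtime_force_suspend(dev); /* runs foo_runtime_suspend() */
	}

	static int foo_resume(struct device *dev)
	{
		return pm_runtime_force_resume(dev); /* runs foo_runtime_resume() */
	}

	static const struct dev_pm_ops foo_pm_ops = {
		SET_LATE_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
		SET_PM_RUNTIME_PM_OPS(foo_runtime_suspend,
				      foo_runtime_resume,
				      NULL)
	};
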
diff --git a/drivers/dma/xilinx/Makefile b/drivers/dma/xilinx/Makefile
new file mode 100644
index 00000000000..3c4e9f2fea2
--- /dev/null
+++ b/drivers/dma/xilinx/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_XILINX_VDMA) += xilinx_vdma.o
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
new file mode 100644
index 00000000000..42a13e8d460
--- /dev/null
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -0,0 +1,1379 @@
+/*
+ * DMA driver for Xilinx Video DMA Engine
+ *
+ * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
+ *
+ * Based on the Freescale DMA driver.
+ *
+ * Description:
+ * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
+ * core that provides high-bandwidth direct memory access between memory
+ * and AXI4-Stream type video target peripherals. The core provides efficient
+ * two dimensional DMA operations with independent asynchronous read (S2MM)
+ * and write (MM2S) channel operation. It can be configured to have either
+ * one channel or two channels. If configured as two channels, one is to
+ * transmit to the video device (MM2S) and another is to receive from the
+ * video device (S2MM). Initialization, status, interrupt and management
+ * registers are accessed through an AXI4-Lite slave interface.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/amba/xilinx_dma.h>
+#include <linux/bitops.h>
+#include <linux/dmapool.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+
+#include "../dmaengine.h"
+
+/* Register/Descriptor Offsets */
+#define XILINX_VDMA_MM2S_CTRL_OFFSET 0x0000
+#define XILINX_VDMA_S2MM_CTRL_OFFSET 0x0030
+#define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050
+#define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0
+
+/* Control Registers */
+#define XILINX_VDMA_REG_DMACR 0x0000
+#define XILINX_VDMA_DMACR_DELAY_MAX 0xff
+#define XILINX_VDMA_DMACR_DELAY_SHIFT 24
+#define XILINX_VDMA_DMACR_FRAME_COUNT_MAX 0xff
+#define XILINX_VDMA_DMACR_FRAME_COUNT_SHIFT 16
+#define XILINX_VDMA_DMACR_ERR_IRQ BIT(14)
+#define XILINX_VDMA_DMACR_DLY_CNT_IRQ BIT(13)
+#define XILINX_VDMA_DMACR_FRM_CNT_IRQ BIT(12)
+#define XILINX_VDMA_DMACR_MASTER_SHIFT 8
+#define XILINX_VDMA_DMACR_FSYNCSRC_SHIFT 5
+#define XILINX_VDMA_DMACR_FRAMECNT_EN BIT(4)
+#define XILINX_VDMA_DMACR_GENLOCK_EN BIT(3)
+#define XILINX_VDMA_DMACR_RESET BIT(2)
+#define XILINX_VDMA_DMACR_CIRC_EN BIT(1)
+#define XILINX_VDMA_DMACR_RUNSTOP BIT(0)
+#define XILINX_VDMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5)
+
+#define XILINX_VDMA_REG_DMASR 0x0004
+#define XILINX_VDMA_DMASR_EOL_LATE_ERR BIT(15)
+#define XILINX_VDMA_DMASR_ERR_IRQ BIT(14)
+#define XILINX_VDMA_DMASR_DLY_CNT_IRQ BIT(13)
+#define XILINX_VDMA_DMASR_FRM_CNT_IRQ BIT(12)
+#define XILINX_VDMA_DMASR_SOF_LATE_ERR BIT(11)
+#define XILINX_VDMA_DMASR_SG_DEC_ERR BIT(10)
+#define XILINX_VDMA_DMASR_SG_SLV_ERR BIT(9)
+#define XILINX_VDMA_DMASR_EOF_EARLY_ERR BIT(8)
+#define XILINX_VDMA_DMASR_SOF_EARLY_ERR BIT(7)
+#define XILINX_VDMA_DMASR_DMA_DEC_ERR BIT(6)
+#define XILINX_VDMA_DMASR_DMA_SLAVE_ERR BIT(5)
+#define XILINX_VDMA_DMASR_DMA_INT_ERR BIT(4)
+#define XILINX_VDMA_DMASR_IDLE BIT(1)
+#define XILINX_VDMA_DMASR_HALTED BIT(0)
+#define XILINX_VDMA_DMASR_DELAY_MASK GENMASK(31, 24)
+#define XILINX_VDMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16)
+
+#define XILINX_VDMA_REG_CURDESC 0x0008
+#define XILINX_VDMA_REG_TAILDESC 0x0010
+#define XILINX_VDMA_REG_REG_INDEX 0x0014
+#define XILINX_VDMA_REG_FRMSTORE 0x0018
+#define XILINX_VDMA_REG_THRESHOLD 0x001c
+#define XILINX_VDMA_REG_FRMPTR_STS 0x0024
+#define XILINX_VDMA_REG_PARK_PTR 0x0028
+#define XILINX_VDMA_PARK_PTR_WR_REF_SHIFT 8
+#define XILINX_VDMA_PARK_PTR_RD_REF_SHIFT 0
+#define XILINX_VDMA_REG_VDMA_VERSION 0x002c
+
+/* Register Direct Mode Registers */
+#define XILINX_VDMA_REG_VSIZE 0x0000
+#define XILINX_VDMA_REG_HSIZE 0x0004
+
+#define XILINX_VDMA_REG_FRMDLY_STRIDE 0x0008
+#define XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24
+#define XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT 0
+
+#define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n))
+
+/* HW specific definitions */
+#define XILINX_VDMA_MAX_CHANS_PER_DEVICE 0x2
+
+#define XILINX_VDMA_DMAXR_ALL_IRQ_MASK \
+ (XILINX_VDMA_DMASR_FRM_CNT_IRQ | \
+ XILINX_VDMA_DMASR_DLY_CNT_IRQ | \
+ XILINX_VDMA_DMASR_ERR_IRQ)
+
+#define XILINX_VDMA_DMASR_ALL_ERR_MASK \
+ (XILINX_VDMA_DMASR_EOL_LATE_ERR | \
+ XILINX_VDMA_DMASR_SOF_LATE_ERR | \
+ XILINX_VDMA_DMASR_SG_DEC_ERR | \
+ XILINX_VDMA_DMASR_SG_SLV_ERR | \
+ XILINX_VDMA_DMASR_EOF_EARLY_ERR | \
+ XILINX_VDMA_DMASR_SOF_EARLY_ERR | \
+ XILINX_VDMA_DMASR_DMA_DEC_ERR | \
+ XILINX_VDMA_DMASR_DMA_SLAVE_ERR | \
+ XILINX_VDMA_DMASR_DMA_INT_ERR)
+
+/*
+ * Recoverable errors are DMA Internal error, SOF Early, EOF Early
+ * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
+ * is enabled in the h/w system.
+ */
+#define XILINX_VDMA_DMASR_ERR_RECOVER_MASK \
+ (XILINX_VDMA_DMASR_SOF_LATE_ERR | \
+ XILINX_VDMA_DMASR_EOF_EARLY_ERR | \
+ XILINX_VDMA_DMASR_SOF_EARLY_ERR | \
+ XILINX_VDMA_DMASR_DMA_INT_ERR)
+
+/* AXI VDMA Flush on Fsync bits */
+#define XILINX_VDMA_FLUSH_S2MM 3
+#define XILINX_VDMA_FLUSH_MM2S 2
+#define XILINX_VDMA_FLUSH_BOTH 1
+
+/* Delay loop counter to prevent hardware failure */
+#define XILINX_VDMA_LOOP_COUNT 1000000
+
+/**
+ * struct xilinx_vdma_desc_hw - Hardware Descriptor
+ * @next_desc: Next Descriptor Pointer @0x00
+ * @pad1: Reserved @0x04
+ * @buf_addr: Buffer address @0x08
+ * @pad2: Reserved @0x0C
+ * @vsize: Vertical Size @0x10
+ * @hsize: Horizontal Size @0x14
+ * @stride: Number of bytes between the first
+ * pixels of each horizontal line @0x18
+ */
+struct xilinx_vdma_desc_hw {
+ u32 next_desc;
+ u32 pad1;
+ u32 buf_addr;
+ u32 pad2;
+ u32 vsize;
+ u32 hsize;
+ u32 stride;
+} __aligned(64);
+
+/**
+ * struct xilinx_vdma_tx_segment - Descriptor segment
+ * @hw: Hardware descriptor
+ * @node: Node in the descriptor segments list
+ * @phys: Physical address of segment
+ */
+struct xilinx_vdma_tx_segment {
+ struct xilinx_vdma_desc_hw hw;
+ struct list_head node;
+ dma_addr_t phys;
+} __aligned(64);
+
+/**
+ * struct xilinx_vdma_tx_descriptor - Per Transaction structure
+ * @async_tx: Async transaction descriptor
+ * @segments: TX segments list
+ * @node: Node in the channel descriptors list
+ */
+struct xilinx_vdma_tx_descriptor {
+ struct dma_async_tx_descriptor async_tx;
+ struct list_head segments;
+ struct list_head node;
+};
+
+/**
+ * struct xilinx_vdma_chan - Driver specific VDMA channel structure
+ * @xdev: Driver specific device structure
+ * @ctrl_offset: Control registers offset
+ * @desc_offset: TX descriptor registers offset
+ * @lock: Descriptor operation lock
+ * @pending_list: Descriptors waiting
+ * @active_desc: Active descriptor
+ * @allocated_desc: Allocated descriptor
+ * @done_list: Complete descriptors
+ * @common: DMA common channel
+ * @desc_pool: Descriptors pool
+ * @dev: The dma device
+ * @irq: Channel IRQ
+ * @id: Channel ID
+ * @direction: Transfer direction
+ * @num_frms: Number of frames
+ * @has_sg: Support scatter transfers
+ * @genlock: Support genlock mode
+ * @err: Channel has errors
+ * @tasklet: Cleanup work after irq
+ * @config: Device configuration info
+ * @flush_on_fsync: Flush on Frame sync
+ */
+struct xilinx_vdma_chan {
+ struct xilinx_vdma_device *xdev;
+ u32 ctrl_offset;
+ u32 desc_offset;
+ spinlock_t lock;
+ struct list_head pending_list;
+ struct xilinx_vdma_tx_descriptor *active_desc;
+ struct xilinx_vdma_tx_descriptor *allocated_desc;
+ struct list_head done_list;
+ struct dma_chan common;
+ struct dma_pool *desc_pool;
+ struct device *dev;
+ int irq;
+ int id;
+ enum dma_transfer_direction direction;
+ int num_frms;
+ bool has_sg;
+ bool genlock;
+ bool err;
+ struct tasklet_struct tasklet;
+ struct xilinx_vdma_config config;
+ bool flush_on_fsync;
+};
+
+/**
+ * struct xilinx_vdma_device - VDMA device structure
+ * @regs: I/O mapped base address
+ * @dev: Device Structure
+ * @common: DMA device structure
+ * @chan: Driver specific VDMA channel
+ * @has_sg: Specifies whether Scatter-Gather is present or not
+ * @flush_on_fsync: Flush on frame sync
+ */
+struct xilinx_vdma_device {
+ void __iomem *regs;
+ struct device *dev;
+ struct dma_device common;
+ struct xilinx_vdma_chan *chan[XILINX_VDMA_MAX_CHANS_PER_DEVICE];
+ bool has_sg;
+ u32 flush_on_fsync;
+};
+
+/* Macros */
+#define to_xilinx_chan(chan) \
+ container_of(chan, struct xilinx_vdma_chan, common)
+#define to_vdma_tx_descriptor(tx) \
+ container_of(tx, struct xilinx_vdma_tx_descriptor, async_tx)
+
+/* IO accessors */
+static inline u32 vdma_read(struct xilinx_vdma_chan *chan, u32 reg)
+{
+ return ioread32(chan->xdev->regs + reg);
+}
+
+static inline void vdma_write(struct xilinx_vdma_chan *chan, u32 reg, u32 value)
+{
+ iowrite32(value, chan->xdev->regs + reg);
+}
+
+static inline void vdma_desc_write(struct xilinx_vdma_chan *chan, u32 reg,
+ u32 value)
+{
+ vdma_write(chan, chan->desc_offset + reg, value);
+}
+
+static inline u32 vdma_ctrl_read(struct xilinx_vdma_chan *chan, u32 reg)
+{
+ return vdma_read(chan, chan->ctrl_offset + reg);
+}
+
+static inline void vdma_ctrl_write(struct xilinx_vdma_chan *chan, u32 reg,
+ u32 value)
+{
+ vdma_write(chan, chan->ctrl_offset + reg, value);
+}
+
+static inline void vdma_ctrl_clr(struct xilinx_vdma_chan *chan, u32 reg,
+ u32 clr)
+{
+ vdma_ctrl_write(chan, reg, vdma_ctrl_read(chan, reg) & ~clr);
+}
+
+static inline void vdma_ctrl_set(struct xilinx_vdma_chan *chan, u32 reg,
+ u32 set)
+{
+ vdma_ctrl_write(chan, reg, vdma_ctrl_read(chan, reg) | set);
+}
+
+/* -----------------------------------------------------------------------------
+ * Descriptors and segments alloc and free
+ */
+
+/**
+ * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
+ * @chan: Driver specific VDMA channel
+ *
+ * Return: The allocated segment on success and NULL on failure.
+ */
+static struct xilinx_vdma_tx_segment *
+xilinx_vdma_alloc_tx_segment(struct xilinx_vdma_chan *chan)
+{
+ struct xilinx_vdma_tx_segment *segment;
+ dma_addr_t phys;
+
+ segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys);
+ if (!segment)
+ return NULL;
+
+ memset(segment, 0, sizeof(*segment));
+ segment->phys = phys;
+
+ return segment;
+}
+
+/**
+ * xilinx_vdma_free_tx_segment - Free transaction segment
+ * @chan: Driver specific VDMA channel
+ * @segment: VDMA transaction segment
+ */
+static void xilinx_vdma_free_tx_segment(struct xilinx_vdma_chan *chan,
+ struct xilinx_vdma_tx_segment *segment)
+{
+ dma_pool_free(chan->desc_pool, segment, segment->phys);
+}
+
+/**
+ * xilinx_vdma_tx_descriptor - Allocate transaction descriptor
+ * @chan: Driver specific VDMA channel
+ *
+ * Return: The allocated descriptor on success and NULL on failure.
+ */
+static struct xilinx_vdma_tx_descriptor *
+xilinx_vdma_alloc_tx_descriptor(struct xilinx_vdma_chan *chan)
+{
+ struct xilinx_vdma_tx_descriptor *desc;
+ unsigned long flags;
+
+ if (chan->allocated_desc)
+ return chan->allocated_desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return NULL;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ chan->allocated_desc = desc;
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ INIT_LIST_HEAD(&desc->segments);
+
+ return desc;
+}
+
+/**
+ * xilinx_vdma_free_tx_descriptor - Free transaction descriptor
+ * @chan: Driver specific VDMA channel
+ * @desc: VDMA transaction descriptor
+ */
+static void
+xilinx_vdma_free_tx_descriptor(struct xilinx_vdma_chan *chan,
+ struct xilinx_vdma_tx_descriptor *desc)
+{
+ struct xilinx_vdma_tx_segment *segment, *next;
+
+ if (!desc)
+ return;
+
+ list_for_each_entry_safe(segment, next, &desc->segments, node) {
+ list_del(&segment->node);
+ xilinx_vdma_free_tx_segment(chan, segment);
+ }
+
+ kfree(desc);
+}
+
+/* Required functions */
+
+/**
+ * xilinx_vdma_free_desc_list - Free descriptors list
+ * @chan: Driver specific VDMA channel
+ * @list: List to parse and delete the descriptor
+ */
+static void xilinx_vdma_free_desc_list(struct xilinx_vdma_chan *chan,
+ struct list_head *list)
+{
+ struct xilinx_vdma_tx_descriptor *desc, *next;
+
+ list_for_each_entry_safe(desc, next, list, node) {
+ list_del(&desc->node);
+ xilinx_vdma_free_tx_descriptor(chan, desc);
+ }
+}
+
+/**
+ * xilinx_vdma_free_descriptors - Free channel descriptors
+ * @chan: Driver specific VDMA channel
+ */
+static void xilinx_vdma_free_descriptors(struct xilinx_vdma_chan *chan)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ xilinx_vdma_free_desc_list(chan, &chan->pending_list);
+ xilinx_vdma_free_desc_list(chan, &chan->done_list);
+
+ xilinx_vdma_free_tx_descriptor(chan, chan->active_desc);
+ chan->active_desc = NULL;
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_vdma_free_chan_resources - Free channel resources
+ * @dchan: DMA channel
+ */
+static void xilinx_vdma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+
+ dev_dbg(chan->dev, "Free all channel resources.\n");
+
+ xilinx_vdma_free_descriptors(chan);
+ dma_pool_destroy(chan->desc_pool);
+ chan->desc_pool = NULL;
+}
+
+/**
+ * xilinx_vdma_chan_desc_cleanup - Clean channel descriptors
+ * @chan: Driver specific VDMA channel
+ */
+static void xilinx_vdma_chan_desc_cleanup(struct xilinx_vdma_chan *chan)
+{
+ struct xilinx_vdma_tx_descriptor *desc, *next;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ list_for_each_entry_safe(desc, next, &chan->done_list, node) {
+ dma_async_tx_callback callback;
+ void *callback_param;
+
+ /* Remove from the list of running transactions */
+ list_del(&desc->node);
+
+ /* Run the link descriptor callback function */
+ callback = desc->async_tx.callback;
+ callback_param = desc->async_tx.callback_param;
+ if (callback) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ callback(callback_param);
+ spin_lock_irqsave(&chan->lock, flags);
+ }
+
+ /* Run any dependencies, then free the descriptor */
+ dma_run_dependencies(&desc->async_tx);
+ xilinx_vdma_free_tx_descriptor(chan, desc);
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_vdma_do_tasklet - Schedule completion tasklet
+ * @data: Pointer to the Xilinx VDMA channel structure
+ */
+static void xilinx_vdma_do_tasklet(unsigned long data)
+{
+ struct xilinx_vdma_chan *chan = (struct xilinx_vdma_chan *)data;
+
+ xilinx_vdma_chan_desc_cleanup(chan);
+}
+
+/**
+ * xilinx_vdma_alloc_chan_resources - Allocate channel resources
+ * @dchan: DMA channel
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_vdma_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+
+ /* Has this channel already been allocated? */
+ if (chan->desc_pool)
+ return 0;
+
+ /*
+ * The descriptor must be 64-byte aligned to meet the
+ * Xilinx VDMA specification requirement.
+ */
+ chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
+ chan->dev,
+ sizeof(struct xilinx_vdma_tx_segment),
+ __alignof__(struct xilinx_vdma_tx_segment), 0);
+ if (!chan->desc_pool) {
+ dev_err(chan->dev,
+ "unable to allocate channel %d descriptor pool\n",
+ chan->id);
+ return -ENOMEM;
+ }
+
+ dma_cookie_init(dchan);
+ return 0;
+}
+
+/**
+ * xilinx_vdma_tx_status - Get VDMA transaction status
+ * @dchan: DMA channel
+ * @cookie: Transaction identifier
+ * @txstate: Transaction state
+ *
+ * Return: DMA transaction status
+ */
+static enum dma_status xilinx_vdma_tx_status(struct dma_chan *dchan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ return dma_cookie_status(dchan, cookie, txstate);
+}
+
+/**
+ * xilinx_vdma_is_running - Check if VDMA channel is running
+ * @chan: Driver specific VDMA channel
+ *
+ * Return: 'true' if running, 'false' if not.
+ */
+static bool xilinx_vdma_is_running(struct xilinx_vdma_chan *chan)
+{
+ return !(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
+ XILINX_VDMA_DMASR_HALTED) &&
+ (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
+ XILINX_VDMA_DMACR_RUNSTOP);
+}
+
+/**
+ * xilinx_vdma_is_idle - Check if VDMA channel is idle
+ * @chan: Driver specific VDMA channel
+ *
+ * Return: 'true' if idle, 'false' if not.
+ */
+static bool xilinx_vdma_is_idle(struct xilinx_vdma_chan *chan)
+{
+ return vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
+ XILINX_VDMA_DMASR_IDLE;
+}
+
+/**
+ * xilinx_vdma_halt - Halt VDMA channel
+ * @chan: Driver specific VDMA channel
+ */
+static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan)
+{
+ int loop = XILINX_VDMA_LOOP_COUNT;
+
+ vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);
+
+ /* Wait for the hardware to halt */
+ do {
+ if (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
+ XILINX_VDMA_DMASR_HALTED)
+ break;
+ } while (loop--);
+
+ /* loop only goes negative if the wait above timed out */
+ if (loop < 0) {
+ dev_err(chan->dev, "Cannot stop channel %p: %x\n",
+ chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
+ chan->err = true;
+ }
+}
+
+/**
+ * xilinx_vdma_start - Start VDMA channel
+ * @chan: Driver specific VDMA channel
+ */
+static void xilinx_vdma_start(struct xilinx_vdma_chan *chan)
+{
+ int loop = XILINX_VDMA_LOOP_COUNT;
+
+ vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);
+
+ /* Wait for the hardware to start */
+ do {
+ if (!(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
+ XILINX_VDMA_DMASR_HALTED))
+ break;
+ } while (loop--);
+
+ /* loop only goes negative if the wait above timed out */
+ if (loop < 0) {
+ dev_err(chan->dev, "Cannot start channel %p: %x\n",
+ chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
+
+ chan->err = true;
+ }
+}
+
+/**
+ * xilinx_vdma_start_transfer - Starts VDMA transfer
+ * @chan: Driver specific channel struct pointer
+ */
+static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
+{
+ struct xilinx_vdma_config *config = &chan->config;
+ struct xilinx_vdma_tx_descriptor *desc;
+ unsigned long flags;
+ u32 reg;
+ struct xilinx_vdma_tx_segment *head, *tail = NULL;
+
+ if (chan->err)
+ return;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ /* There's already an active descriptor, bail out. */
+ if (chan->active_desc)
+ goto out_unlock;
+
+ if (list_empty(&chan->pending_list))
+ goto out_unlock;
+
+ desc = list_first_entry(&chan->pending_list,
+ struct xilinx_vdma_tx_descriptor, node);
+
+ /* If it is SG mode and hardware is busy, cannot submit */
+ if (chan->has_sg && xilinx_vdma_is_running(chan) &&
+ !xilinx_vdma_is_idle(chan)) {
+ dev_dbg(chan->dev, "DMA controller still busy\n");
+ goto out_unlock;
+ }
+
+ /*
+ * If hardware is idle, then all descriptors on the running lists are
+ * done, start new transfers
+ */
+ if (chan->has_sg) {
+ head = list_first_entry(&desc->segments,
+ struct xilinx_vdma_tx_segment, node);
+ tail = list_entry(desc->segments.prev,
+ struct xilinx_vdma_tx_segment, node);
+
+ vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC, head->phys);
+ }
+
+ /* Configure the hardware using info in the config structure */
+ reg = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR);
+
+ if (config->frm_cnt_en)
+ reg |= XILINX_VDMA_DMACR_FRAMECNT_EN;
+ else
+ reg &= ~XILINX_VDMA_DMACR_FRAMECNT_EN;
+
+ /*
+ * With SG, start with circular mode, so that BDs can be fetched.
+ * In direct register mode, if not parking, enable circular mode
+ */
+ if (chan->has_sg || !config->park)
+ reg |= XILINX_VDMA_DMACR_CIRC_EN;
+
+ if (config->park)
+ reg &= ~XILINX_VDMA_DMACR_CIRC_EN;
+
+ vdma_ctrl_write(chan, XILINX_VDMA_REG_DMACR, reg);
+
+ if (config->park && (config->park_frm >= 0) &&
+ (config->park_frm < chan->num_frms)) {
+ if (chan->direction == DMA_MEM_TO_DEV)
+ vdma_write(chan, XILINX_VDMA_REG_PARK_PTR,
+ config->park_frm <<
+ XILINX_VDMA_PARK_PTR_RD_REF_SHIFT);
+ else
+ vdma_write(chan, XILINX_VDMA_REG_PARK_PTR,
+ config->park_frm <<
+ XILINX_VDMA_PARK_PTR_WR_REF_SHIFT);
+ }
+
+ /* Start the hardware */
+ xilinx_vdma_start(chan);
+
+ if (chan->err)
+ goto out_unlock;
+
+ /* Start the transfer */
+ if (chan->has_sg) {
+ vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC, tail->phys);
+ } else {
+ struct xilinx_vdma_tx_segment *segment, *last = NULL;
+ int i = 0;
+
+ list_for_each_entry(segment, &desc->segments, node) {
+ vdma_desc_write(chan,
+ XILINX_VDMA_REG_START_ADDRESS(i++),
+ segment->hw.buf_addr);
+ last = segment;
+ }
+
+ if (!last)
+ goto out_unlock;
+
+ /* HW expects these parameters to be same for one transaction */
+ vdma_desc_write(chan, XILINX_VDMA_REG_HSIZE, last->hw.hsize);
+ vdma_desc_write(chan, XILINX_VDMA_REG_FRMDLY_STRIDE,
+ last->hw.stride);
+ vdma_desc_write(chan, XILINX_VDMA_REG_VSIZE, last->hw.vsize);
+ }
+
+ list_del(&desc->node);
+ chan->active_desc = desc;
+
+out_unlock:
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_vdma_issue_pending - Issue pending transactions
+ * @dchan: DMA channel
+ */
+static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
+{
+ struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+
+ xilinx_vdma_start_transfer(chan);
+}
+
+/**
+ * xilinx_vdma_complete_descriptor - Mark the active descriptor as complete
+ * @chan: Xilinx DMA channel
+ *
+ * CONTEXT: hardirq
+ */
+static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan)
+{
+ struct xilinx_vdma_tx_descriptor *desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ desc = chan->active_desc;
+ if (!desc) {
+ dev_dbg(chan->dev, "no running descriptors\n");
+ goto out_unlock;
+ }
+
+ dma_cookie_complete(&desc->async_tx);
+ list_add_tail(&desc->node, &chan->done_list);
+
+ chan->active_desc = NULL;
+
+out_unlock:
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_vdma_reset - Reset VDMA channel
+ * @chan: Driver specific VDMA channel
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan)
+{
+ int loop = XILINX_VDMA_LOOP_COUNT;
+ u32 tmp;
+
+ vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RESET);
+
+ /* Wait for the hardware to finish reset */
+ do {
+ tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
+ XILINX_VDMA_DMACR_RESET;
+ } while (loop-- && tmp);
+
+ /* the reset bit still being set means the wait timed out */
+ if (tmp) {
+ dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
+ vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR),
+ vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
+ return -ETIMEDOUT;
+ }
+
+ chan->err = false;
+
+ return 0;
+}
+
+/**
+ * xilinx_vdma_chan_reset - Reset VDMA channel and enable interrupts
+ * @chan: Driver specific VDMA channel
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_vdma_chan_reset(struct xilinx_vdma_chan *chan)
+{
+ int err;
+
+ /* Reset VDMA */
+ err = xilinx_vdma_reset(chan);
+ if (err)
+ return err;
+
+ /* Enable interrupts */
+ vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR,
+ XILINX_VDMA_DMAXR_ALL_IRQ_MASK);
+
+ return 0;
+}
+
+/**
+ * xilinx_vdma_irq_handler - VDMA Interrupt handler
+ * @irq: IRQ number
+ * @data: Pointer to the Xilinx VDMA channel structure
+ *
+ * Return: IRQ_HANDLED/IRQ_NONE
+ */
+static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
+{
+ struct xilinx_vdma_chan *chan = data;
+ u32 status;
+
+ /* Read the status and ack the interrupts. */
+ status = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR);
+ if (!(status & XILINX_VDMA_DMAXR_ALL_IRQ_MASK))
+ return IRQ_NONE;
+
+ vdma_ctrl_write(chan, XILINX_VDMA_REG_DMASR,
+ status & XILINX_VDMA_DMAXR_ALL_IRQ_MASK);
+
+ if (status & XILINX_VDMA_DMASR_ERR_IRQ) {
+ /*
+ * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
+ * error is recoverable, ignore it. Otherwise flag the error.
+ *
+ * Only recoverable errors can be cleared in the DMASR
+ * register, so take care not to write 1 to any other
+ * error bits.
+ */
+ u32 errors = status & XILINX_VDMA_DMASR_ALL_ERR_MASK;
+
+ vdma_ctrl_write(chan, XILINX_VDMA_REG_DMASR,
+ errors & XILINX_VDMA_DMASR_ERR_RECOVER_MASK);
+
+ if (!chan->flush_on_fsync ||
+ (errors & ~XILINX_VDMA_DMASR_ERR_RECOVER_MASK)) {
+ dev_err(chan->dev,
+ "Channel %p has errors %x, cdr %x tdr %x\n",
+ chan, errors,
+ vdma_ctrl_read(chan, XILINX_VDMA_REG_CURDESC),
+ vdma_ctrl_read(chan, XILINX_VDMA_REG_TAILDESC));
+ chan->err = true;
+ }
+ }
+
+ if (status & XILINX_VDMA_DMASR_DLY_CNT_IRQ) {
+ /*
+ * The device is taking longer to complete the transfer
+ * than the configured inter-packet delay allows.
+ */
+ dev_dbg(chan->dev, "Inter-packet latency too long\n");
+ }
+
+ if (status & XILINX_VDMA_DMASR_FRM_CNT_IRQ) {
+ xilinx_vdma_complete_descriptor(chan);
+ xilinx_vdma_start_transfer(chan);
+ }
+
+ tasklet_schedule(&chan->tasklet);
+ return IRQ_HANDLED;
+}
+
+/**
+ * xilinx_vdma_tx_submit - Submit DMA transaction
+ * @tx: Async transaction descriptor
+ *
+ * Return: cookie value on success and failure value on error
+ */
+static dma_cookie_t xilinx_vdma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct xilinx_vdma_tx_descriptor *desc = to_vdma_tx_descriptor(tx);
+ struct xilinx_vdma_chan *chan = to_xilinx_chan(tx->chan);
+ dma_cookie_t cookie;
+ unsigned long flags;
+ int err;
+
+ if (chan->err) {
+ /*
+ * If the reset fails, the channel is no longer
+ * functional and the system needs a hard reset.
+ */
+ err = xilinx_vdma_chan_reset(chan);
+ if (err < 0)
+ return err;
+ }
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ cookie = dma_cookie_assign(tx);
+
+ /* Append the transaction to the pending transactions queue. */
+ list_add_tail(&desc->node, &chan->pending_list);
+
+ /* Free the allocated desc */
+ chan->allocated_desc = NULL;
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return cookie;
+}
+
+/**
+ * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
+ * DMA_SLAVE transaction
+ * @dchan: DMA channel
+ * @xt: Interleaved template pointer
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
+ struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_vdma_tx_descriptor *desc;
+ struct xilinx_vdma_tx_segment *segment, *prev = NULL;
+ struct xilinx_vdma_desc_hw *hw;
+
+ if (!is_slave_direction(xt->dir))
+ return NULL;
+
+ if (!xt->numf || !xt->sgl[0].size)
+ return NULL;
+
+ /* Allocate a transaction descriptor. */
+ desc = xilinx_vdma_alloc_tx_descriptor(chan);
+ if (!desc)
+ return NULL;
+
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = xilinx_vdma_tx_submit;
+ async_tx_ack(&desc->async_tx);
+
+ /* Allocate the link descriptor from DMA pool */
+ segment = xilinx_vdma_alloc_tx_segment(chan);
+ if (!segment)
+ goto error;
+
+ /* Fill in the hardware descriptor */
+ hw = &segment->hw;
+ hw->vsize = xt->numf;
+ hw->hsize = xt->sgl[0].size;
+ hw->stride = xt->sgl[0].icg <<
+ XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT;
+ hw->stride |= chan->config.frm_dly <<
+ XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
+
+ if (xt->dir != DMA_MEM_TO_DEV)
+ hw->buf_addr = xt->dst_start;
+ else
+ hw->buf_addr = xt->src_start;
+
+ /* Link the previous next descriptor to current */
+ prev = list_last_entry(&desc->segments,
+ struct xilinx_vdma_tx_segment, node);
+ prev->hw.next_desc = segment->phys;
+
+ /* Insert the segment into the descriptor segments list. */
+ list_add_tail(&segment->node, &desc->segments);
+
+ prev = segment;
+
+ /* Link the last hardware descriptor with the first. */
+ segment = list_first_entry(&desc->segments,
+ struct xilinx_vdma_tx_segment, node);
+ prev->hw.next_desc = segment->phys;
+
+ return &desc->async_tx;
+
+error:
+ xilinx_vdma_free_tx_descriptor(chan, desc);
+ return NULL;
+}
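+
+/*
+ * Illustrative client-side call sequence (a sketch, not part of this
+ * driver; "frame_buf_phys", "width", "height" and "stride" are
+ * hypothetical). The template maps onto the hardware frame geometry:
+ * numf lines of sgl[0].size bytes each, with the line-to-line distance
+ * given by size + icg:
+ *
+ *	struct dma_interleaved_template *xt;
+ *
+ *	xt = kzalloc(sizeof(*xt) + sizeof(xt->sgl[0]), GFP_KERNEL);
+ *	xt->dir = DMA_MEM_TO_DEV;
+ *	xt->src_start = frame_buf_phys;
+ *	xt->frame_size = 1;			/* one chunk per frame */
+ *	xt->numf = height;			/* lines per frame */
+ *	xt->sgl[0].size = width;		/* bytes per line */
+ *	xt->sgl[0].icg = stride - width;	/* gap between lines */
+ *
+ *	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
+ *	cookie = dmaengine_submit(tx);
+ *	dma_async_issue_pending(chan);
+ */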
+
+/**
+ * xilinx_vdma_terminate_all - Halt the channel and free descriptors
+ * @chan: Driver specific VDMA Channel pointer
+ */
+static void xilinx_vdma_terminate_all(struct xilinx_vdma_chan *chan)
+{
+ /* Halt the DMA engine */
+ xilinx_vdma_halt(chan);
+
+ /* Remove and free all of the descriptors in the lists */
+ xilinx_vdma_free_descriptors(chan);
+}
+
+/**
+ * xilinx_vdma_channel_set_config - Configure a VDMA channel
+ * @dchan: DMA channel
+ * @cfg: VDMA device configuration pointer
+ *
+ * Run-time configuration for the AXI VDMA; supports:
+ * . resetting the channel
+ * . configuring interrupt coalescing and the inter-packet delay threshold
+ * . starting/stopping parking
+ * . enabling genlock
+ *
+ * Return: '0' on success and failure value on error
+ */
+int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
+ struct xilinx_vdma_config *cfg)
+{
+ struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+ u32 dmacr;
+
+ if (cfg->reset)
+ return xilinx_vdma_chan_reset(chan);
+
+ dmacr = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR);
+
+ chan->config.frm_dly = cfg->frm_dly;
+ chan->config.park = cfg->park;
+
+ /* genlock settings */
+ chan->config.gen_lock = cfg->gen_lock;
+ chan->config.master = cfg->master;
+
+ if (cfg->gen_lock && chan->genlock) {
+ dmacr |= XILINX_VDMA_DMACR_GENLOCK_EN;
+ dmacr |= cfg->master << XILINX_VDMA_DMACR_MASTER_SHIFT;
+ }
+
+ chan->config.frm_cnt_en = cfg->frm_cnt_en;
+ if (cfg->park)
+ chan->config.park_frm = cfg->park_frm;
+ else
+ chan->config.park_frm = -1;
+
+ chan->config.coalesc = cfg->coalesc;
+ chan->config.delay = cfg->delay;
+
+ if (cfg->coalesc <= XILINX_VDMA_DMACR_FRAME_COUNT_MAX)
+ dmacr |= cfg->coalesc << XILINX_VDMA_DMACR_FRAME_COUNT_SHIFT;
+
+ if (cfg->delay <= XILINX_VDMA_DMACR_DELAY_MAX)
+ dmacr |= cfg->delay << XILINX_VDMA_DMACR_DELAY_SHIFT;
+
+ /* FSync Source selection */
+ dmacr &= ~XILINX_VDMA_DMACR_FSYNCSRC_MASK;
+ dmacr |= cfg->ext_fsync << XILINX_VDMA_DMACR_FSYNCSRC_SHIFT;
+
+ vdma_ctrl_write(chan, XILINX_VDMA_REG_DMACR, dmacr);
+
+ return 0;
+}
+EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
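+
+/*
+ * Illustrative usage from a hypothetical client driver (a sketch;
+ * "disp->chan" and the values are assumptions, not part of this API):
+ *
+ *	struct xilinx_vdma_config cfg = {
+ *		.gen_lock = 1,
+ *		.master = 0,
+ *		.coalesc = 1,
+ *	};
+ *
+ *	err = xilinx_vdma_channel_set_config(disp->chan, &cfg);
+ *	if (err)
+ *		return err;
+ */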
+
+/**
+ * xilinx_vdma_device_control - Configure DMA channel of the device
+ * @dchan: DMA Channel pointer
+ * @cmd: DMA control command
+ * @arg: Channel configuration
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_vdma_device_control(struct dma_chan *dchan,
+ enum dma_ctrl_cmd cmd, unsigned long arg)
+{
+ struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
+ xilinx_vdma_terminate_all(chan);
+
+ return 0;
+}
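+
+/*
+ * Note: clients do not call this directly; they reach it through the
+ * dmaengine wrapper, e.g. dmaengine_terminate_all(chan).
+ */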
+
+/* -----------------------------------------------------------------------------
+ * Probe and remove
+ */
+
+/**
+ * xilinx_vdma_chan_remove - Per Channel remove function
+ * @chan: Driver specific VDMA channel
+ */
+static void xilinx_vdma_chan_remove(struct xilinx_vdma_chan *chan)
+{
+ /* Disable all interrupts */
+ vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR,
+ XILINX_VDMA_DMAXR_ALL_IRQ_MASK);
+
+ if (chan->irq > 0)
+ free_irq(chan->irq, chan);
+
+ tasklet_kill(&chan->tasklet);
+
+ list_del(&chan->common.device_node);
+}
+
+/**
+ * xilinx_vdma_chan_probe - Per-channel probing
+ * It gets the channel features from the device-tree entry and
+ * initializes the channel-specific handling routines
+ *
+ * @xdev: Driver specific device structure
+ * @node: Device node
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev,
+ struct device_node *node)
+{
+ struct xilinx_vdma_chan *chan;
+ bool has_dre = false;
+ u32 value, width;
+ int err;
+
+ /* Allocate and initialize the channel structure */
+ chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+ chan->dev = xdev->dev;
+ chan->xdev = xdev;
+ chan->has_sg = xdev->has_sg;
+
+ spin_lock_init(&chan->lock);
+ INIT_LIST_HEAD(&chan->pending_list);
+ INIT_LIST_HEAD(&chan->done_list);
+
+ /* Retrieve the channel properties from the device tree */
+ has_dre = of_property_read_bool(node, "xlnx,include-dre");
+
+ chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
+
+ err = of_property_read_u32(node, "xlnx,datawidth", &value);
+ if (err) {
+ dev_err(xdev->dev, "missing xlnx,datawidth property\n");
+ return err;
+ }
+ width = value >> 3; /* Convert bits to bytes */
+
+ /* If data width is greater than 8 bytes, DRE is not in hw */
+ if (width > 8)
+ has_dre = false;
+
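+ /*
+ * Without DRE (data realignment engine) transfers must be aligned
+ * to the bus width; fls(width - 1) is log2(width) for the
+ * power-of-two widths the core supports.
+ */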
+ if (!has_dre)
+ xdev->common.copy_align = fls(width - 1);
+
+ if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel")) {
+ chan->direction = DMA_MEM_TO_DEV;
+ chan->id = 0;
+
+ chan->ctrl_offset = XILINX_VDMA_MM2S_CTRL_OFFSET;
+ chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
+
+ if (xdev->flush_on_fsync == XILINX_VDMA_FLUSH_BOTH ||
+ xdev->flush_on_fsync == XILINX_VDMA_FLUSH_MM2S)
+ chan->flush_on_fsync = true;
+ } else if (of_device_is_compatible(node,
+ "xlnx,axi-vdma-s2mm-channel")) {
+ chan->direction = DMA_DEV_TO_MEM;
+ chan->id = 1;
+
+ chan->ctrl_offset = XILINX_VDMA_S2MM_CTRL_OFFSET;
+ chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
+
+ if (xdev->flush_on_fsync == XILINX_VDMA_FLUSH_BOTH ||
+ xdev->flush_on_fsync == XILINX_VDMA_FLUSH_S2MM)
+ chan->flush_on_fsync = true;
+ } else {
+ dev_err(xdev->dev, "Invalid channel compatible node\n");
+ return -EINVAL;
+ }
+
+ /* Map and request the interrupt */
+ chan->irq = irq_of_parse_and_map(node, 0);
+ if (!chan->irq) {
+ dev_err(xdev->dev, "unable to map channel IRQ\n");
+ return -EINVAL;
+ }
+ err = request_irq(chan->irq, xilinx_vdma_irq_handler, IRQF_SHARED,
+ "xilinx-vdma-controller", chan);
+ if (err) {
+ dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
+ return err;
+ }
+
+ /* Initialize the tasklet */
+ tasklet_init(&chan->tasklet, xilinx_vdma_do_tasklet,
+ (unsigned long)chan);
+
+ /*
+ * Initialize the DMA channel and add it to the DMA engine channels
+ * list.
+ */
+ chan->common.device = &xdev->common;
+
+ list_add_tail(&chan->common.device_node, &xdev->common.channels);
+ xdev->chan[chan->id] = chan;
+
+ /* Reset the channel */
+ err = xilinx_vdma_chan_reset(chan);
+ if (err < 0) {
+ dev_err(xdev->dev, "Reset channel failed\n");
+ return err;
+ }
+
+ return 0;
+}
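+
+/*
+ * Illustrative channel node matched by this function (the unit address,
+ * interrupt specifier and property values are examples only):
+ *
+ *	dma-channel@40030000 {
+ *		compatible = "xlnx,axi-vdma-mm2s-channel";
+ *		interrupts = <0 54 4>;
+ *		xlnx,datawidth = <0x40>;
+ *		xlnx,genlock-mode;
+ *		xlnx,include-dre;
+ *	};
+ */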
+
+/**
+ * of_dma_xilinx_xlate - Translation function
+ * @dma_spec: Pointer to DMA specifier as found in the device tree
+ * @ofdma: Pointer to DMA controller data
+ *
+ * Return: DMA channel pointer on success and NULL on error
+ */
+static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct xilinx_vdma_device *xdev = ofdma->of_dma_data;
+ int chan_id = dma_spec->args[0];
+
+ if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE || !xdev->chan[chan_id])
+ return NULL;
+
+ return dma_get_slave_channel(&xdev->chan[chan_id]->common);
+}
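+
+/*
+ * A consumer reaches this translation through the generic DT bindings,
+ * e.g. (the phandle and channel name are hypothetical):
+ *
+ *	dmas = <&axi_vdma_0 0>;
+ *	dma-names = "vdma0";
+ *
+ * and then in the consumer driver:
+ *
+ *	chan = dma_request_slave_channel(dev, "vdma0");
+ */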
+
+/**
+ * xilinx_vdma_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_vdma_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct xilinx_vdma_device *xdev;
+ struct device_node *child;
+ struct resource *io;
+ u32 num_frames;
+ int i, err;
+
+ /* Allocate and initialize the DMA engine structure */
+ xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
+ if (!xdev)
+ return -ENOMEM;
+
+ xdev->dev = &pdev->dev;
+
+ /* Request and map I/O memory */
+ io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xdev->regs = devm_ioremap_resource(&pdev->dev, io);
+ if (IS_ERR(xdev->regs))
+ return PTR_ERR(xdev->regs);
+
+ /* Retrieve the DMA engine properties from the device tree */
+ xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
+
+ err = of_property_read_u32(node, "xlnx,num-fstores", &num_frames);
+ if (err < 0) {
+ dev_err(xdev->dev, "missing xlnx,num-fstores property\n");
+ return err;
+ }
+
+ err = of_property_read_u32(node, "xlnx,flush-fsync",
+ &xdev->flush_on_fsync);
+ if (err < 0)
+ dev_warn(xdev->dev, "missing xlnx,flush-fsync property\n");
+
+ /* Initialize the DMA engine */
+ xdev->common.dev = &pdev->dev;
+
+ INIT_LIST_HEAD(&xdev->common.channels);
+ dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
+ dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
+
+ xdev->common.device_alloc_chan_resources =
+ xilinx_vdma_alloc_chan_resources;
+ xdev->common.device_free_chan_resources =
+ xilinx_vdma_free_chan_resources;
+ xdev->common.device_prep_interleaved_dma =
+ xilinx_vdma_dma_prep_interleaved;
+ xdev->common.device_control = xilinx_vdma_device_control;
+ xdev->common.device_tx_status = xilinx_vdma_tx_status;
+ xdev->common.device_issue_pending = xilinx_vdma_issue_pending;
+
+ platform_set_drvdata(pdev, xdev);
+
+ /* Initialize the channels */
+ for_each_child_of_node(node, child) {
+ err = xilinx_vdma_chan_probe(xdev, child);
+ if (err < 0)
+ goto error;
+ }
+
+ for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++)
+ if (xdev->chan[i])
+ xdev->chan[i]->num_frms = num_frames;
+
+ /* Register the DMA engine with the core */
+ err = dma_async_device_register(&xdev->common);
+ if (err) {
+ dev_err(&pdev->dev, "unable to register the DMA engine\n");
+ goto error;
+ }
+
+ err = of_dma_controller_register(node, of_dma_xilinx_xlate,
+ xdev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Unable to register DMA to DT\n");
+ dma_async_device_unregister(&xdev->common);
+ goto error;
+ }
+
+ dev_info(&pdev->dev, "Xilinx AXI VDMA engine driver probed\n");
+
+ return 0;
+
+error:
+ for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++)
+ if (xdev->chan[i])
+ xilinx_vdma_chan_remove(xdev->chan[i]);
+
+ return err;
+}
+
+/**
+ * xilinx_vdma_remove - Driver remove function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: Always '0'
+ */
+static int xilinx_vdma_remove(struct platform_device *pdev)
+{
+ struct xilinx_vdma_device *xdev = platform_get_drvdata(pdev);
+ int i;
+
+ of_dma_controller_free(pdev->dev.of_node);
+
+ dma_async_device_unregister(&xdev->common);
+
+ for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++)
+ if (xdev->chan[i])
+ xilinx_vdma_chan_remove(xdev->chan[i]);
+
+ return 0;
+}
+
+static const struct of_device_id xilinx_vdma_of_ids[] = {
+ { .compatible = "xlnx,axi-vdma-1.00.a",},
+ {}
+};
+
+static struct platform_driver xilinx_vdma_driver = {
+ .driver = {
+ .name = "xilinx-vdma",
+ .owner = THIS_MODULE,
+ .of_match_table = xilinx_vdma_of_ids,
+ },
+ .probe = xilinx_vdma_probe,
+ .remove = xilinx_vdma_remove,
+};
+
+module_platform_driver(xilinx_vdma_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx VDMA driver");
+MODULE_LICENSE("GPL v2");