Diffstat (limited to 'drivers/dma/intel_mid_dma.c')
 drivers/dma/intel_mid_dma.c | 68 ++++++++++++----------------------
 1 file changed, 26 insertions(+), 42 deletions(-)
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 74f70aadf9e..1aab8130efa 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -29,6 +29,8 @@
#include <linux/intel_mid_dma.h>
#include <linux/module.h>
+#include "dmaengine.h"
+
#define MAX_CHAN 4 /*max ch across controllers*/
#include "intel_mid_dma_regs.h"
@@ -288,7 +290,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
struct intel_mid_dma_lli *llitem;
void *param_txd = NULL;
- midc->completed = txd->cookie;
+ dma_cookie_complete(txd);
callback_txd = txd->callback;
param_txd = txd->callback_param;
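[Note: dma_cookie_complete() is supplied by the driver-local "dmaengine.h" included above. Roughly, paraphrasing that helper rather than quoting it verbatim, it records the descriptor's cookie as the channel's last completed cookie:

	static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
	{
		BUG_ON(tx->cookie < DMA_MIN_COOKIE);
		tx->chan->completed_cookie = tx->cookie;	/* mark tx as done */
		tx->cookie = 0;					/* cookie is consumed */
	}
]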
@@ -307,7 +309,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
callback_txd(param_txd);
}
if (midc->raw_tfr) {
- desc->status = DMA_SUCCESS;
+ desc->status = DMA_COMPLETE;
if (desc->lli != NULL) {
pci_pool_free(desc->lli_pool, desc->lli,
desc->lli_phys);
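[Note: DMA_SUCCESS was renamed DMA_COMPLETE in the dmaengine core, so converted drivers use the new name. The status enum is, approximately:

	enum dma_status {
		DMA_COMPLETE,		/* transaction completed (was DMA_SUCCESS) */
		DMA_IN_PROGRESS,	/* transaction not yet processed */
		DMA_PAUSED,		/* transaction is paused */
		DMA_ERROR,		/* transaction failed */
	};
]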
@@ -392,11 +394,11 @@ static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
}
}
/*Populate CTL_HI values*/
- ctl_hi.ctlx.block_ts = get_block_ts(sg->length,
+ ctl_hi.ctlx.block_ts = get_block_ts(sg_dma_len(sg),
desc->width,
midc->dma->block_size);
/*Populate SAR and DAR values*/
- sg_phy_addr = sg_phys(sg);
+ sg_phy_addr = sg_dma_address(sg);
if (desc->dirn == DMA_MEM_TO_DEV) {
lli_bloc_desc->sar = sg_phy_addr;
lli_bloc_desc->dar = mids->dma_slave.dst_addr;
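[Note: sg_phys()/sg->length give the CPU-side view of a scatterlist entry; once the list has been mapped with dma_map_sg(), possibly through an IOMMU, the device must use sg_dma_address()/sg_dma_len(). A minimal illustrative pattern, with hypothetical names:

	struct scatterlist *sg;
	int i, nents;

	nents = dma_map_sg(dev, sgl, sg_len, DMA_TO_DEV ICE);
	for_each_sg(sgl, sg, nents, i) {
		dma_addr_t addr = sg_dma_address(sg);	/* bus address, not sg_phys() */
		unsigned int len = sg_dma_len(sg);	/* may differ from sg->length */
		/* program one LLI entry with addr/len here */
	}
]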
@@ -425,7 +427,7 @@ DMA engine callback Functions*/
* intel_mid_dma_tx_submit - callback to submit DMA transaction
* @tx: dma engine descriptor
*
- * Submit the DMA trasaction for this descriptor, start if ch idle
+ * Submit the DMA transaction for this descriptor, start if ch idle
*/
static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
@@ -434,14 +436,7 @@ static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
dma_cookie_t cookie;
spin_lock_bh(&midc->lock);
- cookie = midc->chan.cookie;
-
- if (++cookie < 0)
- cookie = 1;
-
- midc->chan.cookie = cookie;
- desc->txd.cookie = cookie;
-
+ cookie = dma_cookie_assign(tx);
if (list_empty(&midc->active_list))
list_add_tail(&desc->desc_node, &midc->active_list);
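[Note: the removed open-coded increment-and-wrap is exactly what dma_cookie_assign() centralizes. Roughly, paraphrasing the dmaengine.h helper:

	static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
	{
		struct dma_chan *chan = tx->chan;
		dma_cookie_t cookie;

		cookie = chan->cookie + 1;
		if (cookie < DMA_MIN_COOKIE)	/* wrap; cookies stay positive */
			cookie = DMA_MIN_COOKIE;
		tx->cookie = chan->cookie = cookie;
		return cookie;
	}
]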
@@ -482,31 +477,18 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
- struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
- dma_cookie_t last_used;
- dma_cookie_t last_complete;
- int ret;
-
- last_complete = midc->completed;
- last_used = chan->cookie;
+ struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
+ enum dma_status ret;
- ret = dma_async_is_complete(cookie, last_complete, last_used);
- if (ret != DMA_SUCCESS) {
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret != DMA_COMPLETE) {
spin_lock_bh(&midc->lock);
midc_scan_descriptors(to_middma_device(chan->device), midc);
spin_unlock_bh(&midc->lock);
- last_complete = midc->completed;
- last_used = chan->cookie;
-
- ret = dma_async_is_complete(cookie, last_complete, last_used);
+ ret = dma_cookie_status(chan, cookie, txstate);
}
- if (txstate) {
- txstate->last = last_complete;
- txstate->used = last_used;
- txstate->residue = 0;
- }
return ret;
}
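[Note: dma_cookie_status() also fills in txstate, which is why the hand-rolled txstate block above can be dropped. Roughly, paraphrasing the helper:

	static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *state)
	{
		dma_cookie_t used, complete;

		used = chan->cookie;
		complete = chan->completed_cookie;
		barrier();
		if (state) {
			state->last = complete;
			state->used = used;
			state->residue = 0;
		}
		return dma_async_is_complete(cookie, complete, used);
	}
]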
@@ -732,13 +714,14 @@ err_desc_get:
* @sg_len: length of sg txn
* @direction: DMA transfer direction
* @flags: DMA flags
+ * @context: transfer context (ignored)
*
* Prepares LLI based peripheral transfer
*/
static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
- unsigned long flags)
+ unsigned long flags, void *context)
{
struct intel_mid_dma_chan *midc = NULL;
struct intel_mid_dma_slave *mids = NULL;
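[Note: the extra void *context argument follows a dmaengine core prototype change; every driver's device_prep_slave_sg callback must match. The expected prototype is approximately:

	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context);
]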
@@ -764,7 +747,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
txd = intel_mid_dma_prep_memcpy(chan,
mids->dma_slave.dst_addr,
mids->dma_slave.src_addr,
- sgl->length,
+ sg_dma_len(sgl),
flags);
return txd;
} else {
@@ -776,7 +759,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
sg_len, direction, flags);
- txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags);
+ txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sg_dma_len(sgl), flags);
if (NULL == txd) {
pr_err("MDMA: Prep memcpy failed\n");
return NULL;
@@ -832,7 +815,6 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
/*trying to free ch in use!!!!!*/
pr_err("ERR_MDMA: trying to free ch in use\n");
}
- pm_runtime_put(&mid->pdev->dev);
spin_lock_bh(&midc->lock);
midc->descs_allocated = 0;
list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
@@ -853,6 +835,7 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
/* Disable CH interrupts */
iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
+ pm_runtime_put(&mid->pdev->dev);
}
/**
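[Note: moving pm_runtime_put() after the final register writes keeps the usual runtime-PM pairing intact: the reference taken at channel allocation must outlive the last MMIO access, or the device may be powered down under the driver. The general pattern, illustrative only:

	pm_runtime_get_sync(&pdev->dev);	/* device must stay powered... */
	iowrite32(val, base + REG);		/* ...for every register access */
	pm_runtime_put(&pdev->dev);		/* drop the reference only afterwards */
]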
@@ -886,7 +869,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
pm_runtime_put(&mid->pdev->dev);
return -EIO;
}
- midc->completed = chan->cookie = 1;
+ dma_cookie_init(chan);
spin_lock_bh(&midc->lock);
while (midc->descs_allocated < DESCS_PER_CHANNEL) {
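[Note: dma_cookie_init() replaces the manual "= 1" seeding here and in mid_setup_dma() below. Roughly, paraphrasing the dmaengine.h helper:

	static inline void dma_cookie_init(struct dma_chan *chan)
	{
		chan->cookie = chan->completed_cookie = DMA_MIN_COOKIE;
	}
]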
@@ -1056,7 +1039,8 @@ static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
}
err_status &= mid->intr_mask;
if (err_status) {
- iowrite32(MASK_INTR_REG(err_status), mid->dma_base + MASK_ERR);
+ iowrite32((err_status << INT_MASK_WE),
+ mid->dma_base + MASK_ERR);
call_tasklet = 1;
}
if (call_tasklet)
@@ -1118,7 +1102,7 @@ static int mid_setup_dma(struct pci_dev *pdev)
struct intel_mid_dma_chan *midch = &dma->ch[i];
midch->chan.device = &dma->common;
- midch->chan.cookie = 1;
+ dma_cookie_init(&midch->chan);
midch->ch_id = dma->chan_base + i;
pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id);
@@ -1241,7 +1225,7 @@ static void middma_shutdown(struct pci_dev *pdev)
* Initialize the PCI device, map BARs, query driver data.
* Call setup_dma to complete controller and chan initialization
*/
-static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
+static int intel_mid_dma_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct middma_device *device;
@@ -1324,7 +1308,7 @@ err_enable_device:
* Free up all resources and data
* Call shutdown_dma to complete controller and chan cleanup
*/
-static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
+static void intel_mid_dma_remove(struct pci_dev *pdev)
{
struct middma_device *device = pci_get_drvdata(pdev);
@@ -1421,7 +1405,7 @@ static int dma_runtime_idle(struct device *dev)
return -EAGAIN;
}
- return pm_schedule_suspend(dev, 0);
+ return 0;
}
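[Note: after the runtime-PM core rework, a runtime_idle callback that returns 0 lets the PM core carry out the suspend itself; drivers no longer call pm_schedule_suspend() from idle. A sketch under that assumption, with hypothetical names:

	static int foo_runtime_idle(struct device *dev)
	{
		if (foo_device_busy(dev))	/* hypothetical busy check */
			return -EAGAIN;		/* veto suspend for now */
		return 0;			/* PM core may suspend the device */
	}
]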
/******************************************************************************
@@ -1448,7 +1432,7 @@ static struct pci_driver intel_mid_dma_pci_driver = {
.name = "Intel MID DMA",
.id_table = intel_mid_dma_ids,
.probe = intel_mid_dma_probe,
- .remove = __devexit_p(intel_mid_dma_remove),
+ .remove = intel_mid_dma_remove,
#ifdef CONFIG_PM
.driver = {
.pm = &intel_mid_dma_pm,
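[Note: dropping __devinit/__devexit and the __devexit_p() wrapper reflects their removal from the kernel once CONFIG_HOTPLUG became always-on; probe/remove are now plain functions referenced directly. A modern pci_driver skeleton, names hypothetical:

	static struct pci_driver foo_pci_driver = {
		.name		= "foo",
		.id_table	= foo_ids,
		.probe		= foo_probe,	/* no __devinit annotation */
		.remove		= foo_remove,	/* no __devexit_p() wrapper */
	};
]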