author     Linus Walleij <linus.walleij@stericsson.com>   2011-02-08 13:03:12 +0100
committer  Grant Likely <grant.likely@secretlab.ca>       2011-02-15 15:09:00 -0700
commit     ecd442fd9e388a31d382a62fb43e851048c282e1 (patch)
tree       b0b872742d1a4c30e46a35ebe89d96548021ec91
parent     ea3065df7ddffe4669136619ad712783ccee22c6 (diff)
spi/pl022: use dmaengine helper macros
This simplifies the DMA code a bit by using the dmaengine helpers. The
cookie from descriptor submission can be ignored in this case, as has
been established in review of the MMCI/PL180 driver.

Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
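The helpers used below are thin static inline wrappers in include/linux/dmaengine.h;
roughly (paraphrased from that header around this kernel version, not part of this
patch), they expand to the raw calls being removed:

static inline int dmaengine_device_control(struct dma_chan *chan,
                                           enum dma_ctrl_cmd cmd,
                                           unsigned long arg)
{
        /* Forward the request to the channel's device_control() hook */
        return chan->device->device_control(chan, cmd, arg);
}

static inline int dmaengine_slave_config(struct dma_chan *chan,
                                         struct dma_slave_config *config)
{
        return dmaengine_device_control(chan, DMA_SLAVE_CONFIG,
                                        (unsigned long)config);
}

static inline int dmaengine_terminate_all(struct dma_chan *chan)
{
        return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
}

static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
{
        /* Returns the submission cookie; this driver chooses to ignore it */
        return desc->tx_submit(desc);
}

static inline void dma_async_issue_pending(struct dma_chan *chan)
{
        chan->device->device_issue_pending(chan);
}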
-rw-r--r--   drivers/spi/amba-pl022.c   27
1 file changed, 10 insertions, 17 deletions
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index 71a1219a995..4220aad4284 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -917,7 +917,6 @@ static int configure_dma(struct pl022 *pl022)
         struct dma_chan *txchan = pl022->dma_tx_channel;
         struct dma_async_tx_descriptor *rxdesc;
         struct dma_async_tx_descriptor *txdesc;
-        dma_cookie_t cookie;
 
         /* Check that the channels are available */
         if (!rxchan || !txchan)
@@ -962,10 +961,8 @@ static int configure_dma(struct pl022 *pl022)
         tx_conf.dst_addr_width = rx_conf.src_addr_width;
         BUG_ON(rx_conf.src_addr_width != tx_conf.dst_addr_width);
 
-        rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
-                                       (unsigned long) &rx_conf);
-        txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
-                                       (unsigned long) &tx_conf);
+        dmaengine_slave_config(rxchan, &rx_conf);
+        dmaengine_slave_config(txchan, &tx_conf);
 
         /* Create sglists for the transfers */
         pages = (pl022->cur_transfer->len >> PAGE_SHIFT) + 1;
@@ -1018,23 +1015,19 @@ static int configure_dma(struct pl022 *pl022)
         rxdesc->callback_param = pl022;
 
         /* Submit and fire RX and TX with TX last so we're ready to read! */
-        cookie = rxdesc->tx_submit(rxdesc);
-        if (dma_submit_error(cookie))
-                goto err_submit_rx;
-        cookie = txdesc->tx_submit(txdesc);
-        if (dma_submit_error(cookie))
-                goto err_submit_tx;
-        rxchan->device->device_issue_pending(rxchan);
-        txchan->device->device_issue_pending(txchan);
+        dmaengine_submit(rxdesc);
+        dmaengine_submit(txdesc);
+        dma_async_issue_pending(rxchan);
+        dma_async_issue_pending(txchan);
 
         return 0;
 
 err_submit_tx:
 err_submit_rx:
 err_txdesc:
-        txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0);
+        dmaengine_terminate_all(txchan);
 err_rxdesc:
-        rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0);
+        dmaengine_terminate_all(rxchan);
         dma_unmap_sg(txchan->device->dev, pl022->sgt_tx.sgl,
                      pl022->sgt_tx.nents, DMA_TO_DEVICE);
 err_tx_sgmap:
@@ -1101,8 +1094,8 @@ static void terminate_dma(struct pl022 *pl022)
         struct dma_chan *rxchan = pl022->dma_rx_channel;
         struct dma_chan *txchan = pl022->dma_tx_channel;
 
-        rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0);
-        txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0);
+        dmaengine_terminate_all(rxchan);
+        dmaengine_terminate_all(txchan);
         unmap_free_dma_scatter(pl022);
 }
 
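The commit message notes that the cookie returned by dmaengine_submit() can simply
be ignored here. For reference, a driver that did want to keep the old error check
could still do so through the helper; a minimal sketch (hypothetical, not part of
this patch, reusing the existing err_submit_rx label):

        dma_cookie_t cookie;

        /* dmaengine_submit() still hands back the descriptor's cookie */
        cookie = dmaengine_submit(rxdesc);
        if (dma_submit_error(cookie))
                goto err_submit_rx;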