Diffstat (limited to 'drivers/spi/spi-pxa2xx-dma.c')
-rw-r--r--  drivers/spi/spi-pxa2xx-dma.c | 32
1 file changed, 8 insertions(+), 24 deletions(-)
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index c735c5a008a..c41ff148a2b 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -9,7 +9,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
@@ -30,18 +29,6 @@ static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data,
struct sg_table *sgt;
void *buf, *pbuf;
- /*
- * Some DMA controllers have problems transferring buffers that are
- * not multiple of 4 bytes. So we truncate the transfer so that it
- * is suitable for such controllers, and handle the trailing bytes
- * manually after the DMA completes.
- *
- * REVISIT: It would be better if this information could be
- * retrieved directly from the DMA device in a similar way than
- * ->copy_align etc. is done.
- */
- len = ALIGN(drv_data->len, 4);
-
if (dir == DMA_TO_DEVICE) {
dmadev = drv_data->tx_chan->device->dev;
sgt = &drv_data->tx_sgt;
@@ -59,7 +46,7 @@ static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data,
int ret;
sg_free_table(sgt);
- ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
+ ret = sg_alloc_table(sgt, nents, GFP_ATOMIC);
if (ret)
return ret;
}
@@ -145,12 +132,8 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
if (!error) {
pxa2xx_spi_unmap_dma_buffers(drv_data);
- /* Handle the last bytes of unaligned transfer */
drv_data->tx += drv_data->tx_map_len;
- drv_data->write(drv_data);
-
drv_data->rx += drv_data->rx_map_len;
- drv_data->read(drv_data);
msg->actual_length += drv_data->len;
msg->state = pxa2xx_spi_next_transfer(drv_data);
@@ -327,22 +310,23 @@ void pxa2xx_spi_dma_start(struct driver_data *drv_data)
int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
{
struct pxa2xx_spi_master *pdata = drv_data->master_info;
+ struct device *dev = &drv_data->pdev->dev;
dma_cap_mask_t mask;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- drv_data->dummy = devm_kzalloc(&drv_data->pdev->dev, SZ_2K, GFP_KERNEL);
+ drv_data->dummy = devm_kzalloc(dev, SZ_2K, GFP_KERNEL);
if (!drv_data->dummy)
return -ENOMEM;
- drv_data->tx_chan = dma_request_channel(mask, pxa2xx_spi_dma_filter,
- pdata);
+ drv_data->tx_chan = dma_request_slave_channel_compat(mask,
+ pxa2xx_spi_dma_filter, pdata, dev, "tx");
if (!drv_data->tx_chan)
return -ENODEV;
- drv_data->rx_chan = dma_request_channel(mask, pxa2xx_spi_dma_filter,
- pdata);
+ drv_data->rx_chan = dma_request_slave_channel_compat(mask,
+ pxa2xx_spi_dma_filter, pdata, dev, "rx");
if (!drv_data->rx_chan) {
dma_release_channel(drv_data->tx_chan);
drv_data->tx_chan = NULL;
@@ -384,7 +368,7 @@ int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
* otherwise we use the default. Also we use the default FIFO
* thresholds for now.
*/
- *burst_code = chip_info ? chip_info->dma_burst_size : 16;
+ *burst_code = chip_info ? chip_info->dma_burst_size : 1;
*threshold = SSCR1_RxTresh(RX_THRESH_DFLT)
| SSCR1_TxTresh(TX_THRESH_DFLT);
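
Note on the channel-request hunks: dma_request_slave_channel_compat() first tries to get a channel described by firmware (DT/ACPI) for the given device and channel name ("tx"/"rx") and only falls back to the legacy filter-function lookup with the platform data, which is why the old dma_request_channel() arguments are still passed. The sketch below is a rough illustration of that fallback pattern, not the helper's actual definition; the function name request_chan_compat_sketch is made up for this example.

/*
 * Rough sketch of the behavior expected from
 * dma_request_slave_channel_compat(): prefer the firmware-described
 * slave channel for (dev, name), fall back to the filter-based lookup.
 */
#include <linux/dmaengine.h>

static struct dma_chan *request_chan_compat_sketch(dma_cap_mask_t mask,
						   dma_filter_fn fn,
						   void *fn_param,
						   struct device *dev,
						   const char *name)
{
	struct dma_chan *chan;

	/* Try the DT/ACPI-described slave channel first. */
	chan = dma_request_slave_channel(dev, name);
	if (chan)
		return chan;

	/* Fall back to the platform-data + filter-function path. */
	return dma_request_channel(mask, fn, fn_param);
}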