Diffstat (limited to 'arch/microblaze/kernel/dma.c')
| -rw-r--r-- | arch/microblaze/kernel/dma.c | 128 |
1 file changed, 76 insertions, 52 deletions
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index 79c74659f20..4633c36c1b3 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -10,44 +10,14 @@
 #include <linux/dma-mapping.h>
 #include <linux/gfp.h>
 #include <linux/dma-debug.h>
-#include <asm/bug.h>
-#include <asm/cacheflush.h>
-
-/*
- * Generic direct DMA implementation
- *
- * This implementation supports a per-device offset that can be applied if
- * the address at which memory is visible to devices is not 0. Platform code
- * can set archdata.dma_data to an unsigned long holding the offset. By
- * default the offset is PCI_DRAM_OFFSET.
- */
-static inline void __dma_sync_page(unsigned long paddr, unsigned long offset,
-	size_t size, enum dma_data_direction direction)
-{
-	switch (direction) {
-	case DMA_TO_DEVICE:
-		flush_dcache_range(paddr + offset, paddr + offset + size);
-		break;
-	case DMA_FROM_DEVICE:
-		invalidate_dcache_range(paddr + offset, paddr + offset + size);
-		break;
-	default:
-		BUG();
-	}
-}
-
-static unsigned long get_dma_direct_offset(struct device *dev)
-{
-	if (likely(dev))
-		return (unsigned long)dev->archdata.dma_data;
-
-	return PCI_DRAM_OFFSET; /* FIXME Not sure if is correct */
-}
+#include <linux/export.h>
+#include <linux/bug.h>
 
 #define NOT_COHERENT_CACHE
 
 static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t flag)
+				dma_addr_t *dma_handle, gfp_t flag,
+				struct dma_attrs *attrs)
 {
 #ifdef NOT_COHERENT_CACHE
 	return consistent_alloc(flag, size, dma_handle);
@@ -64,14 +34,15 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 		return NULL;
 	ret = page_address(page);
 	memset(ret, 0, size);
-	*dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);
+	*dma_handle = virt_to_phys(ret);
 
 	return ret;
 #endif
 }
 
 static void dma_direct_free_coherent(struct device *dev, size_t size,
-			      void *vaddr, dma_addr_t dma_handle)
+				     void *vaddr, dma_addr_t dma_handle,
+				     struct dma_attrs *attrs)
 {
 #ifdef NOT_COHERENT_CACHE
 	consistent_free(size, vaddr);
@@ -89,20 +60,14 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
 
 	/* FIXME this part of code is untested */
 	for_each_sg(sgl, sg, nents, i) {
-		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
-		__dma_sync_page(page_to_phys(sg_page(sg)), sg->offset,
+		sg->dma_address = sg_phys(sg);
+		__dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
							sg->length, direction);
 	}
 
 	return nents;
 }
 
-static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction direction,
-		struct dma_attrs *attrs)
-{
-}
-
 static int dma_direct_dma_supported(struct device *dev, u64 mask)
 {
 	return 1;
@@ -115,8 +80,8 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
 					     enum dma_data_direction direction,
 					     struct dma_attrs *attrs)
 {
-	__dma_sync_page(page_to_phys(page), offset, size, direction);
-	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
+	__dma_sync(page_to_phys(page) + offset, size, direction);
+	return page_to_phys(page) + offset;
 }
 
 static inline void dma_direct_unmap_page(struct device *dev,
@@ -130,17 +95,76 @@ static inline void dma_direct_unmap_page(struct device *dev,
 	 * phys_to_virt is here because in __dma_sync_page is __virt_to_phys and
 	 * dma_address is physical address
	 */
-	__dma_sync_page(dma_address, 0 , size, direction);
+	__dma_sync(dma_address, size, direction);
+}
+
+static inline void
+dma_direct_sync_single_for_cpu(struct device *dev,
+			       dma_addr_t dma_handle, size_t size,
+			       enum dma_data_direction direction)
+{
+	/*
+	 * It's pointless to flush the cache as the memory segment
+	 * is given to the CPU
+	 */
+
+	if (direction == DMA_FROM_DEVICE)
+		__dma_sync(dma_handle, size, direction);
+}
+
+static inline void
+dma_direct_sync_single_for_device(struct device *dev,
+				  dma_addr_t dma_handle, size_t size,
+				  enum dma_data_direction direction)
+{
+	/*
+	 * It's pointless to invalidate the cache if the device isn't
+	 * supposed to write to the relevant region
+	 */
+
+	if (direction == DMA_TO_DEVICE)
+		__dma_sync(dma_handle, size, direction);
+}
+
+static inline void
+dma_direct_sync_sg_for_cpu(struct device *dev,
+			   struct scatterlist *sgl, int nents,
+			   enum dma_data_direction direction)
+{
+	struct scatterlist *sg;
+	int i;
+
+	/* FIXME this part of code is untested */
+	if (direction == DMA_FROM_DEVICE)
+		for_each_sg(sgl, sg, nents, i)
+			__dma_sync(sg->dma_address, sg->length, direction);
+}
+
+static inline void
+dma_direct_sync_sg_for_device(struct device *dev,
+			      struct scatterlist *sgl, int nents,
+			      enum dma_data_direction direction)
+{
+	struct scatterlist *sg;
+	int i;
+
+	/* FIXME this part of code is untested */
+	if (direction == DMA_TO_DEVICE)
+		for_each_sg(sgl, sg, nents, i)
+			__dma_sync(sg->dma_address, sg->length, direction);
 }
 
 struct dma_map_ops dma_direct_ops = {
-	.alloc_coherent	= dma_direct_alloc_coherent,
-	.free_coherent	= dma_direct_free_coherent,
+	.alloc		= dma_direct_alloc_coherent,
+	.free		= dma_direct_free_coherent,
 	.map_sg		= dma_direct_map_sg,
-	.unmap_sg	= dma_direct_unmap_sg,
 	.dma_supported	= dma_direct_dma_supported,
 	.map_page	= dma_direct_map_page,
 	.unmap_page	= dma_direct_unmap_page,
+	.sync_single_for_cpu		= dma_direct_sync_single_for_cpu,
+	.sync_single_for_device		= dma_direct_sync_single_for_device,
+	.sync_sg_for_cpu		= dma_direct_sync_sg_for_cpu,
+	.sync_sg_for_device		= dma_direct_sync_sg_for_device,
 };
 EXPORT_SYMBOL(dma_direct_ops);
 
@@ -149,8 +173,8 @@ EXPORT_SYMBOL(dma_direct_ops);
 
 static int __init dma_init(void)
 {
-	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
 
-	return 0;
+	return 0;
 }
 fs_initcall(dma_init);
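
The rewrite funnels every cache-maintenance call through __dma_sync(paddr, size, direction), which is defined in the microblaze dma-mapping headers rather than in this file. Judging from the removed __dma_sync_page() above, it presumably flushes for DMA_TO_DEVICE and invalidates for DMA_FROM_DEVICE; the sketch below reconstructs that assumed behaviour and is not code from this commit:

#include <linux/bug.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>

/* Assumed shape of __dma_sync(), reconstructed from the removed
 * __dma_sync_page(): the same direction switch, minus the separate
 * offset argument (callers now add the offset into paddr themselves). */
static inline void __dma_sync(unsigned long paddr, size_t size,
			      enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		/* CPU produced the data: write dirty lines back to RAM */
		flush_dcache_range(paddr, paddr + size);
		break;
	case DMA_FROM_DEVICE:
		/* device produced the data: discard stale cached lines */
		invalidate_dcache_range(paddr, paddr + size);
		break;
	default:
		BUG();
	}
}

A hypothetical driver-side fragment (mydrv_rx, mydev and buf are illustrative names, not part of this commit) shows how the ops table above is reached through the generic streaming-DMA API:

#include <linux/dma-mapping.h>

static int mydrv_rx(struct device *mydev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* reaches dma_direct_map_page(): syncs the cache and returns the
	 * plain physical address, with no PCI_DRAM_OFFSET added */
	handle = dma_map_single(mydev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(mydev, handle))
		return -ENOMEM;

	/* ... device DMAs into the buffer ... */

	/* reaches dma_direct_sync_single_for_cpu(): invalidates the cached
	 * copy, but only for DMA_FROM_DEVICE, so the CPU sees fresh data */
	dma_sync_single_for_cpu(mydev, handle, len, DMA_FROM_DEVICE);

	dma_unmap_single(mydev, handle, len, DMA_FROM_DEVICE);
	return 0;
}

The direction checks in the new sync hooks are the point of the addition: a buffer headed to the device only needs a flush before the transfer, and a buffer filled by the device only needs an invalidate before the CPU reads it, so the other combinations are skipped.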
