Diffstat (limited to 'arch/powerpc/include/asm/dma-mapping.h')
| -rw-r--r-- | arch/powerpc/include/asm/dma-mapping.h | 379 |
1 file changed, 79 insertions, 300 deletions
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 3d9e887c3c0..150866b2a3f 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -14,6 +14,7 @@
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-attrs.h>
+#include <linux/dma-debug.h>
 #include <asm/io.h>
 #include <asm/swiotlb.h>
 
@@ -21,11 +22,15 @@
 /* Some dma direct funcs must be visible for use in other dma_ops */
 extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
-                                       dma_addr_t *dma_handle, gfp_t flag);
+                                       dma_addr_t *dma_handle, gfp_t flag,
+                                       struct dma_attrs *attrs);
 extern void dma_direct_free_coherent(struct device *dev, size_t size,
-                                     void *vaddr, dma_addr_t dma_handle);
-
-extern unsigned long get_dma_direct_offset(struct device *dev);
+                                     void *vaddr, dma_addr_t dma_handle,
+                                     struct dma_attrs *attrs);
+extern int dma_direct_mmap_coherent(struct device *dev,
+                                    struct vm_area_struct *vma,
+                                    void *cpu_addr, dma_addr_t handle,
+                                    size_t size, struct dma_attrs *attrs);
 
 #ifdef CONFIG_NOT_COHERENT_CACHE
 /*
@@ -42,6 +47,7 @@ extern void __dma_free_coherent(size_t size, void *vaddr);
 extern void __dma_sync(void *vaddr, size_t size, int direction);
 extern void __dma_sync_page(struct page *page, unsigned long offset,
                             size_t size, int direction);
+extern unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr);
 
 #else /* ! CONFIG_NOT_COHERENT_CACHE */
 /*
@@ -64,58 +70,14 @@ static inline unsigned long device_to_mask(struct device *dev)
 }
 
 /*
- * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
- */
-struct dma_mapping_ops {
-        void *          (*alloc_coherent)(struct device *dev, size_t size,
-                                dma_addr_t *dma_handle, gfp_t flag);
-        void            (*free_coherent)(struct device *dev, size_t size,
-                                void *vaddr, dma_addr_t dma_handle);
-        int             (*map_sg)(struct device *dev, struct scatterlist *sg,
-                                int nents, enum dma_data_direction direction,
-                                struct dma_attrs *attrs);
-        void            (*unmap_sg)(struct device *dev, struct scatterlist *sg,
-                                int nents, enum dma_data_direction direction,
-                                struct dma_attrs *attrs);
-        int             (*dma_supported)(struct device *dev, u64 mask);
-        int             (*set_dma_mask)(struct device *dev, u64 dma_mask);
-        dma_addr_t      (*map_page)(struct device *dev, struct page *page,
-                                unsigned long offset, size_t size,
-                                enum dma_data_direction direction,
-                                struct dma_attrs *attrs);
-        void            (*unmap_page)(struct device *dev,
-                                dma_addr_t dma_address, size_t size,
-                                enum dma_data_direction direction,
-                                struct dma_attrs *attrs);
-        int             (*addr_needs_map)(struct device *dev, dma_addr_t addr,
-                                size_t size);
-#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
-        void            (*sync_single_range_for_cpu)(struct device *hwdev,
-                                dma_addr_t dma_handle, unsigned long offset,
-                                size_t size,
-                                enum dma_data_direction direction);
-        void            (*sync_single_range_for_device)(struct device *hwdev,
-                                dma_addr_t dma_handle, unsigned long offset,
-                                size_t size,
-                                enum dma_data_direction direction);
-        void            (*sync_sg_for_cpu)(struct device *hwdev,
-                                struct scatterlist *sg, int nelems,
-                                enum dma_data_direction direction);
-        void            (*sync_sg_for_device)(struct device *hwdev,
-                                struct scatterlist *sg, int nelems,
-                                enum dma_data_direction direction);
-#endif
-};
-
-/*
  * Available generic sets of operations
  */
 #ifdef CONFIG_PPC64
-extern struct dma_mapping_ops dma_iommu_ops;
+extern struct dma_map_ops dma_iommu_ops;
 #endif
-extern struct dma_mapping_ops dma_direct_ops;
+extern struct dma_map_ops dma_direct_ops;
 
-static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
         /* We don't handle the NULL dev case for ISA for now. We could
          * do it via an out of line call but it is not needed for now. The
@@ -128,312 +90,129 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
         return dev->archdata.dma_ops;
 }
 
-static inline void set_dma_ops(struct device *dev, struct dma_mapping_ops *ops)
+static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
 {
         dev->archdata.dma_ops = ops;
 }
 
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-        if (unlikely(dma_ops == NULL))
-                return 0;
-        if (dma_ops->dma_supported == NULL)
-                return 1;
-        return dma_ops->dma_supported(dev, mask);
-}
-
-/* We have our own implementation of pci_set_dma_mask() */
-#define HAVE_ARCH_PCI_SET_DMA_MASK
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-        if (unlikely(dma_ops == NULL))
-                return -EIO;
-        if (dma_ops->set_dma_mask != NULL)
-                return dma_ops->set_dma_mask(dev, dma_mask);
-        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-                return -EIO;
-        *dev->dma_mask = dma_mask;
-        return 0;
-}
-
 /*
- * map_/unmap_single actually call through to map/unmap_page now that all the
- * dma_mapping_ops have been converted over. We just have to get the page and
- * offset to pass through to map_page
+ * get_dma_offset()
+ *
+ * Get the dma offset on configurations where the dma address can be determined
+ * from the physical address by looking at a simple offset. Direct dma and
+ * swiotlb use this function, but it is typically not used by implementations
+ * with an iommu.
  */
-static inline dma_addr_t dma_map_single_attrs(struct device *dev,
-                                              void *cpu_addr,
-                                              size_t size,
-                                              enum dma_data_direction direction,
-                                              struct dma_attrs *attrs)
+static inline dma_addr_t get_dma_offset(struct device *dev)
 {
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-        BUG_ON(!dma_ops);
+        if (dev)
+                return dev->archdata.dma_data.dma_offset;
 
-        return dma_ops->map_page(dev, virt_to_page(cpu_addr),
-                                 (unsigned long)cpu_addr % PAGE_SIZE, size,
-                                 direction, attrs);
+        return PCI_DRAM_OFFSET;
 }
 
-static inline void dma_unmap_single_attrs(struct device *dev,
-                                          dma_addr_t dma_addr,
-                                          size_t size,
-                                          enum dma_data_direction direction,
-                                          struct dma_attrs *attrs)
+static inline void set_dma_offset(struct device *dev, dma_addr_t off)
 {
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-        BUG_ON(!dma_ops);
-
-        dma_ops->unmap_page(dev, dma_addr, size, direction, attrs);
+        if (dev)
+                dev->archdata.dma_data.dma_offset = off;
 }
 
-static inline dma_addr_t dma_map_page_attrs(struct device *dev,
-                                            struct page *page,
-                                            unsigned long offset, size_t size,
-                                            enum dma_data_direction direction,
-                                            struct dma_attrs *attrs)
-{
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-        BUG_ON(!dma_ops);
+/* this will be removed soon */
+#define flush_write_buffers()
 
-        return dma_ops->map_page(dev, page, offset, size, direction, attrs);
-}
+#include <asm-generic/dma-mapping-common.h>
 
-static inline void dma_unmap_page_attrs(struct device *dev,
-                                        dma_addr_t dma_address,
-                                        size_t size,
-                                        enum dma_data_direction direction,
-                                        struct dma_attrs *attrs)
-{
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-        BUG_ON(!dma_ops);
-
-        dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
-}
-
-static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
-                                   int nents, enum dma_data_direction direction,
-                                   struct dma_attrs *attrs)
-{
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-        BUG_ON(!dma_ops);
-        return dma_ops->map_sg(dev, sg, nents, direction, attrs);
-}
-
-static inline void dma_unmap_sg_attrs(struct device *dev,
-                                      struct scatterlist *sg,
-                                      int nhwentries,
-                                      enum dma_data_direction direction,
-                                      struct dma_attrs *attrs)
+static inline int dma_supported(struct device *dev, u64 mask)
 {
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+        struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
-        BUG_ON(!dma_ops);
-        dma_ops->unmap_sg(dev, sg, nhwentries, direction, attrs);
+        if (unlikely(dma_ops == NULL))
+                return 0;
+        if (dma_ops->dma_supported == NULL)
+                return 1;
+        return dma_ops->dma_supported(dev, mask);
 }
 
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-                                       dma_addr_t *dma_handle, gfp_t flag)
-{
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+extern int dma_set_mask(struct device *dev, u64 dma_mask);
+extern int __dma_set_mask(struct device *dev, u64 dma_mask);
 
-        BUG_ON(!dma_ops);
-        return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
-}
+#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
 
-static inline void dma_free_coherent(struct device *dev, size_t size,
-                                     void *cpu_addr, dma_addr_t dma_handle)
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+                                    dma_addr_t *dma_handle, gfp_t flag,
+                                    struct dma_attrs *attrs)
 {
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+        struct dma_map_ops *dma_ops = get_dma_ops(dev);
+        void *cpu_addr;
 
         BUG_ON(!dma_ops);
-        dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
-}
-
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-                                        size_t size,
-                                        enum dma_data_direction direction)
-{
-        return dma_map_single_attrs(dev, cpu_addr, size, direction, NULL);
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-                                    size_t size,
-                                    enum dma_data_direction direction)
-{
-        dma_unmap_single_attrs(dev, dma_addr, size, direction, NULL);
-}
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-                                      unsigned long offset, size_t size,
-                                      enum dma_data_direction direction)
-{
-        return dma_map_page_attrs(dev, page, offset, size, direction, NULL);
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
-                                  size_t size,
-                                  enum dma_data_direction direction)
-{
-        dma_unmap_page_attrs(dev, dma_address, size, direction, NULL);
-}
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
-                             int nents, enum dma_data_direction direction)
-{
-        return dma_map_sg_attrs(dev, sg, nents, direction, NULL);
-}
+        cpu_addr = dma_ops->alloc(dev, size, dma_handle, flag, attrs);
 
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-                                int nhwentries,
-                                enum dma_data_direction direction)
-{
-        dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
-}
+        debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
 
-#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
-static inline void dma_sync_single_for_cpu(struct device *dev,
-                dma_addr_t dma_handle, size_t size,
-                enum dma_data_direction direction)
-{
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-        BUG_ON(!dma_ops);
-        dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
-                                           size, direction);
+        return cpu_addr;
 }
 
-static inline void dma_sync_single_for_device(struct device *dev,
-                dma_addr_t dma_handle, size_t size,
-                enum dma_data_direction direction)
-{
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-        BUG_ON(!dma_ops);
-        dma_ops->sync_single_range_for_device(dev, dma_handle,
-                                              0, size, direction);
-}
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
 
-static inline void dma_sync_sg_for_cpu(struct device *dev,
-                struct scatterlist *sgl, int nents,
-                enum dma_data_direction direction)
+static inline void dma_free_attrs(struct device *dev, size_t size,
+                                  void *cpu_addr, dma_addr_t dma_handle,
+                                  struct dma_attrs *attrs)
 {
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+        struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
         BUG_ON(!dma_ops);
-        dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
-}
 
-static inline void dma_sync_sg_for_device(struct device *dev,
-                struct scatterlist *sgl, int nents,
-                enum dma_data_direction direction)
-{
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+        debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
 
-        BUG_ON(!dma_ops);
-        dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
+        dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
 }
 
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-                dma_addr_t dma_handle, unsigned long offset, size_t size,
-                enum dma_data_direction direction)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+        struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
-        BUG_ON(!dma_ops);
-        dma_ops->sync_single_range_for_cpu(dev, dma_handle,
-                                           offset, size, direction);
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
-                dma_addr_t dma_handle, unsigned long offset, size_t size,
-                enum dma_data_direction direction)
-{
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+        debug_dma_mapping_error(dev, dma_addr);
+        if (dma_ops->mapping_error)
+                return dma_ops->mapping_error(dev, dma_addr);
 
-        BUG_ON(!dma_ops);
-        dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
-                                              size, direction);
-}
-#else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
-static inline void dma_sync_single_for_cpu(struct device *dev,
-                dma_addr_t dma_handle, size_t size,
-                enum dma_data_direction direction)
-{
+#ifdef CONFIG_PPC64
+        return (dma_addr == DMA_ERROR_CODE);
+#else
+        return 0;
+#endif
 }
 
-static inline void dma_sync_single_for_device(struct device *dev,
-                dma_addr_t dma_handle, size_t size,
-                enum dma_data_direction direction)
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
-}
+#ifdef CONFIG_SWIOTLB
+        struct dev_archdata *sd = &dev->archdata;
 
-static inline void dma_sync_sg_for_cpu(struct device *dev,
-                struct scatterlist *sgl, int nents,
-                enum dma_data_direction direction)
-{
-}
+        if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr)
+                return 0;
+#endif
 
-static inline void dma_sync_sg_for_device(struct device *dev,
-                struct scatterlist *sgl, int nents,
-                enum dma_data_direction direction)
-{
-}
+        if (!dev->dma_mask)
+                return 0;
 
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-                dma_addr_t dma_handle, unsigned long offset, size_t size,
-                enum dma_data_direction direction)
-{
+        return addr + size - 1 <= *dev->dma_mask;
 }
 
-static inline void dma_sync_single_range_for_device(struct device *dev,
-                dma_addr_t dma_handle, unsigned long offset, size_t size,
-                enum dma_data_direction direction)
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
+        return paddr + get_dma_offset(dev);
 }
-#endif
 
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 {
-#ifdef CONFIG_PPC64
-        return (dma_addr == DMA_ERROR_CODE);
-#else
-        return 0;
-#endif
+        return daddr - get_dma_offset(dev);
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
-#ifdef CONFIG_NOT_COHERENT_CACHE
-#define dma_is_consistent(d, h) (0)
-#else
-#define dma_is_consistent(d, h) (1)
-#endif
-static inline int dma_get_cache_alignment(void)
-{
-#ifdef CONFIG_PPC64
-        /* no easy way to get cache size on all processors, so return
-         * the maximum possible, to be safe */
-        return (1 << INTERNODE_CACHE_SHIFT);
-#else
-        /*
-         * Each processor family will define its own L1_CACHE_SHIFT,
-         * L1_CACHE_BYTES wraps to this, so this is always safe.
-         */
-        return L1_CACHE_BYTES;
-#endif
-}
+#define ARCH_HAS_DMA_MMAP_COHERENT
 
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                 enum dma_data_direction direction)
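
For orientation, the new direct-DMA helpers added above (get_dma_offset(), set_dma_offset(), phys_to_dma(), dma_to_phys()) make the bus address a per-device constant offset from the CPU physical address, stored in dev->archdata.dma_data.dma_offset. The sketch below is illustrative only and not part of this patch; the example_* name and the offset value are hypothetical, while set_dma_offset(), set_dma_ops(), dma_direct_ops and dma_set_mask() are the interfaces declared in the header.

/* Hypothetical platform-setup sketch (not from the patch): wire a device
 * for direct, non-IOMMU DMA where bus address = CPU physical + fixed offset.
 */
static int example_setup_direct_dma(struct device *dev, dma_addr_t bus_offset)
{
        /* Record the constant offset consumed by phys_to_dma()/dma_to_phys(). */
        set_dma_offset(dev, bus_offset);

        /* Use the generic direct ops exported by this header. */
        set_dma_ops(dev, &dma_direct_ops);

        /* Afterwards: phys_to_dma(dev, p) == p + bus_offset and
         *             dma_to_phys(dev, d) == d - bus_offset.
         */
        return dma_set_mask(dev, DMA_BIT_MASK(32));
}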
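
On the driver side, the visible effect of the conversion is that dma_alloc_coherent()/dma_free_coherent() become macros over dma_alloc_attrs()/dma_free_attrs() with NULL attrs, and the streaming map/unmap/sync wrappers now come from asm-generic/dma-mapping-common.h instead of being open-coded in this header. A minimal driver-style sketch of that flow; only the example_* identifiers and buffer sizes are invented:

/* Illustrative only: these are the standard DMA API calls of this kernel
 * generation; nothing here is added by the patch itself.
 */
static int example_dma_usage(struct device *dev, void *buf, size_t len)
{
        void *desc;
        dma_addr_t desc_dma, buf_dma;

        /* Expands to dma_alloc_attrs(dev, PAGE_SIZE, &desc_dma, GFP_KERNEL, NULL). */
        desc = dma_alloc_coherent(dev, PAGE_SIZE, &desc_dma, GFP_KERNEL);
        if (!desc)
                return -ENOMEM;

        /* Streaming mapping provided via asm-generic/dma-mapping-common.h. */
        buf_dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, buf_dma)) {
                dma_free_coherent(dev, PAGE_SIZE, desc, desc_dma);
                return -EIO;
        }

        /* ... hand desc_dma and buf_dma to the hardware here ... */

        dma_unmap_single(dev, buf_dma, len, DMA_TO_DEVICE);
        dma_free_coherent(dev, PAGE_SIZE, desc, desc_dma);
        return 0;
}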
