Diffstat (limited to 'arch/powerpc/include/asm/dma-mapping.h')
| -rw-r--r-- | arch/powerpc/include/asm/dma-mapping.h | 67 |
1 file changed, 24 insertions(+), 43 deletions(-)
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 80a973bb9e7..150866b2a3f 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -22,10 +22,15 @@
 /* Some dma direct funcs must be visible for use in other dma_ops */
 extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t flag);
+				       dma_addr_t *dma_handle, gfp_t flag,
+				       struct dma_attrs *attrs);
 extern void dma_direct_free_coherent(struct device *dev, size_t size,
-				     void *vaddr, dma_addr_t dma_handle);
-
+				     void *vaddr, dma_addr_t dma_handle,
+				     struct dma_attrs *attrs);
+extern int dma_direct_mmap_coherent(struct device *dev,
+				    struct vm_area_struct *vma,
+				    void *cpu_addr, dma_addr_t handle,
+				    size_t size, struct dma_attrs *attrs);
 #ifdef CONFIG_NOT_COHERENT_CACHE
 /*
@@ -42,6 +47,7 @@ extern void __dma_free_coherent(size_t size, void *vaddr);
 extern void __dma_sync(void *vaddr, size_t size, int direction);
 extern void __dma_sync_page(struct page *page, unsigned long offset,
 			    size_t size, int direction);
+extern unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr);
 
 #else /* ! CONFIG_NOT_COHERENT_CACHE */
 /*
@@ -127,40 +133,32 @@ static inline int dma_supported(struct device *dev, u64 mask)
 	return dma_ops->dma_supported(dev, mask);
 }
 
-/* We have our own implementation of pci_set_dma_mask() */
-#define HAVE_ARCH_PCI_SET_DMA_MASK
+extern int dma_set_mask(struct device *dev, u64 dma_mask);
+extern int __dma_set_mask(struct device *dev, u64 dma_mask);
 
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	struct dma_map_ops *dma_ops = get_dma_ops(dev);
+#define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL)
 
-	if (unlikely(dma_ops == NULL))
-		return -EIO;
-	if (dma_ops->set_dma_mask != NULL)
-		return dma_ops->set_dma_mask(dev, dma_mask);
-	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-		return -EIO;
-	*dev->dma_mask = dma_mask;
-	return 0;
-}
-
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t flag)
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+				    dma_addr_t *dma_handle, gfp_t flag,
+				    struct dma_attrs *attrs)
 {
 	struct dma_map_ops *dma_ops = get_dma_ops(dev);
 	void *cpu_addr;
 
 	BUG_ON(!dma_ops);
 
-	cpu_addr = dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+	cpu_addr = dma_ops->alloc(dev, size, dma_handle, flag, attrs);
 
 	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
 
 	return cpu_addr;
 }
 
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *cpu_addr, dma_addr_t dma_handle)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *cpu_addr, dma_addr_t dma_handle,
+				  struct dma_attrs *attrs)
 {
 	struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
@@ -168,13 +166,14 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 
 	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
 
-	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+	dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
 }
 
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
+	debug_dma_mapping_error(dev, dma_addr);
 	if (dma_ops->mapping_error)
 		return dma_ops->mapping_error(dev, dma_addr);
 
@@ -212,26 +211,8 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
-#ifdef CONFIG_NOT_COHERENT_CACHE
-#define dma_is_consistent(d, h)	(0)
-#else
-#define dma_is_consistent(d, h)	(1)
-#endif
-static inline int dma_get_cache_alignment(void)
-{
-#ifdef CONFIG_PPC64
-	/* no easy way to get cache size on all processors, so return
-	 * the maximum possible, to be safe */
-	return (1 << INTERNODE_CACHE_SHIFT);
-#else
-	/*
-	 * Each processor family will define its own L1_CACHE_SHIFT,
-	 * L1_CACHE_BYTES wraps to this, so this is always safe.
-	 */
-	return L1_CACHE_BYTES;
-#endif
-}
+#define ARCH_HAS_DMA_MMAP_COHERENT
 
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		enum dma_data_direction direction)
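For context (not part of the diff above): after this change, dma_alloc_coherent()/dma_free_coherent() become thin wrappers that pass NULL attributes to dma_alloc_attrs()/dma_free_attrs(), so callers can now request per-allocation attributes. A minimal caller sketch, assuming a struct dma_attrs era kernel and a placeholder device pointer "my_dev":

#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>

static int example_alloc(struct device *my_dev)	/* hypothetical helper */
{
	DEFINE_DMA_ATTRS(attrs);	/* attribute set starts out empty */
	dma_addr_t dma_handle;
	void *cpu_addr;

	/* Ask for a write-combining mapping rather than a fully uncached one. */
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);

	cpu_addr = dma_alloc_attrs(my_dev, PAGE_SIZE, &dma_handle,
				   GFP_KERNEL, &attrs);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... hand dma_handle to the device, access the buffer via cpu_addr ... */

	/* Free with the same attrs that were used for the allocation. */
	dma_free_attrs(my_dev, PAGE_SIZE, cpu_addr, dma_handle, &attrs);
	return 0;
}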

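Similarly, a sketch (not from this diff) of what ARCH_HAS_DMA_MMAP_COHERENT enables: it advertises that the architecture can remap coherent DMA buffers into user space, which generic users such as ALSA check before calling dma_mmap_coherent(); on powerpc that path ends up in dma_direct_mmap_coherent() for the direct ops. All parameter names here are placeholders:

static int example_mmap(struct device *my_dev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t handle, size_t size)
{
#ifdef ARCH_HAS_DMA_MMAP_COHERENT
	/* Map a previously allocated coherent buffer into the user vma. */
	return dma_mmap_coherent(my_dev, vma, cpu_addr, handle, size);
#else
	return -ENXIO;	/* arch cannot mmap coherent memory to user space */
#endif
}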