Diffstat (limited to 'arch/x86/include/asm/dma-mapping.h')
-rw-r--r--  arch/x86/include/asm/dma-mapping.h | 260
1 file changed, 65 insertions(+), 195 deletions(-)
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 132a134d12f..808dae63eee 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -2,63 +2,36 @@
 #define _ASM_X86_DMA_MAPPING_H
 
 /*
- * IOMMU interface. See Documentation/PCI/PCI-DMA-mapping.txt and
+ * IOMMU interface. See Documentation/DMA-API-HOWTO.txt and
  * Documentation/DMA-API.txt for documentation.
  */
 
+#include <linux/kmemcheck.h>
 #include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/dma-attrs.h>
 #include <asm/io.h>
 #include <asm/swiotlb.h>
 #include <asm-generic/dma-coherent.h>
+#include <linux/dma-contiguous.h>
+
+#ifdef CONFIG_ISA
+# define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
+#else
+# define ISA_DMA_BIT_MASK DMA_BIT_MASK(32)
+#endif
+
+#define DMA_ERROR_CODE	0
 
-extern dma_addr_t bad_dma_address;
 extern int iommu_merge;
 extern struct device x86_dma_fallback_dev;
 extern int panic_on_overflow;
 
-struct dma_mapping_ops {
-	int		(*mapping_error)(struct device *dev,
-					 dma_addr_t dma_addr);
-	void*		(*alloc_coherent)(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t gfp);
-	void		(*free_coherent)(struct device *dev, size_t size,
-				void *vaddr, dma_addr_t dma_handle);
-	dma_addr_t	(*map_single)(struct device *hwdev, phys_addr_t ptr,
-				size_t size, int direction);
-	void		(*unmap_single)(struct device *dev, dma_addr_t addr,
-				size_t size, int direction);
-	void		(*sync_single_for_cpu)(struct device *hwdev,
-				dma_addr_t dma_handle, size_t size,
-				int direction);
-	void		(*sync_single_for_device)(struct device *hwdev,
-				dma_addr_t dma_handle, size_t size,
-				int direction);
-	void		(*sync_single_range_for_cpu)(struct device *hwdev,
-				dma_addr_t dma_handle, unsigned long offset,
-				size_t size, int direction);
-	void		(*sync_single_range_for_device)(struct device *hwdev,
-				dma_addr_t dma_handle, unsigned long offset,
-				size_t size, int direction);
-	void		(*sync_sg_for_cpu)(struct device *hwdev,
-				struct scatterlist *sg, int nelems,
-				int direction);
-	void		(*sync_sg_for_device)(struct device *hwdev,
-				struct scatterlist *sg, int nelems,
-				int direction);
-	int		(*map_sg)(struct device *hwdev, struct scatterlist *sg,
-				int nents, int direction);
-	void		(*unmap_sg)(struct device *hwdev,
-				struct scatterlist *sg, int nents,
-				int direction);
-	int		(*dma_supported)(struct device *hwdev, u64 mask);
-	int		is_phys;
-};
-
-extern struct dma_mapping_ops *dma_ops;
-
-static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
+extern struct dma_map_ops *dma_ops;
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-#ifdef CONFIG_X86_32
+#ifndef CONFIG_X86_DEV_DMA_OPS
 	return dma_ops;
 #else
 	if (unlikely(!dev) || !dev->archdata.dma_ops)
@@ -68,160 +41,57 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 #endif
 }
 
+#include <asm-generic/dma-mapping-common.h>
+
 /* Make sure we keep the same behaviour */
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);
 
+	debug_dma_mapping_error(dev, dma_addr);
 	if (ops->mapping_error)
 		return ops->mapping_error(dev, dma_addr);
 
-	return (dma_addr == bad_dma_address);
+	return (dma_addr == DMA_ERROR_CODE);
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d, h)	(1)
 
 extern int dma_supported(struct device *hwdev, u64 mask);
 extern int dma_set_mask(struct device *dev, u64 mask);
 
 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
-					dma_addr_t *dma_addr, gfp_t flag);
-
-static inline dma_addr_t
-dma_map_single(struct device *hwdev, void *ptr, size_t size,
-	       int direction)
-{
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
-
-	BUG_ON(!valid_dma_direction(direction));
-	return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
-		 int direction)
-{
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(direction));
-	if (ops->unmap_single)
-		ops->unmap_single(dev, addr, size, direction);
-}
-
-static inline int
-dma_map_sg(struct device *hwdev, struct scatterlist *sg,
-	   int nents, int direction)
-{
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
-
-	BUG_ON(!valid_dma_direction(direction));
-	return ops->map_sg(hwdev, sg, nents, direction);
-}
-
-static inline void
-dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-	     int direction)
-{
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
-
-	BUG_ON(!valid_dma_direction(direction));
-	if (ops->unmap_sg)
-		ops->unmap_sg(hwdev, sg, nents, direction);
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
-			size_t size, int direction)
-{
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
-
-	BUG_ON(!valid_dma_direction(direction));
-	if (ops->sync_single_for_cpu)
-		ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
-			   size_t size, int direction)
-{
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
-
-	BUG_ON(!valid_dma_direction(direction));
-	if (ops->sync_single_for_device)
-		ops->sync_single_for_device(hwdev, dma_handle, size, direction);
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
-			      unsigned long offset, size_t size, int direction)
-{
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
-
-	BUG_ON(!valid_dma_direction(direction));
-	if (ops->sync_single_range_for_cpu)
-		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
-					       size, direction);
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
-				 unsigned long offset, size_t size,
-				 int direction)
-{
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
-
-	BUG_ON(!valid_dma_direction(direction));
-	if (ops->sync_single_range_for_device)
-		ops->sync_single_range_for_device(hwdev, dma_handle,
-						  offset, size, direction);
-	flush_write_buffers();
-}
+					dma_addr_t *dma_addr, gfp_t flag,
+					struct dma_attrs *attrs);
 
-static inline void
-dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
-		    int nelems, int direction)
-{
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+extern void dma_generic_free_coherent(struct device *dev, size_t size,
+				      void *vaddr, dma_addr_t dma_addr,
+				      struct dma_attrs *attrs);
 
-	BUG_ON(!valid_dma_direction(direction));
-	if (ops->sync_sg_for_cpu)
-		ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
-	flush_write_buffers();
-}
+#ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
+extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
+extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
+extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
+#else
 
-static inline void
-dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
-		       int nelems, int direction)
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	if (!dev->dma_mask)
+		return 0;
 
-	BUG_ON(!valid_dma_direction(direction));
-	if (ops->sync_sg_for_device)
-		ops->sync_sg_for_device(hwdev, sg, nelems, direction);
-
-	flush_write_buffers();
+	return addr + size - 1 <= *dev->dma_mask;
 }
 
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-				      size_t offset, size_t size,
-				      int direction)
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(direction));
-	return ops->map_single(dev, page_to_phys(page) + offset,
-			       size, direction);
+	return paddr;
 }
 
-static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
-				  size_t size, int direction)
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 {
-	dma_unmap_single(dev, addr, size, direction);
+	return daddr;
 }
+#endif /* CONFIG_X86_DMA_REMAP */
 
 static inline void
 dma_cache_sync(struct device *dev, void *vaddr, size_t size,
@@ -230,13 +100,6 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	flush_write_buffers();
 }
 
-static inline int dma_get_cache_alignment(void)
-{
-	/* no easy way to get cache size on all x86, so return the
-	 * maximum possible, to be safe */
-	return boot_cpu_data.x86_clflush_size;
-}
-
 static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
 						    gfp_t gfp)
 {
@@ -244,7 +107,7 @@ static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
 
 	dma_mask = dev->coherent_dma_mask;
 	if (!dma_mask)
-		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;
+		dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
 
 	return dma_mask;
 }
@@ -253,20 +116,22 @@ static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
 {
 	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
 
-	if (dma_mask <= DMA_24BIT_MASK)
+	if (dma_mask <= DMA_BIT_MASK(24))
 		gfp |= GFP_DMA;
 #ifdef CONFIG_X86_64
-	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
+	if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
 		gfp |= GFP_DMA32;
 #endif
 	return gfp;
 }
 
+#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
+
 static inline void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		   gfp_t gfp)
+dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		gfp_t gfp, struct dma_attrs *attrs)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);
 	void *memory;
 
 	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
@@ -274,33 +139,38 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 
 	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
 		return memory;
 
-	if (!dev) {
+	if (!dev)
 		dev = &x86_dma_fallback_dev;
-		gfp |= GFP_DMA;
-	}
 
 	if (!is_device_dma_capable(dev))
 		return NULL;
 
-	if (!ops->alloc_coherent)
+	if (!ops->alloc)
 		return NULL;
 
-	return ops->alloc_coherent(dev, size, dma_handle,
-				   dma_alloc_coherent_gfp_flags(dev, gfp));
+	memory = ops->alloc(dev, size, dma_handle,
+			    dma_alloc_coherent_gfp_flags(dev, gfp), attrs);
+	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
+
+	return memory;
 }
 
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *vaddr, dma_addr_t bus)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *vaddr, dma_addr_t bus,
+				  struct dma_attrs *attrs)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);
 
 	WARN_ON(irqs_disabled());	/* for portability */
 
 	if (dma_release_from_coherent(dev, get_order(size), vaddr))
 		return;
 
-	if (ops->free_coherent)
-		ops->free_coherent(dev, size, vaddr, bus);
+	debug_dma_free_coherent(dev, size, vaddr, bus);
+	if (ops->free)
+		ops->free(dev, size, vaddr, bus, attrs);
 }
 
 #endif
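
For readers of the diff, here is a minimal, illustrative sketch of how a driver would exercise the interfaces declared in the new version of this header: dma_alloc_coherent() is now a macro over dma_alloc_attrs() with NULL attributes, streaming helpers such as dma_map_single() come in via the newly included asm-generic/dma-mapping-common.h, and failures are checked with dma_mapping_error(), which falls back to comparing against DMA_ERROR_CODE when the ops provide no ->mapping_error hook. All example_* names below are invented for this sketch; only the dma_* calls and flags are the real kernel API of this era.

/* Illustrative sketch only; every example_* identifier is hypothetical. */
#include <linux/device.h>
#include <linux/dma-mapping.h>

struct example_buf {
	void		*vaddr;	/* CPU virtual address */
	dma_addr_t	bus;	/* bus address handed to the device */
	size_t		len;
};

static int example_alloc(struct device *dev, struct example_buf *b, size_t len)
{
	/*
	 * Expands to dma_alloc_attrs(dev, len, &b->bus, GFP_KERNEL, NULL);
	 * zone flags (GFP_DMA/GFP_DMA32) are derived from the coherent mask
	 * by dma_alloc_coherent_gfp_flags().
	 */
	b->vaddr = dma_alloc_coherent(dev, len, &b->bus, GFP_KERNEL);
	if (!b->vaddr)
		return -ENOMEM;
	b->len = len;
	return 0;
}

static int example_map_tx(struct device *dev, void *data, size_t len,
			  dma_addr_t *out)
{
	/* Streaming mapping, provided via asm-generic/dma-mapping-common.h. */
	dma_addr_t addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);

	/* Without a ->mapping_error hook this compares against DMA_ERROR_CODE. */
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	*out = addr;
	return 0;
}

static void example_free(struct device *dev, struct example_buf *b)
{
	/* Must not run with IRQs off; note WARN_ON(irqs_disabled()) above. */
	dma_free_coherent(dev, b->len, b->vaddr, b->bus);
}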
