Diffstat (limited to 'arch/microblaze/include/asm/dma-mapping.h')
-rw-r--r--  arch/microblaze/include/asm/dma-mapping.h | 45
1 file changed, 15 insertions(+), 30 deletions(-)
diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h
index 3a3e5b88685..ab353723076 100644
--- a/arch/microblaze/include/asm/dma-mapping.h
+++ b/arch/microblaze/include/asm/dma-mapping.h
@@ -35,16 +35,6 @@
#define __dma_alloc_coherent(dev, gfp, size, handle) NULL
#define __dma_free_coherent(size, addr) ((void)0)
-static inline unsigned long device_to_mask(struct device *dev)
-{
- if (dev->dma_mask && *dev->dma_mask)
- return *dev->dma_mask;
- /* Assume devices without mask can take 32 bit addresses */
- return 0xfffffffful;
-}
-
-extern struct dma_map_ops *dma_ops;
-
/*
* Available generic sets of operations
*/
@@ -52,20 +42,7 @@ extern struct dma_map_ops dma_direct_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
- /* We don't handle the NULL dev case for ISA for now. We could
- * do it via an out of line call but it is not needed for now. The
- * only ISA DMA device we support is the floppy and we have a hack
- * in the floppy driver directly to get a device for us.
- */
- if (unlikely(!dev) || !dev->archdata.dma_ops)
- return NULL;
-
- return dev->archdata.dma_ops;
-}
-
-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
-{
- dev->archdata.dma_ops = ops;
+ return &dma_direct_ops;
}
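With this hunk applied, the per-device archdata lookup is gone: get_dma_ops() unconditionally returns the global dma_direct_ops table and set_dma_ops() disappears. A minimal sketch of what a caller now sees; the device pointer pdev->dev is illustrative only:

	/* Illustration only: every device now resolves to the same ops table. */
	struct dma_map_ops *ops = get_dma_ops(&pdev->dev);
	/* ops == &dma_direct_ops, regardless of dev->archdata */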
static inline int dma_supported(struct device *dev, u64 mask)
@@ -114,6 +91,8 @@ static inline void __dma_sync(unsigned long paddr,
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
struct dma_map_ops *ops = get_dma_ops(dev);
+
+ debug_dma_mapping_error(dev, dma_addr);
if (ops->mapping_error)
return ops->mapping_error(dev, dma_addr);
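The added debug_dma_mapping_error() call feeds the CONFIG_DMA_API_DEBUG tracker before the per-ops check runs. Driver-side usage is unchanged; a hedged sketch, with buf and len standing in for a real buffer:

	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))	/* also recorded by DMA API debug */
		return -ENOMEM;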
@@ -123,28 +102,34 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
void *memory;
BUG_ON(!ops);
- memory = ops->alloc_coherent(dev, size, dma_handle, flag);
+ memory = ops->alloc(dev, size, dma_handle, flag, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
return memory;
}
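dma_alloc_coherent() is now a thin macro over dma_alloc_attrs() with a NULL dma_attrs pointer, and the backend hook moves from ops->alloc_coherent to ops->alloc. Existing callers keep compiling unchanged; a hedged sketch of such a call site, with dev, vaddr and bus as placeholder names:

	void *vaddr;
	dma_addr_t bus;

	/* expands to dma_alloc_attrs(dev, PAGE_SIZE, &bus, GFP_KERNEL, NULL) */
	vaddr = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;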
-static inline void dma_free_coherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d, s, c, h, NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops);
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
- ops->free_coherent(dev, size, cpu_addr, dma_handle);
+ ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
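Likewise dma_free_coherent() becomes dma_free_attrs(..., NULL) and dispatches to ops->free. The matching release for the sketch above:

	/* expands to dma_free_attrs(dev, PAGE_SIZE, vaddr, bus, NULL) */
	dma_free_coherent(dev, PAGE_SIZE, vaddr, bus);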
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,