Diffstat (limited to 'arch/arm/common/dmabounce.c')
-rw-r--r--  arch/arm/common/dmabounce.c | 572
1 file changed, 228 insertions(+), 344 deletions(-)
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index ad6c89a555b..1143c4d5c56 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -5,8 +5,8 @@
* limited DMA windows. These functions utilize bounce buffers to
* copy data to/from buffers located outside the DMA region. This
* only works for systems in which DMA memory is at the bottom of
- * RAM and the remainder of memory is at the top an the DMA memory
- * can be marked as ZONE_DMA. Anything beyond that such as discontigous
+ * RAM, the remainder of memory is at the top and the DMA memory
+ * can be marked as ZONE_DMA. Anything beyond that such as discontiguous
* DMA windows will require custom implementations that reserve memory
* areas at early bootup.
*
@@ -25,14 +25,15 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/page-flags.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>
+#include <linux/scatterlist.h>
#include <asm/cacheflush.h>
-#undef DEBUG
#undef STATS
#ifdef STATS
@@ -66,46 +67,39 @@ struct dmabounce_pool {
};
struct dmabounce_device_info {
- struct list_head node;
-
struct device *dev;
struct list_head safe_buffers;
#ifdef STATS
unsigned long total_allocs;
unsigned long map_op_count;
unsigned long bounce_count;
+ int attr_res;
#endif
struct dmabounce_pool small;
struct dmabounce_pool large;
-};
-static LIST_HEAD(dmabounce_devs);
+ rwlock_t lock;
+
+ int (*needs_bounce)(struct device *, dma_addr_t, size_t);
+};
#ifdef STATS
-static void print_alloc_stats(struct dmabounce_device_info *device_info)
+static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- printk(KERN_INFO
- "%s: dmabounce: sbp: %lu, lbp: %lu, other: %lu, total: %lu\n",
- device_info->dev->bus_id,
- device_info->small.allocs, device_info->large.allocs,
+ struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
+ return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
+ device_info->small.allocs,
+ device_info->large.allocs,
device_info->total_allocs - device_info->small.allocs -
device_info->large.allocs,
- device_info->total_allocs);
+ device_info->total_allocs,
+ device_info->map_op_count,
+ device_info->bounce_count);
}
-#endif
-
-/* find the given device in the dmabounce device list */
-static inline struct dmabounce_device_info *
-find_dmabounce_dev(struct device *dev)
-{
- struct dmabounce_device_info *d;
- list_for_each_entry(d, &dmabounce_devs, node)
- if (d->dev == dev)
- return d;
-
- return NULL;
-}
+static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
+#endif
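
[Annotation: the periodic printk of allocation statistics is gone; the counters are now exposed on demand through the sysfs attribute above. The six space-separated fields are, in order: small-pool allocs, large-pool allocs, allocs that fell through to the coherent allocator, total allocs, map operations, and bounce count. A minimal userspace reader — a sketch only, with a hypothetical sysfs path that depends on the actual device — could look like:]

    /* Hedged sketch: parse the dmabounce_stats attribute.
     * The sysfs path below is illustrative, not authoritative. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long small, large, other, total, map_ops, bounces;
            FILE *f = fopen("/sys/devices/platform/sa1111/dmabounce_stats", "r");

            if (!f)
                    return 1;
            if (fscanf(f, "%lu %lu %lu %lu %lu %lu",
                       &small, &large, &other, &total, &map_ops, &bounces) == 6)
                    printf("bounced %lu of %lu mappings\n", bounces, map_ops);
            fclose(f);
            return 0;
    }
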
/* allocate a 'safe' buffer and keep track of it */
@@ -116,6 +110,7 @@ alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
struct safe_buffer *buf;
struct dmabounce_pool *pool;
struct device *dev = device_info->dev;
+ unsigned long flags;
dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
__func__, ptr, size, dir);
@@ -159,11 +154,11 @@ alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
if (pool)
pool->allocs++;
device_info->total_allocs++;
- if (device_info->total_allocs % 1000 == 0)
- print_alloc_stats(device_info);
#endif
+ write_lock_irqsave(&device_info->lock, flags);
list_add(&buf->node, &device_info->safe_buffers);
+ write_unlock_irqrestore(&device_info->lock, flags);
return buf;
}
@@ -172,22 +167,35 @@ alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
- struct safe_buffer *b;
+ struct safe_buffer *b, *rb = NULL;
+ unsigned long flags;
+
+ read_lock_irqsave(&device_info->lock, flags);
list_for_each_entry(b, &device_info->safe_buffers, node)
- if (b->safe_dma_addr == safe_dma_addr)
- return b;
+ if (b->safe_dma_addr <= safe_dma_addr &&
+ b->safe_dma_addr + b->size > safe_dma_addr) {
+ rb = b;
+ break;
+ }
- return NULL;
+ read_unlock_irqrestore(&device_info->lock, flags);
+ return rb;
}
static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
+ unsigned long flags;
+
dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);
+ write_lock_irqsave(&device_info->lock, flags);
+
list_del(&buf->node);
+ write_unlock_irqrestore(&device_info->lock, flags);
+
if (buf->pool)
dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
else
@@ -199,189 +207,97 @@ free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *
/* ************************************************** */
-#ifdef STATS
-static void print_map_stats(struct dmabounce_device_info *device_info)
+static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
+ dma_addr_t dma_addr, const char *where)
{
- dev_info(device_info->dev,
- "dmabounce: map_op_count=%lu, bounce_count=%lu\n",
- device_info->map_op_count, device_info->bounce_count);
+ if (!dev || !dev->archdata.dmabounce)
+ return NULL;
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "Trying to %s invalid mapping\n", where);
+ return NULL;
+ }
+ return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}
-#endif
-static inline dma_addr_t
-map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction dir)
+static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
{
- struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
- dma_addr_t dma_addr;
- int needs_bounce = 0;
-
- if (device_info)
- DO_STATS ( device_info->map_op_count++ );
-
- dma_addr = virt_to_dma(dev, ptr);
+ if (!dev || !dev->archdata.dmabounce)
+ return 0;
if (dev->dma_mask) {
- unsigned long mask = *dev->dma_mask;
- unsigned long limit;
+ unsigned long limit, mask = *dev->dma_mask;
limit = (mask + 1) & ~mask;
if (limit && size > limit) {
dev_err(dev, "DMA mapping too big (requested %#x "
"mask %#Lx)\n", size, *dev->dma_mask);
- return ~0;
+ return -E2BIG;
}
- /*
- * Figure out if we need to bounce from the DMA mask.
- */
- needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
+ /* Figure out if we need to bounce from the DMA mask. */
+ if ((dma_addr | (dma_addr + size - 1)) & ~mask)
+ return 1;
}
- if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
- struct safe_buffer *buf;
-
- buf = alloc_safe_buffer(device_info, ptr, size, dir);
- if (buf == 0) {
- dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
- __func__, ptr);
- return 0;
- }
+ return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size);
+}
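
[Annotation: the (mask + 1) & ~mask expression in needs_bounce() computes the size of the DMA window when the mask is a contiguous run of low bits, and overflows to zero for a full 32-bit mask, which disables the size check entirely. A standalone demonstration of the arithmetic, using uint32_t to make the 32-bit overflow explicit:]

    /* Demonstrates the window-size computation used by needs_bounce();
     * expected outputs follow from the bit arithmetic. */
    #include <stdio.h>
    #include <stdint.h>

    static uint32_t window_limit(uint32_t mask)
    {
            return (mask + 1) & ~mask;
    }

    int main(void)
    {
            printf("%#x\n", window_limit(0x00ffffffu)); /* 0x1000000: 16MB window */
            printf("%#x\n", window_limit(0x03ffffffu)); /* 0x4000000: 64MB window */
            printf("%#x\n", window_limit(0xffffffffu)); /* 0: full mask, no size limit */
            return 0;
    }
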
- dev_dbg(dev,
- "%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
- __func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
- buf->safe, (void *) buf->safe_dma_addr);
+static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
+ enum dma_data_direction dir)
+{
+ struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
+ struct safe_buffer *buf;
- if ((dir == DMA_TO_DEVICE) ||
- (dir == DMA_BIDIRECTIONAL)) {
- dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
- __func__, ptr, buf->safe, size);
- memcpy(buf->safe, ptr, size);
- }
- ptr = buf->safe;
+ if (device_info)
+ DO_STATS ( device_info->map_op_count++ );
- dma_addr = buf->safe_dma_addr;
+ buf = alloc_safe_buffer(device_info, ptr, size, dir);
+ if (buf == NULL) {
+ dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
+ __func__, ptr);
+ return DMA_ERROR_CODE;
}
- consistent_sync(ptr, size, dir);
-
- return dma_addr;
-}
+ dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+ __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+ buf->safe, buf->safe_dma_addr);
-static inline void
-unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction dir)
-{
- struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
- struct safe_buffer *buf = NULL;
-
- /*
- * Trying to unmap an invalid mapping
- */
- if (dma_mapping_error(dma_addr)) {
- dev_err(dev, "Trying to unmap invalid mapping\n");
- return;
+ if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
+ dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
+ __func__, ptr, buf->safe, size);
+ memcpy(buf->safe, ptr, size);
}
- if (device_info)
- buf = find_safe_buffer(device_info, dma_addr);
-
- if (buf) {
- BUG_ON(buf->size != size);
-
- dev_dbg(dev,
- "%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
- __func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
- buf->safe, (void *) buf->safe_dma_addr);
-
- DO_STATS ( device_info->bounce_count++ );
-
- if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
- unsigned long ptr;
-
- dev_dbg(dev,
- "%s: copy back safe %p to unsafe %p size %d\n",
- __func__, buf->safe, buf->ptr, size);
- memcpy(buf->ptr, buf->safe, size);
-
- /*
- * DMA buffers must have the same cache properties
- * as if they were really used for DMA - which means
- * data must be written back to RAM. Note that
- * we don't use dmac_flush_range() here for the
- * bidirectional case because we know the cache
- * lines will be coherent with the data written.
- */
- ptr = (unsigned long)buf->ptr;
- dmac_clean_range(ptr, ptr + size);
- }
- free_safe_buffer(device_info, buf);
- }
+ return buf->safe_dma_addr;
}
-static inline void
-sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction dir)
+static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
+ size_t size, enum dma_data_direction dir)
{
- struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
- struct safe_buffer *buf = NULL;
+ BUG_ON(buf->size != size);
+ BUG_ON(buf->direction != dir);
- if (device_info)
- buf = find_safe_buffer(device_info, dma_addr);
+ dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+ __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+ buf->safe, buf->safe_dma_addr);
- if (buf) {
- /*
- * Both of these checks from original code need to be
- * commented out b/c some drivers rely on the following:
- *
- * 1) Drivers may map a large chunk of memory into DMA space
- * but only sync a small portion of it. Good example is
- * allocating a large buffer, mapping it, and then
- * breaking it up into small descriptors. No point
- * in syncing the whole buffer if you only have to
- * touch one descriptor.
- *
- * 2) Buffers that are mapped as DMA_BIDIRECTIONAL are
- * usually only synced in one dir at a time.
- *
- * See drivers/net/eepro100.c for examples of both cases.
- *
- * -ds
- *
- * BUG_ON(buf->size != size);
- * BUG_ON(buf->direction != dir);
- */
+ DO_STATS(dev->archdata.dmabounce->bounce_count++);
- dev_dbg(dev,
- "%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
- __func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
- buf->safe, (void *) buf->safe_dma_addr);
+ if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
+ void *ptr = buf->ptr;
- DO_STATS ( device_info->bounce_count++ );
+ dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
+ __func__, buf->safe, ptr, size);
+ memcpy(ptr, buf->safe, size);
- switch (dir) {
- case DMA_FROM_DEVICE:
- dev_dbg(dev,
- "%s: copy back safe %p to unsafe %p size %d\n",
- __func__, buf->safe, buf->ptr, size);
- memcpy(buf->ptr, buf->safe, size);
- break;
- case DMA_TO_DEVICE:
- dev_dbg(dev,
- "%s: copy out unsafe %p to safe %p, size %d\n",
- __func__,buf->ptr, buf->safe, size);
- memcpy(buf->safe, buf->ptr, size);
- break;
- case DMA_BIDIRECTIONAL:
- BUG(); /* is this allowed? what does it mean? */
- default:
- BUG();
- }
- consistent_sync(buf->safe, size, dir);
- } else {
- consistent_sync(dma_to_virt(dev, dma_addr), size, dir);
+ /*
+ * Since we may have written to a page cache page,
+ * we need to ensure that the data will be coherent
+ * with user mappings.
+ */
+ __cpuc_flush_dcache_area(ptr, size);
}
+ free_safe_buffer(dev->archdata.dmabounce, buf);
}
/* ************************************************** */
@@ -392,25 +308,33 @@ sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
* substitute the safe buffer for the unsafe one.
* (basically move the buffer from an unsafe area to a safe one)
*/
-dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction dir)
+static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
- unsigned long flags;
dma_addr_t dma_addr;
+ int ret;
- dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
- __func__, ptr, size, dir);
+ dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
+ __func__, page, offset, size, dir);
- BUG_ON(dir == DMA_NONE);
+ dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;
- local_irq_save(flags);
+ ret = needs_bounce(dev, dma_addr, size);
+ if (ret < 0)
+ return DMA_ERROR_CODE;
- dma_addr = map_single(dev, ptr, size, dir);
+ if (ret == 0) {
+ arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
+ return dma_addr;
+ }
- local_irq_restore(flags);
+ if (PageHighMem(page)) {
+ dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
+ return DMA_ERROR_CODE;
+ }
- return dma_addr;
+ return map_single(dev, page_address(page) + offset, size, dir);
}
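
[Annotation: whether an in-range mapping still bounces is now the device's decision, made through the needs_bounce callback supplied at registration time. A hypothetical callback with the required int (*)(struct device *, dma_addr_t, size_t) signature — the 64MB boundary is an invented example, and SZ_64M is assumed from the ARM sizes header:]

    /* Hypothetical bounce policy: nonzero return forces a bounce.
     * Pretends the device can only address the first 64MB. */
    static int example_needs_bounce(struct device *dev, dma_addr_t addr,
                                    size_t size)
    {
            return (addr + size - 1) >= SZ_64M;
    }
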
/*
@@ -419,161 +343,129 @@ dma_map_single(struct device *dev, void *ptr, size_t size,
* the safe buffer. (basically return things back to the way they
* should be)
*/
-
-void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction dir)
+static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
{
- unsigned long flags;
-
- dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
- __func__, (void *) dma_addr, size, dir);
-
- BUG_ON(dir == DMA_NONE);
+ struct safe_buffer *buf;
- local_irq_save(flags);
+ dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n",
+ __func__, dma_addr, size, dir);
- unmap_single(dev, dma_addr, size, dir);
+ buf = find_safe_buffer_dev(dev, dma_addr, __func__);
+ if (!buf) {
+ arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
+ return;
+ }
- local_irq_restore(flags);
+ unmap_single(dev, buf, size, dir);
}
-int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir)
+static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
+ size_t sz, enum dma_data_direction dir)
{
- unsigned long flags;
- int i;
-
- dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
- __func__, sg, nents, dir);
-
- BUG_ON(dir == DMA_NONE);
-
- local_irq_save(flags);
-
- for (i = 0; i < nents; i++, sg++) {
- struct page *page = sg->page;
- unsigned int offset = sg->offset;
- unsigned int length = sg->length;
- void *ptr = page_address(page) + offset;
-
- sg->dma_address =
- map_single(dev, ptr, length, dir);
- }
-
- local_irq_restore(flags);
+ struct safe_buffer *buf;
+ unsigned long off;
- return nents;
-}
+ dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
+ __func__, addr, sz, dir);
-void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir)
-{
- unsigned long flags;
- int i;
+ buf = find_safe_buffer_dev(dev, addr, __func__);
+ if (!buf)
+ return 1;
- dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
- __func__, sg, nents, dir);
+ off = addr - buf->safe_dma_addr;
- BUG_ON(dir == DMA_NONE);
+ BUG_ON(buf->direction != dir);
- local_irq_save(flags);
+ dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
+ __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
+ buf->safe, buf->safe_dma_addr);
- for (i = 0; i < nents; i++, sg++) {
- dma_addr_t dma_addr = sg->dma_address;
- unsigned int length = sg->length;
+ DO_STATS(dev->archdata.dmabounce->bounce_count++);
- unmap_single(dev, dma_addr, length, dir);
+ if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
+ dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
+ __func__, buf->safe + off, buf->ptr + off, sz);
+ memcpy(buf->ptr + off, buf->safe + off, sz);
}
-
- local_irq_restore(flags);
+ return 0;
}
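
[Annotation: the offset handling above exists because drivers legitimately sync sub-ranges of one large mapping, e.g. individual descriptors carved out of a single buffer; the range match in find_safe_buffer() resolves an interior DMA address back to its bounce buffer. A hedged driver-side fragment, all names illustrative:]

    /* Map one large buffer, then sync only a 64-byte descriptor
     * inside it; handle + off lands inside the safe buffer's range. */
    dma_addr_t handle = dma_map_single(dev, buf, buf_size, DMA_FROM_DEVICE);

    dma_sync_single_for_cpu(dev, handle + off, 64, DMA_FROM_DEVICE);
    /* the CPU may now read the descriptor at buf + off */
    dma_sync_single_for_device(dev, handle + off, 64, DMA_FROM_DEVICE);
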
-void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction dir)
+static void dmabounce_sync_for_cpu(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
- unsigned long flags;
-
- dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
- __func__, (void *) dma_addr, size, dir);
-
- local_irq_save(flags);
-
- sync_single(dev, dma_addr, size, dir);
+ if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
+ return;
- local_irq_restore(flags);
+ arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
}
-void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction dir)
+static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
+ size_t sz, enum dma_data_direction dir)
{
- unsigned long flags;
-
- dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
- __func__, (void *) dma_addr, size, dir);
-
- local_irq_save(flags);
-
- sync_single(dev, dma_addr, size, dir);
+ struct safe_buffer *buf;
+ unsigned long off;
- local_irq_restore(flags);
-}
+ dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
+ __func__, addr, sz, dir);
-void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir)
-{
- unsigned long flags;
- int i;
+ buf = find_safe_buffer_dev(dev, addr, __func__);
+ if (!buf)
+ return 1;
- dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
- __func__, sg, nents, dir);
+ off = addr - buf->safe_dma_addr;
- BUG_ON(dir == DMA_NONE);
+ BUG_ON(buf->direction != dir);
- local_irq_save(flags);
+ dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
+ __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
+ buf->safe, buf->safe_dma_addr);
- for (i = 0; i < nents; i++, sg++) {
- dma_addr_t dma_addr = sg->dma_address;
- unsigned int length = sg->length;
+ DO_STATS(dev->archdata.dmabounce->bounce_count++);
- sync_single(dev, dma_addr, length, dir);
+ if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
+ dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n",
+			__func__, buf->ptr + off, buf->safe + off, sz);
+ memcpy(buf->safe + off, buf->ptr + off, sz);
}
-
- local_irq_restore(flags);
+ return 0;
}
-void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir)
+static void dmabounce_sync_for_device(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
- unsigned long flags;
- int i;
-
- dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
- __func__, sg, nents, dir);
-
- BUG_ON(dir == DMA_NONE);
-
- local_irq_save(flags);
+ if (!__dmabounce_sync_for_device(dev, handle, size, dir))
+ return;
- for (i = 0; i < nents; i++, sg++) {
- dma_addr_t dma_addr = sg->dma_address;
- unsigned int length = sg->length;
+ arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
+}
- sync_single(dev, dma_addr, length, dir);
- }
+static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
+{
+ if (dev->archdata.dmabounce)
+ return 0;
- local_irq_restore(flags);
+ return arm_dma_ops.set_dma_mask(dev, dma_mask);
}
-static int
-dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, const char *name,
- unsigned long size)
+static struct dma_map_ops dmabounce_ops = {
+ .alloc = arm_dma_alloc,
+ .free = arm_dma_free,
+ .mmap = arm_dma_mmap,
+ .get_sgtable = arm_dma_get_sgtable,
+ .map_page = dmabounce_map_page,
+ .unmap_page = dmabounce_unmap_page,
+ .sync_single_for_cpu = dmabounce_sync_for_cpu,
+ .sync_single_for_device = dmabounce_sync_for_device,
+ .map_sg = arm_dma_map_sg,
+ .unmap_sg = arm_dma_unmap_sg,
+ .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = arm_dma_sync_sg_for_device,
+ .set_dma_mask = dmabounce_set_mask,
+};
+
+static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
+ const char *name, unsigned long size)
{
pool->size = size;
DO_STATS(pool->allocs = 0);
@@ -584,18 +476,17 @@ dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, const char
return pool->pool ? 0 : -ENOMEM;
}
-int
-dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
- unsigned long large_buffer_size)
+int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
+ unsigned long large_buffer_size,
+ int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t))
{
struct dmabounce_device_info *device_info;
int ret;
device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
if (!device_info) {
- printk(KERN_ERR
- "Could not allocated dmabounce_device_info for %s",
- dev->bus_id);
+		dev_err(dev,
+			"Could not allocate dmabounce_device_info\n");
return -ENOMEM;
}
@@ -622,17 +513,20 @@ dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
device_info->dev = dev;
INIT_LIST_HEAD(&device_info->safe_buffers);
+ rwlock_init(&device_info->lock);
+ device_info->needs_bounce = needs_bounce_fn;
#ifdef STATS
device_info->total_allocs = 0;
device_info->map_op_count = 0;
device_info->bounce_count = 0;
+ device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
#endif
- list_add(&device_info->node, &dmabounce_devs);
+ dev->archdata.dmabounce = device_info;
+ set_dma_ops(dev, &dmabounce_ops);
- printk(KERN_INFO "dmabounce: registered device %s on %s bus\n",
- dev->bus_id, dev->bus->name);
+ dev_info(dev, "dmabounce: registered device\n");
return 0;
@@ -642,23 +536,25 @@ dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
kfree(device_info);
return ret;
}
+EXPORT_SYMBOL(dmabounce_register_dev);
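
[Annotation: with the bounce policy now a registration argument, a caller wires everything up in one step. A hypothetical probe-path fragment, reusing the callback sketched earlier — the 512-byte and 4KB pool sizes are illustrative:]

    /* Register small and large bounce pools plus the bounce policy;
     * on success dmabounce installs its dma_map_ops for this device. */
    int err = dmabounce_register_dev(dev, 512, 4096, example_needs_bounce);
    if (err)
            dev_err(dev, "dmabounce registration failed: %d\n", err);

[The matching dmabounce_unregister_dev() below clears archdata.dmabounce and restores the default DMA ops.]
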
-void
-dmabounce_unregister_dev(struct device *dev)
+void dmabounce_unregister_dev(struct device *dev)
{
- struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
+ struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
+
+ dev->archdata.dmabounce = NULL;
+ set_dma_ops(dev, NULL);
if (!device_info) {
- printk(KERN_WARNING
- "%s: Never registered with dmabounce but attempting" \
- "to unregister!\n", dev->bus_id);
+ dev_warn(dev,
+ "Never registered with dmabounce but attempting"
+ "to unregister!\n");
return;
}
if (!list_empty(&device_info->safe_buffers)) {
- printk(KERN_ERR
- "%s: Removing from dmabounce with pending buffers!\n",
- dev->bus_id);
+ dev_err(dev,
+ "Removing from dmabounce with pending buffers!\n");
BUG();
}
@@ -668,26 +564,14 @@ dmabounce_unregister_dev(struct device *dev)
dma_pool_destroy(device_info->large.pool);
#ifdef STATS
- print_alloc_stats(device_info);
- print_map_stats(device_info);
+ if (device_info->attr_res == 0)
+ device_remove_file(dev, &dev_attr_dmabounce_stats);
#endif
- list_del(&device_info->node);
-
kfree(device_info);
- printk(KERN_INFO "dmabounce: device %s on %s bus unregistered\n",
- dev->bus_id, dev->bus->name);
+ dev_info(dev, "dmabounce: device unregistered\n");
}
-
-
-EXPORT_SYMBOL(dma_map_single);
-EXPORT_SYMBOL(dma_unmap_single);
-EXPORT_SYMBOL(dma_map_sg);
-EXPORT_SYMBOL(dma_unmap_sg);
-EXPORT_SYMBOL(dma_sync_single);
-EXPORT_SYMBOL(dma_sync_sg);
-EXPORT_SYMBOL(dmabounce_register_dev);
EXPORT_SYMBOL(dmabounce_unregister_dev);
MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");