Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
-rw-r--r--        arch/arm/mm/dma-mapping.c        437
1 file changed, 341 insertions(+), 96 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 076c26d4386..1f88db06b13 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -9,6 +9,7 @@
  *
  *  DMA uncached mapping support.
  */
+#include <linux/bootmem.h>
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/gfp.h>
@@ -157,9 +158,47 @@ struct dma_map_ops arm_coherent_dma_ops = {
 };
 EXPORT_SYMBOL(arm_coherent_dma_ops);
 
+static int __dma_supported(struct device *dev, u64 mask, bool warn)
+{
+        unsigned long max_dma_pfn;
+
+        /*
+         * If the mask allows for more memory than we can address,
+         * and we actually have that much memory, then we must
+         * indicate that DMA to this device is not supported.
+         */
+        if (sizeof(mask) != sizeof(dma_addr_t) &&
+            mask > (dma_addr_t)~0 &&
+            dma_to_pfn(dev, ~0) < max_pfn) {
+                if (warn) {
+                        dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
+                                 mask);
+                        dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
+                }
+                return 0;
+        }
+
+        max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
+
+        /*
+         * Translate the device's DMA mask to a PFN limit.  This
+         * PFN number includes the page which we can DMA to.
+         */
+        if (dma_to_pfn(dev, mask) < max_dma_pfn) {
+                if (warn)
+                        dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
+                                 mask,
+                                 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
+                                 max_dma_pfn + 1);
+                return 0;
+        }
+
+        return 1;
+}
+
 static u64 get_coherent_dma_mask(struct device *dev)
 {
-        u64 mask = (u64)arm_dma_limit;
+        u64 mask = (u64)DMA_BIT_MASK(32);
 
         if (dev) {
                 mask = dev->coherent_dma_mask;
@@ -173,12 +212,8 @@ static u64 get_coherent_dma_mask(struct device *dev)
                         return 0;
                 }
 
-                if ((~mask) & (u64)arm_dma_limit) {
-                        dev_warn(dev, "coherent DMA mask %#llx is smaller "
-                                 "than system GFP_DMA mask %#llx\n",
-                                 mask, (u64)arm_dma_limit);
+                if (!__dma_supported(dev, mask, true))
                         return 0;
-                }
         }
 
         return mask;
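The new __dma_supported() helper replaces the old direct comparison against arm_dma_limit with a PFN-based test: the mask is translated into the highest page frame number the device can reach, and that is compared with the highest PFN the allocator may hand out. Below is a minimal user-space sketch of just that comparison; pfn_of() is a hypothetical stand-in for dma_to_pfn() assuming a 1:1 bus-to-physical mapping and 4 KiB pages.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                        /* assume 4 KiB pages */

/* Hypothetical stand-in for dma_to_pfn(): 1:1 bus-to-physical mapping. */
static unsigned long pfn_of(uint64_t bus_addr)
{
        return (unsigned long)(bus_addr >> PAGE_SHIFT);
}

/*
 * Mirror of the second test in __dma_supported(): the mask is only
 * usable if the highest PFN it can reach is not below the highest
 * PFN the allocator may return (max_dma_pfn in the patch).
 */
static bool mask_covers_memory(uint64_t mask, unsigned long max_dma_pfn)
{
        return pfn_of(mask) >= max_dma_pfn;
}

int main(void)
{
        unsigned long max_dma_pfn = 0x80000;        /* e.g. 2 GiB of RAM */

        /* A 32-bit mask reaches PFN 0xfffff and therefore suffices ... */
        printf("32-bit mask ok: %d\n", mask_covers_memory(0xffffffffULL, max_dma_pfn));
        /* ... a 28-bit mask tops out at PFN 0xffff and is rejected. */
        printf("28-bit mask ok: %d\n", mask_covers_memory(0x0fffffffULL, max_dma_pfn));
        return 0;
}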
@@ -186,13 +221,24 @@ static u64 get_coherent_dma_mask(struct device *dev)
 
 static void __dma_clear_buffer(struct page *page, size_t size)
 {
-        void *ptr;
         /*
         * Ensure that the allocated pages are zeroed, and that any data
         * lurking in the kernel direct-mapped region is invalidated.
         */
-        ptr = page_address(page);
-        if (ptr) {
+        if (PageHighMem(page)) {
+                phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
+                phys_addr_t end = base + size;
+                while (size > 0) {
+                        void *ptr = kmap_atomic(page);
+                        memset(ptr, 0, PAGE_SIZE);
+                        dmac_flush_range(ptr, ptr + PAGE_SIZE);
+                        kunmap_atomic(ptr);
+                        page++;
+                        size -= PAGE_SIZE;
+                }
+                outer_flush_range(base, end);
+        } else {
+                void *ptr = page_address(page);
                 memset(ptr, 0, size);
                 dmac_flush_range(ptr, ptr + size);
                 outer_flush_range(__pa(ptr), __pa(ptr) + size);
@@ -238,12 +284,10 @@ static void __dma_free_buffer(struct page *page, size_t size)
 }
 
 #ifdef CONFIG_MMU
-#ifdef CONFIG_HUGETLB_PAGE
-#error ARM Coherent DMA allocator does not (yet) support huge TLB
-#endif
 
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
-                                     pgprot_t prot, struct page **ret_page);
+                                     pgprot_t prot, struct page **ret_page,
+                                     const void *caller);
 
 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
                                   pgprot_t prot, struct page **ret_page,
@@ -329,7 +373,8 @@ void __init init_dma_coherent_pool_size(unsigned long size)
 static int __init atomic_pool_init(void)
 {
         struct dma_pool *pool = &atomic_pool;
-        pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
+        pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
+        gfp_t gfp = GFP_KERNEL | GFP_DMA;
         unsigned long nr_pages = pool->size >> PAGE_SHIFT;
         unsigned long *bitmap;
         struct page *page;
@@ -345,11 +390,12 @@ static int __init atomic_pool_init(void)
         if (!pages)
                 goto no_pages;
 
-        if (IS_ENABLED(CONFIG_CMA))
-                ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
+        if (dev_get_cma_area(NULL))
+                ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
+                                              atomic_pool_init);
         else
-                ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
-                                           &page, NULL);
+                ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page,
+                                           atomic_pool_init);
         if (ptr) {
                 int i;
@@ -415,12 +461,21 @@ void __init dma_contiguous_remap(void)
                 map.type = MT_MEMORY_DMA_READY;
 
                 /*
-                 * Clear previous low-memory mapping
+                 * Clear previous low-memory mapping to ensure that the
+                 * TLB does not see any conflicting entries, then flush
+                 * the TLB of the old entries before creating new mappings.
+                 *
+                 * This ensures that any speculatively loaded TLB entries
+                 * (even though they may be rare) can not cause any problems,
+                 * and ensures that this code is architecturally compliant.
                 */
                 for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
                      addr += PMD_SIZE)
                         pmd_clear(pmd_off_k(addr));
 
+                flush_tlb_kernel_range(__phys_to_virt(start),
+                                       __phys_to_virt(end));
+
                 iotable_init(&map, 1);
         }
 }
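__dma_clear_buffer() now needs two paths because a highmem page has no permanent kernel mapping: the buffer must be zeroed one page at a time through short-lived mappings. A minimal sketch of that walk, assuming page-aligned sizes; map_page()/unmap_page() are invented stand-ins for kmap_atomic()/kunmap_atomic(), and the per-page cache flush is elided.

#include <stddef.h>
#include <string.h>
#include <stdlib.h>

#define PAGE_SIZE 4096

/* Hypothetical stand-ins for kmap_atomic()/kunmap_atomic(): here a
 * "page handle" is simply the backing memory itself. */
static void *map_page(void *page)  { return page; }
static void unmap_page(void *ptr)  { (void)ptr; }

/* Mirror of the highmem loop in __dma_clear_buffer(): visit the buffer
 * one page at a time, since no contiguous mapping of the whole buffer
 * is guaranteed to exist. */
static void clear_buffer(void **pages, size_t size)
{
        size_t i = 0;

        while (size > 0) {
                void *ptr = map_page(pages[i++]);
                memset(ptr, 0, PAGE_SIZE);
                unmap_page(ptr);
                size -= PAGE_SIZE;
        }
}

int main(void)
{
        void *pages[4];
        for (int i = 0; i < 4; i++)
                pages[i] = malloc(PAGE_SIZE);
        clear_buffer(pages, 4 * PAGE_SIZE);
        for (int i = 0; i < 4; i++)
                free(pages[i]);
        return 0;
}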
@@ -441,7 +496,6 @@ static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
         unsigned end = start + size;
 
         apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
-        dsb();
         flush_tlb_kernel_range(start, end);
 }
@@ -542,27 +596,41 @@ static int __free_from_pool(void *start, size_t size)
 }
 
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
-                                     pgprot_t prot, struct page **ret_page)
+                                     pgprot_t prot, struct page **ret_page,
+                                     const void *caller)
 {
         unsigned long order = get_order(size);
         size_t count = size >> PAGE_SHIFT;
         struct page *page;
+        void *ptr;
 
         page = dma_alloc_from_contiguous(dev, count, order);
         if (!page)
                 return NULL;
 
         __dma_clear_buffer(page, size);
-        __dma_remap(page, size, prot);
 
+        if (PageHighMem(page)) {
+                ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
+                if (!ptr) {
+                        dma_release_from_contiguous(dev, page, count);
+                        return NULL;
+                }
+        } else {
+                __dma_remap(page, size, prot);
+                ptr = page_address(page);
+        }
         *ret_page = page;
-        return page_address(page);
+        return ptr;
 }
 
 static void __free_from_contiguous(struct device *dev, struct page *page,
-                                   size_t size)
+                                   void *cpu_addr, size_t size)
 {
-        __dma_remap(page, size, pgprot_kernel);
+        if (PageHighMem(page))
+                __dma_free_remap(cpu_addr, size);
+        else
+                __dma_remap(page, size, PAGE_KERNEL);
         dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
 }
 
@@ -583,9 +651,9 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
 #define __get_dma_pgprot(attrs, prot)                                __pgprot(0)
 #define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)        NULL
 #define __alloc_from_pool(size, ret_page)                        NULL
-#define __alloc_from_contiguous(dev, size, prot, ret)                NULL
+#define __alloc_from_contiguous(dev, size, prot, ret, c)        NULL
 #define __free_from_pool(cpu_addr, size)                        0
-#define __free_from_contiguous(dev, page, size)                        do { } while (0)
+#define __free_from_contiguous(dev, page, cpu_addr, size)        do { } while (0)
 #define __dma_free_remap(cpu_addr, size)                        do { } while (0)
 
 #endif        /* CONFIG_MMU */
@@ -640,12 +708,12 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 
         if (is_coherent || nommu())
                 addr = __alloc_simple_buffer(dev, size, gfp, &page);
-        else if (gfp & GFP_ATOMIC)
+        else if (!(gfp & __GFP_WAIT))
                 addr = __alloc_from_pool(size, &page);
-        else if (!IS_ENABLED(CONFIG_CMA))
+        else if (!dev_get_cma_area(dev))
                 addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
         else
-                addr = __alloc_from_contiguous(dev, size, prot, &page);
+                addr = __alloc_from_contiguous(dev, size, prot, &page, caller);
 
         if (addr)
                 *handle = pfn_to_dma(dev, page_to_pfn(page));
@@ -660,7 +728,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
                     gfp_t gfp, struct dma_attrs *attrs)
 {
-        pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
+        pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
         void *memory;
 
         if (dma_alloc_from_coherent(dev, size, handle, &memory))
@@ -673,7 +741,7 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
         dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
 {
-        pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
+        pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
         void *memory;
 
         if (dma_alloc_from_coherent(dev, size, handle, &memory))
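The allocator dispatch in __dma_alloc() now tests __GFP_WAIT instead of GFP_ATOMIC — any caller that may not sleep is served from the preallocated atomic pool — and the CMA-versus-remap choice is made per device via dev_get_cma_area() rather than per build via CONFIG_CMA. A self-contained sketch of that decision tree, with GFP_CAN_WAIT as an invented stand-in for the gfp bit:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical flag and predicates standing in for gfp bits and the
 * kernel helpers used by __dma_alloc(). */
#define GFP_CAN_WAIT 0x1        /* stand-in for __GFP_WAIT */

enum alloc_path { SIMPLE, ATOMIC_POOL, REMAP, CONTIGUOUS };

static enum alloc_path pick_path(bool coherent, unsigned gfp, bool dev_has_cma)
{
        if (coherent)
                return SIMPLE;                /* no remapping needed */
        if (!(gfp & GFP_CAN_WAIT))
                return ATOMIC_POOL;        /* cannot sleep: use the reserve */
        if (!dev_has_cma)
                return REMAP;                /* normal pages + new mapping */
        return CONTIGUOUS;                /* carve from the device's CMA area */
}

int main(void)
{
        printf("%d\n", pick_path(false, 0, true));                /* ATOMIC_POOL */
        printf("%d\n", pick_path(false, GFP_CAN_WAIT, true));        /* CONTIGUOUS */
        return 0;
}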
@@ -731,7 +799,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
                 __dma_free_buffer(page, size);
         } else if (__free_from_pool(cpu_addr, size)) {
                 return;
-        } else if (!IS_ENABLED(CONFIG_CMA)) {
+        } else if (!dev_get_cma_area(dev)) {
                 __dma_free_remap(cpu_addr, size);
                 __dma_free_buffer(page, size);
         } else {
@@ -739,7 +807,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
                 * Non-atomic allocations cannot be freed with IRQs disabled
                 */
                 WARN_ON(irqs_disabled());
-                __free_from_contiguous(dev, page, size);
+                __free_from_contiguous(dev, page, cpu_addr, size);
         }
 }
 
@@ -795,16 +863,17 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
                 if (PageHighMem(page)) {
                         if (len + offset > PAGE_SIZE)
                                 len = PAGE_SIZE - offset;
-                        vaddr = kmap_high_get(page);
-                        if (vaddr) {
-                                vaddr += offset;
-                                op(vaddr, len, dir);
-                                kunmap_high(page);
-                        } else if (cache_is_vipt()) {
-                                /* unmapped pages might still be cached */
+
+                        if (cache_is_vipt_nonaliasing()) {
                                 vaddr = kmap_atomic(page);
                                 op(vaddr + offset, len, dir);
                                 kunmap_atomic(vaddr);
+                        } else {
+                                vaddr = kmap_high_get(page);
+                                if (vaddr) {
+                                        op(vaddr + offset, len, dir);
+                                        kunmap_high(page);
+                                }
                         }
                 } else {
                         vaddr = page_address(page) + offset;
@@ -825,7 +894,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
         size_t size, enum dma_data_direction dir)
 {
-        unsigned long paddr;
+        phys_addr_t paddr;
 
         dma_cache_maint_page(page, off, size, dir, dmac_map_area);
@@ -841,20 +910,35 @@ static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
 static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
         size_t size, enum dma_data_direction dir)
 {
-        unsigned long paddr = page_to_phys(page) + off;
+        phys_addr_t paddr = page_to_phys(page) + off;
 
         /* FIXME: non-speculating: not required */
-        /* don't bother invalidating if DMA to device */
-        if (dir != DMA_TO_DEVICE)
+        /* in any case, don't bother invalidating if DMA to device */
+        if (dir != DMA_TO_DEVICE) {
                 outer_inv_range(paddr, paddr + size);
 
-        dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
+                dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
+        }
 
         /*
-         * Mark the D-cache clean for this page to avoid extra flushing.
+         * Mark the D-cache clean for these pages to avoid extra flushing.
         */
-        if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
-                set_bit(PG_dcache_clean, &page->flags);
+        if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
+                unsigned long pfn;
+                size_t left = size;
+
+                pfn = page_to_pfn(page) + off / PAGE_SIZE;
+                off %= PAGE_SIZE;
+                if (off) {
+                        pfn++;
+                        left -= PAGE_SIZE - off;
+                }
+                while (left >= PAGE_SIZE) {
+                        page = pfn_to_page(pfn++);
+                        set_bit(PG_dcache_clean, &page->flags);
+                        left -= PAGE_SIZE;
+                }
+        }
 }
 
 /**
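The rewritten tail of __dma_page_dev_to_cpu() marks every page the transfer fully covers, skipping a partially covered leading page, instead of only ever marking the first page. The offset arithmetic is isolated below in a small testable form; full_pages_covered() is a hypothetical name, and 4 KiB pages are assumed.

#include <assert.h>
#include <stddef.h>

#define PAGE_SIZE 4096

/*
 * Mirror of the marking loop added to __dma_page_dev_to_cpu(): given a
 * byte offset into the first page and a transfer size, return how many
 * whole pages are covered (those are the ones safe to mark clean).
 */
static unsigned long full_pages_covered(unsigned long off, size_t size)
{
        size_t left = size;
        unsigned long n = 0;

        off %= PAGE_SIZE;
        if (off)                        /* first page only partially covered */
                left -= PAGE_SIZE - off;
        while (left >= PAGE_SIZE) {        /* count fully covered pages */
                n++;
                left -= PAGE_SIZE;
        }
        return n;
}

int main(void)
{
        assert(full_pages_covered(0, 2 * PAGE_SIZE) == 2);
        /* 100 bytes into the first page: it is skipped, and the remainder
         * spans only one more full page, so one page qualifies. */
        assert(full_pages_covered(100, 2 * PAGE_SIZE) == 1);
        return 0;
}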
@@ -965,9 +1049,7 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 */
 int dma_supported(struct device *dev, u64 mask)
 {
-        if (mask < (u64)arm_dma_limit)
-                return 0;
-        return 1;
+        return __dma_supported(dev, mask, false);
 }
 EXPORT_SYMBOL(dma_supported);
 
@@ -994,45 +1076,98 @@ fs_initcall(dma_debug_do_init);
 
 /* IOMMU */
 
+static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
+
 static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
                                       size_t size)
 {
         unsigned int order = get_order(size);
         unsigned int align = 0;
         unsigned int count, start;
+        size_t mapping_size = mapping->bits << PAGE_SHIFT;
         unsigned long flags;
+        dma_addr_t iova;
+        int i;
 
-        count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
-                 (1 << mapping->order) - 1) >> mapping->order;
+        if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
+                order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;
 
-        if (order > mapping->order)
-                align = (1 << (order - mapping->order)) - 1;
+        count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+        align = (1 << order) - 1;
 
         spin_lock_irqsave(&mapping->lock, flags);
-        start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
-                                           count, align);
-        if (start > mapping->bits) {
-                spin_unlock_irqrestore(&mapping->lock, flags);
-                return DMA_ERROR_CODE;
+        for (i = 0; i < mapping->nr_bitmaps; i++) {
+                start = bitmap_find_next_zero_area(mapping->bitmaps[i],
+                                mapping->bits, 0, count, align);
+
+                if (start > mapping->bits)
+                        continue;
+
+                bitmap_set(mapping->bitmaps[i], start, count);
+                break;
         }
 
-        bitmap_set(mapping->bitmap, start, count);
+        /*
+         * No unused range found. Try to extend the existing mapping
+         * and perform a second attempt to reserve an IO virtual
+         * address range of size bytes.
+         */
+        if (i == mapping->nr_bitmaps) {
+                if (extend_iommu_mapping(mapping)) {
+                        spin_unlock_irqrestore(&mapping->lock, flags);
+                        return DMA_ERROR_CODE;
+                }
+
+                start = bitmap_find_next_zero_area(mapping->bitmaps[i],
+                                mapping->bits, 0, count, align);
+
+                if (start > mapping->bits) {
+                        spin_unlock_irqrestore(&mapping->lock, flags);
+                        return DMA_ERROR_CODE;
+                }
+
+                bitmap_set(mapping->bitmaps[i], start, count);
+        }
         spin_unlock_irqrestore(&mapping->lock, flags);
 
-        return mapping->base + (start << (mapping->order + PAGE_SHIFT));
+        iova = mapping->base + (mapping_size * i);
+        iova += start << PAGE_SHIFT;
+
+        return iova;
 }
 
 static inline void __free_iova(struct dma_iommu_mapping *mapping,
                                dma_addr_t addr, size_t size)
 {
-        unsigned int start = (addr - mapping->base) >>
-                             (mapping->order + PAGE_SHIFT);
-        unsigned int count = ((size >> PAGE_SHIFT) +
-                              (1 << mapping->order) - 1) >> mapping->order;
+        unsigned int start, count;
+        size_t mapping_size = mapping->bits << PAGE_SHIFT;
         unsigned long flags;
+        dma_addr_t bitmap_base;
+        u32 bitmap_index;
+
+        if (!size)
+                return;
+
+        bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size;
+        BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);
+
+        bitmap_base = mapping->base + mapping_size * bitmap_index;
+
+        start = (addr - bitmap_base) >> PAGE_SHIFT;
+
+        if (addr + size > bitmap_base + mapping_size) {
+                /*
+                 * The address range to be freed reaches into the iova
+                 * range of the next bitmap. This should not happen as
+                 * we don't allow this in __alloc_iova (at the
+                 * moment).
+                 */
+                BUG();
+        } else
+                count = size >> PAGE_SHIFT;
 
         spin_lock_irqsave(&mapping->lock, flags);
-        bitmap_clear(mapping->bitmap, start, count);
+        bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
         spin_unlock_irqrestore(&mapping->lock, flags);
 }
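__alloc_iova() no longer carves a single fixed bitmap at a configurable order; it scans an array of page-granular bitmaps and, when all are full, appends one more (extend_iommu_mapping()) before retrying, while __free_iova() recovers the bitmap index from the address by plain division. The user-space model below mirrors that scheme under simplifying assumptions: byte-per-page maps instead of real bitmaps, no locking or alignment handling, and invented names throughout.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SHIFT   12
#define BITS_PER_MAP 1024        /* pages tracked per bitmap "extension" */
#define MAX_MAPS     4           /* cap, like mapping->extensions */

struct iova_space {
        uint64_t base;                        /* first IO virtual address */
        unsigned char *maps[MAX_MAPS];        /* one byte per page; 0 = free */
        int nr_maps;
};

/* Linear scan for 'count' consecutive free pages; -1 if none. */
static int find_area(const unsigned char *map, int count)
{
        for (int s = 0; s + count <= BITS_PER_MAP; s++) {
                int ok = 1;
                for (int i = 0; i < count && ok; i++)
                        ok = !map[s + i];
                if (ok)
                        return s;
        }
        return -1;
}

/* Allocate: try every existing map, then grow by one map and retry,
 * mirroring __alloc_iova() + extend_iommu_mapping(). Returns 0 on
 * failure (a real allocator would use a distinct error cookie). */
static uint64_t alloc_iova(struct iova_space *m, int count)
{
        int i, start;

        for (i = 0; i < m->nr_maps; i++) {
                start = find_area(m->maps[i], count);
                if (start >= 0)
                        goto found;
        }
        if (m->nr_maps == MAX_MAPS)
                return 0;
        m->maps[i] = calloc(BITS_PER_MAP, 1);
        if (!m->maps[i])
                return 0;
        m->nr_maps++;
        start = find_area(m->maps[i], count);
        if (start < 0)
                return 0;
found:
        memset(m->maps[i] + start, 1, count);
        /* iova = base + (map index) * (map span) + offset within map */
        return m->base + (((uint64_t)i * BITS_PER_MAP + start) << PAGE_SHIFT);
}

/* Free: recover the map index by division, as __free_iova() does. */
static void free_iova(struct iova_space *m, uint64_t addr, int count)
{
        uint64_t off = (addr - m->base) >> PAGE_SHIFT;
        int idx = (int)(off / BITS_PER_MAP);

        memset(m->maps[idx] + off % BITS_PER_MAP, 0, count);
}

int main(void)
{
        struct iova_space m = { .base = 0x80000000ULL };

        uint64_t a = alloc_iova(&m, 8);        /* grows the first map on demand */
        printf("iova = %#llx\n", (unsigned long long)a);
        free_iova(&m, a, 8);
        for (int i = 0; i < m.nr_maps; i++)
                free(m.maps[i]);
        return 0;
}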
@@ -1068,12 +1203,17 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
                 return pages;
         }
 
+        /*
+         * IOMMU can map any pages, so himem can also be used here
+         */
+        gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
+
         while (count) {
                 int j, order = __fls(count);
 
-                pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
+                pages[i] = alloc_pages(gfp, order);
                 while (!pages[i] && order)
-                        pages[i] = alloc_pages(gfp | __GFP_NOWARN, --order);
+                        pages[i] = alloc_pages(gfp, --order);
                 if (!pages[i])
                         goto error;
 
@@ -1182,7 +1322,8 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
                         break;
 
                 len = (j - i) << PAGE_SHIFT;
-                ret = iommu_map(mapping->domain, iova, phys, len, 0);
+                ret = iommu_map(mapping->domain, iova, phys, len,
+                                IOMMU_READ|IOMMU_WRITE);
                 if (ret < 0)
                         goto fail;
                 iova += len;
@@ -1257,26 +1398,35 @@ err_mapping:
         return NULL;
 }
 
-static void __iommu_free_atomic(struct device *dev, struct page **pages,
+static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
                                 dma_addr_t handle, size_t size)
 {
         __iommu_remove_mapping(dev, handle, size);
-        __free_from_pool(page_address(pages[0]), size);
+        __free_from_pool(cpu_addr, size);
 }
 
 static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
             dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
 {
-        pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
+        pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
         struct page **pages;
         void *addr = NULL;
 
         *handle = DMA_ERROR_CODE;
         size = PAGE_ALIGN(size);
 
-        if (gfp & GFP_ATOMIC)
+        if (!(gfp & __GFP_WAIT))
                 return __iommu_alloc_atomic(dev, size, handle);
 
+        /*
+         * Following is a work-around (a.k.a. hack) to prevent pages
+         * with __GFP_COMP being passed to split_page() which cannot
+         * handle them. The real problem is that this flag probably
+         * should be 0 on ARM as it is not supported on this
+         * platform; see CONFIG_HUGETLBFS.
+         */
+        gfp &= ~(__GFP_COMP);
+
         pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
         if (!pages)
                 return NULL;
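__iommu_alloc_buffer() fills its page array greedily: each iteration tries the largest power-of-two chunk not exceeding the remaining count (__fls(count)) and falls back to smaller orders before failing, so contiguous runs are preferred but never required. A sketch of just that loop; try_alloc_order() is an invented stand-in for alloc_pages() that refuses anything above order 2 to force the fallback.

#include <stdio.h>

/* Highest set bit = largest order chunk not exceeding 'count',
 * like the kernel's __fls(). */
static int fls_order(unsigned count)
{
        int order = -1;
        while (count) {
                order++;
                count >>= 1;
        }
        return order;
}

/* Hypothetical stand-in for alloc_pages(): pretend the system cannot
 * find any contiguous run larger than 4 pages (order 2). */
static int try_alloc_order(int order)
{
        return order <= 2;
}

int main(void)
{
        unsigned count = 13;        /* pages still needed */

        while (count) {
                int order = fls_order(count);

                while (!try_alloc_order(order) && order)
                        order--;        /* fall back to smaller chunks */
                if (!try_alloc_order(order))
                        return 1;        /* even order 0 failed */
                printf("allocated a chunk of %u page(s)\n", 1u << order);
                count -= 1u << order;
        }
        return 0;
}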
@@ -1335,16 +1485,17 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
                           dma_addr_t handle, struct dma_attrs *attrs)
 {
-        struct page **pages = __iommu_get_pages(cpu_addr, attrs);
+        struct page **pages;
         size = PAGE_ALIGN(size);
 
-        if (!pages) {
-                WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
+        if (__in_atomic_pool(cpu_addr, size)) {
+                __iommu_free_atomic(dev, cpu_addr, handle, size);
                 return;
         }
 
-        if (__in_atomic_pool(cpu_addr, size)) {
-                __iommu_free_atomic(dev, pages, handle, size);
+        pages = __iommu_get_pages(cpu_addr, attrs);
+        if (!pages) {
+                WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
                 return;
         }
 
@@ -1371,6 +1522,27 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
                                          GFP_KERNEL);
 }
 
+static int __dma_direction_to_prot(enum dma_data_direction dir)
+{
+        int prot;
+
+        switch (dir) {
+        case DMA_BIDIRECTIONAL:
+                prot = IOMMU_READ | IOMMU_WRITE;
+                break;
+        case DMA_TO_DEVICE:
+                prot = IOMMU_READ;
+                break;
+        case DMA_FROM_DEVICE:
+                prot = IOMMU_WRITE;
+                break;
+        default:
+                prot = 0;
+        }
+
+        return prot;
+}
+
 /*
 * Map a part of the scatter-gather list into contiguous io address space
 */
@@ -1384,6 +1556,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
         int ret = 0;
         unsigned int count;
         struct scatterlist *s;
+        int prot;
 
         size = PAGE_ALIGN(size);
         *handle = DMA_ERROR_CODE;
@@ -1400,7 +1573,9 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
                     !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                         __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 
-                ret = iommu_map(mapping->domain, iova, phys, len, 0);
+                prot = __dma_direction_to_prot(dir);
+
+                ret = iommu_map(mapping->domain, iova, phys, len, prot);
                 if (ret < 0)
                         goto fail;
                 count += len >> PAGE_SHIFT;
@@ -1599,13 +1774,15 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
 {
         struct dma_iommu_mapping *mapping = dev->archdata.mapping;
         dma_addr_t dma_addr;
-        int ret, len = PAGE_ALIGN(size + offset);
+        int ret, prot, len = PAGE_ALIGN(size + offset);
 
         dma_addr = __alloc_iova(mapping, len);
         if (dma_addr == DMA_ERROR_CODE)
                 return dma_addr;
 
-        ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0);
+        prot = __dma_direction_to_prot(dir);
+
+        ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
         if (ret < 0)
                 goto fail;
 
@@ -1732,6 +1909,8 @@ struct dma_map_ops iommu_ops = {
         .unmap_sg                = arm_iommu_unmap_sg,
         .sync_sg_for_cpu        = arm_iommu_sync_sg_for_cpu,
         .sync_sg_for_device        = arm_iommu_sync_sg_for_device,
+
+        .set_dma_mask                = arm_dma_set_mask,
 };
 
 struct dma_map_ops iommu_coherent_ops = {
@@ -1745,14 +1924,15 @@ struct dma_map_ops iommu_coherent_ops = {
         .map_sg                = arm_coherent_iommu_map_sg,
         .unmap_sg        = arm_coherent_iommu_unmap_sg,
+
+        .set_dma_mask        = arm_dma_set_mask,
 };
 
 /**
 * arm_iommu_create_mapping
 * @bus: pointer to the bus holding the client device (for IOMMU calls)
 * @base: start address of the valid IO address space
- * @size: size of the valid IO address space
- * @order: accuracy of the IO addresses allocations
+ * @size: maximum size of the valid IO address space
 *
 * Creates a mapping structure which holds information about used/unused
 * IO address ranges, which is required to perform memory allocation and
 * mapping with IOMMU aware functions.
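The new __dma_direction_to_prot() helper finally propagates the DMA direction into IOMMU page-table permissions: DMA_TO_DEVICE means the device only reads memory, so IOMMU_READ suffices, and DMA_FROM_DEVICE only needs IOMMU_WRITE. The same mapping with local stand-in constants (the real IOMMU_* flags live in <linux/iommu.h>):

#include <stdio.h>

/* Local stand-ins for the IOMMU_* flags and dma_data_direction. */
enum { PROT_READ_ = 1, PROT_WRITE_ = 2 };
enum dir { BIDIRECTIONAL, TO_DEVICE, FROM_DEVICE, NONE };

/* Mirror of __dma_direction_to_prot(): the direction is named from
 * the CPU's point of view, so "to device" = device reads memory. */
static int dir_to_prot(enum dir d)
{
        switch (d) {
        case BIDIRECTIONAL:        return PROT_READ_ | PROT_WRITE_;
        case TO_DEVICE:                return PROT_READ_;
        case FROM_DEVICE:        return PROT_WRITE_;
        default:                return 0;
        }
}

int main(void)
{
        printf("to device -> prot %d\n", dir_to_prot(TO_DEVICE));        /* 1 */
        printf("from device -> prot %d\n", dir_to_prot(FROM_DEVICE));        /* 2 */
        return 0;
}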
@@ -1762,59 +1942,97 @@ struct dma_map_ops iommu_coherent_ops = {
 *
 * The client device need to be attached to the mapping with
 * arm_iommu_attach_device function.
 */
 struct dma_iommu_mapping *
-arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
-                         int order)
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
 {
-        unsigned int count = size >> (PAGE_SHIFT + order);
-        unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+        unsigned int bits = size >> PAGE_SHIFT;
+        unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
         struct dma_iommu_mapping *mapping;
+        int extensions = 1;
         int err = -ENOMEM;
 
-        if (!count)
+        if (!bitmap_size)
                 return ERR_PTR(-EINVAL);
 
+        if (bitmap_size > PAGE_SIZE) {
+                extensions = bitmap_size / PAGE_SIZE;
+                bitmap_size = PAGE_SIZE;
+        }
+
         mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
         if (!mapping)
                 goto err;
 
-        mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-        if (!mapping->bitmap)
+        mapping->bitmap_size = bitmap_size;
+        mapping->bitmaps = kzalloc(extensions * sizeof(unsigned long *),
+                                GFP_KERNEL);
+        if (!mapping->bitmaps)
                 goto err2;
 
+        mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
+        if (!mapping->bitmaps[0])
+                goto err3;
+
+        mapping->nr_bitmaps = 1;
+        mapping->extensions = extensions;
         mapping->base = base;
         mapping->bits = BITS_PER_BYTE * bitmap_size;
-        mapping->order = order;
+
         spin_lock_init(&mapping->lock);
 
         mapping->domain = iommu_domain_alloc(bus);
         if (!mapping->domain)
-                goto err3;
+                goto err4;
 
         kref_init(&mapping->kref);
         return mapping;
+err4:
+        kfree(mapping->bitmaps[0]);
 err3:
-        kfree(mapping->bitmap);
+        kfree(mapping->bitmaps);
 err2:
         kfree(mapping);
 err:
         return ERR_PTR(err);
 }
+EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
 
 static void release_iommu_mapping(struct kref *kref)
 {
+        int i;
         struct dma_iommu_mapping *mapping =
                 container_of(kref, struct dma_iommu_mapping, kref);
 
         iommu_domain_free(mapping->domain);
-        kfree(mapping->bitmap);
+        for (i = 0; i < mapping->nr_bitmaps; i++)
+                kfree(mapping->bitmaps[i]);
+        kfree(mapping->bitmaps);
         kfree(mapping);
 }
 
+static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
+{
+        int next_bitmap;
+
+        if (mapping->nr_bitmaps > mapping->extensions)
+                return -EINVAL;
+
+        next_bitmap = mapping->nr_bitmaps;
+        mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
+                                                GFP_ATOMIC);
+        if (!mapping->bitmaps[next_bitmap])
+                return -ENOMEM;
+
+        mapping->nr_bitmaps++;
+
+        return 0;
+}
+
 void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
 {
         if (mapping)
                 kref_put(&mapping->kref, release_iommu_mapping);
 }
+EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
 
 /**
 * arm_iommu_attach_device
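With the order parameter gone, arm_iommu_create_mapping() always tracks the space at page granularity; if the resulting bitmap would exceed one page it is capped at PAGE_SIZE and the remainder becomes lazily allocated extensions. The arithmetic, worked through for a 512 MiB window assuming 4 KiB pages and 32-bit longs:

#include <stdio.h>

#define PAGE_SHIFT        12
#define PAGE_SIZE        (1u << PAGE_SHIFT)
#define BITS_PER_BYTE        8
#define BITS_PER_LONG        32                        /* assumed, as on ARM */
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
        unsigned int size = 512u << 20;                /* 512 MiB IOVA space */
        unsigned int bits = size >> PAGE_SHIFT;        /* one bit per page */
        unsigned int bitmap_size =
                BITS_TO_LONGS(bits) * (BITS_PER_LONG / BITS_PER_BYTE);
        int extensions = 1;

        if (bitmap_size > PAGE_SIZE) {                /* cap at one page ... */
                extensions = bitmap_size / PAGE_SIZE;
                bitmap_size = PAGE_SIZE;        /* ... grow the rest lazily */
        }

        /* 512 MiB -> 131072 bits -> 16 KiB of bitmap -> 4 x 4 KiB maps */
        printf("%u pages, %d bitmap(s) of %u bytes\n",
               bits, extensions, bitmap_size);
        return 0;
}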
@@ -1843,5 +2061,32 @@ int arm_iommu_attach_device(struct device *dev,
         pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
         return 0;
 }
+EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
+
+/**
+ * arm_iommu_detach_device
+ * @dev: valid struct device pointer
+ *
+ * Detaches the provided device from a previously attached map.
+ * This voids the dma operations (dma_map_ops pointer)
+ */
+void arm_iommu_detach_device(struct device *dev)
+{
+        struct dma_iommu_mapping *mapping;
+
+        mapping = to_dma_iommu_mapping(dev);
+        if (!mapping) {
+                dev_warn(dev, "Not attached\n");
+                return;
+        }
+
+        iommu_detach_device(mapping->domain, dev);
+        kref_put(&mapping->kref, release_iommu_mapping);
+        dev->archdata.mapping = NULL;
+        set_dma_ops(dev, NULL);
+
+        pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
+}
+EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
 
 #endif
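With attach/detach and create/release all exported, a client driver owns the complete life cycle of its mapping. The sketch below shows the intended call order in kernel context; it is illustrative only — dev and bus are placeholders, the 0x80000000 base and 128 MiB window are arbitrary, and it is not meant to compile against any particular tree.

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/sizes.h>
#include <asm/dma-iommu.h>

/* Hypothetical client-driver setup around the exported helpers. */
static int example_setup(struct device *dev, struct bus_type *bus,
                         struct dma_iommu_mapping **out)
{
        struct dma_iommu_mapping *mapping;
        int err;

        /* one IOVA window: base 0x80000000, 128 MiB, page granular */
        mapping = arm_iommu_create_mapping(bus, 0x80000000, SZ_128M);
        if (IS_ERR(mapping))
                return PTR_ERR(mapping);

        err = arm_iommu_attach_device(dev, mapping);
        if (err) {
                arm_iommu_release_mapping(mapping);
                return err;
        }
        *out = mapping;
        return 0;
}

static void example_teardown(struct device *dev,
                             struct dma_iommu_mapping *mapping)
{
        arm_iommu_detach_device(dev);        /* drops the dma_map_ops */
        arm_iommu_release_mapping(mapping);
}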
