Diffstat (limited to 'arch/sparc/kernel/iommu.c')
-rw-r--r--	arch/sparc/kernel/iommu.c	55
1 file changed, 25 insertions(+), 30 deletions(-)
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 0aeaefe696b..bfa4d0c2df4 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -5,12 +5,14 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <linux/errno.h>
 #include <linux/iommu-helper.h>
+#include <linux/bitmap.h>
 
 #ifdef CONFIG_PCI
 #include <linux/pci.h>
@@ -19,6 +21,7 @@
 #include <asm/iommu.h>
 
 #include "iommu_common.h"
+#include "kernel.h"
 
 #define STC_CTXMATCH_ADDR(STC, CTX)	\
 	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
@@ -169,7 +172,7 @@ void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long np
 
 	entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
 
-	iommu_area_free(arena->map, entry, npages);
+	bitmap_clear(arena->map, entry, npages);
 }
 
 int iommu_table_init(struct iommu *iommu, int tsbsize,
@@ -253,10 +256,9 @@ static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
 static int iommu_alloc_ctx(struct iommu *iommu)
 {
 	int lowest = iommu->ctx_lowest_free;
-	int sz = IOMMU_NUM_CTXS - lowest;
-	int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);
+	int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);
 
-	if (unlikely(n == sz)) {
+	if (unlikely(n == IOMMU_NUM_CTXS)) {
 		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
 		if (unlikely(n == lowest)) {
 			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
@@ -279,7 +281,8 @@ static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
 }
 
 static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
-				   dma_addr_t *dma_addrp, gfp_t gfp)
+				   dma_addr_t *dma_addrp, gfp_t gfp,
+				   struct dma_attrs *attrs)
 {
 	unsigned long flags, order, first_page;
 	struct iommu *iommu;
@@ -329,16 +332,14 @@ static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
 }
 
 static void dma_4u_free_coherent(struct device *dev, size_t size,
-				 void *cpu, dma_addr_t dvma)
+				 void *cpu, dma_addr_t dvma,
+				 struct dma_attrs *attrs)
 {
 	struct iommu *iommu;
-	iopte_t *iopte;
 	unsigned long flags, order, npages;
 
 	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
 	iommu = dev->archdata.iommu;
-	iopte = iommu->page_table +
-		((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
 
 	spin_lock_irqsave(&iommu->lock, flags);
 
@@ -353,7 +354,8 @@
 
 static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
 				  unsigned long offset, size_t sz,
-				  enum dma_data_direction direction)
+				  enum dma_data_direction direction,
+				  struct dma_attrs *attrs)
 {
 	struct iommu *iommu;
 	struct strbuf *strbuf;
@@ -474,7 +476,8 @@ do_flush_sync:
 }
 
 static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
-			      size_t sz, enum dma_data_direction direction)
+			      size_t sz, enum dma_data_direction direction,
+			      struct dma_attrs *attrs)
 {
 	struct iommu *iommu;
 	struct strbuf *strbuf;
@@ -520,7 +523,8 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
 }
 
 static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
-			 int nelems, enum dma_data_direction direction)
+			 int nelems, enum dma_data_direction direction,
+			 struct dma_attrs *attrs)
 {
 	struct scatterlist *s, *outs, *segstart;
 	unsigned long flags, handle, prot, ctx;
@@ -691,7 +695,8 @@ static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
 }
 
 static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
-			    int nelems, enum dma_data_direction direction)
+			    int nelems, enum dma_data_direction direction,
+			    struct dma_attrs *attrs)
 {
 	unsigned long flags, ctx;
 	struct scatterlist *sg;
@@ -822,9 +827,9 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-static const struct dma_ops sun4u_dma_ops = {
-	.alloc_coherent		= dma_4u_alloc_coherent,
-	.free_coherent		= dma_4u_free_coherent,
+static struct dma_map_ops sun4u_dma_ops = {
+	.alloc			= dma_4u_alloc_coherent,
+	.free			= dma_4u_free_coherent,
 	.map_page		= dma_4u_map_page,
 	.unmap_page		= dma_4u_unmap_page,
 	.map_sg			= dma_4u_map_sg,
@@ -833,7 +838,7 @@ static const struct dma_ops sun4u_dma_ops = {
 	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
 };
 
-const struct dma_ops *dma_ops = &sun4u_dma_ops;
+struct dma_map_ops *dma_ops = &sun4u_dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
 int dma_supported(struct device *dev, u64 device_mask)
@@ -848,20 +853,10 @@ int dma_supported(struct device *dev, u64 device_mask)
 		return 1;
 
 #ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type)
-		return pci_dma_supported(to_pci_dev(dev), device_mask);
+	if (dev_is_pci(dev))
+		return pci64_dma_supported(to_pci_dev(dev), device_mask);
 #endif
 
 	return 0;
 }
 EXPORT_SYMBOL(dma_supported);
-
-int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type)
-		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
-#endif
-	return -EINVAL;
-}
-EXPORT_SYMBOL(dma_set_mask);
