Diffstat (limited to 'arch/alpha/kernel/pci_iommu.c')
-rw-r--r--  arch/alpha/kernel/pci_iommu.c  311
1 file changed, 139 insertions(+), 172 deletions(-)
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index dd6e334ab9e..eddee772034 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -5,8 +5,9 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/pci.h>
-#include <linux/slab.h>
+#include <linux/gfp.h>
 #include <linux/bootmem.h>
+#include <linux/export.h>
 #include <linux/scatterlist.h>
 #include <linux/log2.h>
 #include <linux/dma-mapping.h>
@@ -41,13 +42,6 @@ mk_iommu_pte(unsigned long paddr)
         return (paddr >> (PAGE_SHIFT-1)) | 1;
 }
 
-static inline long
-calc_npages(long bytes)
-{
-        return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
-}
-
-
 /* Return the minimum of MAX or the first power of two larger
    than main memory.  */
 
@@ -79,25 +73,21 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
 
 #ifdef CONFIG_DISCONTIGMEM
 
-        if (!NODE_DATA(nid) ||
-            (NULL == (arena = alloc_bootmem_node(NODE_DATA(nid),
-                                                 sizeof(*arena))))) {
-                printk("%s: couldn't allocate arena from node %d\n"
-                       "    falling back to system-wide allocation\n",
-                       __FUNCTION__, nid);
-                arena = alloc_bootmem(sizeof(*arena));
-        }
-
-        if (!NODE_DATA(nid) ||
-            (NULL == (arena->ptes = __alloc_bootmem_node(NODE_DATA(nid),
-                                                         mem_size,
-                                                         align,
-                                                         0)))) {
-                printk("%s: couldn't allocate arena ptes from node %d\n"
-                       "    falling back to system-wide allocation\n",
-                       __FUNCTION__, nid);
-                arena->ptes = __alloc_bootmem(mem_size, align, 0);
-        }
+        arena = alloc_bootmem_node(NODE_DATA(nid), sizeof(*arena));
+        if (!NODE_DATA(nid) || !arena) {
+                printk("%s: couldn't allocate arena from node %d\n"
+                       "    falling back to system-wide allocation\n",
+                       __func__, nid);
+                arena = alloc_bootmem(sizeof(*arena));
+        }
+
+        arena->ptes = __alloc_bootmem_node(NODE_DATA(nid), mem_size, align, 0);
+        if (!NODE_DATA(nid) || !arena->ptes) {
+                printk("%s: couldn't allocate arena ptes from node %d\n"
+                       "    falling back to system-wide allocation\n",
+                       __func__, nid);
+                arena->ptes = __alloc_bootmem(mem_size, align, 0);
+        }
 
 #else /* CONFIG_DISCONTIGMEM */
 
@@ -227,10 +217,30 @@ iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
         for (i = 0; i < n; ++i)
                 p[i] = 0;
 }
-
-/* True if the machine supports DAC addressing, and DEV can
-   make use of it given MASK.  */
-static int pci_dac_dma_supported(struct pci_dev *hwdev, u64 mask);
+
+/*
+ * True if the machine supports DAC addressing, and DEV can
+ * make use of it given MASK.
+ */
+static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
+{
+        dma_addr_t dac_offset = alpha_mv.pci_dac_offset;
+        int ok = 1;
+
+        /* If this is not set, the machine doesn't support DAC at all.  */
+        if (dac_offset == 0)
+                ok = 0;
+
+        /* The device has to be able to address our DAC bit.  */
+        if ((dac_offset & dev->dma_mask) != dac_offset)
+                ok = 0;
+
+        /* If both conditions above are met, we are fine. */
+        DBGA("pci_dac_dma_supported %s from %pf\n",
+             ok ? "yes" : "no", __builtin_return_address(0));
+
+        return ok;
+}
 
 /* Map a single buffer of the indicated size for PCI DMA in streaming
    mode.  The 32-bit PCI bus mastering address to use is returned.
@@ -258,7 +268,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
             && paddr + size <= __direct_map_size) {
                 ret = paddr + __direct_map_base;
 
-                DBGA2("pci_map_single: [%p,%lx] -> direct %lx from %p\n",
+                DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %pf\n",
                       cpu_addr, size, ret, __builtin_return_address(0));
 
                 return ret;
@@ -269,7 +279,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
         if (dac_allowed) {
                 ret = paddr + alpha_mv.pci_dac_offset;
 
-                DBGA2("pci_map_single: [%p,%lx] -> DAC %lx from %p\n",
+                DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %pf\n",
                       cpu_addr, size, ret, __builtin_return_address(0));
 
                 return ret;
@@ -279,11 +289,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
            assume it doesn't support sg mapping, and, since we tried to
            use direct_map above, it now must be considered an error. */
         if (! alpha_mv.mv_pci_tbi) {
-                static int been_here = 0; /* Only print the message once. */
-                if (!been_here) {
-                    printk(KERN_WARNING "pci_map_single: no HW sg\n");
-                    been_here = 1;
-                }
+                printk_once(KERN_WARNING "pci_map_single: no HW sg\n");
                 return 0;
         }
 
@@ -291,7 +297,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
         if (!arena || arena->dma_base + arena->size - 1 > max_dma)
                 arena = hose->sg_isa;
 
-        npages = calc_npages((paddr & ~PAGE_MASK) + size);
+        npages = iommu_num_pages(paddr, size, PAGE_SIZE);
 
         /* Force allocation to 64KB boundary for ISA bridges. */
         if (pdev && pdev == isa_bridge)
@@ -310,39 +316,50 @@
         ret = arena->dma_base + dma_ofs * PAGE_SIZE;
         ret += (unsigned long)cpu_addr & ~PAGE_MASK;
 
-        DBGA2("pci_map_single: [%p,%lx] np %ld -> sg %lx from %p\n",
+        DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %pf\n",
               cpu_addr, size, npages, ret, __builtin_return_address(0));
 
         return ret;
 }
 
-dma_addr_t
-pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size, int dir)
+/* Helper for generic DMA-mapping functions. */
+static struct pci_dev *alpha_gendev_to_pci(struct device *dev)
 {
-        int dac_allowed;
+        if (dev && dev_is_pci(dev))
+                return to_pci_dev(dev);
 
-        if (dir == PCI_DMA_NONE)
-                BUG();
+        /* Assume that non-PCI devices asking for DMA are either ISA or EISA,
+           BUG() otherwise. */
+        BUG_ON(!isa_bridge);
 
-        dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
-        return pci_map_single_1(pdev, cpu_addr, size, dac_allowed);
+        /* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
+           bridge is bus master then). */
+        if (!dev || !dev->dma_mask || !*dev->dma_mask)
+                return isa_bridge;
+
+        /* For EISA bus masters, return isa_bridge (it might have smaller
+           dma_mask due to wiring limitations). */
+        if (*dev->dma_mask >= isa_bridge->dma_mask)
+                return isa_bridge;
+
+        /* This assumes ISA bus master with dma_mask 0xffffff. */
+        return NULL;
 }
-EXPORT_SYMBOL(pci_map_single);
 
-dma_addr_t
-pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset,
-             size_t size, int dir)
+static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
+                                     unsigned long offset, size_t size,
+                                     enum dma_data_direction dir,
+                                     struct dma_attrs *attrs)
 {
+        struct pci_dev *pdev = alpha_gendev_to_pci(dev);
         int dac_allowed;
 
-        if (dir == PCI_DMA_NONE)
-                BUG();
+        BUG_ON(dir == PCI_DMA_NONE);
 
         dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
         return pci_map_single_1(pdev, (char *)page_address(page) + offset,
                                 size, dac_allowed);
 }
-EXPORT_SYMBOL(pci_map_page);
 
 /* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
    SIZE must match what was provided for in a previous pci_map_single
@@ -350,30 +367,30 @@ EXPORT_SYMBOL(pci_map_page);
    the cpu to the buffer are guaranteed to see whatever the device
    wrote there.  */
 
-void
-pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
-                 int direction)
+static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
+                                 size_t size, enum dma_data_direction dir,
+                                 struct dma_attrs *attrs)
 {
         unsigned long flags;
+        struct pci_dev *pdev = alpha_gendev_to_pci(dev);
         struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
         struct pci_iommu_arena *arena;
         long dma_ofs, npages;
 
-        if (direction == PCI_DMA_NONE)
-                BUG();
+        BUG_ON(dir == PCI_DMA_NONE);
 
         if (dma_addr >= __direct_map_base
             && dma_addr < __direct_map_base + __direct_map_size) {
                 /* Nothing to do.  */
 
-                DBGA2("pci_unmap_single: direct [%lx,%lx] from %p\n",
+                DBGA2("pci_unmap_single: direct [%llx,%zx] from %pf\n",
                       dma_addr, size, __builtin_return_address(0));
 
                 return;
         }
 
         if (dma_addr > 0xffffffff) {
-                DBGA2("pci64_unmap_single: DAC [%lx,%lx] from %p\n",
+                DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %pf\n",
                       dma_addr, size, __builtin_return_address(0));
                 return;
         }
@@ -384,14 +401,14 @@ pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
 
         dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
         if (dma_ofs * PAGE_SIZE >= arena->size) {
-                printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %lx "
-                       " base %lx size %x\n", dma_addr, arena->dma_base,
-                       arena->size);
+                printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %llx "
+                       " base %llx size %x\n",
+                       dma_addr, arena->dma_base, arena->size);
                 return;
                 BUG();
         }
 
-        npages = calc_npages((dma_addr & ~PAGE_MASK) + size);
+        npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
 
         spin_lock_irqsave(&arena->lock, flags);
@@ -405,28 +422,20 @@ pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
 
         spin_unlock_irqrestore(&arena->lock, flags);
 
-        DBGA2("pci_unmap_single: sg [%lx,%lx] np %ld from %p\n",
+        DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %pf\n",
               dma_addr, size, npages, __builtin_return_address(0));
 }
-EXPORT_SYMBOL(pci_unmap_single);
-
-void
-pci_unmap_page(struct pci_dev *pdev, dma_addr_t dma_addr,
-               size_t size, int direction)
-{
-        pci_unmap_single(pdev, dma_addr, size, direction);
-}
-EXPORT_SYMBOL(pci_unmap_page);
 
 /* Allocate and map kernel buffer using consistent mode DMA for PCI
    device.  Returns non-NULL cpu-view pointer to the buffer if
    successful and sets *DMA_ADDRP to the pci side dma address as well,
    else DMA_ADDRP is undefined.  */
 
-void *
-__pci_alloc_consistent(struct pci_dev *pdev, size_t size,
-                       dma_addr_t *dma_addrp, gfp_t gfp)
+static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
+                                      dma_addr_t *dma_addrp, gfp_t gfp,
+                                      struct dma_attrs *attrs)
 {
+        struct pci_dev *pdev = alpha_gendev_to_pci(dev);
         void *cpu_addr;
         long order = get_order(size);
 
@@ -436,7 +445,7 @@ try_again:
         cpu_addr = (void *)__get_free_pages(gfp, order);
         if (! cpu_addr) {
                 printk(KERN_INFO "pci_alloc_consistent: "
-                       "get_free_pages failed from %p\n",
+                       "get_free_pages failed from %pf\n",
                         __builtin_return_address(0));
                 /* ??? Really atomic allocation?  Otherwise we could play
                    with vmalloc and sg if we can't find contiguous memory.  */
@@ -454,13 +463,12 @@ try_again:
                 gfp |= GFP_DMA;
                 goto try_again;
         }
-
-        DBGA2("pci_alloc_consistent: %lx -> [%p,%x] from %p\n",
+
+        DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %pf\n",
               size, cpu_addr, *dma_addrp, __builtin_return_address(0));
 
         return cpu_addr;
 }
-EXPORT_SYMBOL(__pci_alloc_consistent);
 
 /* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
    be values that were returned from pci_alloc_consistent.  SIZE must
@@ -468,17 +476,17 @@ EXPORT_SYMBOL(__pci_alloc_consistent);
    References to the memory and mappings associated with CPU_ADDR or
    DMA_ADDR past this call are illegal.  */
 
-void
-pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
-                    dma_addr_t dma_addr)
+static void alpha_pci_free_coherent(struct device *dev, size_t size,
+                                    void *cpu_addr, dma_addr_t dma_addr,
+                                    struct dma_attrs *attrs)
 {
+        struct pci_dev *pdev = alpha_gendev_to_pci(dev);
         pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
         free_pages((unsigned long)cpu_addr, get_order(size));
 
-        DBGA2("pci_free_consistent: [%x,%lx] from %p\n",
+        DBGA2("pci_free_consistent: [%llx,%zx] from %pf\n",
               dma_addr, size, __builtin_return_address(0));
 }
-EXPORT_SYMBOL(pci_free_consistent);
 
 /* Classify the elements of the scatterlist.  Write dma_address
    of each element with:
@@ -562,7 +570,7 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
                 out->dma_address = paddr + __direct_map_base;
                 out->dma_length = size;
 
-                DBGA("    sg_fill: [%p,%lx] -> direct %lx\n",
+                DBGA("    sg_fill: [%p,%lx] -> direct %llx\n",
                      __va(paddr), size, out->dma_address);
 
                 return 0;
@@ -574,7 +582,7 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
                 out->dma_address = paddr + alpha_mv.pci_dac_offset;
                 out->dma_length = size;
 
-                DBGA("    sg_fill: [%p,%lx] -> DAC %lx\n",
+                DBGA("    sg_fill: [%p,%lx] -> DAC %llx\n",
                      __va(paddr), size, out->dma_address);
 
                 return 0;
@@ -584,7 +592,7 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
            contiguous.  */
 
         paddr &= ~PAGE_MASK;
-        npages = calc_npages(paddr + size);
+        npages = iommu_num_pages(paddr, size, PAGE_SIZE);
         dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
         if (dma_ofs < 0) {
                 /* If we attempted a direct map above but failed, die.  */
@@ -600,7 +608,7 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
         out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
         out->dma_length = size;
 
-        DBGA("    sg_fill: [%p,%lx] -> sg %lx np %ld\n",
+        DBGA("    sg_fill: [%p,%lx] -> sg %llx np %ld\n",
              __va(paddr), size, out->dma_address, npages);
 
         /* All virtually contiguous.  We need to find the length of each
@@ -620,7 +628,7 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
                         sg++;
                 }
 
-                npages = calc_npages((paddr & ~PAGE_MASK) + size);
+                npages = iommu_num_pages(paddr, size, PAGE_SIZE);
 
                 paddr &= PAGE_MASK;
                 for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
@@ -641,23 +649,20 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
         return 1;
 }
 
-int
-pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
-           int direction)
+static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
+                            int nents, enum dma_data_direction dir,
+                            struct dma_attrs *attrs)
 {
+        struct pci_dev *pdev = alpha_gendev_to_pci(dev);
         struct scatterlist *start, *end, *out;
         struct pci_controller *hose;
         struct pci_iommu_arena *arena;
         dma_addr_t max_dma;
         int dac_allowed;
-        struct device *dev;
 
-        if (direction == PCI_DMA_NONE)
-                BUG();
-
-        dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
+        BUG_ON(dir == PCI_DMA_NONE);
 
-        dev = pdev ? &pdev->dev : NULL;
+        dac_allowed = dev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
 
         /* Fast path single entry scatterlists.  */
         if (nents == 1) {
@@ -714,19 +719,19 @@ pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
         /* Some allocation failed while mapping the scatterlist
            entries.  Unmap them now.  */
         if (out > start)
-                pci_unmap_sg(pdev, start, out - start, direction);
+                pci_unmap_sg(pdev, start, out - start, dir);
         return 0;
 }
-EXPORT_SYMBOL(pci_map_sg);
 
 /* Unmap a set of streaming mode DMA translations.  Again, cpu read
    rules concerning calls here are the same as for pci_unmap_single()
    above.  */
 
-void
-pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
-             int direction)
+static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
+                               int nents, enum dma_data_direction dir,
+                               struct dma_attrs *attrs)
 {
+        struct pci_dev *pdev = alpha_gendev_to_pci(dev);
         unsigned long flags;
         struct pci_controller *hose;
         struct pci_iommu_arena *arena;
@@ -734,8 +739,7 @@ pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
         dma_addr_t max_dma;
         dma_addr_t fbeg, fend;
 
-        if (direction == PCI_DMA_NONE)
-                BUG();
+        BUG_ON(dir == PCI_DMA_NONE);
 
         if (! alpha_mv.mv_pci_tbi)
                 return;
@@ -751,7 +755,7 @@ pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
         spin_lock_irqsave(&arena->lock, flags);
 
         for (end = sg + nents; sg < end; ++sg) {
-                dma64_addr_t addr;
+                dma_addr_t addr;
                 size_t size;
                 long npages, ofs;
                 dma_addr_t tend;
@@ -763,7 +767,7 @@
                 if (addr > 0xffffffff) {
                         /* It's a DAC address -- nothing to do.  */
-                        DBGA("    (%ld) DAC [%lx,%lx]\n",
+                        DBGA("    (%ld) DAC [%llx,%zx]\n",
                               sg - end + nents, addr, size);
                         continue;
                 }
@@ -771,15 +775,15 @@ pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
                 if (addr >= __direct_map_base
                     && addr < __direct_map_base + __direct_map_size) {
                         /* Nothing to do.  */
-                        DBGA("    (%ld) direct [%lx,%lx]\n",
+                        DBGA("    (%ld) direct [%llx,%zx]\n",
                               sg - end + nents, addr, size);
                         continue;
                 }
 
-                DBGA("    (%ld) sg [%lx,%lx]\n",
+                DBGA("    (%ld) sg [%llx,%zx]\n",
                      sg - end + nents, addr, size);
 
-                npages = calc_npages((addr & ~PAGE_MASK) + size);
+                npages = iommu_num_pages(addr, size, PAGE_SIZE);
                 ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
                 iommu_arena_free(arena, ofs, npages);
@@ -798,15 +802,13 @@
 
         DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
 }
-EXPORT_SYMBOL(pci_unmap_sg);
-
 
 /* Return whether the given PCI device DMA address mask can be
    supported properly.  */
 
-int
-pci_dma_supported(struct pci_dev *pdev, u64 mask)
+static int alpha_pci_supported(struct device *dev, u64 mask)
 {
+        struct pci_dev *pdev = alpha_gendev_to_pci(dev);
         struct pci_controller *hose;
         struct pci_iommu_arena *arena;
@@ -833,7 +835,6 @@ pci_dma_supported(struct pci_dev *pdev, u64 mask)
 
         return 0;
 }
-EXPORT_SYMBOL(pci_dma_supported);
 
 
 /*
@@ -891,7 +892,7 @@ iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
 
 int
 iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
-           unsigned long *physaddrs)
+           struct page **pages)
 {
         unsigned long flags;
         unsigned long *ptes;
@@ -911,7 +912,7 @@ iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
         }
 
         for(i = 0, j = pg_start; i < pg_count; i++, j++)
-                ptes[j] = mk_iommu_pte(physaddrs[i]);
+                ptes[j] = mk_iommu_pte(page_to_phys(pages[i]));
 
         spin_unlock_irqrestore(&arena->lock, flags);
@@ -933,66 +934,32 @@ iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
         return 0;
 }
 
-/* True if the machine supports DAC addressing, and DEV can
-   make use of it given MASK.  */
-
-static int
-pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
+static int alpha_pci_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-        dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
-        int ok = 1;
-
-        /* If this is not set, the machine doesn't support DAC at all.  */
-        if (dac_offset == 0)
-                ok = 0;
-
-        /* The device has to be able to address our DAC bit.  */
-        if ((dac_offset & dev->dma_mask) != dac_offset)
-                ok = 0;
-
-        /* If both conditions above are met, we are fine. */
-        DBGA("pci_dac_dma_supported %s from %p\n",
-             ok ? "yes" : "no", __builtin_return_address(0));
-
-        return ok;
+        return dma_addr == 0;
 }
 
-/* Helper for generic DMA-mapping functions. */
-
-struct pci_dev *
-alpha_gendev_to_pci(struct device *dev)
-{
-        if (dev && dev->bus == &pci_bus_type)
-                return to_pci_dev(dev);
-
-        /* Assume that non-PCI devices asking for DMA are either ISA or EISA,
-           BUG() otherwise. */
-        BUG_ON(!isa_bridge);
-
-        /* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
-           bridge is bus master then). */
-        if (!dev || !dev->dma_mask || !*dev->dma_mask)
-                return isa_bridge;
-
-        /* For EISA bus masters, return isa_bridge (it might have smaller
-           dma_mask due to wiring limitations). */
-        if (*dev->dma_mask >= isa_bridge->dma_mask)
-                return isa_bridge;
-
-        /* This assumes ISA bus master with dma_mask 0xffffff. */
-        return NULL;
-}
-EXPORT_SYMBOL(alpha_gendev_to_pci);
-
-int
-dma_set_mask(struct device *dev, u64 mask)
+static int alpha_pci_set_mask(struct device *dev, u64 mask)
 {
         if (!dev->dma_mask ||
             !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
                 return -EIO;
 
         *dev->dma_mask = mask;
-
         return 0;
 }
-EXPORT_SYMBOL(dma_set_mask);
+
+struct dma_map_ops alpha_pci_ops = {
+        .alloc                  = alpha_pci_alloc_coherent,
+        .free                   = alpha_pci_free_coherent,
+        .map_page               = alpha_pci_map_page,
+        .unmap_page             = alpha_pci_unmap_page,
+        .map_sg                 = alpha_pci_map_sg,
+        .unmap_sg               = alpha_pci_unmap_sg,
+        .mapping_error          = alpha_pci_mapping_error,
+        .dma_supported          = alpha_pci_supported,
+        .set_dma_mask           = alpha_pci_set_mask,
+};
+
+struct dma_map_ops *dma_ops = &alpha_pci_ops;
EXPORT_SYMBOL(dma_ops);
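
Note: every calc_npages((x & ~PAGE_MASK) + size) call above becomes
iommu_num_pages(x, size, PAGE_SIZE). The two are arithmetically identical:
both count the pages spanned by a buffer that starts part-way into a page.
The sketch below is a standalone userspace restatement for checking that
claim, not code from this patch; the Alpha page size and the sample values
are assumptions.

    #include <stdio.h>

    #define PAGE_SHIFT 13                   /* assume 8 KiB pages, as on Alpha */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    /* The helper this diff deletes. */
    static long calc_npages(long bytes)
    {
            return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
    }

    /* Restatement of lib/iommu-helper.c's iommu_num_pages(). */
    static unsigned long iommu_num_pages(unsigned long addr, unsigned long len,
                                         unsigned long io_page_size)
    {
            unsigned long size = (addr & (io_page_size - 1)) + len;

            return (size + io_page_size - 1) / io_page_size; /* DIV_ROUND_UP */
    }

    int main(void)
    {
            unsigned long paddr = 0x12345678, size = 0x2000; /* sample values */

            /* Both print 2: the 8 KiB buffer starts 0x1678 bytes into a
               page, so it straddles two pages. */
            printf("%ld\n", calc_npages((paddr & ~PAGE_MASK) + size));
            printf("%lu\n", iommu_num_pages(paddr, size, PAGE_SIZE));
            return 0;
    }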
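
The DAC test in pci_dac_dma_supported() reduces to one mask comparison: the
per-machine DAC offset bit(s) must survive ANDing with the device's dma_mask.
A minimal restatement, where the offset value is purely illustrative (the
real one comes from alpha_mv.pci_dac_offset):

    #include <stdbool.h>
    #include <stdint.h>

    /* Mirrors the two checks in pci_dac_dma_supported() above. */
    static bool dac_ok(uint64_t dac_offset, uint64_t dma_mask)
    {
            /* No offset configured: the machine has no DAC window at all. */
            if (dac_offset == 0)
                    return false;
            /* The device must be able to address the DAC bit(s). */
            return (dac_offset & dma_mask) == dac_offset;
    }

    /* dac_ok(1ULL << 40, ~0ULL)         -> true:  64-bit mask covers bit 40
     * dac_ok(1ULL << 40, 0xffffffffULL) -> false: 32-bit device can't set it
     * dac_ok(0, ~0ULL)                  -> false: no DAC support on machine */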
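
With dma_ops pointing at alpha_pci_ops, the exported pci_map_single()-style
entry points disappear and drivers reach this file through the generic DMA
API, whose inline wrappers of this era dispatch through get_dma_ops(dev). A
hypothetical driver fragment (function and variable names invented for
illustration):

    /* dma_map_single(&pdev->dev, ...) expands to roughly
     *   get_dma_ops(dev)->map_page(dev, virt_to_page(buf),
     *                              offset_in_page(buf), len, dir, NULL)
     * which is alpha_pci_map_page() above. */
    static int example_map(struct pci_dev *pdev, void *buf, size_t len,
                           dma_addr_t *handle)
    {
            *handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);

            /* pci_map_single_1() returns 0 on failure, and
               alpha_pci_mapping_error() reports exactly that through the
               generic dma_mapping_error() helper. */
            if (dma_mapping_error(&pdev->dev, *handle))
                    return -ENOMEM;
            return 0;
    }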
