Diffstat (limited to 'arch/powerpc/sysdev/dart_iommu.c')
| -rw-r--r-- | arch/powerpc/sysdev/dart_iommu.c | 265 |
1 file changed, 202 insertions, 63 deletions
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 61d31742861..9e5353ff6d1 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -27,36 +27,36 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
-#include <linux/config.h>
 #include <linux/init.h>
 #include <linux/types.h>
-#include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/pci.h>
 #include <linux/dma-mapping.h>
 #include <linux/vmalloc.h>
+#include <linux/suspend.h>
+#include <linux/memblock.h>
+#include <linux/gfp.h>
 
 #include <asm/io.h>
 #include <asm/prom.h>
 #include <asm/iommu.h>
 #include <asm/pci-bridge.h>
 #include <asm/machdep.h>
-#include <asm/abs_addr.h>
 #include <asm/cacheflush.h>
-#include <asm/lmb.h>
 #include <asm/ppc-pci.h>
 
 #include "dart.h"
 
-extern int iommu_force_on;
-
 /* Physical base address and size of the DART table */
 unsigned long dart_tablebase; /* exported to htab_initialize */
 static unsigned long dart_tablesize;
 
 /* Virtual base address of the DART table */
 static u32 *dart_vbase;
+#ifdef CONFIG_PM
+static u32 *dart_copy;
+#endif
 
 /* Mapped base address for the dart */
 static unsigned int __iomem *dart;
@@ -69,13 +69,20 @@ static int iommu_table_dart_inited;
 static int dart_dirty;
 static int dart_is_u4;
 
+#define DART_U4_BYPASS_BASE	0x8000000000ull
+
 #define DBG(...)
 
+static DEFINE_SPINLOCK(invalidate_lock);
+
 static inline void dart_tlb_invalidate_all(void)
 {
 	unsigned long l = 0;
 	unsigned int reg, inv_bit;
 	unsigned long limit;
+	unsigned long flags;
+
+	spin_lock_irqsave(&invalidate_lock, flags);
 
 	DBG("dart: flush\n");
@@ -100,49 +107,97 @@ retry:
 	if (l == (1L << limit)) {
 		if (limit < 4) {
 			limit++;
-		        reg = DART_IN(DART_CNTL);
-		        reg &= ~inv_bit;
+			reg = DART_IN(DART_CNTL);
+			reg &= ~inv_bit;
 			DART_OUT(DART_CNTL, reg);
 			goto retry;
 		} else
 			panic("DART: TLB did not flush after waiting a long "
 			      "time. Buggy U3 ?");
 	}
+
+	spin_unlock_irqrestore(&invalidate_lock, flags);
+}
+
+static inline void dart_tlb_invalidate_one(unsigned long bus_rpn)
+{
+	unsigned int reg;
+	unsigned int l, limit;
+	unsigned long flags;
+
+	spin_lock_irqsave(&invalidate_lock, flags);
+
+	reg = DART_CNTL_U4_ENABLE | DART_CNTL_U4_IONE |
+		(bus_rpn & DART_CNTL_U4_IONE_MASK);
+	DART_OUT(DART_CNTL, reg);
+
+	limit = 0;
+wait_more:
+	l = 0;
+	while ((DART_IN(DART_CNTL) & DART_CNTL_U4_IONE) && l < (1L << limit)) {
+		rmb();
+		l++;
+	}
+
+	if (l == (1L << limit)) {
+		if (limit < 4) {
+			limit++;
+			goto wait_more;
+		} else
+			panic("DART: TLB did not flush after waiting a long "
+			      "time. Buggy U4 ?");
+	}
+
+	spin_unlock_irqrestore(&invalidate_lock, flags);
 }
 
 static void dart_flush(struct iommu_table *tbl)
 {
-	if (dart_dirty)
+	mb();
+	if (dart_dirty) {
 		dart_tlb_invalidate_all();
-	dart_dirty = 0;
+		dart_dirty = 0;
+	}
 }
 
-static void dart_build(struct iommu_table *tbl, long index,
+static int dart_build(struct iommu_table *tbl, long index,
 		       long npages, unsigned long uaddr,
-		       enum dma_data_direction direction)
+		       enum dma_data_direction direction,
+		       struct dma_attrs *attrs)
 {
 	unsigned int *dp;
 	unsigned int rpn;
+	long l;
 
 	DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr);
 
-	index <<= DART_PAGE_FACTOR;
-	npages <<= DART_PAGE_FACTOR;
-
 	dp = ((unsigned int*)tbl->it_base) + index;
 
-	/* On U3, all memory is contigous, so we can move this
+	/* On U3, all memory is contiguous, so we can move this
 	 * out of the loop.
 	 */
-	while (npages--) {
-		rpn = virt_to_abs(uaddr) >> DART_PAGE_SHIFT;
+	l = npages;
+	while (l--) {
+		rpn = __pa(uaddr) >> DART_PAGE_SHIFT;
 
 		*(dp++) = DARTMAP_VALID | (rpn & DARTMAP_RPNMASK);
 
 		uaddr += DART_PAGE_SIZE;
 	}
 
-	dart_dirty = 1;
+	/* make sure all updates have reached memory */
+	mb();
+	in_be32((unsigned __iomem *)dp);
+	mb();
+
+	if (dart_is_u4) {
+		rpn = index;
+		while (npages--)
+			dart_tlb_invalidate_one(rpn++);
+	} else {
+		dart_dirty = 1;
+	}
+	return 0;
 }
@@ -157,9 +212,6 @@ static void dart_free(struct iommu_table *tbl, long index, long npages)
 
 	DBG("dart: free at: %lx, %lx\n", index, npages);
 
-	index <<= DART_PAGE_FACTOR;
-	npages <<= DART_PAGE_FACTOR;
-
 	dp  = ((unsigned int *)tbl->it_base) + index;
 
 	while (npages--)
@@ -167,7 +219,7 @@ static void dart_free(struct iommu_table *tbl, long index, long npages)
 }
 
 
-static int dart_init(struct device_node *dart_node)
+static int __init dart_init(struct device_node *dart_node)
 {
 	unsigned int i;
 	unsigned long tmp, base, size;
@@ -193,17 +245,17 @@ static int dart_init(struct device_node *dart_node)
 	 * that to work around what looks like a problem with the HT bridge
 	 * prefetching into invalid pages and corrupting data
 	 */
-	tmp = lmb_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE);
+	tmp = memblock_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE);
 	dart_emptyval = DARTMAP_VALID | ((tmp >> DART_PAGE_SHIFT) &
 					 DARTMAP_RPNMASK);
 
 	/* Map in DART registers */
-	dart = ioremap(r.start, r.end - r.start + 1);
+	dart = ioremap(r.start, resource_size(&r));
 	if (dart == NULL)
 		panic("DART: Cannot map registers!");
 
 	/* Map in DART table */
-	dart_vbase = ioremap(virt_to_abs(dart_tablebase), dart_tablesize);
+	dart_vbase = ioremap(__pa(dart_tablebase), dart_tablesize);
 
 	/* Fill initial table */
 	for (i = 0; i < dart_tablesize/4; i++)
@@ -239,13 +291,14 @@ static void iommu_table_dart_setup(void)
 	iommu_table_dart.it_busno = 0;
 	iommu_table_dart.it_offset = 0;
 	/* it_size is in number of entries */
-	iommu_table_dart.it_size = (dart_tablesize / sizeof(u32)) >> DART_PAGE_FACTOR;
+	iommu_table_dart.it_size = dart_tablesize / sizeof(u32);
+	iommu_table_dart.it_page_shift = IOMMU_PAGE_SHIFT_4K;
 
 	/* Initialize the common IOMMU code */
 	iommu_table_dart.it_base = (unsigned long)dart_vbase;
 	iommu_table_dart.it_index = 0;
 	iommu_table_dart.it_blocksize = 1;
-	iommu_init_table(&iommu_table_dart);
+	iommu_init_table(&iommu_table_dart, -1);
 
 	/* Reserve the last page of the DART to avoid possible prefetch
 	 * past the DART mapped area
@@ -253,42 +306,70 @@ static void iommu_table_dart_setup(void)
 	set_bit(iommu_table_dart.it_size - 1, iommu_table_dart.it_map);
 }
 
-static void iommu_dev_setup_dart(struct pci_dev *dev)
+static void dma_dev_setup_dart(struct device *dev)
 {
-	struct device_node *dn;
-
 	/* We only have one iommu table on the mac for now, which makes
 	 * things simple. Setup all PCI devices to point to this table
-	 *
-	 * We must use pci_device_to_OF_node() to make sure that
-	 * we get the real "final" pointer to the device in the
-	 * pci_dev sysdata and not the temporary PHB one
 	 */
-	dn = pci_device_to_OF_node(dev);
-
-	if (dn)
-		PCI_DN(dn)->iommu_table = &iommu_table_dart;
+	if (get_dma_ops(dev) == &dma_direct_ops)
+		set_dma_offset(dev, DART_U4_BYPASS_BASE);
+	else
+		set_iommu_table_base(dev, &iommu_table_dart);
 }
 
-static void iommu_bus_setup_dart(struct pci_bus *bus)
+static void pci_dma_dev_setup_dart(struct pci_dev *dev)
 {
-	struct device_node *dn;
+	dma_dev_setup_dart(&dev->dev);
+}
 
+static void pci_dma_bus_setup_dart(struct pci_bus *bus)
+{
 	if (!iommu_table_dart_inited) {
 		iommu_table_dart_inited = 1;
 		iommu_table_dart_setup();
 	}
+}
 
-	dn = pci_bus_to_OF_node(bus);
-
-	if (dn)
-		PCI_DN(dn)->iommu_table = &iommu_table_dart;
+static bool dart_device_on_pcie(struct device *dev)
+{
+	struct device_node *np = of_node_get(dev->of_node);
+
+	while(np) {
+		if (of_device_is_compatible(np, "U4-pcie") ||
+		    of_device_is_compatible(np, "u4-pcie")) {
+			of_node_put(np);
+			return true;
+		}
+		np = of_get_next_parent(np);
+	}
+	return false;
 }
 
-static void iommu_dev_setup_null(struct pci_dev *dev) { }
-static void iommu_bus_setup_null(struct pci_bus *bus) { }
+static int dart_dma_set_mask(struct device *dev, u64 dma_mask)
+{
+	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+		return -EIO;
+
+	/* U4 supports a DART bypass, we use it for 64-bit capable
+	 * devices to improve performances. However, that only works
+	 * for devices connected to U4 own PCIe interface, not bridged
+	 * through hypertransport. We need the device to support at
+	 * least 40 bits of addresses.
+	 */
+	if (dart_device_on_pcie(dev) && dma_mask >= DMA_BIT_MASK(40)) {
+		dev_info(dev, "Using 64-bit DMA iommu bypass\n");
+		set_dma_ops(dev, &dma_direct_ops);
+	} else {
+		dev_info(dev, "Using 32-bit DMA via iommu\n");
+		set_dma_ops(dev, &dma_iommu_ops);
+	}
+	dma_dev_setup_dart(dev);
 
-void iommu_init_early_dart(void)
+	*dev->dma_mask = dma_mask;
+	return 0;
+}
+
+void __init iommu_init_early_dart(void)
 {
 	struct device_node *dn;
@@ -297,42 +378,95 @@ void iommu_init_early_dart(void)
 	if (dn == NULL) {
 		dn = of_find_compatible_node(NULL, "dart", "u4-dart");
 		if (dn == NULL)
-			goto bail;
+			return;	/* use default direct_dma_ops */
 		dart_is_u4 = 1;
 	}
 
+	/* Initialize the DART HW */
+	if (dart_init(dn) != 0)
+		goto bail;
+
 	/* Setup low level TCE operations for the core IOMMU code */
 	ppc_md.tce_build = dart_build;
 	ppc_md.tce_free = dart_free;
 	ppc_md.tce_flush = dart_flush;
 
-	/* Initialize the DART HW */
-	if (dart_init(dn) == 0) {
-		ppc_md.iommu_dev_setup = iommu_dev_setup_dart;
-		ppc_md.iommu_bus_setup = iommu_bus_setup_dart;
+	/* Setup bypass if supported */
+	if (dart_is_u4)
+		ppc_md.dma_set_mask = dart_dma_set_mask;
 
-		/* Setup pci_dma ops */
-		pci_iommu_init();
+	ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_dart;
+	ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_dart;
 
-		return;
-	}
+	/* Setup pci_dma ops */
+	set_pci_dma_ops(&dma_iommu_ops);
+	return;
 
  bail:
 	/* If init failed, use direct iommu and null setup functions */
-	ppc_md.iommu_dev_setup = iommu_dev_setup_null;
-	ppc_md.iommu_bus_setup = iommu_bus_setup_null;
+	ppc_md.pci_dma_dev_setup = NULL;
+	ppc_md.pci_dma_bus_setup = NULL;
 
 	/* Setup pci_dma ops */
-	pci_direct_iommu_init();
+	set_pci_dma_ops(&dma_direct_ops);
+}
+
+#ifdef CONFIG_PM
+static void iommu_dart_save(void)
+{
+	memcpy(dart_copy, dart_vbase, 2*1024*1024);
+}
+
+static void iommu_dart_restore(void)
+{
+	memcpy(dart_vbase, dart_copy, 2*1024*1024);
+	dart_tlb_invalidate_all();
 }
 
+static int __init iommu_init_late_dart(void)
+{
+	unsigned long tbasepfn;
+	struct page *p;
+
+	/* if no dart table exists then we won't need to save it
+	 * and the area has also not been reserved */
+	if (!dart_tablebase)
+		return 0;
+
+	tbasepfn = __pa(dart_tablebase) >> PAGE_SHIFT;
+	register_nosave_region_late(tbasepfn,
+				    tbasepfn + ((1<<24) >> PAGE_SHIFT));
+
+	/* For suspend we need to copy the dart contents because
+	 * it is not part of the regular mapping (see above) and
+	 * thus not saved automatically. The memory for this copy
+	 * must be allocated early because we need 2 MB. */
+	p = alloc_pages(GFP_KERNEL, 21 - PAGE_SHIFT);
+	BUG_ON(!p);
+	dart_copy = page_address(p);
+
+	ppc_md.iommu_save = iommu_dart_save;
+	ppc_md.iommu_restore = iommu_dart_restore;
+
+	return 0;
+}
+
+late_initcall(iommu_init_late_dart);
+#endif
 
 void __init alloc_dart_table(void)
 {
-	/* Only reserve DART space if machine has more than 2GB of RAM
+	/* Only reserve DART space if machine has more than 1GB of RAM
 	 * or if requested with iommu=on on cmdline.
+	 *
+	 * 1GB of RAM is picked as limit because some default devices
+	 * (i.e. Airport Extreme) have 30 bit address range limits.
 	 */
-	if (lmb_end_of_DRAM() <= 0x80000000ull && !iommu_force_on)
+
+	if (iommu_is_off)
+		return;
+
+	if (!iommu_force_on && memblock_end_of_DRAM() <= 0x40000000ull)
 		return;
 
 	/* 512 pages (2MB) is max DART tablesize. */
@@ -341,7 +475,12 @@ void __init alloc_dart_table(void)
 	 * will blow up an entire large page anyway in the kernel mapping
 	 */
 	dart_tablebase = (unsigned long)
-		abs_to_virt(lmb_alloc_base(1UL<<24, 1UL<<24, 0x80000000L));
+		__va(memblock_alloc_base(1UL<<24, 1UL<<24, 0x80000000L));
+	/*
+	 * The DART space is later unmapped from the kernel linear mapping and
+	 * accessing dart_tablebase during kmemleak scanning will fault.
+	 */
+	kmemleak_no_scan((void *)dart_tablebase);
 
 	printk(KERN_INFO "DART table allocated at: %lx\n", dart_tablebase);
 }
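
Both flush paths in the patched driver busy-wait on the DART control register rather than sleeping, doubling their spin budget on each retry before giving up. Below is a minimal, self-contained C sketch of that exponential-backoff poll pattern; fake_dart_cntl, DART_INV_BIT, dart_read_cntl() and wait_for_invalidate() are hypothetical stand-ins for the memory-mapped register, the U3/U4 invalidate bit, and the driver's DART_IN() accessor, not kernel API.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the memory-mapped DART control register;
 * the real driver reads and writes it with DART_IN()/DART_OUT(). */
static volatile uint32_t fake_dart_cntl;
#define DART_INV_BIT 0x1u	/* hypothetical "invalidate pending" bit */

static uint32_t dart_read_cntl(void)
{
	return fake_dart_cntl;
}

/* Poll until the hardware clears the invalidate bit, with an
 * exponentially growing spin budget: 1 << 0 iterations first, doubling
 * up to 1 << 4 before declaring the hardware wedged. This mirrors the
 * retry/limit logic in dart_tlb_invalidate_all() and
 * dart_tlb_invalidate_one() above. */
static void wait_for_invalidate(void)
{
	unsigned int limit = 0;

	for (;;) {
		unsigned long l = 0;

		while ((dart_read_cntl() & DART_INV_BIT) && l < (1UL << limit))
			l++;

		if (l < (1UL << limit))
			return;		/* bit cleared within the budget */

		if (limit++ >= 4) {
			/* the kernel driver panic()s at this point */
			fprintf(stderr, "DART: TLB did not flush\n");
			exit(1);
		}
	}
}

int main(void)
{
	fake_dart_cntl = 0;	/* hardware already idle */
	wait_for_invalidate();
	puts("invalidate completed");
	return 0;
}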

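The new dart_dma_set_mask() picks between two DMA paths: the U4's direct 64-bit bypass window (DART_U4_BYPASS_BASE) for devices on the U4's own PCIe bus that can address at least 40 bits, and the 32-bit IOMMU path for everything else, including devices bridged through HyperTransport. The standalone sketch below models just that decision; use_iommu_bypass() and its arguments are hypothetical helpers standing in for the dart_device_on_pcie() and mask checks, not kernel code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same definition the kernel uses for DMA_BIT_MASK(). */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Hypothetical model of the bypass decision in dart_dma_set_mask():
 * only a device on the U4's own PCIe interface that supports at least
 * 40 bits of DMA address may use the direct bypass window; all other
 * devices go through the 32-bit IOMMU. */
static bool use_iommu_bypass(bool on_u4_pcie, uint64_t dma_mask)
{
	return on_u4_pcie && dma_mask >= DMA_BIT_MASK(40);
}

int main(void)
{
	/* 64-bit capable device on U4 PCIe: bypass (prints 1) */
	printf("%d\n", use_iommu_bypass(true, DMA_BIT_MASK(64)));
	/* same device behind the HyperTransport bridge: IOMMU (prints 0) */
	printf("%d\n", use_iommu_bypass(false, DMA_BIT_MASK(64)));
	/* 32-bit-only device: IOMMU (prints 0) */
	printf("%d\n", use_iommu_bypass(true, DMA_BIT_MASK(32)));
	return 0;
}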