author    | Linus Torvalds <torvalds@linux-foundation.org> | 2010-03-01 09:15:15 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-03-01 09:15:15 -0800
commit    | ac0f6f927db539e03e1f3f61bcd4ed57d5cde7a9 (patch)
tree      | 816e5ac643b15c2050c64a7075f0f7e13d86ea09 /arch/arm/mm
parent    | b1bf9368407ae7e89d8a005bb40beb70a41df539 (diff)
parent    | 9f33be2c3a80bdc2cc08342dd77fac87652e0548 (diff)
Merge branch 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm
* 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm: (100 commits)
ARM: Eliminate decompressor -Dstatic= PIC hack
ARM: 5958/1: ARM: U300: fix inverted clk round rate
ARM: 5956/1: misplaced parentheses
ARM: 5955/1: ep93xx: move timer defines into core.c and document
ARM: 5954/1: ep93xx: move gpio interrupt support to gpio.c
ARM: 5953/1: ep93xx: fix broken build of clock.c
ARM: 5952/1: ARM: MM: Add ARM_L1_CACHE_SHIFT_6 for handle inside each ARCH Kconfig
ARM: 5949/1: NUC900 add gpio virtual memory map
ARM: 5948/1: Enable timer0 to time4 clock support for nuc910
ARM: 5940/2: ARM: MMCI: remove custom DBG macro and printk
ARM: make_coherent(): fix problems with highpte, part 2
MM: Pass a PTE pointer to update_mmu_cache() rather than the PTE itself
ARM: 5945/1: ep93xx: include correct irq.h in core.c
ARM: 5933/1: amba-pl011: support hardware flow control
ARM: 5930/1: Add PKMAP area description to memory.txt.
ARM: 5929/1: Add checks to detect overlap of memory regions.
ARM: 5928/1: Change type of VMALLOC_END to unsigned long.
ARM: 5927/1: Make delimiters of DMA area globally visibly.
ARM: 5926/1: Add "Virtual kernel memory..." printout.
ARM: 5920/1: OMAP4: Enable L2 Cache
...
Fix up trivial conflict in arch/arm/mach-mx25/clock.c
Diffstat (limited to 'arch/arm/mm')
41 files changed, 1143 insertions, 361 deletions
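The cache-*.S diffs that follow all make the same structural change: the per-CPU cache function table loses its dma_inv_range/dma_clean_range entries and gains direction-aware dma_map_area/dma_unmap_area hooks, which is what the .long lists at the bottom of each file encode. As a reading aid, a rough C sketch of that table is given below; the field names and signatures are assumed from struct cpu_cache_fns in asm/cacheflush.h of this era rather than quoted from the patch.

```c
#include <stddef.h>

/*
 * Rough sketch of the per-CPU cache function table populated by the
 * .long lists in the cache-*.S files below.  Field names and signatures
 * are assumptions based on struct cpu_cache_fns (asm/cacheflush.h) of
 * this period, not taken from the patch itself.
 */
struct cpu_cache_fns_sketch {
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long start, unsigned long end, unsigned int flags);
	void (*coherent_kern_range)(unsigned long start, unsigned long end);
	void (*coherent_user_range)(unsigned long start, unsigned long end);
	void (*flush_kern_dcache_area)(void *addr, size_t size);
	/* before this merge: dma_inv_range/dma_clean_range(start, end) */
	void (*dma_map_area)(const void *start, size_t size, int dir);		/* new */
	void (*dma_unmap_area)(const void *start, size_t size, int dir);	/* new */
	void (*dma_flush_range)(const void *start, const void *end);
};
```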
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index baf638487a2..c4ed9f93f64 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -399,7 +399,7 @@ config CPU_V6
 config CPU_32v6K
 	bool "Support ARM V6K processor extensions" if !SMP
 	depends on CPU_V6
-	default y if SMP && !ARCH_MX3
+	default y if SMP && !(ARCH_MX3 || ARCH_OMAP2)
 	help
 	  Say Y here if your ARMv6 processor supports the 'K' extension.
 	  This enables the kernel to use some instructions not present
@@ -410,7 +410,7 @@ config CPU_32v6K
 # ARMv7
 config CPU_V7
 	bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
-	select CPU_32v6K
+	select CPU_32v6K if !ARCH_OMAP2
 	select CPU_32v7
 	select CPU_ABRT_EV7
 	select CPU_PABRT_V7
@@ -754,7 +754,7 @@ config CACHE_FEROCEON_L2_WRITETHROUGH
 config CACHE_L2X0
 	bool "Enable the L2x0 outer cache controller"
 	depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
-		   REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK
+		   REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK || ARCH_OMAP4
 	default y
 	select OUTER_CACHE
 	help
@@ -779,5 +779,5 @@ config CACHE_XSC3L2
 
 config ARM_L1_CACHE_SHIFT
 	int
-	default 6 if ARCH_OMAP3 || ARCH_S5PC1XX
+	default 6 if ARM_L1_CACHE_SHIFT_6
 	default 5
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 62820eda84d..edddd66faac 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -901,11 +901,7 @@ static int __init alignment_init(void)
 #ifdef CONFIG_PROC_FS
 	struct proc_dir_entry *res;
 
-	res = proc_mkdir("cpu", NULL);
-	if (!res)
-		return -ENOMEM;
-
-	res = create_proc_entry("alignment", S_IWUSR | S_IRUGO, res);
+	res = create_proc_entry("cpu/alignment", S_IWUSR | S_IRUGO, NULL);
 	if (!res)
 		return -ENOMEM;
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S
index a89444a3c01..7148e53e607 100644
--- a/arch/arm/mm/cache-fa.S
+++ b/arch/arm/mm/cache-fa.S
@@ -157,7 +157,7 @@ ENTRY(fa_flush_kern_dcache_area)
  *	- start	- virtual start address
  *	- end	- virtual end address
  */
-ENTRY(fa_dma_inv_range)
+fa_dma_inv_range:
 	tst	r0, #CACHE_DLINESIZE - 1
 	bic	r0, r0, #CACHE_DLINESIZE - 1
 	mcrne	p15, 0, r0, c7, c14, 1		@ clean & invalidate D entry
@@ -180,7 +180,7 @@ ENTRY(fa_dma_inv_range)
  *	- start	- virtual start address
  *	- end	- virtual end address
  */
-ENTRY(fa_dma_clean_range)
+fa_dma_clean_range:
 	bic	r0, r0, #CACHE_DLINESIZE - 1
 1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	add	r0, r0, #CACHE_DLINESIZE
@@ -205,6 +205,30 @@ ENTRY(fa_dma_flush_range)
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
 	mov	pc, lr
 
+/*
+ *	dma_map_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
+ */
+ENTRY(fa_dma_map_area)
+	add	r1, r1, r0
+	cmp	r2, #DMA_TO_DEVICE
+	beq	fa_dma_clean_range
+	bcs	fa_dma_inv_range
+	b	fa_dma_flush_range
+ENDPROC(fa_dma_map_area)
+
+/*
+ *	dma_unmap_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
+ */
+ENTRY(fa_dma_unmap_area)
+	mov	pc, lr
+ENDPROC(fa_dma_unmap_area)
+
 	__INITDATA
 
 	.type	fa_cache_fns, #object
@@ -215,7 +239,7 @@ ENTRY(fa_cache_fns)
 	.long	fa_coherent_kern_range
 	.long	fa_coherent_user_range
 	.long	fa_flush_kern_dcache_area
-	.long	fa_dma_inv_range
-	.long	fa_dma_clean_range
+	.long	fa_dma_map_area
+	.long	fa_dma_unmap_area
 	.long	fa_dma_flush_range
 	.size	fa_cache_fns, . - fa_cache_fns
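The fa_dma_map_area entry added above dispatches on the DMA direction: clean for DMA_TO_DEVICE, invalidate for DMA_FROM_DEVICE, clean plus invalidate otherwise, while fa_dma_unmap_area is a no-op on this core. The same pattern recurs in the other cache-*.S backends below. A minimal standalone C sketch of that dispatch follows; the enum values mirror the kernel's dma_data_direction, but the *_range stubs here are illustrative stand-ins rather than the kernel routines.

```c
#include <stddef.h>

enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2 };

/* Stand-ins for the (now file-local) per-CPU cache maintenance routines. */
static void dma_clean_range(const void *start, const void *end) { /* write back dirty lines */ }
static void dma_inv_range(const void *start, const void *end)   { /* discard cached lines */ }
static void dma_flush_range(const void *start, const void *end) { /* clean, then invalidate */ }

/* Same decision the assembly makes with cmp/beq/bcs on r2 (the dir argument). */
static void dma_map_area(const void *start, size_t size, enum dma_data_direction dir)
{
	const char *end = (const char *)start + size;

	if (dir == DMA_TO_DEVICE)
		dma_clean_range(start, end);	/* CPU filled the buffer; push it to memory */
	else if (dir == DMA_FROM_DEVICE)
		dma_inv_range(start, end);	/* device will write; drop stale lines */
	else
		dma_flush_range(start, end);	/* DMA_BIDIRECTIONAL */
}
```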
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index cb8fc6573b1..07334632d3e 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -42,6 +42,57 @@ static inline void cache_sync(void)
 	cache_wait(base + L2X0_CACHE_SYNC, 1);
 }
 
+static inline void l2x0_clean_line(unsigned long addr)
+{
+	void __iomem *base = l2x0_base;
+	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
+	writel(addr, base + L2X0_CLEAN_LINE_PA);
+}
+
+static inline void l2x0_inv_line(unsigned long addr)
+{
+	void __iomem *base = l2x0_base;
+	cache_wait(base + L2X0_INV_LINE_PA, 1);
+	writel(addr, base + L2X0_INV_LINE_PA);
+}
+
+#ifdef CONFIG_PL310_ERRATA_588369
+static void debug_writel(unsigned long val)
+{
+	extern void omap_smc1(u32 fn, u32 arg);
+
+	/*
+	 * Texas Instrument secure monitor api to modify the
+	 * PL310 Debug Control Register.
+	 */
+	omap_smc1(0x100, val);
+}
+
+static inline void l2x0_flush_line(unsigned long addr)
+{
+	void __iomem *base = l2x0_base;
+
+	/* Clean by PA followed by Invalidate by PA */
+	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
+	writel(addr, base + L2X0_CLEAN_LINE_PA);
+	cache_wait(base + L2X0_INV_LINE_PA, 1);
+	writel(addr, base + L2X0_INV_LINE_PA);
+}
+#else
+
+/* Optimised out for non-errata case */
+static inline void debug_writel(unsigned long val)
+{
+}
+
+static inline void l2x0_flush_line(unsigned long addr)
+{
+	void __iomem *base = l2x0_base;
+	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+	writel(addr, base + L2X0_CLEAN_INV_LINE_PA);
+}
+#endif
+
 static inline void l2x0_inv_all(void)
 {
 	unsigned long flags;
@@ -62,23 +113,24 @@ static void l2x0_inv_range(unsigned long start, unsigned long end)
 	spin_lock_irqsave(&l2x0_lock, flags);
 	if (start & (CACHE_LINE_SIZE - 1)) {
 		start &= ~(CACHE_LINE_SIZE - 1);
-		cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
-		writel(start, base + L2X0_CLEAN_INV_LINE_PA);
+		debug_writel(0x03);
+		l2x0_flush_line(start);
+		debug_writel(0x00);
 		start += CACHE_LINE_SIZE;
 	}
 
 	if (end & (CACHE_LINE_SIZE - 1)) {
 		end &= ~(CACHE_LINE_SIZE - 1);
-		cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
-		writel(end, base + L2X0_CLEAN_INV_LINE_PA);
+		debug_writel(0x03);
+		l2x0_flush_line(end);
+		debug_writel(0x00);
 	}
 
 	while (start < end) {
 		unsigned long blk_end = start + min(end - start, 4096UL);
 
 		while (start < blk_end) {
-			cache_wait(base + L2X0_INV_LINE_PA, 1);
-			writel(start, base + L2X0_INV_LINE_PA);
+			l2x0_inv_line(start);
 			start += CACHE_LINE_SIZE;
 		}
 
@@ -103,8 +155,7 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
 		unsigned long blk_end = start + min(end - start, 4096UL);
 
 		while (start < blk_end) {
-			cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
-			writel(start, base + L2X0_CLEAN_LINE_PA);
+			l2x0_clean_line(start);
 			start += CACHE_LINE_SIZE;
 		}
 
@@ -128,11 +179,12 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
 	while (start < end) {
 		unsigned long blk_end = start + min(end - start, 4096UL);
 
+		debug_writel(0x03);
 		while (start < blk_end) {
-			cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
-			writel(start, base + L2X0_CLEAN_INV_LINE_PA);
+			l2x0_flush_line(start);
 			start += CACHE_LINE_SIZE;
 		}
+		debug_writel(0x00);
 
 		if (blk_end < end) {
 			spin_unlock_irqrestore(&l2x0_lock, flags);
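Two details in the cache-l2x0.c change are easy to miss: with CONFIG_PL310_ERRATA_588369 the atomic clean+invalidate-by-PA register is avoided and each line is cleaned and then invalidated separately, bracketed by debug_writel() calls that reach the PL310 Debug Control Register through the OMAP secure monitor (omap_smc1); and all three range operations keep the existing pattern of touching at most 4 KiB of lines per spinlock hold. A compact sketch of that batching pattern, with hypothetical stand-ins for the lock and the per-line operation (the 32-byte line size is assumed, as in this driver):

```c
#include <stddef.h>

#define CACHE_LINE_SIZE	32	/* assumed PL310 line size, as in this driver */
#define BLOCK_BYTES	4096UL	/* mirrors min(end - start, 4096UL) in the patch */

/* Hypothetical stand-ins for the real spinlock and per-line cache op. */
static void lock_irqsave(unsigned long *flags)     { (void)flags; }
static void unlock_irqrestore(unsigned long flags) { (void)flags; }
static void flush_one_line(unsigned long addr)     { (void)addr; }

/* Batching pattern used by l2x0_flush_range: never hold the lock (and thus
 * keep local IRQs off) for more than one 4 KiB block of lines at a time. */
static void flush_range_sketch(unsigned long start, unsigned long end)
{
	unsigned long flags;

	lock_irqsave(&flags);
	while (start < end) {
		unsigned long len = end - start;
		unsigned long blk_end = start + (len < BLOCK_BYTES ? len : BLOCK_BYTES);

		while (start < blk_end) {
			flush_one_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			unlock_irqrestore(flags);	/* let IRQs and other CPUs in */
			lock_irqsave(&flags);
		}
	}
	unlock_irqrestore(flags);
}
```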
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
index 2a482731ea3..c2ff3c599fe 100644
--- a/arch/arm/mm/cache-v3.S
+++ b/arch/arm/mm/cache-v3.S
@@ -84,20 +84,6 @@ ENTRY(v3_flush_kern_dcache_area)
 	/* FALLTHROUGH */
 
 /*
- *	dma_inv_range(start, end)
- *
- *	Invalidate (discard) the specified virtual address range.
- *	May not write back any entries.  If 'start' or 'end'
- *	are not cache line aligned, those lines must be written
- *	back.
- *
- *	- start  - virtual start address
- *	- end	 - virtual end address
- */
-ENTRY(v3_dma_inv_range)
-	/* FALLTHROUGH */
-
-/*
  *	dma_flush_range(start, end)
  *
  *	Clean and invalidate the specified virtual address range.
@@ -108,18 +94,29 @@ ENTRY(v3_dma_inv_range)
 ENTRY(v3_dma_flush_range)
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c0, 0		@ flush ID cache
+	mov	pc, lr
+
+/*
+ *	dma_unmap_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
+ */
+ENTRY(v3_dma_unmap_area)
+	teq	r2, #DMA_TO_DEVICE
+	bne	v3_dma_flush_range
 	/* FALLTHROUGH */
 
 /*
- *	dma_clean_range(start, end)
- *
- *	Clean (write back) the specified virtual address range.
- *
- *	- start  - virtual start address
- *	- end	 - virtual end address
+ *	dma_map_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
  */
-ENTRY(v3_dma_clean_range)
+ENTRY(v3_dma_map_area)
 	mov	pc, lr
+ENDPROC(v3_dma_unmap_area)
+ENDPROC(v3_dma_map_area)
 
 	__INITDATA
 
@@ -131,7 +128,7 @@ ENTRY(v3_cache_fns)
 	.long	v3_coherent_kern_range
 	.long	v3_coherent_user_range
 	.long	v3_flush_kern_dcache_area
-	.long	v3_dma_inv_range
-	.long	v3_dma_clean_range
+	.long	v3_dma_map_area
+	.long	v3_dma_unmap_area
 	.long	v3_dma_flush_range
 	.size	v3_cache_fns, . - v3_cache_fns
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index 5c7da3e372e..4810f7e3e81 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -94,20 +94,6 @@ ENTRY(v4_flush_kern_dcache_area)
 	/* FALLTHROUGH */
 
 /*
- *	dma_inv_range(start, end)
- *
- *	Invalidate (discard) the specified virtual address range.
- *	May not write back any entries.  If 'start' or 'end'
- *	are not cache line aligned, those lines must be written
- *	back.
- *
- *	- start  - virtual start address
- *	- end	 - virtual end address
- */
-ENTRY(v4_dma_inv_range)
-	/* FALLTHROUGH */
-
-/*
  *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
@@ -120,18 +106,29 @@ ENTRY(v4_dma_flush_range)
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c7, 0		@ flush ID cache
 #endif
+	mov	pc, lr
+
+/*
+ *	dma_unmap_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
+ */
+ENTRY(v4_dma_unmap_area)
+	teq	r2, #DMA_TO_DEVICE
+	bne	v4_dma_flush_range
 	/* FALLTHROUGH */
 
 /*
- *	dma_clean_range(start, end)
- *
- *	Clean (write back) the specified virtual address range.
- *
- *	- start  - virtual start address
- *	- end	 - virtual end address
+ *	dma_map_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
  */
-ENTRY(v4_dma_clean_range)
+ENTRY(v4_dma_map_area)
 	mov	pc, lr
+ENDPROC(v4_dma_unmap_area)
+ENDPROC(v4_dma_map_area)
 
 	__INITDATA
 
@@ -143,7 +140,7 @@ ENTRY(v4_cache_fns)
 	.long	v4_coherent_kern_range
 	.long	v4_coherent_user_range
 	.long	v4_flush_kern_dcache_area
-	.long	v4_dma_inv_range
-	.long	v4_dma_clean_range
+	.long	v4_dma_map_area
+	.long	v4_dma_unmap_area
 	.long	v4_dma_flush_range
 	.size	v4_cache_fns, . - v4_cache_fns
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index 3dbedf1ec0e..df8368afa10 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -173,7 +173,7 @@ ENTRY(v4wb_coherent_user_range)
  *	- start	- virtual start address
  *	- end	- virtual end address
  */
-ENTRY(v4wb_dma_inv_range)
+v4wb_dma_inv_range:
 	tst	r0, #CACHE_DLINESIZE - 1
 	bic	r0, r0, #CACHE_DLINESIZE - 1
 	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
@@ -194,7 +194,7 @@ ENTRY(v4wb_dma_inv_range)
  *	- start	- virtual start address
  *	- end	- virtual end address
  */
-ENTRY(v4wb_dma_clean_range)
+v4wb_dma_clean_range:
 	bic	r0, r0, #CACHE_DLINESIZE - 1
 1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	add	r0, r0, #CACHE_DLINESIZE
@@ -216,6 +216,30 @@ ENTRY(v4wb_dma_clean_range)
 	.globl	v4wb_dma_flush_range
 	.set	v4wb_dma_flush_range, v4wb_coherent_kern_range
 
+/*
+ *	dma_map_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
+ */
+ENTRY(v4wb_dma_map_area)
+	add	r1, r1, r0
+	cmp	r2, #DMA_TO_DEVICE
+	beq	v4wb_dma_clean_range
+	bcs	v4wb_dma_inv_range
+	b	v4wb_dma_flush_range
+ENDPROC(v4wb_dma_map_area)
+
+/*
+ *	dma_unmap_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
+ */
+ENTRY(v4wb_dma_unmap_area)
+	mov	pc, lr
+ENDPROC(v4wb_dma_unmap_area)
+
 	__INITDATA
 
 	.type	v4wb_cache_fns, #object
@@ -226,7 +250,7 @@ ENTRY(v4wb_cache_fns)
 	.long	v4wb_coherent_kern_range
 	.long	v4wb_coherent_user_range
 	.long	v4wb_flush_kern_dcache_area
-	.long	v4wb_dma_inv_range
-	.long	v4wb_dma_clean_range
+	.long	v4wb_dma_map_area
+	.long	v4wb_dma_unmap_area
 	.long	v4wb_dma_flush_range
 	.size	v4wb_cache_fns, . - v4wb_cache_fns
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index b3b7410270b..45c70312f43 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -142,23 +142,12 @@ ENTRY(v4wt_flush_kern_dcache_area)
  *	- start	- virtual start address
  *	- end	- virtual end address
  */
-ENTRY(v4wt_dma_inv_range)
+v4wt_dma_inv_range:
 	bic	r0, r0, #CACHE_DLINESIZE - 1
 1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
 	blo	1b
-	/* FALLTHROUGH */
-
-/*
- *	dma_clean_range(start, end)
- *
- *	Clean the specified virtual address range.
- *
- *	- start	- virtual start address
- *	- end	- virtual end address
- */
-ENTRY(v4wt_dma_clean_range)
 	mov	pc, lr
 
 /*
@@ -172,6 +161,29 @@ ENTRY(v4wt_dma_clean_range)
 	.globl	v4wt_dma_flush_range
 	.equ	v4wt_dma_flush_range, v4wt_dma_inv_range
 
+/*
+ *	dma_unmap_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
+ */
+ENTRY(v4wt_dma_unmap_area)
+	add	r1, r1, r0
+	teq	r2, #DMA_TO_DEVICE
+	bne	v4wt_dma_inv_range
+	/* FALLTHROUGH */
+
+/*
+ *	dma_map_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
+ */
+ENTRY(v4wt_dma_map_area)
+	mov	pc, lr
+ENDPROC(v4wt_dma_unmap_area)
+ENDPROC(v4wt_dma_map_area)
+
 	__INITDATA
 
 	.type	v4wt_cache_fns, #object
@@ -182,7 +194,7 @@ ENTRY(v4wt_cache_fns)
 	.long	v4wt_coherent_kern_range
 	.long	v4wt_coherent_user_range
 	.long	v4wt_flush_kern_dcache_area
-	.long	v4wt_dma_inv_range
-	.long	v4wt_dma_clean_range
+	.long	v4wt_dma_map_area
+	.long	v4wt_dma_unmap_area
 	.long	v4wt_dma_flush_range
 	.size	v4wt_cache_fns, . - v4wt_cache_fns
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 4ba0a24ce6f..9d89c67a1cc 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -195,7 +195,7 @@ ENTRY(v6_flush_kern_dcache_area)
  *	- start   - virtual start address of region
  *	- end     - virtual end address of region
  */
-ENTRY(v6_dma_inv_range)
+v6_dma_inv_range:
 	tst	r0, #D_CACHE_LINE_SIZE - 1
 	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
 #ifdef HARVARD_CACHE
@@ -228,7 +228,7 @@ ENTRY(v6_dma_inv_range)
  *	- start   - virtual start address of region
  *	- end     - virtual end address of region
  */
-ENTRY(v6_dma_clean_range)
+v6_dma_clean_range:
 	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
 1:
 #ifdef HARVARD_CACHE
@@ -263,6 +263,32 @@ ENTRY(v6_dma_flush_range)
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
 	mov	pc, lr
 
+/*
+ *	dma_map_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
+ */
+ENTRY(v6_dma_map_area)
+	add	r1, r1, r0
+	teq	r2, #DMA_FROM_DEVICE
+	beq	v6_dma_inv_range
+	b	v6_dma_clean_range
+ENDPROC(v6_dma_map_area)
+
+/*
+ *	dma_unmap_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
+ */
+ENTRY(v6_dma_unmap_area)
+	add	r1, r1, r0
+	teq	r2, #DMA_TO_DEVICE
+	bne	v6_dma_inv_range
+	mov	pc, lr
+ENDPROC(v6_dma_unmap_area)
+
 	__INITDATA
 
 	.type	v6_cache_fns, #object
@@ -273,7 +299,7 @@ ENTRY(v6_cache_fns)
 	.long	v6_coherent_kern_range
 	.long	v6_coherent_user_range
 	.long	v6_flush_kern_dcache_area
-	.long	v6_dma_inv_range
-	.long	v6_dma_clean_range
+	.long	v6_dma_map_area
+	.long	v6_dma_unmap_area
 	.long	v6_dma_flush_range
 	.size	v6_cache_fns, . - v6_cache_fns
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 9073db849fb..bcd64f26587 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -216,7 +216,7 @@ ENDPROC(v7_flush_kern_dcache_area)
  *	- start   - virtual start address of region
  *	- end     - virtual end address of region
  */
-ENTRY(v7_dma_inv_range)
+v7_dma_inv_range:
 	dcache_line_size r2, r3
 	sub	r3, r2, #1
 	tst	r0, r3
@@ -240,7 +240,7 @@ ENDPROC(v7_dma_inv_range)
  *	- start   - virtual start address of region
  *	- end     - virtual end address of region
  */
-ENTRY(v7_dma_clean_range)
+v7_dma_clean_range:
 	dcache_line_size r2, r3
 	sub	r3, r2, #1
 	bic	r0, r0, r3
@@ -271,6 +271,32 @@ ENTRY(v7_dma_flush_range)
 	mov	pc, lr
 ENDPROC(v7_dma_flush_range)
 
+/*
+ *	dma_map_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
+ */
+ENTRY(v7_dma_map_area)
+	add	r1, r1, r0
+	teq	r2, #DMA_FROM_DEVICE
+	beq	v7_dma_inv_range
+	b	v7_dma_clean_range
+ENDPROC(v7_dma_map_area)
+
+/*
+ *	dma_unmap_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
+ */
+ENTRY(v7_dma_unmap_area)
+	add	r1, r1, r0
+	teq	r2, #DMA_TO_DEVICE
+	bne	v7_dma_inv_range
+	mov	pc, lr
+ENDPROC(v7_dma_unmap_area)
+
 	__INITDATA
 
 	.type	v7_cache_fns, #object
@@ -281,7 +307,7 @@ ENTRY(v7_cache_fns)
 	.long	v7_coherent_kern_range
 	.long	v7_coherent_user_range
 	.long	v7_flush_kern_dcache_area
-	.long	v7_dma_inv_range
-	.long	v7_dma_clean_range
+	.long	v7_dma_map_area
+	.long	v7_dma_unmap_area
 	.long	v7_dma_flush_range
 	.size	v7_cache_fns, . - v7_cache_fns
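On ARMv6/v7 the split is asymmetric: v6_dma_map_area/v7_dma_map_area invalidate for DMA_FROM_DEVICE and clean for everything else, while the unmap hooks invalidate again for any direction other than DMA_TO_DEVICE, presumably because lines can be pulled back into the cache while the device owns the buffer. A sketch of that pairing, reusing the enum and *_range stand-ins from the snippet after the cache-fa.S diff:

```c
/* Builds on the enum dma_data_direction and the *_range stand-ins defined
 * in the earlier dma_map_area sketch. */
static void v6_v7_dma_map_area_sketch(const void *start, size_t size,
				      enum dma_data_direction dir)
{
	const char *end = (const char *)start + size;

	if (dir == DMA_FROM_DEVICE)
		dma_inv_range(start, end);	/* device will write the buffer */
	else
		dma_clean_range(start, end);	/* TO_DEVICE and BIDIRECTIONAL */
}

static void v6_v7_dma_unmap_area_sketch(const void *start, size_t size,
					 enum dma_data_direction dir)
{
	const char *end = (const char *)start + size;

	if (dir != DMA_TO_DEVICE)
		dma_inv_range(start, end);	/* drop lines fetched during the DMA */
}
```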
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index a9e22e31eaa..b0ee9ba3cfa 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -10,12 +10,17 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/percpu.h>
 
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 
 static DEFINE_SPINLOCK(cpu_asid_lock);
 unsigned int cpu_last_asid = ASID_FIRST_VERSION;
+#ifdef CONFIG_SMP
+DEFINE_PER_CPU(struct mm_struct *, current_mm);
+#endif
 
 /*
  * We fork()ed a process, and we need a new context for the child
@@ -26,13 +31,109 @@ unsigned int cpu_last_asid = ASID_FIRST_VERSION;
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	mm->context.id = 0;
+	spin_lock_init(&mm->context.id_lock);
 }
 
+static void flush_context(void)
+{
+	/* set the reserved ASID before flushing the TLB */
+	asm("mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (0));
+	isb();
+	local_flush_tlb_all();
+	if (icache_is_vivt_asid_tagged()) {
+		__flush_icache_all();
+		dsb();
+	}
+}
+
+#ifdef CONFIG_SMP
+
+static void set_mm_context(struct mm_struct *mm, unsigned int asid)
+{
+	unsigned long flags;
+
+	/*
+	 * Locking needed for multi-threaded applications where the
+	 * same mm->context.id could be set from different CPUs during
+	 * the broadcast. This function is also called via IPI so the
+	 * mm->context.id_lock has to be IRQ-safe.
+	 */
+	spin_lock_irqsave(&mm->context.id_lock, flags);
+	if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
+		/*
+		 * Old version of ASID found. Set the new one and
+		 * reset mm_cpumask(mm).
+		 */
+		mm->context.id = asid;
+		cpumask_clear(mm_cpumask(mm));
+	}
+	spin_unlock_irqrestore(&mm->context.id_lock, flags);
+
+	/*
+	 * Set the mm_cpumask(mm) bit for the current CPU.
+	 */
+	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+}
+
+/*
+ * Reset the ASID on the current CPU. This function call is broadcast
+ * from the CPU handling the ASID rollover and holding cpu_asid_lock.
+ */
+static void reset_context(void *info)
+{
+	unsigned int asid;
+	unsigned int cpu = smp_processor_id();
+	struct mm_struct *mm = per_cpu(current_mm, cpu);
+
+	/*
+	 * Check if a current_mm was set on this CPU as it might still
+	 * be in the early booting stages and using the reserved ASID.
+	 */
+	if (!mm)
+		return;
+
+	smp_rmb();
+	asid = cpu_last_asid + cpu + 1;
+
+	flush_context();
+	set_mm_context(mm, asid);
+
+	/* set the new ASID */
+	asm("mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (mm->context.id));
+	isb();
+}
+
+#else
+
+static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
+{
+	mm->context.id = asid;
+	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
+}
+
+#endif
+
 void __new_context(struct mm_struct *mm)
 {
 	unsigned int asid;
 
 	spin_lock(&cpu_asid_lock);
+#ifdef CONFIG_SMP
+	/*
+	 * Check the ASID again, in case the change was broadcast from
+	 * another CPU before we acquired the lock.
+	 */
+	if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
+		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+		spin_unlock(&cpu_asid_lock);
+		return;
+	}
+#endif
+	/*
+	 * At this point, it is guaranteed that the current mm (with
+	 * an old ASID) isn't active on any other CPU since the ASIDs
+	 * are changed simultaneously via IPI.
+	 */
 	asid = ++cpu_last_asid;
 	if (asid == 0)
 		asid = cpu_last_asid = ASID_FIRST_VERSION;
@@ -42,20 +143,15 @@ void __new_context(struct mm_struct *mm)
 	 * to start a new version and flush the TLB.
 	 */
 	if (unlikely((asid & ~ASID_MASK) == 0)) {
-		asid = ++cpu_last_asid;
-		/* set the reserved ASID before flushing the TLB */
-		asm("mcr	p15, 0, %0, c13, c0, 1	@ set reserved context ID\n"
-		    :
-		    : "r" (0));
-		isb();
-		flush_tlb_all();
-		if (icache_is_vivt_asid_tagged()) {
-			__flush_icache_all();
-			dsb();
-		}
+		asid = cpu_last_asid + smp_processor_id() + 1;
+		flush_context();
+#ifdef CONFIG_SMP
+		smp_w
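The context.c rework moves the rollover path into flush_context()/set_mm_context() and, on SMP, broadcasts reset_context() so that each CPU ends up with cpu_last_asid + smp_processor_id() + 1. The test that decides whether an mm still carries an ASID from an older generation is ((mm->context.id ^ cpu_last_asid) >> ASID_BITS): the bits above ASID_BITS act as a generation counter, the low bits are the hardware ASID. A small standalone demonstration of that test follows; ASID_BITS == 8 is assumed (the ARMv6/v7 hardware ASID width), and the sample values are made up for illustration.

```c
#include <stdio.h>

#define ASID_BITS 8	/* assumed: 8-bit hardware ASID on ARMv6/v7 */

/* Non-zero when context_id was allocated in an older generation than
 * cpu_last_asid, i.e. the mm needs a fresh ASID. */
static int asid_is_stale(unsigned int context_id, unsigned int cpu_last_asid)
{
	return ((context_id ^ cpu_last_asid) >> ASID_BITS) != 0;
}

int main(void)
{
	unsigned int cpu_last_asid = (3u << ASID_BITS) | 0x42;	/* generation 3 */
	unsigned int same_gen = (3u << ASID_BITS) | 0x07;	/* generation 3, ASID 7 */
	unsigned int old_gen  = (2u << ASID_BITS) | 0x07;	/* generation 2, ASID 7 */

	printf("same generation stale? %d\n", asid_is_stale(same_gen, cpu_last_asid));	/* 0 */
	printf("older generation stale? %d\n", asid_is_stale(old_gen, cpu_last_asid));	/* 1 */
	return 0;
}
```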