From 8fbac214e5c594a0c2fe78c14adf2cdbb1febc92 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Thu, 18 Jul 2013 17:20:33 +0100
Subject: ARM: 7787/1: virt: ensure visibility of __boot_cpu_mode

Secondary CPUs write to __boot_cpu_mode with caches disabled, and thus a
cached value of __boot_cpu_mode may be incoherent with that in memory.
This could lead to a failure to detect mismatched boot modes.

This patch adds flushing to ensure that writes by secondaries to
__boot_cpu_mode are made visible before we test against it.

Signed-off-by: Mark Rutland
Acked-by: Dave Martin
Acked-by: Marc Zyngier
Cc: Christoffer Dall
Signed-off-by: Russell King
---
 arch/arm/include/asm/virt.h | 12 ++++++++++++
 1 file changed, 12 insertions(+)

(limited to 'arch/arm/include/asm')

diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
index 50af92bac73..4371f45c578 100644
--- a/arch/arm/include/asm/virt.h
+++ b/arch/arm/include/asm/virt.h
@@ -29,6 +29,7 @@
 #define BOOT_CPU_MODE_MISMATCH	PSR_N_BIT
 
 #ifndef __ASSEMBLY__
+#include <asm/cacheflush.h>
 
 #ifdef CONFIG_ARM_VIRT_EXT
 /*
@@ -41,10 +42,21 @@
  */
 extern int __boot_cpu_mode;
 
+static inline void sync_boot_mode(void)
+{
+	/*
+	 * As secondaries write to __boot_cpu_mode with caches disabled, we
+	 * must flush the corresponding cache entries to ensure the visibility
+	 * of their writes.
+	 */
+	sync_cache_r(&__boot_cpu_mode);
+}
+
 void __hyp_set_vectors(unsigned long phys_vector_base);
 unsigned long __hyp_get_vectors(void);
 #else
 #define __boot_cpu_mode	(SVC_MODE)
+#define sync_boot_mode()
 #endif
 
 #ifndef ZIMAGE
--
cgit v1.2.3-18-g5258

From 1f49856bb029779d8f1b63517a3a3b34ffe672c7 Mon Sep 17 00:00:00 2001
From: Fabio Estevam
Date: Tue, 23 Jul 2013 15:13:06 +0100
Subject: ARM: 7789/1: Do not run dummy_flush_tlb_a15_erratum() on non-Cortex-A15

Commit 93dc688 (ARM: 7684/1: errata: Workaround for Cortex-A15 erratum
798181 (TLBI/DSB operations)) causes the following undefined instruction
error on a mx53 (Cortex-A8):

Internal error: Oops - undefined instruction: 0 [#1] SMP ARM
CPU: 0 PID: 275 Comm: modprobe Not tainted 3.11.0-rc2-next-20130722-00009-g9b0f371 #881
task: df46cc00 ti: df48e000 task.ti: df48e000
PC is at check_and_switch_context+0x17c/0x4d0
LR is at check_and_switch_context+0xdc/0x4d0

This problem happens because check_and_switch_context() calls
dummy_flush_tlb_a15_erratum() without checking if we are really running
on a Cortex-A15 or not.

To avoid this issue, only call dummy_flush_tlb_a15_erratum() inside
check_and_switch_context() if erratum_a15_798181() returns true, which
means that we are really running on a Cortex-A15.
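For context, the guarded call site described above lives outside the include/asm-filtered view shown here (presumably in arch/arm/mm/context.c); a minimal sketch of the intended pairing, illustrative rather than the verbatim hunk:

    /* check_and_switch_context(): only affected Cortex-A15 parts need the dummy TLBI */
    if (erratum_a15_798181())
        dummy_flush_tlb_a15_erratum();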
Signed-off-by: Fabio Estevam
Acked-by: Catalin Marinas
Reviewed-by: Roger Quadros
Signed-off-by: Russell King
---
 arch/arm/include/asm/tlbflush.h | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

(limited to 'arch/arm/include/asm')

diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index fdbb9e36974..f467e9b3f8d 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -443,7 +443,18 @@ static inline void local_flush_bp_all(void)
 		isb();
 }
 
+#include <asm/cputype.h>
 #ifdef CONFIG_ARM_ERRATA_798181
+static inline int erratum_a15_798181(void)
+{
+	unsigned int midr = read_cpuid_id();
+
+	/* Cortex-A15 r0p0..r3p2 affected */
+	if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
+		return 0;
+	return 1;
+}
+
 static inline void dummy_flush_tlb_a15_erratum(void)
 {
 	/*
@@ -453,6 +464,11 @@ static inline void dummy_flush_tlb_a15_erratum(void)
 	dsb();
 }
 #else
+static inline int erratum_a15_798181(void)
+{
+	return 0;
+}
+
 static inline void dummy_flush_tlb_a15_erratum(void)
 {
 }
--
cgit v1.2.3-18-g5258

From bdae73cd374e28db544fdd9b77de689a36e3c129 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Tue, 23 Jul 2013 16:15:36 +0100
Subject: ARM: 7790/1: Fix deferred mm switch on VIVT processors

As of commit b9d4d42ad9 (ARM: Remove __ARCH_WANT_INTERRUPTS_ON_CTXSW on
pre-ARMv6 CPUs), the mm switching on VIVT processors is done in the
finish_arch_post_lock_switch() function to avoid whole cache flushing
with interrupts disabled. The need for deferred mm switch is stored as
a thread flag (TIF_SWITCH_MM). However, with preemption enabled, we can
have another thread switch before finish_arch_post_lock_switch(). If
the new thread has the same mm as the previous 'next' thread, the
scheduler will not call switch_mm() and the TIF_SWITCH_MM flag won't be
set for the new thread.

This patch moves the switch pending flag to the mm_context_t structure
since this is specific to the mm rather than thread.

Signed-off-by: Catalin Marinas
Reported-by: Marc Kleine-Budde
Tested-by: Marc Kleine-Budde
Cc: # 3.5+
Signed-off-by: Russell King
---
 arch/arm/include/asm/mmu.h         |  2 ++
 arch/arm/include/asm/mmu_context.h | 20 ++++++++++++++++----
 arch/arm/include/asm/thread_info.h |  1 -
 3 files changed, 18 insertions(+), 5 deletions(-)

(limited to 'arch/arm/include/asm')

diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index e3d55547e75..d1b4998e4f4 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -6,6 +6,8 @@
 typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
 	atomic64_t id;
+#else
+	int switch_pending;
 #endif
 	unsigned int vmalloc_seq;
 } mm_context_t;
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index b5792b7fd8d..9b32f76bb0d 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -56,7 +56,7 @@ static inline void check_and_switch_context(struct mm_struct *mm,
 		 * on non-ASID CPUs, the old mm will remain valid until the
 		 * finish_arch_post_lock_switch() call.
*/ - set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM); + mm->context.switch_pending = 1; else cpu_switch_mm(mm->pgd, mm); } @@ -65,9 +65,21 @@ static inline void check_and_switch_context(struct mm_struct *mm, finish_arch_post_lock_switch static inline void finish_arch_post_lock_switch(void) { - if (test_and_clear_thread_flag(TIF_SWITCH_MM)) { - struct mm_struct *mm = current->mm; - cpu_switch_mm(mm->pgd, mm); + struct mm_struct *mm = current->mm; + + if (mm && mm->context.switch_pending) { + /* + * Preemption must be disabled during cpu_switch_mm() as we + * have some stateful cache flush implementations. Check + * switch_pending again in case we were preempted and the + * switch to this mm was already done. + */ + preempt_disable(); + if (mm->context.switch_pending) { + mm->context.switch_pending = 0; + cpu_switch_mm(mm->pgd, mm); + } + preempt_enable_no_resched(); } } diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 214d4158089..2b8114fcba0 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -156,7 +156,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, #define TIF_USING_IWMMXT 17 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ #define TIF_RESTORE_SIGMASK 20 -#define TIF_SWITCH_MM 22 /* deferred switch_mm */ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) -- cgit v1.2.3-18-g5258 From acfdd4b1f7590d02e9bae3b73bdbbc4a31b05d38 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 25 Jul 2013 11:44:48 +0100 Subject: ARM: 7791/1: a.out: remove partial a.out support a.out support on ARM requires that argc, argv and envp are passed in r0-r2 respectively, which requires hacking load_aout_binary to prevent argc being clobbered by the return code. Whilst mainline kernels do set the registers up in start_thread, the aout loader has never carried the hack in mainline. Initialising the registers in this way actually goes against the libc expectations for ELF binaries, where argc, argv and envp are passed on the stack, with r0 being used to hold a pointer to an exit function for cleaning up after the dynamic linker if required. If the pointer is NULL, then it is ignored. When execing an ELF binary, Linux currently zeroes r0, then sets it to argc and then finally clobbers it with the return value of the execve syscall, so we actually end up with: r0 = 0 stack[0] = argc r1 = stack[1] = argv r2 = stack[2] = envp libc treats r1 and r2 as undefined. The clobbering of r0 by sys_execve works for user-spawned threads, but when executing an ELF binary from a kernel thread (via call_usermodehelper), the execve is performed on the ret_from_fork path, which restores r0 from the saved pt_regs, resulting in argc being presented to the C library. This has horrible consequences when the application exits, since we have an exit function registered using argc, resulting in a jump to hyperspace. This patch solves the problem by removing the partial a.out support from arch/arm/ altogether. 
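A brief sketch of why the clobbered r0 is fatal: the C runtime treats a non-NULL r0 as the dynamic linker's cleanup hook and registers it to run at exit. The names and signature below follow the glibc convention and are an assumption for illustration, not taken from this patch:

    /* __libc_start_main()-style handling of the value arriving in r0 */
    if (rtld_fini != NULL)
        __cxa_atexit((void (*)(void *)) rtld_fini, NULL, NULL);
    /* so if r0 was clobbered with argc, exit() later branches to that "address" */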
Cc: Cc: Ashish Sangwan Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/a.out-core.h | 45 --------------------------------------- arch/arm/include/asm/processor.h | 4 ---- 2 files changed, 49 deletions(-) delete mode 100644 arch/arm/include/asm/a.out-core.h (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/a.out-core.h b/arch/arm/include/asm/a.out-core.h deleted file mode 100644 index 92f10cb5c70..00000000000 --- a/arch/arm/include/asm/a.out-core.h +++ /dev/null @@ -1,45 +0,0 @@ -/* a.out coredump register dumper - * - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. - */ - -#ifndef _ASM_A_OUT_CORE_H -#define _ASM_A_OUT_CORE_H - -#ifdef __KERNEL__ - -#include -#include - -/* - * fill in the user structure for an a.out core dump - */ -static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) -{ - struct task_struct *tsk = current; - - dump->magic = CMAGIC; - dump->start_code = tsk->mm->start_code; - dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1); - - dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT; - dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT; - dump->u_ssize = 0; - - memset(dump->u_debugreg, 0, sizeof(dump->u_debugreg)); - - if (dump->start_stack < 0x04000000) - dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT; - - dump->regs = *regs; - dump->u_fpvalid = dump_fpu (regs, &dump->u_fp); -} - -#endif /* __KERNEL__ */ -#endif /* _ASM_A_OUT_CORE_H */ diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h index 06e7d509eaa..413f3876341 100644 --- a/arch/arm/include/asm/processor.h +++ b/arch/arm/include/asm/processor.h @@ -54,7 +54,6 @@ struct thread_struct { #define start_thread(regs,pc,sp) \ ({ \ - unsigned long *stack = (unsigned long *)sp; \ memset(regs->uregs, 0, sizeof(regs->uregs)); \ if (current->personality & ADDR_LIMIT_32BIT) \ regs->ARM_cpsr = USR_MODE; \ @@ -65,9 +64,6 @@ struct thread_struct { regs->ARM_cpsr |= PSR_ENDSTATE; \ regs->ARM_pc = pc & ~1; /* pc */ \ regs->ARM_sp = sp; /* sp */ \ - regs->ARM_r2 = stack[2]; /* r2 (envp) */ \ - regs->ARM_r1 = stack[1]; /* r1 (argv) */ \ - regs->ARM_r0 = stack[0]; /* r0 (argc) */ \ nommu_start_thread(regs); \ }) -- cgit v1.2.3-18-g5258 From 067e710b9a982a92cc8294d7fa0f1e924c65bba1 Mon Sep 17 00:00:00 2001 From: Paul Walmsley Date: Tue, 30 Jul 2013 12:38:45 +0100 Subject: ARM: 7801/1: v6: prevent gcc 4.5 from reordering extended CP15 reads above is_smp() test Commit 621a0147d5c921f4cc33636ccd0602ad5d7cbfbc ("ARM: 7757/1: mm: don't flush icache in switch_mm with hardware broadcasting") breaks the boot on OMAP2430SDP with omap2plus_defconfig. Tracked to an undefined instruction abort from the CP15 read in cache_ops_need_broadcast(). It turns out that gcc 4.5 reorders the extended CP15 read above the is_smp() test. This breaks ARM1136 r0 cores, since they don't support several CP15 registers that later ARM cores do. ARM1136JF-S TRM section 3.2.1 "Register allocation" has the details. So mark the extended CP15 read as clobbering memory, which prevents the compiler from reordering it before the is_smp() test. 
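For reference, the affected helper looks roughly like the sketch below (paraphrased from arch/arm/include/asm/smp_plat.h, not quoted from this patch); the mrc emitted for read_cpuid_ext() must never be hoisted above the is_smp() check:

    static inline int cache_ops_need_broadcast(void)
    {
        if (!is_smp())
            return 0;   /* ARM1136 r0: must not reach the extended CP15 read */

        return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 1;
    }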
Russell states that the code generated from this approach is preferable to marking the inline asm as volatile. Remove the existing condition code clobber as it's obsolete, per Nico's post: http://www.spinics.net/lists/arm-kernel/msg261208.html This patch is a collaboration with Will Deacon and Russell King. Comments from Paul Walmsley: Russell, if you accept this one, might you also add Will's ack from the lists: Comments from Paul Walmsley: I'd also be obliged if you could add a Cc: line for Jonathan Austin, since he helped test: Signed-off-by: Paul Walmsley Cc: Will Deacon Cc: Nicolas Pitre Cc: Tony Lindgren Acked-by: Will Deacon Cc: Jonathan Austin Signed-off-by: Russell King --- arch/arm/include/asm/cputype.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h index 8c25dc4e985..9672e978d50 100644 --- a/arch/arm/include/asm/cputype.h +++ b/arch/arm/include/asm/cputype.h @@ -89,13 +89,18 @@ extern unsigned int processor_id; __val; \ }) +/* + * The memory clobber prevents gcc 4.5 from reordering the mrc before + * any is_smp() tests, which can cause undefined instruction aborts on + * ARM1136 r0 due to the missing extended CP15 registers. + */ #define read_cpuid_ext(ext_reg) \ ({ \ unsigned int __val; \ asm("mrc p15, 0, %0, c0, " ext_reg \ : "=r" (__val) \ : \ - : "cc"); \ + : "memory"); \ __val; \ }) -- cgit v1.2.3-18-g5258 From 48be69a026b2c17350a5ef18a1959a919f60be7d Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 24 Jul 2013 00:29:18 +0100 Subject: ARM: move signal handlers into a vdso-like page Move the signal handlers into a VDSO page rather than keeping them in the vectors page. This allows us to place them randomly within this page, and also map the page at a random location within userspace further protecting these code fragments from ROP attacks. The new VDSO page is also poisoned in the same way as the vector page. Signed-off-by: Russell King --- arch/arm/include/asm/elf.h | 4 ++++ arch/arm/include/asm/mmu.h | 1 + 2 files changed, 5 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h index 38050b1c480..9c9b30717fd 100644 --- a/arch/arm/include/asm/elf.h +++ b/arch/arm/include/asm/elf.h @@ -130,4 +130,8 @@ struct mm_struct; extern unsigned long arch_randomize_brk(struct mm_struct *mm); #define arch_randomize_brk arch_randomize_brk +#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 +struct linux_binprm; +int arch_setup_additional_pages(struct linux_binprm *, int); + #endif diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h index e3d55547e75..7345e37155d 100644 --- a/arch/arm/include/asm/mmu.h +++ b/arch/arm/include/asm/mmu.h @@ -8,6 +8,7 @@ typedef struct { atomic64_t id; #endif unsigned int vmalloc_seq; + unsigned long sigpage; } mm_context_t; #ifdef CONFIG_CPU_HAS_ASID -- cgit v1.2.3-18-g5258 From a5463cd3435475386cbbe7b06e01292ac169d36f Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 31 Jul 2013 21:58:56 +0100 Subject: ARM: make vectors page inaccessible from userspace If kuser helpers are not provided by the kernel, disable user access to the vectors page. With the kuser helpers gone, there is no reason for this page to be visible to userspace. 
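To illustrate what userspace loses when the page is hidden, here is the calling pattern for one of the kuser helpers in the vectors page, modeled on Documentation/arm/kernel_user_helpers.txt (a userspace-side sketch, not part of the diff):

    /* __kuser_cmpxchg lives at a fixed address in the vectors page */
    typedef int (__kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
    #define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)

    static int atomic_add_one(volatile int *v)
    {
        int old;
        do {
            old = *v;
        } while (__kuser_cmpxchg(old, old + 1, v));   /* returns 0 on success */
        return old + 1;
    }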
Signed-off-by: Russell King --- arch/arm/include/asm/page.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index 6363f3d1d50..4355f0ec44d 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h @@ -142,7 +142,9 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from, #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) extern void copy_page(void *to, const void *from); +#ifdef CONFIG_KUSER_HELPERS #define __HAVE_ARCH_GATE_AREA 1 +#endif #ifdef CONFIG_ARM_LPAE #include -- cgit v1.2.3-18-g5258 From 8c0cc8a5d90bc7373a7a9e7f7a40eb41f51e03fc Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 3 Aug 2013 10:39:51 +0100 Subject: ARM: fix nommu builds with 48be69a02 (ARM: move signal handlers into a vdso-like page) Olof reports that noMMU builds error out with: arch/arm/kernel/signal.c: In function 'setup_return': arch/arm/kernel/signal.c:413:25: error: 'mm_context_t' has no member named 'sigpage' This shows one of the evilnesses of IS_ENABLED(). Get rid of it here and replace it with #ifdef's - and as no noMMU platform can make use of sigpage, depend on CONIFG_MMU not CONFIG_ARM_MPU. Reported-by: Olof Johansson Signed-off-by: Russell King --- arch/arm/include/asm/elf.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h index 9c9b30717fd..56211f2084e 100644 --- a/arch/arm/include/asm/elf.h +++ b/arch/arm/include/asm/elf.h @@ -130,8 +130,10 @@ struct mm_struct; extern unsigned long arch_randomize_brk(struct mm_struct *mm); #define arch_randomize_brk arch_randomize_brk +#ifdef CONFIG_MMU #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 struct linux_binprm; int arch_setup_additional_pages(struct linux_binprm *, int); +#endif #endif -- cgit v1.2.3-18-g5258 From f0915781bd5edf78b1154e61efe962dc15872d09 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 11 Feb 2013 13:47:48 +0000 Subject: ARM: tlb: don't perform inner-shareable invalidation for local TLB ops Inner-shareable TLB invalidation is typically more expensive than local (non-shareable) invalidation, so performing the broadcasting for local_flush_tlb_* operations is a waste of cycles and needlessly clobbers entries in the TLBs of other CPUs. This patch introduces __flush_tlb_* versions for many of the TLB invalidation functions, which only respect inner-shareable variants of the invalidation instructions when presented with the TLB_V7_UIS_FULL flag. The local version is also inlined to prevent SMP_ON_UP kernels from missing flushes, where the __flush variant would be called with the UP flags. This gains us around 0.5% in hackbench scores for a dual-core A15, but I would expect this to improve as more cores (and clusters) are added to the equation. 
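A sketch of how the new split is expected to be consumed by the SMP broadcast wrappers (this paraphrases arch/arm/kernel/smp_tlb.c and is an assumption about usage, not part of this diff): local_flush_tlb_*() stays strictly non-shareable and local to this CPU, while __flush_tlb_*() issues the single inner-shareable operation on v7.

    void flush_tlb_mm(struct mm_struct *mm)
    {
        if (tlb_ops_need_broadcast())
            /* pre-v7 SMP: fall back to IPI-ing the other CPUs */
            on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
        else
            /* v7: one inner-shareable TLBI, no IPIs */
            __flush_tlb_mm(mm);
    }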
Reviewed-by: Catalin Marinas Reported-by: Albin Tonnerre Signed-off-by: Will Deacon --- arch/arm/include/asm/tlbflush.h | 138 ++++++++++++++++++++++++++++++++++------ 1 file changed, 118 insertions(+), 20 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index f467e9b3f8d..3316264916e 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -319,6 +319,16 @@ extern struct cpu_tlb_fns cpu_tlb; #define tlb_op(f, regs, arg) __tlb_op(f, "p15, 0, %0, " regs, arg) #define tlb_l2_op(f, regs, arg) __tlb_op(f, "p15, 1, %0, " regs, arg) +static inline void __local_flush_tlb_all(void) +{ + const int zero = 0; + const unsigned int __tlb_flag = __cpu_tlb_flags; + + tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero); + tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero); + tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero); +} + static inline void local_flush_tlb_all(void) { const int zero = 0; @@ -327,10 +337,8 @@ static inline void local_flush_tlb_all(void) if (tlb_flag(TLB_WB)) dsb(); - tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero); - tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero); - tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero); - tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero); + __local_flush_tlb_all(); + tlb_op(TLB_V7_UIS_FULL, "c8, c7, 0", zero); if (tlb_flag(TLB_BARRIER)) { dsb(); @@ -338,31 +346,69 @@ static inline void local_flush_tlb_all(void) } } -static inline void local_flush_tlb_mm(struct mm_struct *mm) +static inline void __flush_tlb_all(void) { const int zero = 0; - const int asid = ASID(mm); const unsigned int __tlb_flag = __cpu_tlb_flags; if (tlb_flag(TLB_WB)) dsb(); + __local_flush_tlb_all(); + tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero); + + if (tlb_flag(TLB_BARRIER)) { + dsb(); + isb(); + } +} + +static inline void __local_flush_tlb_mm(struct mm_struct *mm) +{ + const int zero = 0; + const int asid = ASID(mm); + const unsigned int __tlb_flag = __cpu_tlb_flags; + if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) { - if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) { + if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) { tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero); tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero); tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero); } - put_cpu(); } tlb_op(TLB_V6_U_ASID, "c8, c7, 2", asid); tlb_op(TLB_V6_D_ASID, "c8, c6, 2", asid); tlb_op(TLB_V6_I_ASID, "c8, c5, 2", asid); +} + +static inline void local_flush_tlb_mm(struct mm_struct *mm) +{ + const int asid = ASID(mm); + const unsigned int __tlb_flag = __cpu_tlb_flags; + + if (tlb_flag(TLB_WB)) + dsb(); + + __local_flush_tlb_mm(mm); + tlb_op(TLB_V7_UIS_ASID, "c8, c7, 2", asid); + + if (tlb_flag(TLB_BARRIER)) + dsb(); +} + +static inline void __flush_tlb_mm(struct mm_struct *mm) +{ + const unsigned int __tlb_flag = __cpu_tlb_flags; + + if (tlb_flag(TLB_WB)) + dsb(); + + __local_flush_tlb_mm(mm); #ifdef CONFIG_ARM_ERRATA_720789 - tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", zero); + tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", 0); #else - tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", asid); + tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", ASID(mm)); #endif if (tlb_flag(TLB_BARRIER)) @@ -370,16 +416,13 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm) } static inline void -local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) +__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) { const int zero = 0; const unsigned int __tlb_flag = 
__cpu_tlb_flags; uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); - if (tlb_flag(TLB_WB)) - dsb(); - if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) && cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr); @@ -392,6 +435,36 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", uaddr); tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", uaddr); tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", uaddr); +} + +static inline void +local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) +{ + const unsigned int __tlb_flag = __cpu_tlb_flags; + + uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); + + if (tlb_flag(TLB_WB)) + dsb(); + + __local_flush_tlb_page(vma, uaddr); + tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", uaddr); + + if (tlb_flag(TLB_BARRIER)) + dsb(); +} + +static inline void +__flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) +{ + const unsigned int __tlb_flag = __cpu_tlb_flags; + + uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); + + if (tlb_flag(TLB_WB)) + dsb(); + + __local_flush_tlb_page(vma, uaddr); #ifdef CONFIG_ARM_ERRATA_720789 tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 3", uaddr & PAGE_MASK); #else @@ -402,16 +475,11 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) dsb(); } -static inline void local_flush_tlb_kernel_page(unsigned long kaddr) +static inline void __local_flush_tlb_kernel_page(unsigned long kaddr) { const int zero = 0; const unsigned int __tlb_flag = __cpu_tlb_flags; - kaddr &= PAGE_MASK; - - if (tlb_flag(TLB_WB)) - dsb(); - tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr); tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr); tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr); @@ -421,6 +489,36 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr) tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", kaddr); tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", kaddr); tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", kaddr); +} + +static inline void local_flush_tlb_kernel_page(unsigned long kaddr) +{ + const unsigned int __tlb_flag = __cpu_tlb_flags; + + kaddr &= PAGE_MASK; + + if (tlb_flag(TLB_WB)) + dsb(); + + __local_flush_tlb_kernel_page(kaddr); + tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", kaddr); + + if (tlb_flag(TLB_BARRIER)) { + dsb(); + isb(); + } +} + +static inline void __flush_tlb_kernel_page(unsigned long kaddr) +{ + const unsigned int __tlb_flag = __cpu_tlb_flags; + + kaddr &= PAGE_MASK; + + if (tlb_flag(TLB_WB)) + dsb(); + + __local_flush_tlb_kernel_page(kaddr); tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", kaddr); if (tlb_flag(TLB_BARRIER)) { -- cgit v1.2.3-18-g5258 From 587b9b6487acddf777301c867c24f31fdf4ada4a Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 23 May 2013 18:29:18 +0100 Subject: ARM: tlb: don't bother with barriers for branch predictor maintenance Branch predictor maintenance is only required when we are either changing the kernel's view of memory (switching tables completely) or dealing with ASID rollover. Both of these use-cases require subsequent TLB invalidation, which has the relevant barrier instructions to ensure completion and visibility of the maintenance, so this patch removes the instruction barrier from [local_]flush_bp_all. 
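The pairing being relied upon looks like this at the ASID rollover site (a sketch based on arch/arm/mm/context.c; treat the exact location as an assumption):

    /* BP invalidation is always followed by a TLB flush whose dsb/isb
     * also publishes the branch predictor maintenance */
    if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
        local_flush_bp_all();
        local_flush_tlb_all();   /* ends with dsb + isb */
    }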
Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm/include/asm/tlbflush.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index 3316264916e..9b725d2bcb6 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -527,6 +527,10 @@ static inline void __flush_tlb_kernel_page(unsigned long kaddr) } } +/* + * Branch predictor maintenance is paired with full TLB invalidation, so + * there is no need for any barriers here. + */ static inline void local_flush_bp_all(void) { const int zero = 0; @@ -536,9 +540,6 @@ static inline void local_flush_bp_all(void) asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero)); else if (tlb_flag(TLB_V6_BP)) asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero)); - - if (tlb_flag(TLB_BARRIER)) - isb(); } #include -- cgit v1.2.3-18-g5258 From 2c813980c6113ac2c407fbed99f53242088c3038 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 18 Feb 2013 22:07:47 +0000 Subject: ARM: tlb: don't perform inner-shareable invalidation for local BP ops Now that the ASID allocator doesn't require inner-shareable maintenance, we can convert the local_bp_flush_all function to perform only non-shareable flushing, in a similar manner to the TLB invalidation routines. Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm/include/asm/tlbflush.h | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index 9b725d2bcb6..84718240340 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -531,17 +531,35 @@ static inline void __flush_tlb_kernel_page(unsigned long kaddr) * Branch predictor maintenance is paired with full TLB invalidation, so * there is no need for any barriers here. */ +static inline void __local_flush_bp_all(void) +{ + const int zero = 0; + const unsigned int __tlb_flag = __cpu_tlb_flags; + + if (tlb_flag(TLB_V6_BP)) + asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero)); +} + static inline void local_flush_bp_all(void) { const int zero = 0; const unsigned int __tlb_flag = __cpu_tlb_flags; + __local_flush_bp_all(); if (tlb_flag(TLB_V7_UIS_BP)) - asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero)); - else if (tlb_flag(TLB_V6_BP)) asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero)); } +static inline void __flush_bp_all(void) +{ + const int zero = 0; + const unsigned int __tlb_flag = __cpu_tlb_flags; + + __local_flush_bp_all(); + if (tlb_flag(TLB_V7_UIS_BP)) + asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero)); +} + #include #ifdef CONFIG_ARM_ERRATA_798181 static inline int erratum_a15_798181(void) -- cgit v1.2.3-18-g5258 From 3ea128065ed20d33bd02ff6dab689f88e38000be Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 10 May 2013 18:07:19 +0100 Subject: ARM: barrier: allow options to be passed to memory barrier instructions On ARMv7, the memory barrier instructions take an optional `option' field which can be used to constrain the effects of a memory barrier based on shareability and access type. This patch allows the caller to pass these options if required, and updates the smp_*() barriers to request inner-shareable barriers, affecting only stores for the _wmb variant. wmb() is also changed to use the -st version of dsb. 
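As an example of where the relaxed variants land, a generic producer/consumer pairing (not taken from this patch) now compiles to inner-shareable dmb instructions on ARMv7, with the store-only ishst form on the write side:

    struct msg { int payload; int ready; };

    static void publish(struct msg *m, int v)
    {
        m->payload = v;
        smp_wmb();          /* dmb(ishst): order the two stores */
        m->ready = 1;
    }

    static int consume(struct msg *m)
    {
        if (!m->ready)
            return -1;
        smp_rmb();          /* dmb(ish): read of ready before read of payload */
        return m->payload;
    }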
Reported-by: Albin Tonnerre Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm/include/asm/assembler.h | 4 ++-- arch/arm/include/asm/barrier.h | 32 ++++++++++++++++---------------- 2 files changed, 18 insertions(+), 18 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index a5fef710af3..fcc1b5bf697 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -220,9 +220,9 @@ #ifdef CONFIG_SMP #if __LINUX_ARM_ARCH__ >= 7 .ifeqs "\mode","arm" - ALT_SMP(dmb) + ALT_SMP(dmb ish) .else - ALT_SMP(W(dmb)) + ALT_SMP(W(dmb) ish) .endif #elif __LINUX_ARM_ARCH__ == 6 ALT_SMP(mcr p15, 0, r0, c7, c10, 5) @ dmb diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h index 8dcd9c702d9..60f15e274e6 100644 --- a/arch/arm/include/asm/barrier.h +++ b/arch/arm/include/asm/barrier.h @@ -14,27 +14,27 @@ #endif #if __LINUX_ARM_ARCH__ >= 7 -#define isb() __asm__ __volatile__ ("isb" : : : "memory") -#define dsb() __asm__ __volatile__ ("dsb" : : : "memory") -#define dmb() __asm__ __volatile__ ("dmb" : : : "memory") +#define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory") +#define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory") +#define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory") #elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6 -#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ +#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ : : "r" (0) : "memory") -#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ +#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ : : "r" (0) : "memory") -#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \ +#define dmb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \ : : "r" (0) : "memory") #elif defined(CONFIG_CPU_FA526) -#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ +#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ : : "r" (0) : "memory") -#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ +#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ : : "r" (0) : "memory") -#define dmb() __asm__ __volatile__ ("" : : : "memory") +#define dmb(x) __asm__ __volatile__ ("" : : : "memory") #else -#define isb() __asm__ __volatile__ ("" : : : "memory") -#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ +#define isb(x) __asm__ __volatile__ ("" : : : "memory") +#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ : : "r" (0) : "memory") -#define dmb() __asm__ __volatile__ ("" : : : "memory") +#define dmb(x) __asm__ __volatile__ ("" : : : "memory") #endif #ifdef CONFIG_ARCH_HAS_BARRIERS @@ -42,7 +42,7 @@ #elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) #define mb() do { dsb(); outer_sync(); } while (0) #define rmb() dsb() -#define wmb() mb() +#define wmb() do { dsb(st); outer_sync(); } while (0) #else #define mb() barrier() #define rmb() barrier() @@ -54,9 +54,9 @@ #define smp_rmb() barrier() #define smp_wmb() barrier() #else -#define smp_mb() dmb() -#define smp_rmb() dmb() -#define smp_wmb() dmb() +#define smp_mb() dmb(ish) +#define smp_rmb() smp_mb() +#define smp_wmb() dmb(ishst) #endif #define read_barrier_depends() do { } while(0) -- cgit v1.2.3-18-g5258 From 62cbbc42e0019aff6310259f275ae812463f8836 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 23 May 2013 18:43:58 +0100 Subject: ARM: 
tlb: reduce scope of barrier domains for TLB invalidation Our TLB invalidation routines may require a barrier before the maintenance (in order to ensure pending page table writes are visible to the hardware walker) and barriers afterwards (in order to ensure completion of the maintenance and visibility in the instruction stream). Whilst this is expensive, the cost can be reduced somewhat by reducing the scope of the barrier instructions: - The barrier before only needs to apply to stores (pte writes) - Local ops are required only to affect the non-shareable domain - Global ops are required only to affect the inner-shareable domain This patch makes these changes for the TLB flushing code. Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm/include/asm/tlbflush.h | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index 84718240340..38960264040 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -335,13 +335,13 @@ static inline void local_flush_tlb_all(void) const unsigned int __tlb_flag = __cpu_tlb_flags; if (tlb_flag(TLB_WB)) - dsb(); + dsb(nshst); __local_flush_tlb_all(); tlb_op(TLB_V7_UIS_FULL, "c8, c7, 0", zero); if (tlb_flag(TLB_BARRIER)) { - dsb(); + dsb(nsh); isb(); } } @@ -352,13 +352,13 @@ static inline void __flush_tlb_all(void) const unsigned int __tlb_flag = __cpu_tlb_flags; if (tlb_flag(TLB_WB)) - dsb(); + dsb(ishst); __local_flush_tlb_all(); tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero); if (tlb_flag(TLB_BARRIER)) { - dsb(); + dsb(ish); isb(); } } @@ -388,13 +388,13 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm) const unsigned int __tlb_flag = __cpu_tlb_flags; if (tlb_flag(TLB_WB)) - dsb(); + dsb(nshst); __local_flush_tlb_mm(mm); tlb_op(TLB_V7_UIS_ASID, "c8, c7, 2", asid); if (tlb_flag(TLB_BARRIER)) - dsb(); + dsb(nsh); } static inline void __flush_tlb_mm(struct mm_struct *mm) @@ -402,7 +402,7 @@ static inline void __flush_tlb_mm(struct mm_struct *mm) const unsigned int __tlb_flag = __cpu_tlb_flags; if (tlb_flag(TLB_WB)) - dsb(); + dsb(ishst); __local_flush_tlb_mm(mm); #ifdef CONFIG_ARM_ERRATA_720789 @@ -412,7 +412,7 @@ static inline void __flush_tlb_mm(struct mm_struct *mm) #endif if (tlb_flag(TLB_BARRIER)) - dsb(); + dsb(ish); } static inline void @@ -445,13 +445,13 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); if (tlb_flag(TLB_WB)) - dsb(); + dsb(nshst); __local_flush_tlb_page(vma, uaddr); tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", uaddr); if (tlb_flag(TLB_BARRIER)) - dsb(); + dsb(nsh); } static inline void @@ -462,7 +462,7 @@ __flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); if (tlb_flag(TLB_WB)) - dsb(); + dsb(ishst); __local_flush_tlb_page(vma, uaddr); #ifdef CONFIG_ARM_ERRATA_720789 @@ -472,7 +472,7 @@ __flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) #endif if (tlb_flag(TLB_BARRIER)) - dsb(); + dsb(ish); } static inline void __local_flush_tlb_kernel_page(unsigned long kaddr) @@ -498,13 +498,13 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr) kaddr &= PAGE_MASK; if (tlb_flag(TLB_WB)) - dsb(); + dsb(nshst); __local_flush_tlb_kernel_page(kaddr); tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", kaddr); if (tlb_flag(TLB_BARRIER)) { - dsb(); + dsb(nsh); isb(); } } @@ -516,13 +516,13 @@ static inline void 
__flush_tlb_kernel_page(unsigned long kaddr) kaddr &= PAGE_MASK; if (tlb_flag(TLB_WB)) - dsb(); + dsb(ishst); __local_flush_tlb_kernel_page(kaddr); tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", kaddr); if (tlb_flag(TLB_BARRIER)) { - dsb(); + dsb(ish); isb(); } } @@ -578,7 +578,7 @@ static inline void dummy_flush_tlb_a15_erratum(void) * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0. */ asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0)); - dsb(); + dsb(ish); } #else static inline int erratum_a15_798181(void) @@ -612,7 +612,7 @@ static inline void flush_pmd_entry(void *pmd) tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1 @ L2 flush_pmd", pmd); if (tlb_flag(TLB_WB)) - dsb(); + dsb(ishst); } static inline void clean_pmd_entry(void *pmd) -- cgit v1.2.3-18-g5258 From 73a6fdc48bf52e93c26874dc8c0f0f8d5585a809 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 13 May 2013 11:39:50 +0100 Subject: ARM: spinlock: use inner-shareable dsb variant prior to sev instruction When unlocking a spinlock, we use the sev instruction to signal other CPUs waiting on the lock. Since sev is not a memory access instruction, we require a dsb in order to ensure that the sev is not issued ahead of the store placing the lock in an unlocked state. However, as sev is only concerned with other processors in a multiprocessor system, we can restrict the scope of the preceding dsb to the inner-shareable domain. Furthermore, we can restrict the scope to consider only stores, since there are no independent loads on the unlock path. A side-effect of this change is that a spin_unlock operation no longer forces completion of pending TLB invalidation, something which we rely on when unlocking runqueues to ensure that CPU migration during TLB maintenance routines doesn't cause us to continue before the operation has completed. This patch adds the -ishst suffix to the ARMv7 definition of dsb_sev() and adds an inner-shareable dsb to the context-switch path when running a preemptible, SMP, v7 kernel. Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm/include/asm/spinlock.h | 2 +- arch/arm/include/asm/switch_to.h | 10 ++++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index f8b8965666e..2c1e748f52d 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -46,7 +46,7 @@ static inline void dsb_sev(void) { #if __LINUX_ARM_ARCH__ >= 7 __asm__ __volatile__ ( - "dsb\n" + "dsb ishst\n" SEV ); #else diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h index fa09e6b49bf..c99e259469f 100644 --- a/arch/arm/include/asm/switch_to.h +++ b/arch/arm/include/asm/switch_to.h @@ -3,6 +3,16 @@ #include +/* + * For v7 SMP cores running a preemptible kernel we may be pre-empted + * during a TLB maintenance operation, so execute an inner-shareable dsb + * to ensure that the maintenance completes in case we migrate to another + * CPU. + */ +#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7) +#define finish_arch_switch(prev) dsb(ish) +#endif + /* * switch_to(prev, next) should switch from task `prev' to `next' * `prev' will never be the same as `next'. 
schedule() itself -- cgit v1.2.3-18-g5258 From 6af396a6b6c698eb3834184518fc9a59bc22c817 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 12 Jun 2013 10:03:30 +0100 Subject: ARM: cacheflush: use -ishst dsb variant for ensuring flush completion flush_cache_vmap contains a dsb to ensure that any cacheflushing operations to flush out newly written ptes have completed. This patch adds the -ishst option to the dsb, since that is all that is required for completing cacheflushing in the inner-shareable domain. Signed-off-by: Will Deacon --- arch/arm/include/asm/cacheflush.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 17d0ae8672f..04d73262e00 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -352,7 +352,7 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end) * set_pte_at() called from vmap_pte_range() does not * have a DSB after cleaning the cache line. */ - dsb(); + dsb(ishst); } static inline void flush_cache_vunmap(unsigned long start, unsigned long end) -- cgit v1.2.3-18-g5258
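A sketch of the path this final change orders: vmap() installs new kernel ptes, and flush_cache_vmap() must make them observable before the mapping is dereferenced, possibly on another CPU (illustrative caller, not from the patch):

    /* e.g. a driver mapping scattered pages into one contiguous VA range */
    void *buf = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
    /* vmap() ends in flush_cache_vmap(start, end); its dsb(ishst) publishes
     * the new page-table writes to the inner-shareable domain */
    if (buf)
        memset(buf, 0, nr_pages * PAGE_SIZE);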