Diffstat (limited to 'arch/arm/include/asm/tlbflush.h')
-rw-r--r--  arch/arm/include/asm/tlbflush.h | 36
1 file changed, 27 insertions(+), 9 deletions(-)
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 33b546ae72d..ce7378ea15a 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -70,6 +70,10 @@
#undef _TLB
#undef MULTI_TLB
+#ifdef CONFIG_SMP_ON_UP
+#define MULTI_TLB 1
+#endif
+
#define v3_tlb_flags (TLB_V3_FULL | TLB_V3_PAGE)
#ifdef CONFIG_CPU_TLB_V3
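Why MULTI_TLB is forced here: with CONFIG_SMP_ON_UP a single kernel image must be able to perform either the SMP (inner-shareable) or the UP TLB maintenance, decided at boot, so the header can no longer collapse the TLB operations onto a single _TLB prefix at build time. Defining MULTI_TLB selects the indirect path through the cpu_tlb function-pointer table instead. The fragment below is only a rough sketch of that existing dispatch pattern, recalled from elsewhere in this header rather than taken from this diff; field and macro names may differ in detail.

/* Sketch (not part of the diff): with MULTI_TLB, TLB maintenance is routed
 * through a per-CPU-type function table filled in at boot instead of being
 * resolved to direct calls against one _TLB prefix. */
struct cpu_tlb_fns {
	void (*flush_user_range)(unsigned long, unsigned long,
				 struct vm_area_struct *);
	void (*flush_kern_range)(unsigned long, unsigned long);
	unsigned long tlb_flags;
};

#ifdef MULTI_TLB
#define __cpu_flush_user_tlb_range	cpu_tlb.flush_user_range
#define __cpu_flush_kern_tlb_range	cpu_tlb.flush_kern_range
#else
#define __cpu_flush_user_tlb_range	__glue(_TLB, _flush_user_tlb_range)
#define __cpu_flush_kern_tlb_range	__glue(_TLB, _flush_kern_tlb_range)
#endif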
@@ -185,17 +189,23 @@
# define v6wbi_always_flags (-1UL)
#endif
-#ifdef CONFIG_SMP
-#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \
+#define v7wbi_tlb_flags_smp (TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \
TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
-#else
-#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \
+#define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BTB | \
TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID)
-#endif
#ifdef CONFIG_CPU_TLB_V7
-# define v7wbi_possible_flags v7wbi_tlb_flags
-# define v7wbi_always_flags v7wbi_tlb_flags
+
+# ifdef CONFIG_SMP_ON_UP
+# define v7wbi_possible_flags (v7wbi_tlb_flags_smp | v7wbi_tlb_flags_up)
+# define v7wbi_always_flags (v7wbi_tlb_flags_smp & v7wbi_tlb_flags_up)
+# elif defined(CONFIG_SMP)
+# define v7wbi_possible_flags v7wbi_tlb_flags_smp
+# define v7wbi_always_flags v7wbi_tlb_flags_smp
+# else
+# define v7wbi_possible_flags v7wbi_tlb_flags_up
+# define v7wbi_always_flags v7wbi_tlb_flags_up
+# endif
# ifdef _TLB
# define MULTI_TLB 1
# else
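The split into v7wbi_tlb_flags_smp and v7wbi_tlb_flags_up feeds the header's usual possible/always machinery: "possible" flags are OR-ed together (any of them may be active at run time), while "always" flags are AND-ed (only the intersection is guaranteed), which is what lets maintenance code that can never run be discarded at compile time while SMP_ON_UP keeps both variants selectable. The lines below are an illustrative sketch of that pattern, paraphrased from the surrounding header rather than quoted from this diff; the exact names and the set of TLB types combined are assumptions.

/* Sketch (not part of the diff): combine the per-TLB-type flags, then test
 * them so that a flag no configured TLB type can ever set folds to a
 * compile-time constant and the guarded code is dropped. */
#define possible_tlb_flags	(v6wbi_possible_flags | v7wbi_possible_flags)
#define always_tlb_flags	(v6wbi_always_flags & v7wbi_always_flags)

#define tlb_flag(f)	((always_tlb_flags & (f)) || \
			 (__tlb_flag & possible_tlb_flags & (f)))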
@@ -560,12 +570,20 @@ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
#endif
/*
- * if PG_dcache_dirty is set for the page, we need to ensure that any
+ * If PG_dcache_clean is not set for the page, we need to ensure that any
* cache entries for the kernels virtual memory range are written
- * back to the page.
+ * back to the page. On ARMv6 and later, the cache coherency is handled via
+ * the set_pte_at() function.
*/
+#if __LINUX_ARM_ARCH__ < 6
extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
pte_t *ptep);
+#else
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+}
+#endif
#endif
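For pre-ARMv6 CPUs, update_mmu_cache() stays out of line because their potentially aliasing caches may still hold kernel-side lines for a page when it is first mapped into user space; from ARMv6 on that maintenance is performed in set_pte_at(), so the empty inline stub above is sufficient. The following is only a conceptual sketch of what the out-of-line version does, reconstructed from memory of the mm code of this era; the helper names and exact checks are assumptions, not part of this diff.

/* Conceptual sketch of the pre-ARMv6 update_mmu_cache(): write back any
 * kernel-mapping cache lines the first time the page is mapped into user
 * space, using PG_dcache_clean to record that this has already been done.
 * (Illustrative only; the real implementation lives in
 * arch/arm/mm/fault-armv.c.) */
static void sketch_update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(page_mapping(page), page);
}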