 arch/arm/Kconfig                 |  13
 arch/arm/include/asm/assembler.h |  27
 arch/arm/include/asm/smp_mpidr.h |   7
 arch/arm/include/asm/smp_plat.h  |  15
 arch/arm/include/asm/tlbflush.h  |  24
 arch/arm/kernel/entry-armv.S     |  11
 arch/arm/kernel/head.S           |  50
 arch/arm/kernel/setup.c          |   4
 arch/arm/kernel/vmlinux.lds.S    |  11
 arch/arm/mm/cache-v7.S           |  14
 arch/arm/mm/mmu.c                |  46
 arch/arm/mm/proc-v6.S            |  43
 arch/arm/mm/proc-v7.S            |  41
 arch/arm/mm/tlb-v7.S             |  33
 14 files changed, 237 insertions(+), 102 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 6a939e64cbd..f6cdc21b562 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1191,6 +1191,19 @@ config SMP
If you don't know what to do here, say N.
+config SMP_ON_UP
+ bool "Allow booting SMP kernel on uniprocessor systems (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ depends on SMP && !XIP_KERNEL && !THUMB2_KERNEL
+ default y
+ help
+ SMP kernels contain instructions which fail on non-SMP processors.
+ Enabling this option allows the kernel to modify itself to make
+ these instructions safe. Disabling it allows about 1K of space
+ savings.
+
+ If you don't know what to do here, say Y.
+
config HAVE_ARM_SCU
bool
depends on SMP
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 6e8f05c8a1c..062b58c029a 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -154,16 +154,39 @@
.long 9999b,9001f; \
.popsection
+#ifdef CONFIG_SMP
+#define ALT_SMP(instr...) \
+9998: instr
+#define ALT_UP(instr...) \
+ .pushsection ".alt.smp.init", "a" ;\
+ .long 9998b ;\
+ instr ;\
+ .popsection
+#define ALT_UP_B(label) \
+ .equ up_b_offset, label - 9998b ;\
+ .pushsection ".alt.smp.init", "a" ;\
+ .long 9998b ;\
+ b . + up_b_offset ;\
+ .popsection
+#else
+#define ALT_SMP(instr...)
+#define ALT_UP(instr...) instr
+#define ALT_UP_B(label) b label
+#endif
+
/*
* SMP data memory barrier
*/
.macro smp_dmb
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
- dmb
+ ALT_SMP(dmb)
#elif __LINUX_ARM_ARCH__ == 6
- mcr p15, 0, r0, c7, c10, 5 @ dmb
+ ALT_SMP(mcr p15, 0, r0, c7, c10, 5) @ dmb
+#else
+#error Incompatible SMP platform
#endif
+ ALT_UP(nop)
#endif
.endm
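
Each ALT_SMP/ALT_UP pair leaves the SMP instruction in the kernel text and files an eight-byte record in the .alt.smp.init section: the address of that instruction, followed by the UP-safe instruction to write over it when booting on a uniprocessor. As C data the record would look roughly like this (a sketch only -- the kernel keeps this layout implicit in the section, and the struct and field names here are invented):

	#include <linux/types.h>

	/* One .alt.smp.init entry as emitted by ALT_UP (illustrative) */
	struct alt_smp_entry {
		u32 *smp_insn;	/* address of the instruction ALT_SMP emitted (label 9998) */
		u32  up_insn;	/* 32-bit encoding of the UP replacement */
	};
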
diff --git a/arch/arm/include/asm/smp_mpidr.h b/arch/arm/include/asm/smp_mpidr.h
index 7da7105e83e..6a9307d6490 100644
--- a/arch/arm/include/asm/smp_mpidr.h
+++ b/arch/arm/include/asm/smp_mpidr.h
@@ -4,7 +4,12 @@
#define hard_smp_processor_id() \
({ \
unsigned int cpunum; \
- __asm__("mrc p15, 0, %0, c0, c0, 5\n" \
+ __asm__("\n" \
+ "1: mrc p15, 0, %0, c0, c0, 5\n" \
+ " .pushsection \".alt.smp.init\", \"a\"\n"\
+ " .long 1b\n" \
+ " mov %0, #0\n" \
+ " .popsection" \
: "=r" (cpunum)); \
cpunum &= 0x0F; \
})
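
The same record format works from C through inline assembly: the local label 1 marks the mrc, and the .alt.smp.init entry swaps it for mov %0, #0 on uniprocessor boots. Once patched, the macro effectively reduces to the following (illustration only, not a definition the kernel carries):

	/* effective UP behaviour after the boot-time fixup (sketch) */
	#define hard_smp_processor_id()	0	/* the boot CPU is the only CPU */
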
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index e6215305544..7de5aa56c18 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -18,4 +18,19 @@ static inline int cache_ops_need_broadcast(void)
return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 1;
}
+/*
+ * Return true if we are running on an SMP platform
+ */
+static inline bool is_smp(void)
+{
+#ifndef CONFIG_SMP
+ return false;
+#elif defined(CONFIG_SMP_ON_UP)
+ extern unsigned int smp_on_up;
+ return !!smp_on_up;
+#else
+ return true;
+#endif
+}
+
#endif
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 33b546ae72d..cf2f018492e 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -70,6 +70,10 @@
#undef _TLB
#undef MULTI_TLB
+#ifdef CONFIG_SMP_ON_UP
+#define MULTI_TLB 1
+#endif
+
#define v3_tlb_flags (TLB_V3_FULL | TLB_V3_PAGE)
#ifdef CONFIG_CPU_TLB_V3
@@ -185,17 +189,23 @@
# define v6wbi_always_flags (-1UL)
#endif
-#ifdef CONFIG_SMP
-#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \
+#define v7wbi_tlb_flags_smp (TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \
TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
-#else
-#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \
+#define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BTB | \
TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID)
-#endif
#ifdef CONFIG_CPU_TLB_V7
-# define v7wbi_possible_flags v7wbi_tlb_flags
-# define v7wbi_always_flags v7wbi_tlb_flags
+
+# ifdef CONFIG_SMP_ON_UP
+# define v7wbi_possible_flags (v7wbi_tlb_flags_smp | v7wbi_tlb_flags_up)
+# define v7wbi_always_flags (v7wbi_tlb_flags_smp & v7wbi_tlb_flags_up)
+# elif defined(CONFIG_SMP)
+# define v7wbi_possible_flags v7wbi_tlb_flags_smp
+# define v7wbi_always_flags v7wbi_tlb_flags_smp
+# else
+# define v7wbi_possible_flags v7wbi_tlb_flags_up
+# define v7wbi_always_flags v7wbi_tlb_flags_up
+# endif
# ifdef _TLB
# define MULTI_TLB 1
# else
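
Splitting v7wbi_tlb_flags into _smp and _up variants keeps the existing possible/always machinery working: under SMP_ON_UP the possible set is the union of the two and the always set is their intersection, so flag tests fall back to a runtime check only where the two configurations actually differ. The test in asm/tlbflush.h behaves along these lines (a sketch of the existing tlb_flag() pattern; __tlb_flag holds the active TLB type's flags at run time):

	/* Flags in the "always" set are compile-time true; flags outside
	 * the "possible" set are compile-time false; only the remainder
	 * cost a runtime test. */
	#define tlb_flag(f) \
		((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f)))
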
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index bb8e93a7640..c09e3573c5d 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -46,7 +46,8 @@
* this macro assumes that irqstat (r6) and base (r5) are
* preserved from get_irqnr_and_base above
*/
- test_for_ipi r0, r6, r5, lr
+ ALT_SMP(test_for_ipi r0, r6, r5, lr)
+ ALT_UP_B(9997f)
movne r0, sp
adrne lr, BSYM(1b)
bne do_IPI
@@ -57,6 +58,7 @@
adrne lr, BSYM(1b)
bne do_local_timer
#endif
+9997:
#endif
.endm
@@ -965,11 +967,8 @@ kuser_cmpxchg_fixup:
beq 1b
rsbs r0, r3, #0
/* beware -- each __kuser slot must be 8 instructions max */
-#ifdef CONFIG_SMP
- b __kuser_memory_barrier
-#else
- usr_ret lr
-#endif
+ ALT_SMP(b __kuser_memory_barrier)
+ ALT_UP(usr_ret lr)
#endif
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index eb62bf94721..b44d21e1e34 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -86,6 +86,9 @@ ENTRY(stext)
movs r8, r5 @ invalid machine (r5=0)?
beq __error_a @ yes, error 'a'
bl __vet_atags
+#ifdef CONFIG_SMP_ON_UP
+ bl __fixup_smp
+#endif
bl __create_page_tables
/*
@@ -333,4 +336,51 @@ __create_page_tables:
ENDPROC(__create_page_tables)
.ltorg
+#ifdef CONFIG_SMP_ON_UP
+__fixup_smp:
+ mov r7, #0x00070000
+ orr r6, r7, #0xff000000 @ mask 0xff070000
+ orr r7, r7, #0x41000000 @ val 0x41070000
+ and r0, r9, r6
+ teq r0, r7 @ ARM CPU and ARMv6/v7?
+ bne __fixup_smp_on_up @ no, assume UP
+
+ orr r6, r6, #0x0000ff00
+ orr r6, r6, #0x000000f0 @ mask 0xff07fff0
+ orr r7, r7, #0x0000b000
+ orr r7, r7, #0x00000020 @ val 0x4107b020
+ and r0, r9, r6
+ teq r0, r7 @ ARM 11MPCore?
+ moveq pc, lr @ yes, assume SMP
+
+ mrc p15, 0, r0, c0, c0, 5 @ read MPIDR
+ tst r0, #1 << 31
+ movne pc, lr @ bit 31 => SMP
+
+__fixup_smp_on_up:
+ adr r0, 1f
+ ldmia r0, {r3, r6, r7}
+ sub r3, r0, r3
+ add r6, r6, r3
+ add r7, r7, r3
+2: cmp r6, r7
+ ldmia r6!, {r0, r4}
+ strlo r4, [r0, r3]
+ blo 2b
+ mov pc, lr
+ENDPROC(__fixup_smp)
+
+1: .word .
+ .word __smpalt_begin
+ .word __smpalt_end
+
+ .pushsection .data
+ .globl smp_on_up
+smp_on_up:
+ ALT_SMP(.long 1)
+ ALT_UP(.long 0)
+ .popsection
+
+#endif
+
#include "head-common.S"
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index d5231ae7355..5a82c39ca85 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -36,6 +36,7 @@
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
+#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
@@ -825,7 +826,8 @@ void __init setup_arch(char **cmdline_p)
request_standard_resources(&meminfo, mdesc);
#ifdef CONFIG_SMP
- smp_init_cpus();
+ if (is_smp())
+ smp_init_cpus();
#endif
reserve_crashkernel();
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index b16c07914b5..fd5750b8ac8 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -40,6 +40,11 @@ SECTIONS
__tagtable_begin = .;
*(.taglist.init)
__tagtable_end = .;
+#ifdef CONFIG_SMP_ON_UP
+ __smpalt_begin = .;
+ *(.alt.smp.init)
+ __smpalt_end = .;
+#endif
INIT_SETUP(16)
@@ -237,6 +242,12 @@ SECTIONS
/* Default discards */
DISCARDS
+
+#ifndef CONFIG_SMP_ON_UP
+ /DISCARD/ : {
+ *(.alt.smp.init)
+ }
+#endif
}
/*
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 37c8157e116..e8ea1a071f4 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -91,11 +91,8 @@ ENTRY(v7_flush_kern_cache_all)
THUMB( stmfd sp!, {r4-r7, r9-r11, lr} )
bl v7_flush_dcache_all
mov r0, #0
-#ifdef CONFIG_SMP
- mcr p15, 0, r0, c7, c1, 0 @ invalidate I-cache inner shareable
-#else
- mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
-#endif
+ ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable
+ ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
mov pc, lr
@@ -171,11 +168,8 @@ ENTRY(v7_coherent_user_range)
cmp r0, r1
blo 1b
mov r0, #0
-#ifdef CONFIG_SMP
- mcr p15, 0, r0, c7, c1, 6 @ invalidate BTB Inner Shareable
-#else
- mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB
-#endif
+ ALT_SMP(mcr p15, 0, r0, c7, c1, 6) @ invalidate BTB Inner Shareable
+ ALT_UP(mcr p15, 0, r0, c7, c5, 6) @ invalidate BTB
dsb
isb
mov pc, lr
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 6a3a2d0cd6d..e2335811c02 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -310,9 +310,8 @@ static void __init build_mem_type_table(void)
cachepolicy = CPOLICY_WRITEBACK;
ecc_mask = 0;
}
-#ifdef CONFIG_SMP
- cachepolicy = CPOLICY_WRITEALLOC;
-#endif
+ if (is_smp())
+ cachepolicy = CPOLICY_WRITEALLOC;
/*
* Strip out features not present on earlier architectures.
@@ -406,13 +405,11 @@ static void __init build_mem_type_table(void)
cp = &cache_policies[cachepolicy];
vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
-#ifndef CONFIG_SMP
/*
* Only use write-through for non-SMP systems
*/
- if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
+ if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
-#endif
/*
* Enable CPU-specific coherency if supported.
@@ -436,22 +433,23 @@ static void __init build_mem_type_table(void)
mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
-#ifdef CONFIG_SMP
- /*
- * Mark memory with the "shared" attribute for SMP systems
- */
- user_pgprot |= L_PTE_SHARED;
- kern_pgprot |= L_PTE_SHARED;
- vecs_pgprot |= L_PTE_SHARED;
- mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
- mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
- mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
- mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
- mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
- mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
- mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
-#endif
+ if (is_smp()) {
+ /*
+ * Mark memory with the "shared" attribute
+ * for SMP systems
+ */
+ user_pgprot |= L_PTE_SHARED;
+ kern_pgprot |= L_PTE_SHARED;
+ vecs_pgprot |= L_PTE_SHARED;
+ mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
+ mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
+ mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+ mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+ mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+ }
}
/*
@@ -829,8 +827,7 @@ static void __init sanity_check_meminfo(void)
* rather difficult.
*/
reason = "with VIPT aliasing cache";
-#ifdef CONFIG_SMP
- } else if (tlb_ops_need_broadcast()) {
+ } else if (is_smp() && tlb_ops_need_broadcast()) {
/*
* kmap_high needs to occasionally flush TLB entries,
* however, if the TLB entries need to be broadcast
@@ -840,7 +837,6 @@ static void __init sanity_check_meminfo(void)
* (must not be called with irqs off)
*/
reason = "without hardware TLB ops broadcasting";
-#endif
}
if (reason) {
printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 22aac851519..b95662dedb6 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -30,13 +30,10 @@
#define TTB_RGN_WT (2 << 3)
#define TTB_RGN_WB (3 << 3)
-#ifndef CONFIG_SMP
-#define TTB_FLAGS TTB_RGN_WBWA
-#define PMD_FLAGS PMD_SECT_WB
-#else
-#define TTB_FLAGS TTB_RGN_WBWA|TTB_S
-#define PMD_FLAGS PMD_SECT_WBWA|PMD_SECT_S
-#endif
+#define TTB_FLAGS_UP TTB_RGN_WBWA
+#define PMD_FLAGS_UP PMD_SECT_WB
+#define TTB_FLAGS_SMP TTB_RGN_WBWA|TTB_S
+#define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S
ENTRY(cpu_v6_proc_init)
mov pc, lr
@@ -97,7 +94,8 @@ ENTRY(cpu_v6_switch_mm)
#ifdef CONFIG_MMU
mov r2, #0
ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
- orr r0, r0, #TTB_FLAGS
+ ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP)
+ ALT_UP(orr r0, r0, #TTB_FLAGS_UP)
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
mcr p15, 0, r2, c7, c10, 4 @ drain write buffer
mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
@@ -156,9 +154,11 @@ cpu_pj4_name:
*/
__v6_setup:
#ifdef CONFIG_SMP
- mrc p15, 0, r0, c1, c0, 1 @ Enable SMP/nAMP mode
+ ALT_SMP(mrc p15, 0, r0, c1, c0, 1) @ Enable SMP/nAMP mode
+ ALT_UP(nop)
orr r0, r0, #0x20
- mcr p15, 0, r0, c1, c0, 1
+ ALT_SMP(mcr p15, 0, r0, c1, c0, 1)
+ ALT_UP(nop)
#endif
mov r0, #0
@@ -169,7 +169,8 @@ __v6_setup:
#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs
mcr p15, 0, r0, c2, c0, 2 @ TTB control register
- orr r4, r4, #TTB_FLAGS
+ ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP)
+ ALT_UP(orr r4, r4, #TTB_FLAGS_UP)
mcr p15, 0, r4, c2, c0, 1 @ load TTB1
#endif /* CONFIG_MMU */
adr r5, v6_crval
@@ -225,10 +226,16 @@ cpu_elf_name:
__v6_proc_info:
.long 0x0007b000
.long 0x0007f000
- .long PMD_TYPE_SECT | \
+ ALT_SMP(.long \
+ PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ | \
+ PMD_FLAGS_SMP)
+ ALT_UP(.long \
+ PMD_TYPE_SECT | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ | \
- PMD_FLAGS
+ PMD_FLAGS_UP)
.long PMD_TYPE_SECT | \
PMD_SECT_XN | \
PMD_SECT_AP_WRITE | \
@@ -249,10 +256,16 @@ __v6_proc_info:
__pj4_v6_proc_info:
.long 0x560f5810
.long 0xff0ffff0
- .long PMD_TYPE_SECT | \
+ ALT_SMP(.long \
+ PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ | \
+ PMD_FLAGS_SMP)
+ ALT_UP(.long \
+ PMD_TYPE_SECT | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ | \
- PMD_FLAGS
+ PMD_FLAGS_UP)
.long PMD_TYPE_SECT | \
PMD_SECT_XN | \
PMD_SECT_AP_WRITE | \
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 7563ff0141b..df422fee1cb 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -30,15 +30,13 @@
#define TTB_IRGN_WT ((1 << 0) | (0 << 6))
#define TTB_IRGN_WB ((1 << 0) | (1 << 6))
-#ifndef CONFIG_SMP
/* PTWs cacheable, inner WB not shareable, outer WB not shareable */
-#define TTB_FLAGS TTB_IRGN_WB|TTB_RGN_OC_WB
-#define PMD_FLAGS PMD_SECT_WB
-#else
+#define TTB_FLAGS_UP TTB_IRGN_WB|TTB_RGN_OC_WB
+#define PMD_FLAGS_UP PMD_SECT_WB
+
/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
-#define TTB_FLAGS TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
-#define PMD_FLAGS PMD_SECT_WBWA|PMD_SECT_S
-#endif
+#define TTB_FLAGS_SMP TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
+#define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S
ENTRY(cpu_v7_proc_init)
mov pc, lr
@@ -105,7 +103,8 @@ ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
mov r2, #0
ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
- orr r0, r0, #TTB_FLAGS
+ ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP)
+ ALT_UP(orr r0, r0, #TTB_FLAGS_UP)
#ifdef CONFIG_ARM_ERRATA_430973
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
#endif
@@ -188,7 +187,8 @@ cpu_v7_name:
*/
__v7_ca9mp_setup:
#ifdef CONFIG_SMP
- mrc p15, 0, r0, c1, c0, 1
+ ALT_SMP(mrc p15, 0, r0, c1, c0, 1)
+ ALT_UP(mov r0, #(1 << 6)) @ fake it for UP
tst r0, #(1 << 6) @ SMP/nAMP mode enabled?
orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and
mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting
@@ -262,7 +262,8 @@ __v7_setup:
#ifdef CONFIG_MMU
mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
mcr p15, 0, r10, c2, c0, 2 @ TTB control register
- orr r4, r4, #TTB_FLAGS
+ ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP)
+ ALT_UP(orr r4, r4, #TTB_FLAGS_UP)
mcr p15, 0, r4, c2, c0, 1 @ load TTB1
mov r10, #0x1f @ domains 0, 1 = manager
mcr p15, 0, r10, c3, c0, 0 @ load domain access register
@@ -354,10 +355,16 @@ cpu_elf_name:
__v7_ca9mp_proc_info:
.long 0x410fc090 @ Required ID value
.long 0xff0ffff0 @ Mask for ID
- .long PMD_TYPE_SECT | \
+ ALT_SMP(.long \
+ PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ | \
+ PMD_FLAGS_SMP)
+ ALT_UP(.long \
+ PMD_TYPE_SECT | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ | \
- PMD_FLAGS
+ PMD_FLAGS_UP)
.long PMD_TYPE_SECT | \
PMD_SECT_XN | \
PMD_SECT_AP_WRITE | \
@@ -380,10 +387,16 @@ __v7_ca9mp_proc_info:
__v7_proc_info:
.long 0x000f0000 @ Required ID value
.long 0x000f0000 @ Mask for ID
- .long PMD_TYPE_SECT | \
+ ALT_SMP(.long \
+ PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ | \
+ PMD_FLAGS_SMP)
+ ALT_UP(.long \
+ PMD_TYPE_SECT | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ | \
- PMD_FLAGS
+ PMD_FLAGS_UP)
.long PMD_TYPE_SECT | \
PMD_SECT_XN | \
PMD_SECT_AP_WRITE | \
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index f3f288a9546..53cd5b45467 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -13,6 +13,7 @@
*/
#include <linux/init.h>
#include <linux/linkage.h>
+#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
@@ -41,20 +42,15 @@ ENTRY(v7wbi_flush_user_tlb_range)
orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
mov r1, r1, lsl #PAGE_SHIFT
1:
-#ifdef CONFIG_SMP
- mcr p15, 0, r0, c8, c3, 1 @ TLB invalidate U MVA (shareable)
-#else
- mcr p15, 0, r0, c8, c7, 1 @ TLB invalidate U MVA
-#endif
+ ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
+ ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
+
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
mov ip, #0
-#ifdef CONFIG_SMP
- mcr p15, 0, ip, c7, c1, 6 @ flush BTAC/BTB Inner Shareable
-#else
- mcr p15, 0, ip, c7, c5, 6 @ flush BTAC/BTB
-#endif
+ ALT_SMP(mcr p15, 0, ip, c7, c1, 6) @ flush BTAC/BTB Inner Shareable
+ ALT_UP(mcr p15, 0, ip, c7, c5, 6) @ flush BTAC/BTB
dsb
mov pc, lr
ENDPROC(v7wbi_flush_user_tlb_range)
@@ -74,20 +70,14 @@ ENTRY(v7wbi_flush_kern_tlb_range)
mov r0, r0, lsl #PAGE_SHIFT
mov r1, r1, lsl #PAGE_SHIFT
1:
-#ifdef CONFIG_SMP
- mcr p15, 0, r0, c8, c3, 1 @ TLB invalidate U MVA (shareable)
-#else
- mcr p15, 0, r0, c8, c7, 1 @ TLB invalidate U MVA
-#endif
+ ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
+ ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
mov r2, #0
-#ifdef CONFIG_SMP
- mcr p15, 0, r2, c7, c1, 6 @ flush BTAC/BTB Inner Shareable
-#else
- mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
-#endif
+ ALT_SMP(mcr p15, 0, r2, c7, c1, 6) @ flush BTAC/BTB Inner Shareable
+ ALT_UP(mcr p15, 0, r2, c7, c5, 6) @ flush BTAC/BTB
dsb
isb
mov pc, lr
@@ -99,5 +89,6 @@ ENDPROC(v7wbi_flush_kern_tlb_range)
ENTRY(v7wbi_tlb_fns)
.long v7wbi_flush_user_tlb_range
.long v7wbi_flush_kern_tlb_range
- .long v7wbi_tlb_flags
+ ALT_SMP(.long v7wbi_tlb_flags_smp)
+ ALT_UP(.long v7wbi_tlb_flags_up)
.size v7wbi_tlb_fns, . - v7wbi_tlb_fns