Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig         |  8
-rw-r--r--  arch/arm/mm/abort-ev6.S     |  5
-rw-r--r--  arch/arm/mm/alignment.c     | 55
-rw-r--r--  arch/arm/mm/cache-v6.S      |  9
-rw-r--r--  arch/arm/mm/fault.c         | 12
-rw-r--r--  arch/arm/mm/flush.c         | 36
-rw-r--r--  arch/arm/mm/proc-arm1020.S  |  2
-rw-r--r--  arch/arm/mm/proc-arm1020e.S |  2
-rw-r--r--  arch/arm/mm/proc-arm1022.S  |  2
-rw-r--r--  arch/arm/mm/proc-arm1026.S  |  2
-rw-r--r--  arch/arm/mm/proc-arm6_7.S   |  2
-rw-r--r--  arch/arm/mm/proc-arm720.S   |  2
-rw-r--r--  arch/arm/mm/proc-arm920.S   |  2
-rw-r--r--  arch/arm/mm/proc-arm922.S   |  2
-rw-r--r--  arch/arm/mm/proc-arm925.S   |  2
-rw-r--r--  arch/arm/mm/proc-arm926.S   |  2
-rw-r--r--  arch/arm/mm/proc-sa110.S    |  2
-rw-r--r--  arch/arm/mm/proc-sa1100.S   |  2
-rw-r--r--  arch/arm/mm/proc-v6.S       | 11
-rw-r--r--  arch/arm/mm/proc-xscale.S   |  2
20 files changed, 100 insertions, 62 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index db5e47dfc30..c54e04c995e 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -370,21 +370,21 @@ config CPU_BIG_ENDIAN
config CPU_ICACHE_DISABLE
bool "Disable I-Cache"
- depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020
+ depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_V6
help
Say Y here to disable the processor instruction cache. Unless
you have a reason not to or are unsure, say N.
config CPU_DCACHE_DISABLE
bool "Disable D-Cache"
- depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020
+ depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_V6
help
Say Y here to disable the processor data cache. Unless
you have a reason not to or are unsure, say N.
config CPU_DCACHE_WRITETHROUGH
bool "Force write through D-cache"
- depends on (CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020) && !CPU_DCACHE_DISABLE
+ depends on (CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_V6) && !CPU_DCACHE_DISABLE
default y if CPU_ARM925T
help
Say Y here to use the data cache in writethrough mode. Unless you
@@ -399,7 +399,7 @@ config CPU_CACHE_ROUND_ROBIN
config CPU_BPREDICT_DISABLE
bool "Disable branch prediction"
- depends on CPU_ARM1020
+ depends on CPU_ARM1020 || CPU_V6
help
Say Y here to disable branch prediction. If unsure, say N.
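
[Note] The three Kconfig hunks above extend the I-cache, D-cache and branch-prediction
disable options to ARMv6 (CPU_V6). A hedged C sketch of what these options ultimately
gate, assuming the architectural ARMv6 CP15 control-register layout (I-cache = bit 12,
D-cache C bit = bit 2, branch prediction Z bit = bit 11); the helper name is
illustrative, not kernel code:

/* Illustrative only: mask out the CP15 c1 control-register bits
 * corresponding to the Kconfig options above.  Bit positions per
 * the ARMv6 architecture: I = 12, C = 2, Z = 11. */
static inline unsigned long v6_ctrl_mask(unsigned long ctrl)
{
#ifdef CONFIG_CPU_ICACHE_DISABLE
	ctrl &= ~(1UL << 12);		/* I-cache off */
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
	ctrl &= ~(1UL << 2);		/* D-cache off */
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
	ctrl &= ~(1UL << 11);		/* branch prediction off */
#endif
	return ctrl;
}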
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index 8f76f3df7b4..dbd34603312 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -20,6 +20,11 @@
*/
.align 5
ENTRY(v6_early_abort)
+#ifdef CONFIG_CPU_MPCORE
+ clrex
+#else
+ strex r0, r1, [sp] @ Clear the exclusive monitor
+#endif
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
/*
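
[Note] The hunk above clears the local exclusive monitor on the data-abort path, so a
LDREX/STREX sequence interrupted by an abort cannot spuriously succeed afterwards.
MPCore provides a dedicated CLREX instruction; plain ARMv6 does not, so a dummy STREX
to the stack has the same effect. A hedged C rendering of the same idea (illustrative;
the kernel keeps this in the abort handler's assembly):

/* Sketch: clear the local exclusive monitor from C.  On MPCore use
 * clrex; on plain ARMv6, a dummy strex to any valid address. */
static inline void clear_exclusive_monitor(void)
{
	unsigned long tmp, dummy = 0;
#ifdef CONFIG_CPU_MPCORE
	asm volatile("clrex" ::: "memory");
#else
	asm volatile("strex %0, %1, [%2]"	/* result discarded */
		     : "=&r" (tmp)
		     : "r" (0), "r" (&dummy)
		     : "memory");
#endif
}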
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 4b39d867ac1..705c98921c3 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -111,7 +111,7 @@ proc_alignment_read(char *page, char **start, off_t off, int count, int *eof,
}
static int proc_alignment_write(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+ unsigned long count, void *data)
{
char mode;
@@ -119,7 +119,7 @@ static int proc_alignment_write(struct file *file, const char __user *buffer,
if (get_user(mode, buffer))
return -EFAULT;
if (mode >= '0' && mode <= '5')
- ai_usermode = mode - '0';
+ ai_usermode = mode - '0';
}
return count;
}
@@ -262,7 +262,7 @@ union offset_union {
goto fault; \
} while (0)
-#define put32_unaligned_check(val,addr) \
+#define put32_unaligned_check(val,addr) \
__put32_unaligned_check("strb", val, addr)
#define put32t_unaligned_check(val,addr) \
@@ -306,19 +306,19 @@ do_alignment_ldrhstrh(unsigned long addr, unsigned long instr, struct pt_regs *r
return TYPE_LDST;
user:
- if (LDST_L_BIT(instr)) {
- unsigned long val;
- get16t_unaligned_check(val, addr);
+ if (LDST_L_BIT(instr)) {
+ unsigned long val;
+ get16t_unaligned_check(val, addr);
- /* signed half-word? */
- if (instr & 0x40)
- val = (signed long)((signed short) val);
+ /* signed half-word? */
+ if (instr & 0x40)
+ val = (signed long)((signed short) val);
- regs->uregs[rd] = val;
- } else
- put16t_unaligned_check(regs->uregs[rd], addr);
+ regs->uregs[rd] = val;
+ } else
+ put16t_unaligned_check(regs->uregs[rd], addr);
- return TYPE_LDST;
+ return TYPE_LDST;
fault:
return TYPE_FAULT;
@@ -330,6 +330,9 @@ do_alignment_ldrdstrd(unsigned long addr, unsigned long instr,
{
unsigned int rd = RD_BITS(instr);
+ if (((rd & 1) == 1) || (rd == 14))
+ goto bad;
+
ai_dword += 1;
if (user_mode(regs))
@@ -339,11 +342,11 @@ do_alignment_ldrdstrd(unsigned long addr, unsigned long instr,
unsigned long val;
get32_unaligned_check(val, addr);
regs->uregs[rd] = val;
- get32_unaligned_check(val, addr+4);
- regs->uregs[rd+1] = val;
+ get32_unaligned_check(val, addr + 4);
+ regs->uregs[rd + 1] = val;
} else {
put32_unaligned_check(regs->uregs[rd], addr);
- put32_unaligned_check(regs->uregs[rd+1], addr+4);
+ put32_unaligned_check(regs->uregs[rd + 1], addr + 4);
}
return TYPE_LDST;
@@ -353,15 +356,16 @@ do_alignment_ldrdstrd(unsigned long addr, unsigned long instr,
unsigned long val;
get32t_unaligned_check(val, addr);
regs->uregs[rd] = val;
- get32t_unaligned_check(val, addr+4);
- regs->uregs[rd+1] = val;
+ get32t_unaligned_check(val, addr + 4);
+ regs->uregs[rd + 1] = val;
} else {
put32t_unaligned_check(regs->uregs[rd], addr);
- put32t_unaligned_check(regs->uregs[rd+1], addr+4);
+ put32t_unaligned_check(regs->uregs[rd + 1], addr + 4);
}
return TYPE_LDST;
-
+ bad:
+ return TYPE_ERROR;
fault:
return TYPE_FAULT;
}
@@ -439,7 +443,7 @@ do_alignment_ldmstm(unsigned long addr, unsigned long instr, struct pt_regs *reg
if (LDST_P_EQ_U(instr)) /* U = P */
eaddr += 4;
- /*
+ /*
* For alignment faults on the ARM922T/ARM920T the MMU makes
* the FSR (and hence addr) equal to the updated base address
* of the multiple access rather than the restored value.
@@ -566,7 +570,7 @@ thumb2arm(u16 tinstr)
/* 6.5.1 Format 3: */
case 0x4800 >> 11: /* 7.1.28 LDR(3) */
/* NOTE: This case is not technically possible. We're
- * loading 32-bit memory data via PC relative
+ * loading 32-bit memory data via PC relative
* addressing mode. So we can and should eliminate
* this case. But I'll leave it here for now.
*/
@@ -638,7 +642,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (fault) {
type = TYPE_FAULT;
- goto bad_or_fault;
+ goto bad_or_fault;
}
if (user_mode(regs))
@@ -663,6 +667,8 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
else if ((instr & 0x001000f0) == 0x000000d0 || /* LDRD */
(instr & 0x001000f0) == 0x000000f0) /* STRD */
handler = do_alignment_ldrdstrd;
+ else if ((instr & 0x01f00ff0) == 0x01000090) /* SWP */
+ goto swp;
else
goto bad;
break;
@@ -733,6 +739,9 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
do_bad_area(current, current->mm, addr, fsr, regs);
return 0;
+ swp:
+ printk(KERN_ERR "Alignment trap: not handling swp instruction\n");
+
bad:
/*
* Oops, we didn't handle the instruction.
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 85c10a71e7c..72966d90e95 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -18,6 +18,7 @@
#define HARVARD_CACHE
#define CACHE_LINE_SIZE 32
#define D_CACHE_LINE_SIZE 32
+#define BTB_FLUSH_SIZE 8
/*
* v6_flush_cache_all()
@@ -98,7 +99,13 @@ ENTRY(v6_coherent_user_range)
mcr p15, 0, r0, c7, c5, 1 @ invalidate I line
#endif
mcr p15, 0, r0, c7, c5, 7 @ invalidate BTB entry
- add r0, r0, #CACHE_LINE_SIZE
+ add r0, r0, #BTB_FLUSH_SIZE
+ mcr p15, 0, r0, c7, c5, 7 @ invalidate BTB entry
+ add r0, r0, #BTB_FLUSH_SIZE
+ mcr p15, 0, r0, c7, c5, 7 @ invalidate BTB entry
+ add r0, r0, #BTB_FLUSH_SIZE
+ mcr p15, 0, r0, c7, c5, 7 @ invalidate BTB entry
+ add r0, r0, #BTB_FLUSH_SIZE
cmp r0, r1
blo 1b
#ifdef HARVARD_CACHE
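
[Note] The cache-v6.S hunk steps the BTB invalidate in 8-byte granules
(BTB_FLUSH_SIZE), issuing four invalidates per 32-byte cache line instead of one. A
hedged C sketch of the resulting loop structure; btb_invalidate_mva() is a stand-in
for the mcr p15, 0, rX, c7, c5, 7 operation, not a real kernel helper:

#define CACHE_LINE_SIZE	32
#define BTB_FLUSH_SIZE	8

static void coherent_range_btb(unsigned long start, unsigned long end,
			       void (*btb_invalidate_mva)(unsigned long))
{
	unsigned long addr, btb;

	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
		/* one BTB invalidate per 8-byte granule in the line */
		for (btb = addr; btb < addr + CACHE_LINE_SIZE;
		     btb += BTB_FLUSH_SIZE)
			btb_invalidate_mva(btb);
}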
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 0b6c4db44e0..4a884baf3b9 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -233,7 +233,17 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (in_interrupt() || !mm)
goto no_context;
- down_read(&mm->mmap_sem);
+ /*
+ * As per x86, we may deadlock here. However, since the kernel only
+ * validly references user space from well defined areas of the code,
+ * we can bug out early if this is from code which shouldn't.
+ */
+ if (!down_read_trylock(&mm->mmap_sem)) {
+ if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
+ goto no_context;
+ down_read(&mm->mmap_sem);
+ }
+
fault = __do_page_fault(mm, addr, fsr, tsk);
up_read(&mm->mmap_sem);
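
[Note] The fault.c change adopts the x86 pattern its new comment describes: try the
lock first, and only block if the fault came from a context that is allowed to fault;
otherwise bail to no_context instead of deadlocking (e.g. a kernel fault taken while
mmap_sem is already held for write). A hedged userspace analogue using POSIX rwlocks;
fault_from_known_good_code() stands in for the user_mode()/search_exception_tables()
test:

#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

static bool fault_from_known_good_code(void)
{
	return true;	/* stub: kernel checks user_mode()/exception tables */
}

static int take_mmap_sem_or_bail(void)
{
	if (pthread_rwlock_tryrdlock(&mmap_sem) != 0) {
		/* Contended: only sleep on the lock if this context is
		 * allowed to fault, else we may deadlock on ourselves. */
		if (!fault_from_known_good_code())
			return -1;		/* kernel: goto no_context */
		pthread_rwlock_rdlock(&mmap_sem);
	}
	return 0;
}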
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index b0208c99257..c9a03981b78 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -17,6 +17,24 @@
#ifdef CONFIG_CPU_CACHE_VIPT
+#define ALIAS_FLUSH_START 0xffff4000
+
+#define TOP_PTE(x) pte_offset_kernel(top_pmd, x)
+
+static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
+{
+ unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
+
+ set_pte(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL));
+ flush_tlb_kernel_page(to);
+
+ asm( "mcrr p15, 0, %1, %0, c14\n"
+ " mcrr p15, 0, %1, %0, c5\n"
+ :
+ : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES)
+ : "cc");
+}
+
void flush_cache_mm(struct mm_struct *mm)
{
if (cache_is_vivt()) {
@@ -67,24 +85,6 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
if (cache_is_vipt_aliasing())
flush_pfn_alias(pfn, user_addr);
}
-
-#define ALIAS_FLUSH_START 0xffff4000
-
-#define TOP_PTE(x) pte_offset_kernel(top_pmd, x)
-
-static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
-{
- unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
-
- set_pte(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL));
- flush_tlb_kernel_page(to);
-
- asm( "mcrr p15, 0, %1, %0, c14\n"
- " mcrr p15, 0, %1, %0, c5\n"
- :
- : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES)
- : "cc");
-}
#else
#define flush_pfn_alias(pfn,vaddr) do { } while (0)
#endif
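
[Note] The flush.c hunks only move flush_pfn_alias() (with its ALIAS_FLUSH_START and
TOP_PTE helpers) above flush_cache_mm(), because a static C function must be declared
before its first use. The equivalent fix without moving the body would have been a
forward declaration at the top of the #ifdef CONFIG_CPU_CACHE_VIPT block:

static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr);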
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 1d739d282a4..82ec954e45b 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -509,7 +509,7 @@ cpu_arm1020_name:
.align
- .section ".proc.info", #alloc, #execinstr
+ .section ".proc.info.init", #alloc, #execinstr
.type __arm1020_proc_info,#object
__arm1020_proc_info:
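
[Note] This ".proc.info" to ".proc.info.init" rename recurs in every proc-*.S diff
below: it moves the per-CPU match records into init memory so they can be discarded
after boot, once the CPU has been identified. A hedged C analogue of the same section
placement (illustrative; the real records stay in assembly, and struct proc_info_list
is defined in linux/include/asm-arm/procinfo.h):

/* Place boot-only data in the init-discarded proc info section. */
#define __proc_info	__attribute__((__section__(".proc.info.init")))

static const unsigned int example_boot_only_id __proc_info = 0x41000000;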
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index 9b725665b5c..7375fe930f7 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -491,7 +491,7 @@ cpu_arm1020e_name:
.align
- .section ".proc.info", #alloc, #execinstr
+ .section ".proc.info.init", #alloc, #execinstr
.type __arm1020e_proc_info,#object
__arm1020e_proc_info:
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 37b70fa21c7..6ca639094d6 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -473,7 +473,7 @@ cpu_arm1022_name:
.align
- .section ".proc.info", #alloc, #execinstr
+ .section ".proc.info.init", #alloc, #execinstr
.type __arm1022_proc_info,#object
__arm1022_proc_info:
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 931b690d1be..10317e4f55d 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -469,7 +469,7 @@ cpu_arm1026_name:
.align
- .section ".proc.info", #alloc, #execinstr
+ .section ".proc.info.init", #alloc, #execinstr
.type __arm1026_proc_info,#object
__arm1026_proc_info:
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S
index d0f1bbb48f6..8e7e1e70ab0 100644
--- a/arch/arm/mm/proc-arm6_7.S
+++ b/arch/arm/mm/proc-arm6_7.S
@@ -332,7 +332,7 @@ cpu_arm710_name:
.align
- .section ".proc.info", #alloc, #execinstr
+ .section ".proc.info.init", #alloc, #execinstr
.type __arm6_proc_info, #object
__arm6_proc_info:
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index c69c9de3239..a13e0184d34 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -222,7 +222,7 @@ cpu_arm720_name:
* See linux/include/asm-arm/procinfo.h for a definition of this structure.
*/
- .section ".proc.info", #alloc, #execinstr
+ .section ".proc.info.init", #alloc, #execinstr
.type __arm710_proc_info, #object
__arm710_proc_info:
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 0f490a0fcb7..d1651389999 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -452,7 +452,7 @@ cpu_arm920_name:
.align
- .section ".proc.info", #alloc, #execinstr
+ .section ".proc.info.init", #alloc, #execinstr
.type __arm920_proc_info,#object
__arm920_proc_info:
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index 62bc34a139e..23b8ed97f4e 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -456,7 +456,7 @@ cpu_arm922_name:
.align
- .section ".proc.info", #alloc, #execinstr
+ .section ".proc.info.init", #alloc, #execinstr
.type __arm922_proc_info,#object
__arm922_proc_info:
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index ee49aa2ca78..ee95c52db51 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -521,7 +521,7 @@ cpu_arm925_name:
.align
- .section ".proc.info", #alloc, #execinstr
+ .section ".proc.info.init", #alloc, #execinstr
.type __arm925_proc_info,#object
__arm925_proc_info:
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index bb95cc9fed0..7d042dc20c4 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -471,7 +471,7 @@ cpu_arm926_name:
.align
- .section ".proc.info", #alloc, #execinstr
+ .section ".proc.info.init", #alloc, #execinstr
.type __arm926_proc_info,#object
__arm926_proc_info:
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index 34f7e7d3f41..bd330c4075a 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -249,7 +249,7 @@ cpu_sa110_name:
.align
- .section ".proc.info", #alloc, #execinstr
+ .section ".proc.info.init", #alloc, #execinstr
.type __sa110_proc_info,#object
__sa110_proc_info:
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index ca14f80d5ab..91b89124c0d 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -280,7 +280,7 @@ cpu_sa1110_name:
.align
- .section ".proc.info", #alloc, #execinstr
+ .section ".proc.info.init", #alloc, #execinstr
.type __sa1100_proc_info,#object
__sa1100_proc_info:
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index eb34823c9db..9bb5fff406f 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -55,7 +55,14 @@ ENTRY(cpu_v6_proc_init)
mov pc, lr
ENTRY(cpu_v6_proc_fin)
- mov pc, lr
+ stmfd sp!, {lr}
+ cpsid if @ disable interrupts
+ bl v6_flush_kern_cache_all
+ mrc p15, 0, r0, c1, c0, 0 @ ctrl register
+ bic r0, r0, #0x1000 @ ...i............
+ bic r0, r0, #0x0006 @ .............ca.
+ mcr p15, 0, r0, c1, c0, 0 @ disable caches
+ ldmfd sp!, {pc}
/*
* cpu_v6_reset(loc)
@@ -240,7 +247,7 @@ cpu_elf_name:
.size cpu_elf_name, . - cpu_elf_name
.align
- .section ".proc.info", #alloc, #execinstr
+ .section ".proc.info.init", #alloc, #execinstr
/*
* Match any ARMv6 processor core.
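
[Note] cpu_v6_proc_fin is the pre-reset/halt hook; its new body masks interrupts,
flushes the caches, and then turns off the I-cache, D-cache and alignment checking in
the CP15 control register (the 0x1000 and 0x0006 masks are bit 12 = I and bits 2/1 =
C/A). A hedged C rendering for privileged ARMv6 code (illustrative; the kernel keeps
this in assembly, and v6_flush_kern_cache_all is the real routine named in the patch):

extern void v6_flush_kern_cache_all(void);

static void v6_proc_fin_sketch(void)
{
	unsigned long ctrl;

	asm volatile("cpsid if" ::: "memory");	/* mask IRQs and FIQs */
	v6_flush_kern_cache_all();		/* write back dirty lines */

	asm volatile("mrc p15, 0, %0, c1, c0, 0" : "=r" (ctrl));
	ctrl &= ~0x1000UL;	/* bit 12: I-cache off */
	ctrl &= ~0x0006UL;	/* bits 2,1: D-cache, alignment checks off */
	asm volatile("mcr p15, 0, %0, c1, c0, 0" : : "r" (ctrl) : "memory");
}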
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index b88de270014..861b3594728 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -578,7 +578,7 @@ cpu_pxa270_name:
.align
- .section ".proc.info", #alloc, #execinstr
+ .section ".proc.info.init", #alloc, #execinstr
.type __80200_proc_info,#object
__80200_proc_info: