Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/Kconfig       | 18 +++++++++++++++---
-rw-r--r--	arch/arm/mm/copypage-v6.c | 16 +++++++---------
-rw-r--r--	arch/arm/mm/init.c        |  9 ++++++++-
-rw-r--r--	arch/arm/mm/mm-armv.c     |  8 ++++----
4 files changed, 34 insertions(+), 17 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index c54e04c995e..e3c14d6b432 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -120,8 +120,8 @@ config CPU_ARM925T
# ARM926T
config CPU_ARM926T
- bool "Support ARM926T processor" if ARCH_INTEGRATOR
- depends on ARCH_INTEGRATOR || ARCH_VERSATILE_PB || MACH_VERSATILE_AB || ARCH_OMAP730 || ARCH_OMAP16XX
+ bool "Support ARM926T processor"
+ depends on ARCH_INTEGRATOR || ARCH_VERSATILE_PB || MACH_VERSATILE_AB || ARCH_OMAP730 || ARCH_OMAP16XX || MACH_REALVIEW_EB
default y if ARCH_VERSATILE_PB || MACH_VERSATILE_AB || ARCH_OMAP730 || ARCH_OMAP16XX
select CPU_32v5
select CPU_ABRT_EV5TJ
@@ -242,7 +242,7 @@ config CPU_XSCALE
# ARMv6
config CPU_V6
bool "Support ARM V6 processor"
- depends on ARCH_INTEGRATOR
+ depends on ARCH_INTEGRATOR || MACH_REALVIEW_EB
select CPU_32v6
select CPU_ABRT_EV6
select CPU_CACHE_V6
@@ -250,6 +250,18 @@ config CPU_V6
select CPU_COPY_V6
select CPU_TLB_V6
+# ARMv6k
+config CPU_32v6K
+ bool "Support ARM V6K processor extensions" if !SMP
+ depends on CPU_V6
+ default y if SMP
+ help
+ Say Y here if your ARMv6 processor supports the 'K' extension.
+ This enables the kernel to use some instructions not present
+ on previous processors, and as such a kernel built with this
+ enabled will not boot on processors which do not support these
+ instructions.
+
# Figure out what processor architecture version we should be using.
# This defines the compiler instruction set which depends on the machine type.
config CPU_32v3
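
For context, options like the new CPU_32v6K are typically consumed from C via
#ifdef. A minimal hedged sketch of that pattern follows; the sev() wrapper is
illustrative only and is not part of this patch:

/*
 * Illustrative sketch, not part of this patch: code built with
 * CONFIG_CPU_32v6K may emit V6K-only instructions (e.g. SEV), which
 * is exactly why the resulting kernel image will not boot on plain
 * ARMv6 cores.
 */
#ifdef CONFIG_CPU_32v6K
#define sev()	__asm__ __volatile__ ("sev" : : : "memory")
#else
#define sev()	do { } while (0)	/* no-op on pre-V6K cores */
#endif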
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 27d041574ea..269ce6913ee 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -22,9 +22,7 @@
#endif
#define from_address (0xffff8000)
-#define from_pgprot PAGE_KERNEL
#define to_address (0xffffc000)
-#define to_pgprot PAGE_KERNEL
#define TOP_PTE(x) pte_offset_kernel(top_pmd, x)
@@ -34,7 +32,7 @@ static DEFINE_SPINLOCK(v6_lock);
* Copy the user page. No aliasing to deal with so we can just
* attack the kernel's existing mapping of these pages.
*/
-void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr)
+static void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr)
{
copy_page(kto, kfrom);
}
@@ -43,7 +41,7 @@ void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long v
* Clear the user page. No aliasing to deal with so we can just
* attack the kernel's existing mapping of this page.
*/
-void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
+static void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
{
clear_page(kaddr);
}
@@ -51,7 +49,7 @@ void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
/*
* Copy the page, taking account of the cache colour.
*/
-void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
+static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
{
unsigned int offset = CACHE_COLOUR(vaddr);
unsigned long from, to;
@@ -72,8 +70,8 @@ void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vadd
*/
spin_lock(&v6_lock);
- set_pte(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, from_pgprot));
- set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, to_pgprot));
+ set_pte(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, PAGE_KERNEL));
+ set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, PAGE_KERNEL));
from = from_address + (offset << PAGE_SHIFT);
to = to_address + (offset << PAGE_SHIFT);
@@ -91,7 +89,7 @@ void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vadd
* so remap the kernel page into the same cache colour as the user
* page.
*/
-void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
+static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
{
unsigned int offset = CACHE_COLOUR(vaddr);
unsigned long to = to_address + (offset << PAGE_SHIFT);
@@ -112,7 +110,7 @@ void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
*/
spin_lock(&v6_lock);
- set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, to_pgprot));
+ set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, PAGE_KERNEL));
flush_tlb_kernel_page(to);
clear_page((void *)to);
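
For reference, CACHE_COLOUR() in the aliasing paths above selects which page
colour a virtual address falls into. A minimal sketch of that arithmetic,
assuming SHMLBA spans four page colours (the usual value on aliasing VIPT
ARM caches):

/*
 * Sketch of the colour arithmetic used by the aliasing copy/clear
 * paths: the low bits of the virtual address within an SHMLBA-sized
 * window, expressed in pages, give the cache colour.
 */
#define SHMLBA			(4 * PAGE_SIZE)
#define CACHE_COLOUR(vaddr)	(((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)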
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index fd079ff1fc5..c168f322ef8 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -486,10 +486,17 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
/*
* Ask the machine support to map in the statically mapped devices.
- * After this point, we can start to touch devices again.
*/
if (mdesc->map_io)
mdesc->map_io();
+
+ /*
+ * Finally flush the tlb again - this ensures that we're in a
+ * consistent state wrt the writebuffer if the writebuffer needs
+ * draining. After this point, we can start to touch devices
+ * again.
+ */
+ local_flush_tlb_all();
}
/*
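
A hypothetical map_io hook of the kind invoked above, for context; the board
name, virtual address, and physical base are made up for illustration and do
not come from this patch:

/*
 * Hypothetical machine map_io hook; devicemaps_init() calls this via
 * mdesc->map_io() to establish the statically mapped devices.
 */
static struct map_desc board_io_desc[] __initdata = {
	{
		.virtual = 0xf8000000,
		.pfn     = __phys_to_pfn(0x10000000),
		.length  = SZ_1M,
		.type    = MT_DEVICE,
	},
};

static void __init board_map_io(void)
{
	iotable_init(board_io_desc, ARRAY_SIZE(board_io_desc));
}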
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 1221fdde176..fb5b40289de 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -469,14 +469,14 @@ void __init create_mapping(struct map_desc *md)
if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
printk(KERN_WARNING "BUG: not creating mapping for "
- "0x%016llx at 0x%08lx in user region\n",
+ "0x%08llx at 0x%08lx in user region\n",
__pfn_to_phys((u64)md->pfn), md->virtual);
return;
}
if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
- printk(KERN_WARNING "BUG: mapping for 0x%016llx at 0x%08lx "
+ printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
"overlaps vmalloc space\n",
__pfn_to_phys((u64)md->pfn), md->virtual);
}
@@ -492,14 +492,14 @@ void __init create_mapping(struct map_desc *md)
if(md->pfn >= 0x100000) {
if(domain) {
printk(KERN_ERR "MM: invalid domain in supersection "
- "mapping for 0x%016llx at 0x%08lx\n",
+ "mapping for 0x%08llx at 0x%08lx\n",
__pfn_to_phys((u64)md->pfn), md->virtual);
return;
}
if((md->virtual | md->length | __pfn_to_phys(md->pfn))
& ~SUPERSECTION_MASK) {
printk(KERN_ERR "MM: cannot create mapping for "
- "0x%016llx at 0x%08lx invalid alignment\n",
+ "0x%08llx at 0x%08lx invalid alignment\n",
__pfn_to_phys((u64)md->pfn), md->virtual);
return;
}
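
The printk changes above only narrow the zero-padding. A self-contained
userspace sketch of the difference (illustrative only, not kernel code):

/*
 * "%08llx" zero-pads to a minimum of eight digits but still prints
 * all significant digits of a >32-bit physical address, so nothing
 * is lost relative to "%016llx" even for supersection-range pfns
 * (pfn >= 0x100000, i.e. physical addresses above 4GB).
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t below_4g = (uint64_t)0x00010000 << 12;	/* pfn << PAGE_SHIFT */
	uint64_t above_4g = (uint64_t)0x00180000 << 12;	/* supersection-range pfn */

	printf("0x%016llx\n", (unsigned long long)below_4g);	/* 0x0000000010000000 */
	printf("0x%08llx\n",  (unsigned long long)below_4g);	/* 0x10000000 */
	printf("0x%08llx\n",  (unsigned long long)above_4g);	/* 0x180000000 */
	return 0;
}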