Diffstat (limited to 'arch/arm/mm/idmap.c')
-rw-r--r--  arch/arm/mm/idmap.c  125
1 file changed, 97 insertions(+), 28 deletions(-)
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 57299446f78..c447ec70e86 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -1,13 +1,58 @@
+#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/slab.h>
#include <asm/cputype.h>
+#include <asm/idmap.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
+#include <asm/sections.h>
+#include <asm/system_info.h>
-static void idmap_add_pmd(pgd_t *pgd, unsigned long addr, unsigned long end,
+/*
+ * Note: accesses outside of the kernel image and the identity map area
+ * are not supported on any CPU using the idmap tables as its current
+ * page tables.
+ */
+pgd_t *idmap_pgd;
+phys_addr_t (*arch_virt_to_idmap) (unsigned long x);
+
+#ifdef CONFIG_ARM_LPAE
+static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
+ unsigned long prot)
+{
+ pmd_t *pmd;
+ unsigned long next;
+
+ if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) {
+ pmd = pmd_alloc_one(&init_mm, addr);
+ if (!pmd) {
+ pr_warning("Failed to allocate identity pmd.\n");
+ return;
+ }
+ /*
+ * Copy the original PMD to ensure that the PMD entries for
+ * the kernel image are preserved.
+ */
+ if (!pud_none(*pud))
+ memcpy(pmd, pmd_offset(pud, 0),
+ PTRS_PER_PMD * sizeof(pmd_t));
+ pud_populate(&init_mm, pud, pmd);
+ pmd += pmd_index(addr);
+ } else
+ pmd = pmd_offset(pud, addr);
+
+ do {
+ next = pmd_addr_end(addr, end);
+ *pmd = __pmd((addr & PMD_MASK) | prot);
+ flush_pmd_entry(pmd);
+ } while (pmd++, addr = next, addr != end);
+}
+#else /* !CONFIG_ARM_LPAE */
+static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
unsigned long prot)
{
- pmd_t *pmd = pmd_offset(pgd, addr);
+ pmd_t *pmd = pmd_offset(pud, addr);
addr = (addr & PMD_MASK) | prot;
pmd[0] = __pmd(addr);
@@ -15,53 +60,77 @@ static void idmap_add_pmd(pgd_t *pgd, unsigned long addr, unsigned long end,
pmd[1] = __pmd(addr);
flush_pmd_entry(pmd);
}
+#endif /* CONFIG_ARM_LPAE */
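
For readers new to the classic ARM page-table layout: Linux folds two 1MiB hardware sections into each 2MiB pmd, which is why the non-LPAE idmap_add_pmd() above writes both pmd[0] and pmd[1]. The following standalone sketch models just that descriptor arithmetic; SECTION_SIZE, PMD_MASK, and the prot value are illustrative stand-ins, not the kernel's headers:

/*
 * Illustrative userspace model of the non-LPAE path above: each Linux
 * pmd covers two 1MiB hardware sections, so idmap_add_pmd() writes a
 * pair of section descriptors.  The constants mirror the classic ARM
 * layout (PMD_SHIFT = 21, SECTION_SHIFT = 20) but are stand-ins here.
 */
#include <stdio.h>

#define SECTION_SIZE	(1UL << 20)		/* 1 MiB hardware section */
#define PMD_MASK	(~((1UL << 21) - 1))	/* 2 MiB Linux pmd */

int main(void)
{
	unsigned long addr = 0x60123456;	/* arbitrary idmap address */
	unsigned long prot = 0x402;		/* stand-in for section prot bits */
	unsigned long pmd[2];

	/* Same arithmetic as the diff: round down, then map the pair. */
	unsigned long desc = (addr & PMD_MASK) | prot;
	pmd[0] = desc;
	desc += SECTION_SIZE;
	pmd[1] = desc;

	printf("pmd[0] = %#lx (maps %#lx)\n", pmd[0], addr & PMD_MASK);
	printf("pmd[1] = %#lx (maps %#lx)\n", pmd[1],
	       (addr & PMD_MASK) + SECTION_SIZE);
	return 0;
}
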
+
+static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
+ unsigned long prot)
+{
+ pud_t *pud = pud_offset(pgd, addr);
+ unsigned long next;
+
+ do {
+ next = pud_addr_end(addr, end);
+ idmap_add_pmd(pud, addr, next, prot);
+ } while (pud++, addr = next, addr != end);
+}
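
All of the walkers above share one loop shape: *_addr_end() clamps each step to the next table boundary or to the end of the range, whichever comes first, so every level only touches the slice covered by a single entry. A minimal userspace model of that clamping, assuming a 2MiB step for illustration:

#include <stdio.h>

/* Mirror of the kernel's pmd_addr_end() clamping, with an illustrative
 * 2 MiB step: advance to the next boundary unless that overshoots end. */
#define STEP	(1UL << 21)

static unsigned long addr_end(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + STEP) & ~(STEP - 1);
	return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
	unsigned long addr = 0x60180000, end = 0x60600000, next;

	/* Same loop shape as idmap_add_pud()/idmap_add_pmd() above. */
	do {
		next = addr_end(addr, end);
		printf("slice: %#lx - %#lx\n", addr, next);
	} while (addr = next, addr != end);
	return 0;
}
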
-void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
+static void identity_mapping_add(pgd_t *pgd, const char *text_start,
+ const char *text_end, unsigned long prot)
{
- unsigned long prot, next;
+ unsigned long addr, end;
+ unsigned long next;
+
+ addr = virt_to_idmap(text_start);
+ end = virt_to_idmap(text_end);
+ pr_info("Setting up static identity map for 0x%lx - 0x%lx\n", addr, end);
+
+ prot |= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
- prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
prot |= PMD_BIT4;
pgd += pgd_index(addr);
do {
next = pgd_addr_end(addr, end);
- idmap_add_pmd(pgd, addr, next, prot);
+ idmap_add_pud(pgd, addr, next, prot);
} while (pgd++, addr = next, addr != end);
}
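
identity_mapping_add() works on idmap addresses, not kernel virtual addresses. virt_to_idmap() normally reduces to the ordinary linear-map translation, but the arch_virt_to_idmap hook declared at the top of this file lets platforms whose interconnect aliases RAM at a different address (TI Keystone is the usual example) substitute their own translation. A rough userspace model, with PAGE_OFFSET and PHYS_OFFSET as typical illustrative values rather than any particular board's:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;

/* Illustrative stand-ins for the kernel's lowmem linear-map offsets. */
#define PAGE_OFFSET	0xc0000000UL	/* kernel virtual base (3G/1G split) */
#define PHYS_OFFSET	0x80000000UL	/* physical RAM base (board-specific) */

/* Platform override hook, as declared at the top of idmap.c. */
phys_addr_t (*arch_virt_to_idmap)(unsigned long x);

static phys_addr_t virt_to_phys_model(unsigned long x)
{
	return (phys_addr_t)(x - PAGE_OFFSET + PHYS_OFFSET);
}

/* Rough shape of the kernel's virt_to_idmap(): use the platform hook
 * when the SoC aliases RAM elsewhere for idmap purposes, otherwise
 * fall back to the ordinary linear-map translation. */
static phys_addr_t virt_to_idmap_model(unsigned long x)
{
	if (arch_virt_to_idmap)
		return arch_virt_to_idmap(x);
	return virt_to_phys_model(x);
}

int main(void)
{
	unsigned long text_start = 0xc0208000;	/* hypothetical __idmap_text_start */
	printf("idmap addr: %#llx\n",
	       (unsigned long long)virt_to_idmap_model(text_start));
	return 0;
}
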
-#ifdef CONFIG_SMP
-static void idmap_del_pmd(pgd_t *pgd, unsigned long addr, unsigned long end)
-{
- pmd_t *pmd = pmd_offset(pgd, addr);
- pmd_clear(pmd);
-}
+extern char __idmap_text_start[], __idmap_text_end[];
-void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end)
+static int __init init_static_idmap(void)
{
- unsigned long next;
+ idmap_pgd = pgd_alloc(&init_mm);
+ if (!idmap_pgd)
+ return -ENOMEM;
- pgd += pgd_index(addr);
- do {
- next = pgd_addr_end(addr, end);
- idmap_del_pmd(pgd, addr, next);
- } while (pgd++, addr = next, addr != end);
+ identity_mapping_add(idmap_pgd, __idmap_text_start,
+ __idmap_text_end, 0);
+
+ /* Flush L1 for the hardware to see this page table content */
+ flush_cache_louis();
+
+ return 0;
}
-#endif
+early_initcall(init_static_idmap);
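
__idmap_text_start[] and __idmap_text_end[] are linker-script markers bracketing .idmap.text, the section into which the ARM kernel gathers the few functions that must remain executable while the MMU is being switched; init_static_idmap() maps exactly that range. The underlying trick, placing functions in a named section and reading its bounds from linker-generated symbols, can be shown in plain C. The section and symbol names below are illustrative, relying on GNU ld's automatic __start_/__stop_ symbols for identifier-named ELF sections:

#include <stdio.h>

/* Place a function into a dedicated section, as the kernel does with
 * its .idmap.text annotation; noinline keeps the code in that section. */
#define __demo_idmap __attribute__((section("demo_idmap_text"), noinline))

/* GNU ld auto-generates these bounds for identifier-named sections. */
extern char __start_demo_idmap_text[], __stop_demo_idmap_text[];

static __demo_idmap int must_survive_mmu_switch(int x)
{
	return x + 1;
}

int main(void)
{
	/* The kernel identity-maps exactly this kind of range. */
	printf("section spans %p - %p (%ld bytes)\n",
	       (void *)__start_demo_idmap_text,
	       (void *)__stop_demo_idmap_text,
	       (long)(__stop_demo_idmap_text - __start_demo_idmap_text));
	return must_survive_mmu_switch(0) ? 0 : 1;
}
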
/*
- * In order to soft-boot, we need to insert a 1:1 mapping in place of
- * the user-mode pages. This will then ensure that we have predictable
- * results when turning the mmu off
+ * In order to soft-boot, we need to switch to a 1:1 mapping for the
+ * cpu_reset functions. This will then ensure that we have predictable
+ * results when turning off the mmu.
*/
-void setup_mm_for_reboot(char mode)
+void setup_mm_for_reboot(void)
{
+ /* Switch to the identity mapping. */
+ cpu_switch_mm(idmap_pgd, &init_mm);
+ local_flush_bp_all();
+
+#ifdef CONFIG_CPU_HAS_ASID
/*
- * We need to access to user-mode page tables here. For kernel threads
- * we don't have any user-mode mappings so we use the context that we
- * "borrowed".
+ * We don't have a clean ASID for the identity mapping, which
+ * may clash with virtual addresses of the previous page tables
+ * and therefore potentially in the TLB.
*/
- identity_mapping_add(current->active_mm->pgd, 0, TASK_SIZE);
local_flush_tlb_all();
+#endif
}
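
Finally, for context on the caller: in kernels of this vintage, setup_mm_for_reboot() is invoked from the soft-restart path (arch/arm/kernel/process.c) shortly before jumping to cpu_reset through its physical address. A stubbed, compile-and-run sketch of just that ordering; every helper here is a stand-in, not the kernel's API:

#include <stdio.h>

static void setup_mm_for_reboot(void) { puts("switch to idmap_pgd"); }
static void flush_cache_all(void)     { puts("clean/invalidate caches"); }
static void cpu_reset_at_phys(unsigned long entry)
{
	printf("MMU off, jump to %#lx\n", entry);
}

/* Mirrors the shape of the kernel's soft-restart sequence: flat mapping
 * first, then a cache flush, then the jump to the physical reset entry. */
static void soft_restart_model(unsigned long entry)
{
	setup_mm_for_reboot();	/* 1:1 mapping so the next steps stay mapped */
	flush_cache_all();	/* nothing dirty may linger once the MMU is off */
	cpu_reset_at_phys(entry);
}

int main(void)
{
	soft_restart_model(0x80008000);	/* hypothetical kernel entry point */
	return 0;
}
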