Diffstat (limited to 'arch/arm/include/asm/pgtable.h')
 arch/arm/include/asm/pgtable.h | 103 ++++++++++++----------------
 1 file changed, 40 insertions(+), 63 deletions(-)
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 9b419abd133..5478e5d6ad8 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -11,20 +11,27 @@
#define _ASMARM_PGTABLE_H
#include <linux/const.h>
-#include <asm-generic/4level-fixup.h>
#include <asm/proc-fns.h>
#ifndef CONFIG_MMU
-#include "pgtable-nommu.h"
+#include <asm-generic/4level-fixup.h>
+#include <asm/pgtable-nommu.h>
#else
+#include <asm-generic/pgtable-nopud.h>
#include <asm/memory.h>
-#include <mach/vmalloc.h>
#include <asm/pgtable-hwdef.h>
+
+#include <asm/tlbflush.h>
+
+#ifdef CONFIG_ARM_LPAE
+#include <asm/pgtable-3level.h>
+#else
#include <asm/pgtable-2level.h>
+#endif
/*
* Just any arbitrary offset to the start of the vmalloc VM area: the
@@ -33,15 +40,10 @@
* any out-of-bounds memory accesses will hopefully be caught.
* The vmalloc() routines leave a hole of 4kB between each vmalloced
* area for the same reason. ;)
- *
- * Note that platforms may override VMALLOC_START, but they must provide
- * VMALLOC_END. VMALLOC_END defines the (exclusive) limit of this space,
- * which may not overlap IO space.
*/
-#ifndef VMALLOC_START
#define VMALLOC_OFFSET (8*1024*1024)
#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-#endif
+#define VMALLOC_END 0xff000000UL
#define LIBRARY_TEXT_START 0x0c000000
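
Editor's note: the rounding in VMALLOC_START is plain align-up arithmetic. A
minimal user-space sketch, where the high_memory value is an assumed example
rather than anything this header defines:

#include <stdio.h>

#define VMALLOC_OFFSET	(8*1024*1024)

int main(void)
{
	/* assumed end of the kernel's direct mapping, for illustration only */
	unsigned long high_memory = 0xef800000UL;

	/* round up to the next 8MiB boundary, leaving an unmapped guard gap */
	unsigned long vmalloc_start =
		(high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET - 1);

	printf("VMALLOC_START = %#lx\n", vmalloc_start);	/* 0xf0000000 */
	return 0;
}
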
@@ -59,7 +61,16 @@ extern void __pgd_error(const char *file, int line, pgd_t);
* mapping to be mapped at. This is particularly important for
* non-high vector CPUs.
*/
-#define FIRST_USER_ADDRESS PAGE_SIZE
+#define FIRST_USER_ADDRESS (PAGE_SIZE * 2)
+
+/*
+ * Use TASK_SIZE as the ceiling argument for free_pgtables() and
+ * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd
+ * page shared between user and kernel).
+ */
+#ifdef CONFIG_ARM_LPAE
+#define USER_PGTABLES_CEILING TASK_SIZE
+#endif
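
Editor's note: the pmd-page sharing this comment describes can be seen with a
little address arithmetic. A hedged sketch, assuming the usual ARM defaults of
PAGE_OFFSET 0xc0000000, a 16MiB module area just below it, and 1GiB pgd
entries under LPAE (none of these values are defined in this header):

#include <stdio.h>

#define PGDIR_SHIFT	30				/* LPAE: 1GiB per pgd entry */
#define PAGE_OFFSET	0xc0000000UL			/* assumed 3G/1G split */
#define TASK_SIZE	(PAGE_OFFSET - 0x01000000UL)	/* assumed 16MiB module gap */
#define MODULES_VADDR	TASK_SIZE

int main(void)
{
	/* the last user address and the module area share a pgd entry, so
	 * freeing user page tables past TASK_SIZE would also free the pmd
	 * page that maps modules -- hence the TASK_SIZE ceiling above */
	printf("pgd index of last user address: %lu\n",
	       (TASK_SIZE - 1) >> PGDIR_SHIFT);		/* 2 */
	printf("pgd index of MODULES_VADDR:     %lu\n",
	       MODULES_VADDR >> PGDIR_SHIFT);		/* 2 */
	return 0;
}
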
/*
* The pgprot_* and protection_map entries will be fixed up in runtime
@@ -71,10 +82,13 @@ extern void __pgd_error(const char *file, int line, pgd_t);
extern pgprot_t pgprot_user;
extern pgprot_t pgprot_kernel;
+extern pgprot_t pgprot_hyp_device;
+extern pgprot_t pgprot_s2;
+extern pgprot_t pgprot_s2_device;
#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))
-#define PAGE_NONE _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY)
+#define PAGE_NONE _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY | L_PTE_NONE)
#define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
@@ -83,8 +97,12 @@ extern pgprot_t pgprot_kernel;
#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC pgprot_kernel
+#define PAGE_HYP _MOD_PROT(pgprot_kernel, L_PTE_HYP)
+#define PAGE_HYP_DEVICE _MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
+#define PAGE_S2 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
+#define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_S2_RDWR)
-#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN)
+#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
#define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
@@ -163,39 +181,8 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
-/*
- * The "pgd_xxx()" functions here are trivial for a folded two-level
- * setup: the pgd is never bad, and a pmd always exists (as it's folded
- * into the pgd entry)
- */
-#define pgd_none(pgd) (0)
-#define pgd_bad(pgd) (0)
-#define pgd_present(pgd) (1)
-#define pgd_clear(pgdp) do { } while (0)
-#define set_pgd(pgd,pgdp) do { } while (0)
-#define set_pud(pud,pudp) do { } while (0)
-
-
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(dir, addr) ((pmd_t *)(dir))
-
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_present(pmd) (pmd_val(pmd))
-#define pmd_bad(pmd) (pmd_val(pmd) & 2)
-
-#define copy_pmd(pmdpd,pmdps) \
- do { \
- pmdpd[0] = pmdps[0]; \
- pmdpd[1] = pmdps[1]; \
- flush_pmd_entry(pmdpd); \
- } while (0)
-
-#define pmd_clear(pmdp) \
- do { \
- pmdp[0] = __pmd(0); \
- pmdp[1] = __pmd(0); \
- clean_pmd_entry(pmdp); \
- } while (0)
static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
@@ -204,10 +191,6 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
-/* we don't need complex calculations here as the pmd is folded into the pgd */
-#define pmd_addr_end(addr,end) (end)
-
-
#ifndef CONFIG_HIGHPTE
#define __pte_map(pmd) pmd_page_vaddr(*(pmd))
#define __pte_unmap(pte) do { } while (0)
@@ -229,20 +212,20 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot) pfn_pte(page_to_pfn(page), prot)
-#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0)
#define pte_none(pte) (!pte_val(pte))
#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)
+#define pte_valid(pte) (pte_val(pte) & L_PTE_VALID)
+#define pte_accessible(mm, pte) (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY))
#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN))
#define pte_special(pte) (0)
-#define pte_present_user(pte) \
- ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
- (L_PTE_PRESENT | L_PTE_USER))
+#define pte_valid_user(pte) \
+ (pte_valid(pte) && (pte_val(pte) & L_PTE_USER) && pte_young(pte))
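
Editor's note: to illustrate why pte_accessible() distinguishes the two
checks, here is a toy user-space model. The bit layout is invented for the
example (the real L_PTE_* values differ between the 2-level and 3-level
formats), and mm_tlb_flush_pending() stands in for the core-kernel helper of
the same name:

#include <stdbool.h>
#include <stdio.h>

/* invented layout: bit 0 = hardware-valid, bit 1 = Linux "present" marker */
#define PTE_VALID	 (1UL << 0)
#define PTE_PRESENT_MASK ((1UL << 0) | (1UL << 1))

struct mm { bool tlb_flush_pending; };

static bool mm_tlb_flush_pending(struct mm *mm)
{
	return mm->tlb_flush_pending;
}

static bool pte_accessible(struct mm *mm, unsigned long pte)
{
	/* while a flush is pending, a present-but-invalid (PROT_NONE) pte
	 * may still be live in the TLB, so it must count as accessible */
	if (mm_tlb_flush_pending(mm))
		return pte & PTE_PRESENT_MASK;
	return pte & PTE_VALID;
}

int main(void)
{
	struct mm mm = { .tlb_flush_pending = true };
	unsigned long prot_none_pte = 1UL << 1;		/* present, not valid */

	printf("%d\n", pte_accessible(&mm, prot_none_pte));	/* 1 */
	mm.tlb_flush_pending = false;
	printf("%d\n", pte_accessible(&mm, prot_none_pte));	/* 0 */
	return 0;
}
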
#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
@@ -257,7 +240,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
{
unsigned long ext = 0;
- if (addr < TASK_SIZE && pte_present_user(pteval)) {
+ if (addr < TASK_SIZE && pte_valid_user(pteval)) {
__sync_icache_dcache(pteval);
ext |= PTE_EXT_NG;
}
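
Editor's note: a short model of the decision above, with made-up bit values.
PTE_EXT_NG is the hardware not-global bit; user mappings get it so their TLB
entries are tagged per-ASID instead of being global:

#include <stdio.h>

/* made-up values for illustration; the real hardware PTE bits live in the
 * pgtable-*-hwdef.h headers */
#define PTE_EXT_NG	(1UL << 11)
#define TASK_SIZE	0xbf000000UL

static unsigned long pte_ext_for(unsigned long addr, int valid_user)
{
	unsigned long ext = 0;

	/* only valid, young user mappings below TASK_SIZE are non-global */
	if (addr < TASK_SIZE && valid_user)
		ext |= PTE_EXT_NG;
	return ext;
}

int main(void)
{
	printf("user   : %#lx\n", pte_ext_for(0x00400000UL, 1));	/* 0x800 */
	printf("kernel : %#lx\n", pte_ext_for(0xc0100000UL, 1));	/* 0 */
	return 0;
}
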
@@ -274,12 +257,15 @@ PTE_BIT_FUNC(mkclean, &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG);
+PTE_BIT_FUNC(mkexec, &= ~L_PTE_XN);
+PTE_BIT_FUNC(mknexec, |= L_PTE_XN);
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
- const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER;
+ const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
+ L_PTE_NONE | L_PTE_VALID;
pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
return pte;
}
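
Editor's note: a worked example of the masking in pte_modify(), using
illustrative bit numbers (as in the 2-level format). The point is that only
the protection bits listed in the mask are taken from the new pgprot, while
the pfn and the remaining attributes survive:

#include <stdio.h>

/* illustrative Linux pte bits */
#define L_PTE_VALID	(1UL << 0)
#define L_PTE_RDONLY	(1UL << 7)
#define L_PTE_USER	(1UL << 8)
#define L_PTE_XN	(1UL << 9)
#define L_PTE_NONE	(1UL << 11)

int main(void)
{
	const unsigned long mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
				   L_PTE_NONE | L_PTE_VALID;
	/* a writable, executable user pte backed by pfn 0x12345 */
	unsigned long pte = 0x12345000UL | L_PTE_VALID | L_PTE_USER;
	/* mprotect(PROT_READ)-style target: read-only, non-executable */
	unsigned long newprot = L_PTE_VALID | L_PTE_USER |
				L_PTE_RDONLY | L_PTE_XN;

	pte = (pte & ~mask) | (newprot & mask);
	printf("pte = %#lx\n", pte);	/* pfn bits 0x12345000 unchanged */
	return 0;
}
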
@@ -338,19 +324,10 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
* We provide our own arch_get_unmapped_area to cope with VIPT caches.
*/
#define HAVE_ARCH_UNMAPPED_AREA
-
-/*
- * remap a physical page `pfn' of size `size' with page protection `prot'
- * into virtual address `from'
- */
-#define io_remap_pfn_range(vma,from,pfn,size,prot) \
- remap_pfn_range(vma, from, pfn, size, prot)
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#define pgtable_cache_init() do { } while (0)
-void identity_mapping_add(pgd_t *, unsigned long, unsigned long);
-void identity_mapping_del(pgd_t *, unsigned long, unsigned long);
-
#endif /* !__ASSEMBLY__ */
#endif /* CONFIG_MMU */