Diffstat (limited to 'arch/mips/include/asm/pgtable-64.h')
-rw-r--r--   arch/mips/include/asm/pgtable-64.h | 37 ++++++++++++++++++++++++++-----------
1 file changed, 26 insertions(+), 11 deletions(-)
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index 26dc69d792a..e1c49a96807 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -9,6 +9,7 @@
 #ifndef _ASM_PGTABLE_64_H
 #define _ASM_PGTABLE_64_H
 
+#include <linux/compiler.h>
 #include <linux/linkage.h>
 
 #include <asm/addrspace.h>
@@ -113,16 +114,21 @@
 #endif
 #define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
 
-#if PGDIR_SIZE >= TASK_SIZE
-#define USER_PTRS_PER_PGD	(1)
+#if PGDIR_SIZE >= TASK_SIZE64
+#define USER_PTRS_PER_PGD       (1)
 #else
-#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
+#define USER_PTRS_PER_PGD	(TASK_SIZE64 / PGDIR_SIZE)
 #endif
 #define FIRST_USER_ADDRESS	0UL
 
-#define VMALLOC_START		MAP_BASE
+/*
+ * TLB refill handlers also map the vmalloc area into xuseg.  Avoid
+ * the first couple of pages so NULL pointer dereferences will still
+ * reliably trap.
+ */
+#define VMALLOC_START		(MAP_BASE + (2 * PAGE_SIZE))
 #define VMALLOC_END	\
-	(VMALLOC_START + \
+	(MAP_BASE + \
 	 min(PTRS_PER_PGD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \
 	     (1UL << cpu_vmbits)) - (1UL << 32))
 
@@ -157,7 +163,6 @@ typedef struct { unsigned long pmd; } pmd_t;
 
 extern pmd_t invalid_pmd_table[PTRS_PER_PMD];
-extern pmd_t empty_bad_pmd_table[PTRS_PER_PMD];
 #endif
 
 /*
@@ -168,7 +173,19 @@ static inline int pmd_none(pmd_t pmd)
 	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
 }
 
-#define pmd_bad(pmd)		(pmd_val(pmd) & ~PAGE_MASK)
+static inline int pmd_bad(pmd_t pmd)
+{
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+	/* pmd_huge(pmd) but inline */
+	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
+		return 0;
+#endif
+
+	if (unlikely(pmd_val(pmd) & ~PAGE_MASK))
+		return 1;
+
+	return 0;
+}
 
 static inline int pmd_present(pmd_t pmd)
 {
@@ -213,6 +230,7 @@ static inline void pud_clear(pud_t *pudp)
 #else
 #define pte_pfn(x)		((unsigned long)((x).pte >> _PFN_SHIFT))
 #define pfn_pte(pfn, prot)	__pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot)	__pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
 #endif
 
 #define __pgd_offset(address)	pgd_index(address)
@@ -252,10 +270,7 @@ static inline pmd_t *pmd_offset(pud_t * pud, unsigned long address)
 	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
 #define pte_offset_map(dir, address)	\
 	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_offset_map_nested(dir, address)	\
-	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
 #define pte_unmap(pte) ((void)(pte))
-#define pte_unmap_nested(pte) ((void)(pte))
 
 /*
  * Initialize a new pgd / pmd table with invalid pointers.
@@ -273,7 +288,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 #define __swp_type(x)		(((x).val >> 32) & 0xff)
 #define __swp_offset(x)		((x).val >> 40)
 #define __swp_entry(type, offset)	((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
-#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
+#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)	((pte_t) { (x).val })
 
 /*
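
A note on the VMALLOC_START hunk: because the TLB refill handlers resolve vmalloc addresses through the same path as xuseg, starting the vmalloc area at MAP_BASE itself would let its first pages shadow the addresses a NULL pointer dereference lands on. Reserving the first two pages keeps those accesses faulting. A standalone user-space sketch of the arithmetic follows; PAGE_SHIFT and the MAP_BASE value are illustrative assumptions, not the real MIPS configuration, and it should be built 64-bit:

/*
 * Sketch only, not kernel code.  PAGE_SHIFT and MAP_BASE are assumed
 * values for illustration.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define MAP_BASE	0xc000000000000000UL	/* assumed base address */

/* Skip two pages so offsets 0 .. 2*PAGE_SIZE-1 are never vmalloc'ed. */
#define VMALLOC_START	(MAP_BASE + (2 * PAGE_SIZE))

int main(void)
{
	unsigned long first_used = VMALLOC_START - MAP_BASE;

	/* A NULL dereference hits offset 0, or slightly above it for a
	 * field access through a NULL struct pointer; everything below
	 * first_used stays unmapped, so the access still traps. */
	printf("first vmalloc offset: 0x%lx (pages 0 and 1 unused)\n",
	       first_used);
	return 0;
}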
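The pmd_bad() rewrite is needed because a huge-page PMD is a leaf entry whose bits below PAGE_MASK hold page attributes rather than garbage, so the old pmd_val(pmd) & ~PAGE_MASK test would misreport it as corrupt. A minimal user-space sketch of the two behaviours; the _PAGE_HUGE bit position is an illustrative assumption, not the real MIPS layout:

/*
 * Sketch only, not kernel code.  _PAGE_HUGE is an assumed bit
 * position for illustration.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))
#define _PAGE_HUGE	(1UL << 4)	/* assumed bit position */

/* Old behaviour: any bit below PAGE_MASK marks the pmd as bad. */
static int pmd_bad_old(unsigned long pmd)
{
	return (pmd & ~PAGE_MASK) != 0;
}

/* New behaviour: a huge-page leaf pmd legitimately carries attribute
 * bits below PAGE_MASK, so it must not be reported as bad. */
static int pmd_bad_new(unsigned long pmd)
{
	if (pmd & _PAGE_HUGE)
		return 0;
	return (pmd & ~PAGE_MASK) != 0;
}

int main(void)
{
	unsigned long table_pmd = 0x12345000UL;	/* page-aligned table pointer */
	unsigned long huge_pmd = 0x40000000UL | _PAGE_HUGE;

	printf("page table pmd: old=%d new=%d\n",
	       pmd_bad_old(table_pmd), pmd_bad_new(table_pmd));
	printf("huge leaf pmd:  old=%d new=%d\n",
	       pmd_bad_old(huge_pmd), pmd_bad_new(huge_pmd));
	return 0;
}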
