Diffstat (limited to 'arch/x86/include/asm/pgtable.h')
-rw-r--r--	arch/x86/include/asm/pgtable.h	293
1 file changed, 279 insertions(+), 14 deletions(-)
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index ada823a13c7..0ec05601261 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -15,14 +15,16 @@
 	 : (prot))
 
 #ifndef __ASSEMBLY__
-
 #include <asm/x86_init.h>
 
+void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
+
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
  */
-extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
+	__visible;
 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 
 extern spinlock_t pgd_lock;
@@ -35,6 +37,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
 #else  /* !CONFIG_PARAVIRT */
 #define set_pte(ptep, pte)		native_set_pte(ptep, pte)
 #define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
+#define set_pmd_at(mm, addr, pmdp, pmd)	native_set_pmd_at(mm, addr, pmdp, pmd)
 
 #define set_pte_atomic(ptep, pte)					\
 	native_set_pte_atomic(ptep, pte)
@@ -59,6 +62,8 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
 
 #define pte_update(mm, addr, ptep)              do { } while (0)
 #define pte_update_defer(mm, addr, ptep)        do { } while (0)
+#define pmd_update(mm, addr, ptep)              do { } while (0)
+#define pmd_update_defer(mm, addr, ptep)        do { } while (0)
 
 #define pgd_val(x)	native_pgd_val(x)
 #define __pgd(x)	native_make_pgd(x)
@@ -94,6 +99,11 @@ static inline int pte_young(pte_t pte)
 	return pte_flags(pte) & _PAGE_ACCESSED;
 }
 
+static inline int pmd_young(pmd_t pmd)
+{
+	return pmd_flags(pmd) & _PAGE_ACCESSED;
+}
+
 static inline int pte_write(pte_t pte)
 {
 	return pte_flags(pte) & _PAGE_RW;
@@ -121,7 +131,8 @@ static inline int pte_exec(pte_t pte)
 
 static inline int pte_special(pte_t pte)
 {
-	return pte_flags(pte) & _PAGE_SPECIAL;
+	return (pte_flags(pte) & (_PAGE_PRESENT|_PAGE_SPECIAL)) ==
+				 (_PAGE_PRESENT|_PAGE_SPECIAL);
 }
 
 static inline unsigned long pte_pfn(pte_t pte)
@@ -134,13 +145,34 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
 	return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
 }
 
+static inline unsigned long pud_pfn(pud_t pud)
+{
+	return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
+}
+
 #define pte_page(pte)	pfn_to_page(pte_pfn(pte))
 
 static inline int pmd_large(pmd_t pte)
 {
-	return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
-		(_PAGE_PSE | _PAGE_PRESENT);
+	return pmd_flags(pte) & _PAGE_PSE;
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_trans_splitting(pmd_t pmd)
+{
+	return pmd_val(pmd) & _PAGE_SPLITTING;
+}
+
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+	return pmd_val(pmd) & _PAGE_PSE;
+}
+
+static inline int has_transparent_hugepage(void)
+{
+	return cpu_has_pse;
 }
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
 {
@@ -178,7 +210,7 @@ static inline pte_t pte_mkexec(pte_t pte)
 
 static inline pte_t pte_mkdirty(pte_t pte)
 {
-	return pte_set_flags(pte, _PAGE_DIRTY);
+	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
 }
 
 static inline pte_t pte_mkyoung(pte_t pte)
@@ -216,6 +248,93 @@ static inline pte_t pte_mkspecial(pte_t pte)
 	return pte_set_flags(pte, _PAGE_SPECIAL);
 }
 
+static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
+{
+	pmdval_t v = native_pmd_val(pmd);
+
+	return __pmd(v | set);
+}
+
+static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
+{
+	pmdval_t v = native_pmd_val(pmd);
+
+	return __pmd(v & ~clear);
+}
+
+static inline pmd_t pmd_mkold(pmd_t pmd)
+{
+	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
+}
+
+static inline pmd_t pmd_wrprotect(pmd_t pmd)
+{
+	return pmd_clear_flags(pmd, _PAGE_RW);
+}
+
+static inline pmd_t pmd_mkdirty(pmd_t pmd)
+{
+	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
+}
+
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+	return pmd_set_flags(pmd, _PAGE_PSE);
+}
+
+static inline pmd_t pmd_mkyoung(pmd_t pmd)
+{
+	return pmd_set_flags(pmd, _PAGE_ACCESSED);
+}
+
+static inline pmd_t pmd_mkwrite(pmd_t pmd)
+{
+	return pmd_set_flags(pmd, _PAGE_RW);
+}
+
+static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+{
+	return pmd_clear_flags(pmd, _PAGE_PRESENT);
+}
+
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+static inline int pte_soft_dirty(pte_t pte)
+{
+	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
+}
+
+static inline int pmd_soft_dirty(pmd_t pmd)
+{
+	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
+}
+
+static inline pte_t pte_mksoft_dirty(pte_t pte)
+{
+	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
+}
+
+static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
+{
+	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
+}
+
+static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
+{
+	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
+}
+
+static inline pte_t pte_file_mksoft_dirty(pte_t pte)
+{
+	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
+}
+
+static inline int pte_file_soft_dirty(pte_t pte)
+{
+	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
+}
+
+#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
+
 /*
  * Mask out unsupported bits in a present pgprot.  Non-present pgprots
  * can use those bits for other purposes, so leave them be.
@@ -256,6 +375,16 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 	return __pte(val);
 }
 
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+	pmdval_t val = pmd_val(pmd);
+
+	val &= _HPAGE_CHG_MASK;
+	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;
+
+	return __pmd(val);
+}
+
 /* mprotect needs to preserve PAT bits when updating vm_page_prot */
 #define pgprot_modify pgprot_modify
 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
@@ -300,13 +429,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
 #endif	/* __ASSEMBLY__ */
 
 #ifdef CONFIG_X86_32
-# include "pgtable_32.h"
+# include <asm/pgtable_32.h>
 #else
-# include "pgtable_64.h"
+# include <asm/pgtable_64.h>
 #endif
 
 #ifndef __ASSEMBLY__
 #include <linux/mm_types.h>
+#include <linux/mmdebug.h>
+#include <linux/log2.h>
 
 static inline int pte_none(pte_t pte)
 {
@@ -321,9 +452,29 @@ static inline int pte_same(pte_t a, pte_t b)
 
 static inline int pte_present(pte_t a)
 {
+	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
+			       _PAGE_NUMA);
+}
+
+#define pte_present_nonuma pte_present_nonuma
+static inline int pte_present_nonuma(pte_t a)
+{
 	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
 }
 
+#define pte_accessible pte_accessible
+static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
+{
+	if (pte_flags(a) & _PAGE_PRESENT)
+		return true;
+
+	if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) &&
+			mm_tlb_flush_pending(mm))
+		return true;
+
+	return false;
+}
+
 static inline int pte_hidden(pte_t pte)
 {
 	return pte_flags(pte) & _PAGE_HIDDEN;
@@ -331,7 +482,14 @@ static inline int pte_hidden(pte_t pte)
 
 static inline int pmd_present(pmd_t pmd)
 {
-	return pmd_flags(pmd) & _PAGE_PRESENT;
+	/*
+	 * Checking for _PAGE_PSE is needed too because
+	 * split_huge_page will temporarily clear the present bit (but
+	 * the _PAGE_PSE flag will remain set at all times while the
+	 * _PAGE_PRESENT bit is clear).
+	 */
+	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE |
+				 _PAGE_NUMA);
 }
 
 static inline int pmd_none(pmd_t pmd)
@@ -350,7 +508,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
  * Currently stuck as a macro due to indirect forward reference to
  * linux/mmzone.h's __section_mem_map_addr() definition:
  */
-#define pmd_page(pmd)	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
+#define pmd_page(pmd)	pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT)
 
 /*
  * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
@@ -390,6 +548,11 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
 
 static inline int pmd_bad(pmd_t pmd)
 {
+#ifdef CONFIG_NUMA_BALANCING
+	/* pmd_numa check */
+	if ((pmd_flags(pmd) & (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA)
+		return 0;
+#endif
 	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
 }
 
@@ -398,9 +561,6 @@ static inline unsigned long pages_to_mb(unsigned long npg)
 	return npg >> (20 - PAGE_SHIFT);
 }
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
-	remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 #if PAGETABLE_LEVELS > 2
 static inline int pud_none(pud_t pud)
 {
@@ -513,6 +673,8 @@ static inline int pgd_none(pgd_t pgd)
 #ifndef __ASSEMBLY__
 
 extern int direct_gbpages;
+void init_mem_mapping(void);
+void early_alloc_pgt_buf(void);
 
 /* local pte updates need not use xchg for locking */
 static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
@@ -524,12 +686,26 @@ static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
 	return res;
 }
 
+static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
+{
+	pmd_t res = *pmdp;
+
+	native_pmd_clear(pmdp);
+	return res;
+}
+
 static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
 				     pte_t *ptep , pte_t pte)
 {
 	native_set_pte(ptep, pte);
 }
 
+static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
+				     pmd_t *pmdp , pmd_t pmd)
+{
+	native_set_pmd(pmdp, pmd);
+}
+
 #ifndef CONFIG_PARAVIRT
 /*
  * Rules for using pte_update - it must be called after any PTE update which
@@ -605,7 +781,50 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
 	pte_update(mm, addr, ptep);
 }
 
-#define flush_tlb_fix_spurious_fault(vma, address)
+#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)
+
+#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))
+
+#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+extern int pmdp_set_access_flags(struct vm_area_struct *vma,
+				 unsigned long address, pmd_t *pmdp,
+				 pmd_t entry, int dirty);
+
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+				     unsigned long addr, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
+				  unsigned long address, pmd_t *pmdp);
+
+
+#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+extern void pmdp_splitting_flush(struct vm_area_struct *vma,
+				 unsigned long addr, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMD_WRITE
+static inline int pmd_write(pmd_t pmd)
+{
+	return pmd_flags(pmd) & _PAGE_RW;
+}
+
+#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
+				       pmd_t *pmdp)
+{
+	pmd_t pmd = native_pmdp_get_and_clear(pmdp);
+	pmd_update(mm, addr, pmdp);
+	return pmd;
+}
+
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+				      unsigned long addr, pmd_t *pmdp)
+{
+	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
+	pmd_update(mm, addr, pmdp);
+}
 
 /*
  * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
@@ -622,6 +841,52 @@ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
        memcpy(dst, src, count * sizeof(pgd_t));
 }
 
+#define PTE_SHIFT ilog2(PTRS_PER_PTE)
+static inline int page_level_shift(enum pg_level level)
+{
+	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
+}
+static inline unsigned long page_level_size(enum pg_level level)
+{
+	return 1UL << page_level_shift(level);
+}
+static inline unsigned long page_level_mask(enum pg_level level)
+{
+	return ~(page_level_size(level) - 1);
+}
+
+/*
+ * The x86 doesn't have any external MMU info: the kernel page
+ * tables contain all the necessary information.
+ */
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+		unsigned long addr, pte_t *ptep)
+{
+}
+static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
+		unsigned long addr, pmd_t *pmd)
+{
+}
+
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+	VM_BUG_ON(pte_present_nonuma(pte));
+	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
+}
+
+static inline int pte_swp_soft_dirty(pte_t pte)
+{
+	VM_BUG_ON(pte_present_nonuma(pte));
+	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+	VM_BUG_ON(pte_present_nonuma(pte));
+	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
+}
+#endif
+
 #include <asm-generic/pgtable.h>
 
 #endif	/* __ASSEMBLY__ */
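
Note: the _PAGE_SOFT_DIRTY plumbing in this diff (pte_mksoft_dirty(), pmd_mksoft_dirty() and the pte_swp_*soft_dirty() helpers) is the arch-side half of soft-dirty page tracking; the user-visible half is the procfs interface described in Documentation/vm/soft-dirty.txt. The sketch below is illustrative only and not part of this diff: a monitoring tool clears the bits by writing "4" to /proc/<pid>/clear_refs, then tests bit 55 of the matching /proc/<pid>/pagemap entry to see whether a page has been written since the clear.

/*
 * Illustrative userspace sketch of soft-dirty tracking, assuming the
 * procfs interface from Documentation/vm/soft-dirty.txt.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static int clear_soft_dirty(pid_t pid)
{
	char path[64];
	int fd;

	snprintf(path, sizeof(path), "/proc/%d/clear_refs", (int)pid);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	/* writing "4" resets the soft-dirty bits for the whole address space */
	write(fd, "4", 1);
	close(fd);
	return 0;
}

static int page_soft_dirty(pid_t pid, unsigned long vaddr)
{
	char path[64];
	uint64_t entry;
	int fd, ret = -1;

	snprintf(path, sizeof(path), "/proc/%d/pagemap", (int)pid);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	/* pagemap has one 64-bit entry per virtual page; bit 55 is soft-dirty */
	if (pread(fd, &entry, sizeof(entry),
		  (off_t)(vaddr / sysconf(_SC_PAGESIZE)) * sizeof(entry)) ==
	    sizeof(entry))
		ret = (int)((entry >> 55) & 1);
	close(fd);
	return ret;
}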

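Note: the page_level_shift()/page_level_size() helpers added near the end of the diff encode the x86 translation geometry: each level widens the mapping by a factor of PTRS_PER_PTE, so with 4 KiB base pages and 512-entry tables (PTE_SHIFT = ilog2(512) = 9) the levels span 4 KiB, 2 MiB and 1 GiB. Below is a self-contained sketch of the same arithmetic; the pg_level numbering mirrors the kernel's (PG_LEVEL_NONE first, so PG_LEVEL_4K == 1), and the constants are local stand-ins rather than the kernel's definitions.

/* Standalone sketch of the page_level_*() arithmetic from the diff above. */
#include <stdio.h>

enum pg_level { PG_LEVEL_NONE, PG_LEVEL_4K, PG_LEVEL_2M, PG_LEVEL_1G };

#define PAGE_SHIFT	12	/* 4 KiB base pages */
#define PTE_SHIFT	9	/* ilog2(512 entries per table) */

static int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}

static unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}

static unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}

int main(void)
{
	/* Prints 4096, 2097152, 1073741824: the 4K/2M/1G mapping sizes. */
	printf("%lu %lu %lu\n", page_level_size(PG_LEVEL_4K),
	       page_level_size(PG_LEVEL_2M), page_level_size(PG_LEVEL_1G));
	/* page_level_mask(PG_LEVEL_2M) == ~0x1fffffUL, the 2M alignment mask. */
	printf("%#lx\n", page_level_mask(PG_LEVEL_2M));
	return 0;
}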