Diffstat (limited to 'arch/powerpc/include/asm/pgtable.h')
-rw-r--r--	arch/powerpc/include/asm/pgtable.h	134
1 file changed, 134 insertions, 0 deletions
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 07f55e60169..eb17da78112 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -6,7 +6,17 @@
 #include <asm/processor.h>		/* For TASK_SIZE */
 #include <asm/mmu.h>
 #include <asm/page.h>
+
 struct mm_struct;
+
+#ifdef CONFIG_DEBUG_VM
+extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
+#else /* CONFIG_DEBUG_VM */
+static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
+{
+}
+#endif /* !CONFIG_DEBUG_VM */
+
 #endif /* !__ASSEMBLY__ */
 
 #if defined(CONFIG_PPC64)
@@ -17,6 +27,130 @@ struct mm_struct;
 
 #ifndef __ASSEMBLY__
 
+/* Generic accessors to PTE bits */
+static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
+static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
+static inline int pte_present(pte_t pte)	{ return pte_val(pte) & _PAGE_PRESENT; }
+static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
+static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
+
+/* Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ *
+ * Even if PTEs can be unsigned long long, a PFN is always an unsigned
+ * long for now.
+ */
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) {
+	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
+		     pgprot_val(pgprot)); }
+static inline unsigned long pte_pfn(pte_t pte)	{
+	return pte_val(pte) >> PTE_RPN_SHIFT; }
+
+/* Keep these as macros to avoid include dependency mess */
+#define pte_page(x)		pfn_to_page(pte_pfn(x))
+#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
+
+/* Generic modifiers for PTE bits */
+static inline pte_t pte_wrprotect(pte_t pte) {
+	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
+static inline pte_t pte_mkclean(pte_t pte) {
+	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
+static inline pte_t pte_mkold(pte_t pte) {
+	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkwrite(pte_t pte) {
+	pte_val(pte) |= _PAGE_RW; return pte; }
+static inline pte_t pte_mkdirty(pte_t pte) {
+	pte_val(pte) |= _PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkyoung(pte_t pte) {
+	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkspecial(pte_t pte) {
+	pte_val(pte) |= _PAGE_SPECIAL; return pte; }
+static inline pte_t pte_mkhuge(pte_t pte) {
+	return pte; }
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
+	return pte;
+}
+
+
+/* Insert a PTE: the top-level function is out of line. It uses an inline
+ * low-level function in the respective pgtable-* files.
+ */
+extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+		       pte_t pte);
+
+/* This low-level function performs the actual PTE insertion.
+ * Setting the PTE depends on the MMU type and other factors. It's
+ * a horrible mess that I'm not going to try to clean up now but
+ * I'm keeping it in one place rather than spread around.
+ */
+static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
+				pte_t *ptep, pte_t pte, int percpu)
+{
+#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
+	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
+	 * helper pte_update() which does an atomic update. We need to do that
+	 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
+	 * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
+	 * the hash bits instead (i.e. same as the non-SMP case).
+	 */
+	if (percpu)
+		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+			      | (pte_val(pte) & ~_PAGE_HASHPTE));
+	else
+		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
+
+#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
+	/* Second case is 32-bit with 64-bit PTE in SMP mode. In this case, we
+	 * can just store as long as we do the two halves in the right order
+	 * with a barrier in between. This is possible because we take care,
+	 * in the hash code, to pre-invalidate if the PTE was already hashed,
+	 * which synchronizes us with any concurrent invalidation.
+	 * In the percpu case, we also fall back to the simple update preserving
+	 * the hash bits.
+	 */
+	if (percpu) {
+		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+			      | (pte_val(pte) & ~_PAGE_HASHPTE));
+		return;
+	}
+#if _PAGE_HASHPTE != 0
+	if (pte_val(*ptep) & _PAGE_HASHPTE)
+		flush_hash_entry(mm, ptep, addr);
+#endif
+	__asm__ __volatile__("\
+		stw%U0%X0 %2,%0\n\
+		eieio\n\
+		stw%U0%X0 %L2,%1"
+	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
+	: "r" (pte) : "memory");
+
+#elif defined(CONFIG_PPC_STD_MMU_32)
+	/* Third case is 32-bit hash table in UP mode. We need to preserve
+	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
+	 * translation in the hash yet (done in a subsequent flush_tlb_xxx()),
+	 * so we need to keep track that this PTE needs invalidating.
+	 */
+	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+		      | (pte_val(pte) & ~_PAGE_HASHPTE));
+
+#else
+	/* Anything else just stores the PTE normally. That covers all 64-bit
+	 * cases, and 32-bit non-hash with 64-bit PTEs in UP mode.
+	 */
+	*ptep = pte;
+#endif
+}
+
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
+				 pte_t *ptep, pte_t entry, int dirty);
+
 /*
  * Macro to mark a page protection value as "uncacheable".
  */
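For readers new to the pte_t abstraction, the accessors and modifiers added above are just bit tests and bit flips on an opaque wrapped integer. The following standalone sketch (not part of the patch; the _PAGE_* values and PTE_RPN_SHIFT are made up for illustration and do not match the real powerpc layouts) shows the same round-trip and modifier style in plain C:

/* Standalone illustration, not part of the patch: a toy pte_t with
 * made-up bit values, showing the accessor/modifier style used above.
 */
#include <assert.h>

typedef unsigned long pte_basic_t;
typedef struct { pte_basic_t pte; } pte_t;

#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) })

#define _PAGE_PRESENT	0x001		/* illustrative value only */
#define _PAGE_RW	0x002		/* illustrative value only */
#define _PAGE_DIRTY	0x004		/* illustrative value only */
#define PTE_RPN_SHIFT	12		/* illustrative: flags in the low 12 bits */

static inline pte_t pfn_pte(unsigned long pfn, unsigned long prot)
{
	return __pte(((pte_basic_t)pfn << PTE_RPN_SHIFT) | prot);
}
static inline unsigned long pte_pfn(pte_t pte)	{ return pte_val(pte) >> PTE_RPN_SHIFT; }
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
static inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }

int main(void)
{
	pte_t pte = pfn_pte(0x1234, _PAGE_PRESENT | _PAGE_RW);

	assert(pte_pfn(pte) == 0x1234);		/* the PFN survives the round trip */
	assert(pte_write(pte));			/* protection bits are directly testable */
	pte = pte_mkdirty(pte);			/* modifiers take and return pte_t by value */
	assert(pte_val(pte) & _PAGE_DIRTY);
	return 0;
}

Because the modifiers take and return pte_t by value, callers can chain them, e.g. pte_mkyoung(pte_mkdirty(pte)).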
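All of the hash-MMU branches of __set_pte_at() rely on the same bit trick: keep the old entry's _PAGE_HASHPTE while taking every other bit from the new entry. Below is a minimal standalone sketch of just that arithmetic (not part of the patch; the _PAGE_HASHPTE value is illustrative). It deliberately omits what the real SMP path gets from pte_update(): an atomic read-modify-write, needed because a concurrent hash invalidation can clear _PAGE_HASHPTE between the read and the store.

/* Standalone illustration, not part of the patch: the _PAGE_HASHPTE-
 * preserving update common to the hash-MMU branches of __set_pte_at(),
 * reduced to plain C with an illustrative bit value.
 */
#include <assert.h>

typedef unsigned long pte_basic_t;
typedef struct { pte_basic_t pte; } pte_t;

#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) })
#define _PAGE_HASHPTE	0x400		/* illustrative value only */

/* Keep the old entry's hash-tracking bit, take everything else from new */
static void set_pte_preserving_hash(pte_t *ptep, pte_t pte)
{
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));
}

int main(void)
{
	pte_t old = __pte(0x123 | _PAGE_HASHPTE);	/* old PTE, already in the hash */
	pte_t new = __pte(0x321);			/* replacement translation */

	set_pte_preserving_hash(&old, new);
	/* _PAGE_HASHPTE survives, so a later flush_tlb_xxx() still knows the
	 * stale hash table entry must be invalidated. */
	assert(pte_val(old) == (0x321 | _PAGE_HASHPTE));
	return 0;
}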