Diffstat (limited to 'arch/powerpc/mm/pgtable_64.c')
-rw-r--r--	arch/powerpc/mm/pgtable_64.c	39
1 file changed, 29 insertions, 10 deletions
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 536eec72c0f..f6ce1f111f5 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -33,7 +33,6 @@
 #include <linux/swap.h>
 #include <linux/stddef.h>
 #include <linux/vmalloc.h>
-#include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/memblock.h>
 #include <linux/slab.h>
@@ -153,6 +152,18 @@ int map_kernel_page(unsigned long ea, unsigned long pa, int flags)
 		}
 #endif /* !CONFIG_PPC_MMU_NOHASH */
 	}
+
+#ifdef CONFIG_PPC_BOOK3E_64
+	/*
+	 * With hardware tablewalk, a sync is needed to ensure that
+	 * subsequent accesses see the PTE we just wrote.  Unlike userspace
+	 * mappings, we can't tolerate spurious faults, so make sure
+	 * the new PTE will be seen the first time.
+	 */
+	mb();
+#else
+	smp_wmb();
+#endif
 	return 0;
 }
 
@@ -378,6 +389,10 @@ static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
 				       __GFP_REPEAT | __GFP_ZERO);
 	if (!page)
 		return NULL;
+	if (!kernel && !pgtable_page_ctor(page)) {
+		__free_page(page);
+		return NULL;
+	}
 
 	ret = page_address(page);
 	spin_lock(&mm->page_table_lock);
@@ -392,9 +407,6 @@ static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
 	}
 	spin_unlock(&mm->page_table_lock);
 
-	if (!kernel)
-		pgtable_page_ctor(page);
-
 	return (pte_t *)ret;
 }
 
@@ -498,7 +510,8 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
 }
 
 unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
-				  pmd_t *pmdp, unsigned long clr)
+				  pmd_t *pmdp, unsigned long clr,
+				  unsigned long set)
 {
 	unsigned long old, tmp;
 
@@ -514,14 +527,15 @@ unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
 		andi.	%1,%0,%6\n\
 		bne-	1b \n\
 		andc	%1,%0,%4 \n\
+		or	%1,%1,%7\n\
 		stdcx.	%1,0,%3 \n\
 		bne-	1b"
 	: "=&r" (old), "=&r" (tmp), "=m" (*pmdp)
-	: "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY)
+	: "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY), "r" (set)
 	: "cc" );
 #else
 	old = pmd_val(*pmdp);
-	*pmdp = __pmd(old & ~clr);
+	*pmdp = __pmd((old & ~clr) | set);
 #endif
 	if (old & _PAGE_HASHPTE)
 		hpte_do_hugepage_flush(mm, addr, pmdp);
@@ -633,6 +647,11 @@ void pmdp_splitting_flush(struct vm_area_struct *vma,
 		if (old & _PAGE_HASHPTE)
 			hpte_do_hugepage_flush(vma->vm_mm, address, pmdp);
 	}
+	/*
+	 * This ensures that generic code that rely on IRQ disabling
+	 * to prevent a parallel THP split work as expected.
+	 */
+	kick_all_cpus_sync();
 }
 
 /*
@@ -686,7 +705,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 		pmd_t *pmdp, pmd_t pmd)
 {
 #ifdef CONFIG_DEBUG_VM
-	WARN_ON(!pmd_none(*pmdp));
+	WARN_ON(pmd_val(*pmdp) & _PAGE_PRESENT);
 	assert_spin_locked(&mm->page_table_lock);
 	WARN_ON(!pmd_trans_huge(pmd));
 #endif
@@ -696,7 +715,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 		     pmd_t *pmdp)
 {
-	pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT);
+	pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
 }
 
 /*
@@ -823,7 +842,7 @@ pmd_t pmdp_get_and_clear(struct mm_struct *mm,
 	unsigned long old;
 	pgtable_t *pgtable_slot;
 
-	old = pmd_hugepage_update(mm, addr, pmdp, ~0UL);
+	old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
 	old_pmd = __pmd(old);
 	/*
 	 * We have pmd == none and we are holding page_table_lock.
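
A note on the pmd_hugepage_update() change above: the function grows a 'set'
mask next to the existing 'clr' mask, and both the ldarx/stdcx. path and the
plain #else path now compute (old & ~clr) | set, still returning the old value
so callers can test bits such as _PAGE_HASHPTE. Below is a minimal stand-alone
C sketch of that read-modify-write rule; the X_PAGE_* values are invented for
illustration and are not the kernel's real _PAGE_* definitions:

#include <stdio.h>

/* Illustrative flag values only -- not the kernel's real _PAGE_* bits. */
#define X_PAGE_PRESENT	0x001UL
#define X_PAGE_DIRTY	0x080UL

/*
 * Sketch of the new update rule on the non-atomic (#else) path:
 * clear the bits in 'clr', then set the bits in 'set', and return
 * the old value so the caller can inspect bits like _PAGE_HASHPTE.
 */
static unsigned long sketch_hugepage_update(unsigned long *pmdp,
					    unsigned long clr,
					    unsigned long set)
{
	unsigned long old = *pmdp;

	*pmdp = (old & ~clr) | set;
	return old;
}

int main(void)
{
	unsigned long pmd = X_PAGE_PRESENT | X_PAGE_DIRTY;

	/* pmdp_invalidate() now reads: pmd_hugepage_update(..., _PAGE_PRESENT, 0) */
	sketch_hugepage_update(&pmd, X_PAGE_PRESENT, 0);
	printf("after invalidate: %#lx\n", pmd);	/* dirty bit survives */

	/* a caller can also clear and set bits in a single update */
	sketch_hugepage_update(&pmd, X_PAGE_DIRTY, X_PAGE_PRESENT);
	printf("after clr+set:    %#lx\n", pmd);
	return 0;
}

On the SMP path the same rule runs inside the ldarx/stdcx. reservation loop,
which spins while _PAGE_BUSY is set; the added "or %1,%1,%7" instruction and
the new "r" (set) input operand are what fold the set mask into that loop.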
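The two __alloc_for_cache() hunks exist because pgtable_page_ctor() can fail
once split page-table locks may have to allocate a spinlock, so the constructor
must run before the page is published and its failure must be unwound; the old
call site, after the unlock, could neither report nor recover from an error.
Here is a hedged stand-alone sketch of that allocate/construct/publish
ordering, using hypothetical stub_* names in place of the kernel's
alloc_pages()/pgtable_page_ctor()/__free_page():

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the kernel's struct page and page allocator. */
struct page { void *ptl; };

static struct page *stub_alloc_page(void)
{
	return calloc(1, sizeof(struct page));
}

static void stub_free_page(struct page *page)
{
	free(page->ptl);
	free(page);
}

/* Like pgtable_page_ctor() with split ptlocks: may fail to get the lock. */
static bool stub_page_ctor(struct page *page)
{
	page->ptl = malloc(sizeof(long));
	return page->ptl != NULL;
}

/* Mirrors the fixed ordering in __alloc_for_cache(): construct, then publish. */
static struct page *alloc_pte_page(bool kernel)
{
	struct page *page = stub_alloc_page();

	if (!page)
		return NULL;
	if (!kernel && !stub_page_ctor(page)) {
		/* ctor failed; nothing has seen the page yet, so just free it */
		stub_free_page(page);
		return NULL;
	}
	/* ...only now publish the page (in the kernel: under page_table_lock) */
	return page;
}

int main(void)
{
	struct page *page = alloc_pte_page(false);

	printf("user page-table page: %s\n", page ? "allocated" : "failed");
	if (page)
		stub_free_page(page);
	return 0;
}

The point of the ordering is that when the constructor fails no other CPU or
list holds a reference to the page yet, so it can simply be freed.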
