Diffstat (limited to 'arch/powerpc/mm/pgtable.c')
-rw-r--r--  arch/powerpc/mm/pgtable.c | 137
1 file changed, 14 insertions(+), 123 deletions(-)
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 2c7e801ab20..c695943a513 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -24,119 +24,13 @@
 #include <linux/kernel.h>
 #include <linux/gfp.h>
 #include <linux/mm.h>
-#include <linux/init.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
+#include <linux/hugetlb.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
-#include "mmu_decl.h"
-
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
-#ifdef CONFIG_SMP
-
-/*
- * Handle batching of page table freeing on SMP. Page tables are
- * queued up and send to be freed later by RCU in order to avoid
- * freeing a page table page that is being walked without locks
- */
-
-static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
-static unsigned long pte_freelist_forced_free;
-
-struct pte_freelist_batch
-{
-	struct rcu_head	rcu;
-	unsigned int	index;
-	unsigned long	tables[0];
-};
-
-#define PTE_FREELIST_SIZE \
-	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
-	  / sizeof(unsigned long))
-
-static void pte_free_smp_sync(void *arg)
-{
-	/* Do nothing, just ensure we sync with all CPUs */
-}
-
-/* This is only called when we are critically out of memory
- * (and fail to get a page in pte_free_tlb).
- */
-static void pgtable_free_now(void *table, unsigned shift)
-{
-	pte_freelist_forced_free++;
-
-	smp_call_function(pte_free_smp_sync, NULL, 1);
-
-	pgtable_free(table, shift);
-}
-
-static void pte_free_rcu_callback(struct rcu_head *head)
-{
-	struct pte_freelist_batch *batch =
-		container_of(head, struct pte_freelist_batch, rcu);
-	unsigned int i;
-
-	for (i = 0; i < batch->index; i++) {
-		void *table = (void *)(batch->tables[i] & ~MAX_PGTABLE_INDEX_SIZE);
-		unsigned shift = batch->tables[i] & MAX_PGTABLE_INDEX_SIZE;
-
-		pgtable_free(table, shift);
-	}
-
-	free_page((unsigned long)batch);
-}
-
-static void pte_free_submit(struct pte_freelist_batch *batch)
-{
-	call_rcu(&batch->rcu, pte_free_rcu_callback);
-}
-
-void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
-{
-	/* This is safe since tlb_gather_mmu has disabled preemption */
-	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
-	unsigned long pgf;
-
-	if (atomic_read(&tlb->mm->mm_users) < 2 ||
-	    cpumask_equal(mm_cpumask(tlb->mm), cpumask_of(smp_processor_id()))){
-		pgtable_free(table, shift);
-		return;
-	}
-
-	if (*batchp == NULL) {
-		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
-		if (*batchp == NULL) {
-			pgtable_free_now(table, shift);
-			return;
-		}
-		(*batchp)->index = 0;
-	}
-	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
-	pgf = (unsigned long)table | shift;
-	(*batchp)->tables[(*batchp)->index++] = pgf;
-	if ((*batchp)->index == PTE_FREELIST_SIZE) {
-		pte_free_submit(*batchp);
-		*batchp = NULL;
-	}
-}
-
-void pte_free_finish(void)
-{
-	/* This is safe since tlb_gather_mmu has disabled preemption */
-	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
-
-	if (*batchp == NULL)
-		return;
-	pte_free_submit(*batchp);
-	*batchp = NULL;
-}
-
-#endif /* CONFIG_SMP */
-
 static inline int is_exec_fault(void)
 {
 	return current->thread.regs && TRAP(current->thread.regs) == 0x400;
@@ -175,7 +69,7 @@ struct page * maybe_pte_to_page(pte_t pte)
  * support falls into the same category.
  */
-static pte_t set_pte_filter(pte_t pte, unsigned long addr)
+static pte_t set_pte_filter(pte_t pte)
 {
 	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
 	if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
@@ -184,17 +78,6 @@ static pte_t set_pte_filter(pte_t pte, unsigned long addr)
 		if (!pg)
 			return pte;
 		if (!test_bit(PG_arch_1, &pg->flags)) {
-#ifdef CONFIG_8xx
-			/* On 8xx, cache control instructions (particularly
-			 * "dcbst" from flush_dcache_icache) fault as write
-			 * operation if there is an unpopulated TLB entry
-			 * for the address in question. To workaround that,
-			 * we invalidate the TLB here, thus avoiding dcbst
-			 * misbehaviour.
-			 */
-			/* 8xx doesn't care about PID, size or ind args */
-			_tlbil_va(addr, 0, 0, 0);
-#endif /* CONFIG_8xx */
 			flush_dcache_icache_page(pg);
 			set_bit(PG_arch_1, &pg->flags);
 		}
@@ -214,7 +97,7 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
  * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so
  * instead we "filter out" the exec permission for non clean pages.
  */
-static pte_t set_pte_filter(pte_t pte, unsigned long addr)
+static pte_t set_pte_filter(pte_t pte)
 {
 	struct page *pg;
@@ -290,13 +173,13 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
 		pte_t pte)
 {
 #ifdef CONFIG_DEBUG_VM
-	WARN_ON(pte_present(*ptep));
+	WARN_ON(pte_val(*ptep) & _PAGE_PRESENT);
 #endif
 	/* Note: mm->context.id might not yet have been assigned as
 	 * this context might not have been activated yet when this
 	 * is called.
 	 */
-	pte = set_pte_filter(pte, addr);
+	pte = set_pte_filter(pte);
 
 	/* Perform the setting of the PTE */
 	__set_pte_at(mm, addr, ptep, pte, 0);
@@ -316,7 +199,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
 	entry = set_access_flags_filter(entry, vma, dirty);
 	changed = !pte_same(*(ptep), entry);
 	if (changed) {
-		if (!(vma->vm_flags & VM_HUGETLB))
+		if (!is_vm_hugetlb_page(vma))
 			assert_pte_locked(vma->vm_mm, address);
 		__ptep_set_access_flags(ptep, entry);
 		flush_tlb_page_nohash(vma, address);
@@ -338,6 +221,14 @@ void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
 	pud = pud_offset(pgd, addr);
 	BUG_ON(pud_none(*pud));
 	pmd = pmd_offset(pud, addr);
+	/*
+	 * khugepaged to collapse normal pages to hugepage, first set
+	 * pmd to none to force page fault/gup to take mmap_sem. After
+	 * pmd is set to none, we do a pte_clear which does this assertion
+	 * so if we find pmd none, return.
+	 */
+	if (pmd_none(*pmd))
+		return;
 	BUG_ON(!pmd_present(*pmd));
 	assert_spin_locked(pte_lockptr(mm, pmd));
 }
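Most of the 123 deleted lines are powerpc's private pte_freelist batching, removed here in favour of the kernel's generic mmu_gather/RCU table freeing: page tables freed during unmap were queued in a per-CPU batch and released via call_rcu(), so a CPU walking page tables without locks could never step on a just-freed table. Each batch slot packs the table pointer together with its index-size "shift" into one unsigned long, which works because page tables are page aligned, leaving the low bits free. Below is a minimal user-space sketch of that packing and batch-flush pattern, not the kernel code: batch_add(), batch_flush() and INDEX_SIZE_MASK are invented names, pgtable_free() is stubbed with free(), and the RCU grace period is replaced by a synchronous flush.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Low-bits mask for the packed index size; plays the role of
 * MAX_PGTABLE_INDEX_SIZE in the removed kernel code above. */
#define INDEX_SIZE_MASK 0xfUL

struct batch {
	unsigned int  index;
	unsigned long tables[32];	/* PTE_FREELIST_SIZE stand-in */
};

/* Stub for the kernel's pgtable_free(), which returns the table to a
 * kmem cache selected by "shift"; here we just free the allocation. */
static void pgtable_free(void *table, unsigned int shift)
{
	printf("freeing table %p, index size %u\n", table, shift);
	free(table);
}

/* In the kernel this body ran as the RCU callback, i.e. only after
 * every CPU that might be walking page tables without locks had
 * passed a quiescent state. The sketch flushes synchronously. */
static void batch_flush(struct batch *b)
{
	for (unsigned int i = 0; i < b->index; i++) {
		void *table = (void *)(b->tables[i] & ~INDEX_SIZE_MASK);
		unsigned int shift = b->tables[i] & INDEX_SIZE_MASK;

		pgtable_free(table, shift);
	}
	b->index = 0;
}

static void batch_add(struct batch *b, void *table, unsigned int shift)
{
	/* Page-aligned pointer => the low bits are free for the shift. */
	assert(shift <= INDEX_SIZE_MASK);
	assert(((unsigned long)table & INDEX_SIZE_MASK) == 0);

	b->tables[b->index++] = (unsigned long)table | shift;
	if (b->index == sizeof(b->tables) / sizeof(b->tables[0]))
		batch_flush(b);
}

int main(void)
{
	struct batch b = { .index = 0 };

	batch_add(&b, aligned_alloc(4096, 4096), 9);
	batch_add(&b, aligned_alloc(4096, 4096), 0);
	batch_flush(&b);
	return 0;
}

The kernel's out-of-memory slow path, pgtable_free_now(), which synchronized with all CPUs via smp_call_function() when the GFP_ATOMIC batch allocation failed, is deliberately omitted from the sketch.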
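The two set_pte_filter() hunks drop the unsigned long addr parameter because its only user was the CONFIG_8xx workaround deleted above, which pre-populated the TLB entry so that "dcbst" in flush_dcache_icache_page() would not fault. What the first variant keeps is a lazy icache-flush scheme: on CPUs without a coherent icache, a page is flushed the first time it is mapped and that fact is recorded in the PG_arch_1 page flag, so later mappings skip the flush. The following rough user-space model of that scheme uses invented flag and type names and a stubbed flush; it assumes nothing beyond the diff above.

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-ins: the real PTE bits are MMU-specific. */
#define PTE_PRESENT 0x1UL
#define PTE_EXEC    0x2UL

struct page {
	bool icache_clean;	/* models the PG_arch_1 bit */
};

/* Stub for flush_dcache_icache_page(), which on real hardware writes
 * back the dcache and invalidates the icache for one page. */
static void flush_dcache_icache_page(struct page *pg)
{
	printf("flushing caches for page %p\n", (void *)pg);
}

/* Model of set_pte_filter(): before mapping a normal page on a CPU
 * without a coherent icache, make sure the icache is clean for it,
 * and remember that so the flush happens at most once per page. */
static unsigned long set_pte_filter(unsigned long pte, struct page *pg)
{
	if (!(pte & PTE_PRESENT) || !pg)
		return pte;
	if (!pg->icache_clean) {
		flush_dcache_icache_page(pg);
		pg->icache_clean = true;
	}
	return pte;
}

int main(void)
{
	struct page pg = { .icache_clean = false };
	unsigned long pte = PTE_PRESENT | PTE_EXEC;

	pte = set_pte_filter(pte, &pg);	/* first mapping: flushes */
	pte = set_pte_filter(pte, &pg);	/* second mapping: no flush */
	return 0;
}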
