Diffstat (limited to 'arch/x86/mm/pgtable.c')
-rw-r--r--  arch/x86/mm/pgtable.c  50
1 file changed, 35 insertions(+), 15 deletions(-)
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index dfa537a03be..6fb6927f9e7 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -25,8 +25,12 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 	struct page *pte;
 
 	pte = alloc_pages(__userpte_alloc_gfp, 0);
-	if (pte)
-		pgtable_page_ctor(pte);
+	if (!pte)
+		return NULL;
+	if (!pgtable_page_ctor(pte)) {
+		__free_page(pte);
+		return NULL;
+	}
 	return pte;
 }
 
@@ -57,6 +61,7 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
 #if PAGETABLE_LEVELS > 2
 void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
 {
+	struct page *page = virt_to_page(pmd);
 	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
 	/*
 	 * NOTE! For PAE, any changes to the top page-directory-pointer-table
@@ -65,7 +70,8 @@ void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
 #ifdef CONFIG_X86_PAE
 	tlb->need_flush_all = 1;
 #endif
-	tlb_remove_page(tlb, virt_to_page(pmd));
+	pgtable_pmd_page_dtor(page);
+	tlb_remove_page(tlb, page);
 }
 
 #if PAGETABLE_LEVELS > 3
@@ -189,8 +195,10 @@ static void free_pmds(pmd_t *pmds[])
 	int i;
 
 	for(i = 0; i < PREALLOCATED_PMDS; i++)
-		if (pmds[i])
+		if (pmds[i]) {
+			pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
 			free_page((unsigned long)pmds[i]);
+		}
 }
 
 static int preallocate_pmds(pmd_t *pmds[])
@@ -200,8 +208,13 @@ static int preallocate_pmds(pmd_t *pmds[])
 
 	for(i = 0; i < PREALLOCATED_PMDS; i++) {
 		pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
-		if (pmd == NULL)
+		if (!pmd)
+			failed = true;
+		if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
+			free_page((unsigned long)pmd);
+			pmd = NULL;
 			failed = true;
+		}
 		pmds[i] = pmd;
 	}
 
@@ -386,13 +399,20 @@ int pmdp_test_and_clear_young(struct vm_area_struct *vma,
 int ptep_clear_flush_young(struct vm_area_struct *vma,
 			   unsigned long address, pte_t *ptep)
 {
-	int young;
-
-	young = ptep_test_and_clear_young(vma, address, ptep);
-	if (young)
-		flush_tlb_page(vma, address);
-
-	return young;
+	/*
+	 * On x86 CPUs, clearing the accessed bit without a TLB flush
+	 * doesn't cause data corruption. [ It could cause incorrect
+	 * page aging and the (mistaken) reclaim of hot pages, but the
+	 * chance of that should be relatively low. ]
+	 *
+	 * So as a performance optimization don't flush the TLB when
+	 * clearing the accessed bit, it will eventually be flushed by
+	 * a context switch or a VM operation anyway. [ In the rare
+	 * event of it not getting flushed for a long time the delay
+	 * shouldn't really matter because there's no real memory
+	 * pressure for swapout to react to. ]
+	 */
+	return ptep_test_and_clear_young(vma, address, ptep);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -436,9 +456,9 @@ void __init reserve_top_address(unsigned long reserve)
 {
 #ifdef CONFIG_X86_32
 	BUG_ON(fixmaps_set > 0);
-	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
-	       (int)-reserve);
-	__FIXADDR_TOP = -reserve - PAGE_SIZE;
+	__FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE;
+	printk(KERN_INFO "Reserving virtual address space above 0x%08lx (rounded to 0x%08lx)\n",
+	       -reserve, __FIXADDR_TOP + PAGE_SIZE);
 #endif
 }
 
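Note on the error-handling shape introduced above: pte_alloc_one() and preallocate_pmds() now treat the page-table constructor as a fallible second step, and unwind the raw allocation when it fails so the caller only ever sees a single NULL/failed result. The sketch below is a hypothetical userspace C analogue of that allocate-construct-or-unwind pattern, not kernel code; demo_page, demo_ctor and demo_alloc are made-up names used purely for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* Hypothetical stand-ins for a raw allocation plus a per-page constructor. */
struct demo_page {
	bool constructed;
};

/* Constructor step that may fail, analogous to pgtable_page_ctor(). */
static bool demo_ctor(struct demo_page *p)
{
	p->constructed = true;	/* pretend per-page bookkeeping was set up */
	return true;
}

/*
 * Allocate, then construct; if the constructor fails, release the
 * allocation and report failure as a single NULL return -- the same
 * shape as the pte_alloc_one() hunk above.
 */
static struct demo_page *demo_alloc(void)
{
	struct demo_page *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;
	if (!demo_ctor(p)) {
		free(p);	/* mirrors __free_page() on ctor failure */
		return NULL;
	}
	return p;
}

int main(void)
{
	struct demo_page *p = demo_alloc();

	if (!p) {
		fprintf(stderr, "allocation failed\n");
		return 1;
	}
	printf("constructed: %d\n", p->constructed);
	free(p);
	return 0;
}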
