Diffstat (limited to 'arch/tile/mm')
 arch/tile/mm/fault.c       |  2 --
 arch/tile/mm/homecache.c   |  2 +-
 arch/tile/mm/hugetlbpage.c |  5 -----
 arch/tile/mm/init.c        | 12 ++++--------
 arch/tile/mm/pgtable.c     |  9 ++++++---
 5 files changed, 11 insertions(+), 19 deletions(-)
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 4c288f19945..6c0571216a9 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -149,8 +149,6 @@ static inline int vmalloc_fault(pgd_t *pgd, unsigned long address)
 	pmd_k = vmalloc_sync_one(pgd, address);
 	if (!pmd_k)
 		return -1;
-	if (pmd_huge(*pmd_k))
-		return 0;   /* support TILE huge_vmap() API */
 	pte_k = pte_offset_kernel(pmd_k, address);
 	if (!pte_present(*pte_k))
 		return -1;
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index 004ba568d93..33294fdc402 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -417,7 +417,7 @@ void __homecache_free_pages(struct page *page, unsigned int order)
 	if (put_page_testzero(page)) {
 		homecache_change_page_home(page, order, PAGE_HOME_HASH);
 		if (order == 0) {
-			free_hot_cold_page(page, 0);
+			free_hot_cold_page(page, false);
 		} else {
 			init_page_count(page);
 			__free_pages(page, order);
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index 0cb3bbaa580..e514899e110 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -166,11 +166,6 @@ int pud_huge(pud_t pud)
 	return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
 }
 
-int pmd_huge_support(void)
-{
-	return 1;
-}
-
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 			     pmd_t *pmd, int write)
 {
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 4e316deb92f..bfb3127b4df 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -273,9 +273,9 @@ static pgprot_t __init init_pgprot(ulong address)
 	/*
 	 * Otherwise we just hand out consecutive cpus.  To avoid
 	 * requiring this function to hold state, we just walk forward from
-	 * _sdata by PAGE_SIZE, skipping the readonly and init data, to reach
-	 * the requested address, while walking cpu home around kdata_mask.
-	 * This is typically no more than a dozen or so iterations.
+	 * __end_rodata by PAGE_SIZE, skipping the readonly and init data, to
+	 * reach the requested address, while walking cpu home around
+	 * kdata_mask. This is typically no more than a dozen or so iterations.
 	 */
 	page = (((ulong)__end_rodata) + PAGE_SIZE - 1) & PAGE_MASK;
 	BUG_ON(address < page || address >= (ulong)_end);
@@ -828,10 +828,6 @@ void __init mem_init(void)
 	printk(KERN_DEBUG "  PKMAP   %#lx - %#lx\n",
 	       PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP) - 1);
 #endif
-#ifdef CONFIG_HUGEVMAP
-	printk(KERN_DEBUG "  HUGEMAP %#lx - %#lx\n",
-	       HUGE_VMAP_BASE, HUGE_VMAP_END - 1);
-#endif
 	printk(KERN_DEBUG "  VMALLOC %#lx - %#lx\n",
 	       _VMALLOC_START, _VMALLOC_END - 1);
 #ifdef __tilegx__
@@ -916,7 +912,7 @@ static long __write_once initfree = 1;
 static int __init set_initfree(char *str)
 {
 	long val;
-	if (strict_strtol(str, 0, &val) == 0) {
+	if (kstrtol(str, 0, &val) == 0) {
 		initfree = val;
 		pr_info("initfree: %s free init pages\n",
 			initfree ? "will" : "won't");
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 2deaddf3e01..5e86eac4bfa 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -127,8 +127,7 @@ void shatter_huge_page(unsigned long addr)
 	}
 
 	/* Shatter the huge page into the preallocated L2 page table. */
-	pmd_populate_kernel(&init_mm, pmd,
-			    get_prealloc_pte(pte_pfn(*(pte_t *)pmd)));
+	pmd_populate_kernel(&init_mm, pmd, get_prealloc_pte(pmd_pfn(*pmd)));
 
 #ifdef __PAGETABLE_PMD_FOLDED
 	/* Walk every pgd on the system and update the pmd there. */
@@ -242,6 +241,11 @@ struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
 	if (p == NULL)
 		return NULL;
 
+	if (!pgtable_page_ctor(p)) {
+		__free_pages(p, L2_USER_PGTABLE_ORDER);
+		return NULL;
+	}
+
 	/*
 	 * Make every page have a page_count() of one, not just the first.
 	 * We don't use __GFP_COMP since it doesn't look like it works
@@ -252,7 +256,6 @@ struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
 		inc_zone_page_state(p+i, NR_PAGETABLE);
 	}
 
-	pgtable_page_ctor(p);
 	return p;
 }
 
