Diffstat (limited to 'arch/tile/mm')
-rw-r--r--  arch/tile/mm/fault.c     | 15
-rw-r--r--  arch/tile/mm/homecache.c |  2
-rw-r--r--  arch/tile/mm/init.c      | 12
-rw-r--r--  arch/tile/mm/pgtable.c   |  9
4 files changed, 16 insertions(+), 22 deletions(-)
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 111d5a9b76f..6c0571216a9 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -149,8 +149,6 @@ static inline int vmalloc_fault(pgd_t *pgd, unsigned long address)
pmd_k = vmalloc_sync_one(pgd, address);
if (!pmd_k)
return -1;
- if (pmd_huge(*pmd_k))
- return 0; /* support TILE huge_vmap() API */
pte_k = pte_offset_kernel(pmd_k, address);
if (!pte_present(*pte_k))
return -1;
@@ -280,8 +278,7 @@ static int handle_page_fault(struct pt_regs *regs,
if (!is_page_fault)
write = 1;
- flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
- (write ? FAULT_FLAG_WRITE : 0));
+ flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
is_kernel_mode = !user_mode(regs);
@@ -365,6 +362,9 @@ static int handle_page_fault(struct pt_regs *regs,
goto bad_area_nosemaphore;
}
+ if (!is_kernel_mode)
+ flags |= FAULT_FLAG_USER;
+
/*
* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
@@ -425,12 +425,12 @@ good_area:
#endif
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
+ flags |= FAULT_FLAG_WRITE;
} else {
if (!is_page_fault || !(vma->vm_flags & VM_READ))
goto bad_area;
}
- survive:
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
@@ -555,11 +555,6 @@ no_context:
*/
out_of_memory:
up_read(&mm->mmap_sem);
- if (is_global_init(tsk)) {
- yield();
- down_read(&mm->mmap_sem);
- goto survive;
- }
if (is_kernel_mode)
goto no_context;
pagefault_out_of_memory();
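
Taken together, the fault.c hunks above defer the FAULT_FLAG_WRITE decision until after the VM_WRITE check, add FAULT_FLAG_USER for faults taken from user context, and drop the old init-task retry loop in the out-of-memory path. A minimal sketch of how the flags end up being assembled; the helper name tile_fault_flags() is invented for illustration and is not part of the patch:

#include <linux/mm.h>

/* Hypothetical helper: shows the order in which the fault flags are built. */
static unsigned int tile_fault_flags(int is_kernel_mode, int write)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	if (!is_kernel_mode)
		flags |= FAULT_FLAG_USER;	/* fault raised from user context */
	if (write)
		flags |= FAULT_FLAG_WRITE;	/* only once VM_WRITE is verified */

	return flags;
}
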
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index 004ba568d93..33294fdc402 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -417,7 +417,7 @@ void __homecache_free_pages(struct page *page, unsigned int order)
if (put_page_testzero(page)) {
homecache_change_page_home(page, order, PAGE_HOME_HASH);
if (order == 0) {
- free_hot_cold_page(page, 0);
+ free_hot_cold_page(page, false);
} else {
init_page_count(page);
__free_pages(page, order);
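
The homecache.c change is purely an API adjustment: free_hot_cold_page() now takes a bool for its "cold" argument, so the hot-page free is spelled false instead of 0. A sketch of the same order-0 versus higher-order split, assuming the existing mm helpers; the function name is made up for illustration:

#include <linux/mm.h>
#include <linux/gfp.h>

/* Sketch only: mirrors the free path above for an already-homed page. */
static void example_free_homed_page(struct page *page, unsigned int order)
{
	if (order == 0) {
		free_hot_cold_page(page, false);	/* bool: not a cold page */
	} else {
		init_page_count(page);
		__free_pages(page, order);
	}
}
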
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 4e316deb92f..bfb3127b4df 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -273,9 +273,9 @@ static pgprot_t __init init_pgprot(ulong address)
/*
* Otherwise we just hand out consecutive cpus. To avoid
* requiring this function to hold state, we just walk forward from
- * _sdata by PAGE_SIZE, skipping the readonly and init data, to reach
- * the requested address, while walking cpu home around kdata_mask.
- * This is typically no more than a dozen or so iterations.
+ * __end_rodata by PAGE_SIZE, skipping the readonly and init data, to
+ * reach the requested address, while walking cpu home around
+ * kdata_mask. This is typically no more than a dozen or so iterations.
*/
page = (((ulong)__end_rodata) + PAGE_SIZE - 1) & PAGE_MASK;
BUG_ON(address < page || address >= (ulong)_end);
@@ -828,10 +828,6 @@ void __init mem_init(void)
printk(KERN_DEBUG " PKMAP %#lx - %#lx\n",
PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP) - 1);
#endif
-#ifdef CONFIG_HUGEVMAP
- printk(KERN_DEBUG " HUGEMAP %#lx - %#lx\n",
- HUGE_VMAP_BASE, HUGE_VMAP_END - 1);
-#endif
printk(KERN_DEBUG " VMALLOC %#lx - %#lx\n",
_VMALLOC_START, _VMALLOC_END - 1);
#ifdef __tilegx__
@@ -916,7 +912,7 @@ static long __write_once initfree = 1;
static int __init set_initfree(char *str)
{
long val;
- if (strict_strtol(str, 0, &val) == 0) {
+ if (kstrtol(str, 0, &val) == 0) {
initfree = val;
pr_info("initfree: %s free init pages\n",
initfree ? "will" : "won't");
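
The last init.c hunk swaps the deprecated strict_strtol() for kstrtol(); both return 0 on success, so only the name changes at the call site. A minimal sketch of a boot-time parameter parsed this way, with the name set_example()/example= invented for illustration:

#include <linux/kernel.h>
#include <linux/init.h>

static long example_val = 1;

static int __init set_example(char *str)
{
	long val;

	/* kstrtol() returns 0 on success and a negative errno on failure. */
	if (kstrtol(str, 0, &val) == 0)
		example_val = val;
	return 1;
}
__setup("example=", set_example);
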
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 2deaddf3e01..5e86eac4bfa 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -127,8 +127,7 @@ void shatter_huge_page(unsigned long addr)
}
/* Shatter the huge page into the preallocated L2 page table. */
- pmd_populate_kernel(&init_mm, pmd,
- get_prealloc_pte(pte_pfn(*(pte_t *)pmd)));
+ pmd_populate_kernel(&init_mm, pmd, get_prealloc_pte(pmd_pfn(*pmd)));
#ifdef __PAGETABLE_PMD_FOLDED
/* Walk every pgd on the system and update the pmd there. */
@@ -242,6 +241,11 @@ struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
if (p == NULL)
return NULL;
+ if (!pgtable_page_ctor(p)) {
+ __free_pages(p, L2_USER_PGTABLE_ORDER);
+ return NULL;
+ }
+
/*
* Make every page have a page_count() of one, not just the first.
* We don't use __GFP_COMP since it doesn't look like it works
@@ -252,7 +256,6 @@ struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
inc_zone_page_state(p+i, NR_PAGETABLE);
}
- pgtable_page_ctor(p);
return p;
}
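
The pgtable.c hunks do two things: shatter_huge_page() now reads the PFN with pmd_pfn() instead of casting the pmd to a pte, and pgtable_alloc_one() checks pgtable_page_ctor(), which can fail when it has to allocate a split page-table lock. A sketch of that allocate/ctor/cleanup pattern, with EXAMPLE_ORDER standing in for L2_USER_PGTABLE_ORDER and the function name invented for illustration:

#include <linux/mm.h>
#include <linux/gfp.h>

#define EXAMPLE_ORDER 0		/* stand-in for L2_USER_PGTABLE_ORDER */

/* Sketch only: allocate a page-table page and run its constructor. */
static struct page *example_pgtable_alloc(gfp_t gfp)
{
	struct page *p = alloc_pages(gfp, EXAMPLE_ORDER);

	if (!p)
		return NULL;

	/* The ctor may fail; undo the allocation before returning NULL. */
	if (!pgtable_page_ctor(p)) {
		__free_pages(p, EXAMPLE_ORDER);
		return NULL;
	}
	return p;
}
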