-rw-r--r--  arch/ia64/kernel/sys_ia64.c | 37 ++++++++++++-------------------------
-rw-r--r--  arch/ia64/mm/hugetlbpage.c  | 20 +++++++++-----------
2 files changed, 21 insertions, 36 deletions
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index d9439ef2f66..41e33f84c18 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -25,9 +25,9 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
 	unsigned long pgoff, unsigned long flags)
 {
 	long map_shared = (flags & MAP_SHARED);
-	unsigned long start_addr, align_mask = PAGE_SIZE - 1;
+	unsigned long align_mask = 0;
 	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
+	struct vm_unmapped_area_info info;
 
 	if (len > RGN_MAP_LIMIT)
 		return -ENOMEM;
@@ -44,7 +44,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
 		addr = 0;
 #endif
 	if (!addr)
-		addr = mm->free_area_cache;
+		addr = TASK_UNMAPPED_BASE;
 
 	if (map_shared && (TASK_SIZE > 0xfffffffful))
 		/*
@@ -53,28 +53,15 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
 		 * tasks, we prefer to avoid exhausting the address space too quickly by
 		 * limiting alignment to a single page.
 		 */
-		align_mask = SHMLBA - 1;
-
-  full_search:
-	start_addr = addr = (addr + align_mask) & ~align_mask;
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				/* Start a new search --- just in case we missed some holes.  */
-				addr = TASK_UNMAPPED_BASE;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (!vma || addr + len <= vma->vm_start) {
-			/* Remember the address where we stopped this search:  */
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		addr = (vma->vm_end + align_mask) & ~align_mask;
-	}
+		align_mask = PAGE_MASK & (SHMLBA - 1);
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = addr;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = align_mask;
+	info.align_offset = 0;
+	return vm_unmapped_area(&info);
 }
 
 asmlinkage long
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 5ca674b7473..76069c18ee4 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -148,7 +148,7 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 		unsigned long pgoff, unsigned long flags)
 {
-	struct vm_area_struct *vmm;
+	struct vm_unmapped_area_info info;
 
 	if (len > RGN_MAP_LIMIT)
 		return -ENOMEM;
@@ -165,16 +165,14 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
 	/* This code assumes that RGN_HPAGE != 0. */
 	if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
 		addr = HPAGE_REGION_BASE;
-	else
-		addr = ALIGN(addr, HPAGE_SIZE);
-	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
-		/* At this point:  (!vmm || addr < vmm->vm_end). */
-		if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
-			return -ENOMEM;
-		if (!vmm || (addr + len) <= vmm->vm_start)
-			return addr;
-		addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
-	}
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = addr;
+	info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
+	info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
+	info.align_offset = 0;
+	return vm_unmapped_area(&info);
 }
 
 static int __init hugetlb_setup_sz(char *str)
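Both conversions follow the same pattern: instead of walking the VMA list by hand, the caller fills in a struct vm_unmapped_area_info describing the search (direction, length, limits, alignment) and lets the common vm_unmapped_area() helper find a suitable gap. The sketch below shows that calling pattern for a hypothetical architecture hook; the function name and the limit values are illustrative, but the struct fields and the vm_unmapped_area() call are the same ones used in the diff above.

/*
 * Sketch only: a bottom-up search via vm_unmapped_area(), mirroring the
 * sys_ia64.c conversion above.  example_get_unmapped_area() is not a real
 * kernel function; the limits shown are placeholders.
 */
#include <linux/mm.h>
#include <linux/sched.h>

static unsigned long
example_get_unmapped_area(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags)
{
	struct vm_unmapped_area_info info;

	info.flags = 0;				/* 0 = bottom-up; VM_UNMAPPED_AREA_TOPDOWN searches downward */
	info.length = len;			/* size of the free gap we need */
	info.low_limit = TASK_UNMAPPED_BASE;	/* gap must start at or above this */
	info.high_limit = TASK_SIZE;		/* mapping must end at or below this */
	info.align_mask = PAGE_MASK & (SHMLBA - 1); /* extra alignment beyond page size, if any */
	info.align_offset = 0;			/* alignment phase; nonzero for cache colouring */

	/* Returns the start of a suitable gap, or -ENOMEM if none fits. */
	return vm_unmapped_area(&info);
}

The gain over the removed loops is that vm_unmapped_area() descends an augmented rbtree of free gaps instead of scanning VMAs linearly, which also obsoletes the per-mm free_area_cache heuristic dropped in the first hunk.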