diff options
Diffstat (limited to 'mm/mmap.c')
| -rw-r--r-- | mm/mmap.c | 50 | 
1 file changed, 21 insertions, 29 deletions
diff --git a/mm/mmap.c b/mm/mmap.c index 84f997da78d..68b9ad2ef1d 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -29,6 +29,7 @@  #include <asm/uaccess.h>  #include <asm/cacheflush.h>  #include <asm/tlb.h> +#include <asm/mmu_context.h>  #ifndef arch_mmap_check  #define arch_mmap_check(addr, len, flags)	(0) @@ -1199,6 +1200,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,  	if (len > TASK_SIZE)  		return -ENOMEM; +	if (flags & MAP_FIXED) +		return addr; +  	if (addr) {  		addr = PAGE_ALIGN(addr);  		vma = find_vma(mm, addr); @@ -1272,6 +1276,9 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,  	if (len > TASK_SIZE)  		return -ENOMEM; +	if (flags & MAP_FIXED) +		return addr; +  	/* requesting a specific address */  	if (addr) {  		addr = PAGE_ALIGN(addr); @@ -1359,39 +1366,21 @@ unsigned long  get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,  		unsigned long pgoff, unsigned long flags)  { -	unsigned long ret; - -	if (!(flags & MAP_FIXED)) { -		unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); - -		get_area = current->mm->get_unmapped_area; -		if (file && file->f_op && file->f_op->get_unmapped_area) -			get_area = file->f_op->get_unmapped_area; -		addr = get_area(file, addr, len, pgoff, flags); -		if (IS_ERR_VALUE(addr)) -			return addr; -	} +	unsigned long (*get_area)(struct file *, unsigned long, +				  unsigned long, unsigned long, unsigned long); + +	get_area = current->mm->get_unmapped_area; +	if (file && file->f_op && file->f_op->get_unmapped_area) +		get_area = file->f_op->get_unmapped_area; +	addr = get_area(file, addr, len, pgoff, flags); +	if (IS_ERR_VALUE(addr)) +		return addr;  	if (addr > TASK_SIZE - len)  		return -ENOMEM;  	if (addr & ~PAGE_MASK)  		return -EINVAL; -	if (file && is_file_hugepages(file))  { -		/* -		 * Check if the given range is hugepage aligned, and -		 * can be made suitable for hugepages. 
-		 */ -		ret = prepare_hugepage_range(addr, len, pgoff); -	} else { -		/* -		 * Ensure that a normal request is not falling in a -		 * reserved hugepage range.  For some archs like IA-64, -		 * there is a separate region for hugepages. -		 */ -		ret = is_hugepage_only_range(current->mm, addr, len); -	} -	if (ret) -		return -EINVAL; +  	return addr;  } @@ -1731,7 +1720,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,  /*   * Split a vma into two pieces at address 'addr', a new vma is allocated - * either for the first part or the the tail. + * either for the first part or the tail.   */  int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,  	      unsigned long addr, int new_below) @@ -1979,6 +1968,9 @@ void exit_mmap(struct mm_struct *mm)  	unsigned long nr_accounted = 0;  	unsigned long end; +	/* mm's last user has gone, and its about to be pulled down */ +	arch_exit_mmap(mm); +  	lru_add_drain();  	flush_cache_mm(mm);  	tlb = tlb_gather_mmu(mm, 1);  | 
