Diffstat (limited to 'mm/nommu.c')
-rw-r--r--	mm/nommu.c	62
1 file changed, 38 insertions, 24 deletions
diff --git a/mm/nommu.c b/mm/nommu.c
index ecd1f158548..4a852f6c570 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -13,8 +13,11 @@
  *  Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/export.h>
 #include <linux/mm.h>
+#include <linux/vmacache.h>
 #include <linux/mman.h>
 #include <linux/swap.h>
 #include <linux/file.h>
@@ -24,12 +27,14 @@
 #include <linux/vmalloc.h>
 #include <linux/blkdev.h>
 #include <linux/backing-dev.h>
+#include <linux/compiler.h>
 #include <linux/mount.h>
 #include <linux/personality.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
 #include <linux/audit.h>
 #include <linux/sched/sysctl.h>
+#include <linux/printk.h>
 
 #include <asm/uaccess.h>
 #include <asm/tlb.h>
@@ -60,6 +65,7 @@ unsigned long highest_memmap_pfn;
 struct percpu_counter vm_committed_as;
 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
 int sysctl_overcommit_ratio = 50; /* default is 50% */
+unsigned long sysctl_overcommit_kbytes __read_mostly;
 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
@@ -295,7 +301,7 @@ long vwrite(char *buf, char *addr, unsigned long count)
 		count = -(unsigned long) addr;
 
 	memcpy(addr, buf, count);
-	return(count);
+	return count;
 }
 
 /*
@@ -458,7 +464,7 @@ EXPORT_SYMBOL_GPL(vm_unmap_aliases);
  * Implement a stub for vmalloc_sync_all() if the architecture chose not to
  * have one.
  */
-void  __attribute__((weak)) vmalloc_sync_all(void)
+void __weak vmalloc_sync_all(void)
 {
 }
 
@@ -767,16 +773,23 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
  */
 static void delete_vma_from_mm(struct vm_area_struct *vma)
 {
+	int i;
 	struct address_space *mapping;
 	struct mm_struct *mm = vma->vm_mm;
+	struct task_struct *curr = current;
 
 	kenter("%p", vma);
 
 	protect_vma(vma, 0);
 
 	mm->map_count--;
-	if (mm->mmap_cache == vma)
-		mm->mmap_cache = NULL;
+	for (i = 0; i < VMACACHE_SIZE; i++) {
+		/* if the vma is cached, invalidate the entire cache */
+		if (curr->vmacache[i] == vma) {
+			vmacache_invalidate(mm);
+			break;
+		}
+	}
 
 	/* remove the VMA from the mapping */
 	if (vma->vm_file) {
@@ -824,8 +837,8 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 	struct vm_area_struct *vma;
 
 	/* check the cache first */
-	vma = ACCESS_ONCE(mm->mmap_cache);
-	if (vma && vma->vm_start <= addr && vma->vm_end > addr)
+	vma = vmacache_find(mm, addr);
+	if (likely(vma))
 		return vma;
 
 	/* trawl the list (there may be multiple mappings in which addr
@@ -834,7 +847,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 		if (vma->vm_start > addr)
 			return NULL;
 		if (vma->vm_end > addr) {
-			mm->mmap_cache = vma;
+			vmacache_update(addr, vma);
 			return vma;
 		}
 	}
@@ -873,8 +886,8 @@ static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
 	unsigned long end = addr + len;
 
 	/* check the cache first */
-	vma = mm->mmap_cache;
-	if (vma && vma->vm_start == addr && vma->vm_end == end)
+	vma = vmacache_find_exact(mm, addr, end);
+	if (vma)
 		return vma;
 
 	/* trawl the list (there may be multiple mappings in which addr
@@ -885,7 +898,7 @@ static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
 		if (vma->vm_start > addr)
 			return NULL;
 		if (vma->vm_end == end) {
-			mm->mmap_cache = vma;
+			vmacache_update(addr, vma);
 			return vma;
 		}
 	}
@@ -937,7 +950,7 @@ static int validate_mmap_request(struct file *file,
 		struct address_space *mapping;
 
 		/* files must support mmap */
-		if (!file->f_op || !file->f_op->mmap)
+		if (!file->f_op->mmap)
 			return -ENODEV;
 
 		/* work out if what we've got could possibly be shared
@@ -994,7 +1007,7 @@ static int validate_mmap_request(struct file *file,
 			    (file->f_mode & FMODE_WRITE))
 				return -EACCES;
 
-			if (locks_verify_locked(file_inode(file)))
+			if (locks_verify_locked(file))
 				return -EAGAIN;
 
 			if (!(capabilities & BDI_CAP_MAP_DIRECT))
@@ -1002,8 +1015,7 @@ static int validate_mmap_request(struct file *file,
 
 			/* we mustn't privatise shared mappings */
 			capabilities &= ~BDI_CAP_MAP_COPY;
-		}
-		else {
+		} else {
 			/* we're going to read the file into private memory we
 			 * allocate */
 			if (!(capabilities & BDI_CAP_MAP_COPY))
@@ -1034,23 +1046,20 @@ static int validate_mmap_request(struct file *file,
 		if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
 			if (prot & PROT_EXEC)
 				return -EPERM;
-		}
-		else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
+		} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
 			/* handle implication of PROT_EXEC by PROT_READ */
 			if (current->personality & READ_IMPLIES_EXEC) {
 				if (capabilities & BDI_CAP_EXEC_MAP)
 					prot |= PROT_EXEC;
 			}
-		}
-		else if ((prot & PROT_READ) &&
+		} else if ((prot & PROT_READ) &&
 			 (prot & PROT_EXEC) &&
 			 !(capabilities & BDI_CAP_EXEC_MAP)
 			 ) {
 			/* backing file is not executable, try to copy */
 			capabilities &= ~BDI_CAP_MAP_DIRECT;
 		}
-	}
-	else {
+	} else {
 		/* anonymous mappings are always memory backed and can be
 		 * privately mapped
 		 */
@@ -1240,7 +1249,7 @@ error_free:
 	return ret;
 
 enomem:
-	printk("Allocation of length %lu from process %d (%s) failed\n",
+	pr_err("Allocation of length %lu from process %d (%s) failed\n",
 	       len, current->pid, current->comm);
 	show_free_areas(0);
 	return -ENOMEM;
@@ -1658,7 +1667,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 	/* find the first potentially overlapping VMA */
 	vma = find_vma(mm, start);
 	if (!vma) {
-		static int limit = 0;
+		static int limit;
 		if (limit < 5) {
 			printk(KERN_WARNING
 			       "munmap of memory not mmapped by process %d"
@@ -1948,13 +1957,12 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 		goto error;
 	}
 
-	allowed = totalram_pages * sysctl_overcommit_ratio / 100;
+	allowed = vm_commit_limit();
 	/*
 	 * Reserve some 3% for root
 	 */
 	if (!cap_sys_admin)
 		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
-	allowed += total_swap_pages;
 
 	/*
 	 * Don't let a single process grow so big a user can't recover
@@ -1985,6 +1993,12 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 }
 EXPORT_SYMBOL(filemap_fault);
 
+void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	BUG();
+}
+EXPORT_SYMBOL(filemap_map_pages);
+
 int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
			     unsigned long size, pgoff_t pgoff)
 {
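
Note on the lookup change: the old code kept a single mm->mmap_cache pointer per address space, while the new code probes a small per-thread cache via vmacache_find()/vmacache_find_exact() and refills it with vmacache_update() after a successful list walk; delete_vma_from_mm() invalidates the whole per-thread cache with vmacache_invalidate() if the dying VMA is cached. The sketch below is a minimal, userspace-only illustration of that lookup pattern, not kernel code: struct area, lookup(), CACHE_SIZE and the page-shift indexing are hypothetical stand-ins for the real vmacache helpers shown in the diff.

/*
 * Userspace sketch of the find_vma() pattern after this change: probe a
 * small per-thread cache first, fall back to a linear walk of the sorted
 * region list, and refill the cache on a hit.  Hypothetical names only.
 */
#include <stdio.h>

#define CACHE_SIZE 4	/* small, like the per-thread vmacache */

struct area {
	unsigned long start, end;	/* covers [start, end) */
	struct area *next;		/* list sorted by start address */
};

/* per-thread cache, analogous to current->vmacache[] */
static __thread struct area *cache[CACHE_SIZE];

static struct area *lookup(struct area *list, unsigned long addr)
{
	int i;

	/* check the cache first */
	for (i = 0; i < CACHE_SIZE; i++) {
		struct area *a = cache[i];

		if (a && a->start <= addr && a->end > addr)
			return a;
	}

	/* trawl the list, caching the result on a hit */
	for (; list; list = list->next) {
		if (list->start > addr)
			return NULL;
		if (list->end > addr) {
			/* index by page number, roughly what vmacache_update() does */
			cache[(addr >> 12) % CACHE_SIZE] = list;
			return list;
		}
	}
	return NULL;
}

int main(void)
{
	struct area b = { 0x2000, 0x3000, NULL };
	struct area a = { 0x1000, 0x2000, &b };

	printf("%p\n", (void *)lookup(&a, 0x2800));	/* list walk, fills cache */
	printf("%p\n", (void *)lookup(&a, 0x2800));	/* served from cache */
	return 0;
}

The design point the diff makes is that the cache moves from the mm_struct to the task: several threads sharing one mm no longer ping-pong a single cached pointer, and invalidation (as in delete_vma_from_mm() above) clears the current thread's entries rather than one shared slot.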
