Diffstat (limited to 'mm/process_vm_access.c')
-rw-r--r--  mm/process_vm_access.c | 272
1 file changed, 84 insertions(+), 188 deletions(-)
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index fd26d043350..5077afcd9e1 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -23,129 +23,40 @@
 
 /**
  * process_vm_rw_pages - read/write pages from task specified
- * @task: task to read/write from
- * @mm: mm for task
- * @process_pages: struct pages area that can store at least
- *  nr_pages_to_copy struct page pointers
- * @pa: address of page in task to start copying from/to
+ * @pages: array of pointers to pages we want to copy
  * @start_offset: offset in page to start copying from/to
  * @len: number of bytes to copy
- * @lvec: iovec array specifying where to copy to/from
- * @lvec_cnt: number of elements in iovec array
- * @lvec_current: index in iovec array we are up to
- * @lvec_offset: offset in bytes from current iovec iov_base we are up to
+ * @iter: where to copy to/from locally
  * @vm_write: 0 means copy from, 1 means copy to
- * @nr_pages_to_copy: number of pages to copy
- * @bytes_copied: returns number of bytes successfully copied
  * Returns 0 on success, error code otherwise
  */
-static int process_vm_rw_pages(struct task_struct *task,
-			       struct mm_struct *mm,
-			       struct page **process_pages,
-			       unsigned long pa,
-			       unsigned long start_offset,
-			       unsigned long len,
-			       const struct iovec *lvec,
-			       unsigned long lvec_cnt,
-			       unsigned long *lvec_current,
-			       size_t *lvec_offset,
-			       int vm_write,
-			       unsigned int nr_pages_to_copy,
-			       ssize_t *bytes_copied)
+static int process_vm_rw_pages(struct page **pages,
+			       unsigned offset,
+			       size_t len,
+			       struct iov_iter *iter,
+			       int vm_write)
 {
-	int pages_pinned;
-	void *target_kaddr;
-	int pgs_copied = 0;
-	int j;
-	int ret;
-	ssize_t bytes_to_copy;
-	ssize_t rc = 0;
-
-	*bytes_copied = 0;
-
-	/* Get the pages we're interested in */
-	down_read(&mm->mmap_sem);
-	pages_pinned = get_user_pages(task, mm, pa,
-				      nr_pages_to_copy,
-				      vm_write, 0, process_pages, NULL);
-	up_read(&mm->mmap_sem);
-
-	if (pages_pinned != nr_pages_to_copy) {
-		rc = -EFAULT;
-		goto end;
-	}
-
 	/* Do the copy for each page */
-	for (pgs_copied = 0;
-	     (pgs_copied < nr_pages_to_copy) && (*lvec_current < lvec_cnt);
-	     pgs_copied++) {
-		/* Make sure we have a non zero length iovec */
-		while (*lvec_current < lvec_cnt
-		       && lvec[*lvec_current].iov_len == 0)
-			(*lvec_current)++;
-		if (*lvec_current == lvec_cnt)
-			break;
+	while (len && iov_iter_count(iter)) {
+		struct page *page = *pages++;
+		size_t copy = PAGE_SIZE - offset;
+		size_t copied;
 
-		/*
-		 * Will copy smallest of:
-		 * - bytes remaining in page
-		 * - bytes remaining in destination iovec
-		 */
-		bytes_to_copy = min_t(ssize_t, PAGE_SIZE - start_offset,
-				      len - *bytes_copied);
-		bytes_to_copy = min_t(ssize_t, bytes_to_copy,
-				      lvec[*lvec_current].iov_len
-				      - *lvec_offset);
-
-		target_kaddr = kmap(process_pages[pgs_copied]) + start_offset;
-
-		if (vm_write)
-			ret = copy_from_user(target_kaddr,
-					     lvec[*lvec_current].iov_base
-					     + *lvec_offset,
-					     bytes_to_copy);
-		else
-			ret = copy_to_user(lvec[*lvec_current].iov_base
-					   + *lvec_offset,
-					   target_kaddr, bytes_to_copy);
-		kunmap(process_pages[pgs_copied]);
-		if (ret) {
-			*bytes_copied += bytes_to_copy - ret;
-			pgs_copied++;
-			rc = -EFAULT;
-			goto end;
-		}
-		*bytes_copied += bytes_to_copy;
-		*lvec_offset += bytes_to_copy;
-		if (*lvec_offset == lvec[*lvec_current].iov_len) {
-			/*
-			 * Need to copy remaining part of page into the
-			 * next iovec if there are any bytes left in page
-			 */
-			(*lvec_current)++;
-			*lvec_offset = 0;
-			start_offset = (start_offset + bytes_to_copy)
-				% PAGE_SIZE;
-			if (start_offset)
-				pgs_copied--;
-		} else {
-			start_offset = 0;
-		}
-	}
+		if (copy > len)
+			copy = len;
 
-end:
-	if (vm_write) {
-		for (j = 0; j < pages_pinned; j++) {
-			if (j < pgs_copied)
-				set_page_dirty_lock(process_pages[j]);
-			put_page(process_pages[j]);
+		if (vm_write) {
+			copied = copy_page_from_iter(page, offset, copy, iter);
+			set_page_dirty_lock(page);
+		} else {
+			copied = copy_page_to_iter(page, offset, copy, iter);
 		}
-	} else {
-		for (j = 0; j < pages_pinned; j++)
-			put_page(process_pages[j]);
+		len -= copied;
+		if (copied < copy && iov_iter_count(iter))
+			return -EFAULT;
+		offset = 0;
 	}
-
-	return rc;
+	return 0;
 }
 
 /* Maximum number of pages kmalloc'd to hold struct page's during copy */
@@ -155,67 +66,60 @@ end:
  * process_vm_rw_single_vec - read/write pages from task specified
  * @addr: start memory address of target process
  * @len: size of area to copy to/from
- * @lvec: iovec array specifying where to copy to/from locally
- * @lvec_cnt: number of elements in iovec array
- * @lvec_current: index in iovec array we are up to
- * @lvec_offset: offset in bytes from current iovec iov_base we are up to
+ * @iter: where to copy to/from locally
  * @process_pages: struct pages area that can store at least
 *  nr_pages_to_copy struct page pointers
  * @mm: mm for task
  * @task: task to read/write from
  * @vm_write: 0 means copy from, 1 means copy to
- * @bytes_copied: returns number of bytes successfully copied
  * Returns 0 on success or on failure error code
  */
 static int process_vm_rw_single_vec(unsigned long addr,
 				    unsigned long len,
-				    const struct iovec *lvec,
-				    unsigned long lvec_cnt,
-				    unsigned long *lvec_current,
-				    size_t *lvec_offset,
+				    struct iov_iter *iter,
 				    struct page **process_pages,
 				    struct mm_struct *mm,
 				    struct task_struct *task,
-				    int vm_write,
-				    ssize_t *bytes_copied)
+				    int vm_write)
 {
 	unsigned long pa = addr & PAGE_MASK;
 	unsigned long start_offset = addr - pa;
 	unsigned long nr_pages;
-	ssize_t bytes_copied_loop;
 	ssize_t rc = 0;
-	unsigned long nr_pages_copied = 0;
-	unsigned long nr_pages_to_copy;
 	unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
 		/ sizeof(struct pages *);
 
-	*bytes_copied = 0;
-
 	/* Work out address and page range required */
 	if (len == 0)
 		return 0;
 	nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
 
-	while ((nr_pages_copied < nr_pages) && (*lvec_current < lvec_cnt)) {
-		nr_pages_to_copy = min(nr_pages - nr_pages_copied,
-				       max_pages_per_loop);
+	while (!rc && nr_pages && iov_iter_count(iter)) {
+		int pages = min(nr_pages, max_pages_per_loop);
+		size_t bytes;
 
-		rc = process_vm_rw_pages(task, mm, process_pages, pa,
-					 start_offset, len,
-					 lvec, lvec_cnt,
-					 lvec_current, lvec_offset,
-					 vm_write, nr_pages_to_copy,
-					 &bytes_copied_loop);
-		start_offset = 0;
-		*bytes_copied += bytes_copied_loop;
+		/* Get the pages we're interested in */
+		down_read(&mm->mmap_sem);
+		pages = get_user_pages(task, mm, pa, pages,
+				      vm_write, 0, process_pages, NULL);
+		up_read(&mm->mmap_sem);
 
-		if (rc < 0) {
-			return rc;
-		} else {
-			len -= bytes_copied_loop;
-			nr_pages_copied += nr_pages_to_copy;
-			pa += nr_pages_to_copy * PAGE_SIZE;
-		}
+		if (pages <= 0)
+			return -EFAULT;
+
+		bytes = pages * PAGE_SIZE - start_offset;
+		if (bytes > len)
+			bytes = len;
+
+		rc = process_vm_rw_pages(process_pages,
+					 start_offset, bytes, iter,
+					 vm_write);
+		len -= bytes;
+		start_offset = 0;
+		nr_pages -= pages;
+		pa += pages * PAGE_SIZE;
+		while (pages)
+			put_page(process_pages[--pages]);
 	}
 
 	return rc;
@@ -228,8 +132,7 @@ static int process_vm_rw_single_vec(unsigned long addr,
 /**
  * process_vm_rw_core - core of reading/writing pages from task specified
  * @pid: PID of process to read/write from/to
- * @lvec: iovec array specifying where to copy to/from locally
- * @liovcnt: size of lvec array
+ * @iter: where to copy to/from locally
  * @rvec: iovec array specifying where to copy to/from in the other process
  * @riovcnt: size of rvec array
  * @flags: currently unused
@@ -238,8 +141,7 @@ static int process_vm_rw_single_vec(unsigned long addr,
  *  return less bytes than expected if an error occurs during the copying
  *  process.
  */
-static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
-				  unsigned long liovcnt,
+static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
 				  const struct iovec *rvec,
 				  unsigned long riovcnt,
 				  unsigned long flags, int vm_write)
@@ -250,13 +152,10 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
 	struct mm_struct *mm;
 	unsigned long i;
 	ssize_t rc = 0;
-	ssize_t bytes_copied_loop;
-	ssize_t bytes_copied = 0;
 	unsigned long nr_pages = 0;
 	unsigned long nr_pages_iov;
-	unsigned long iov_l_curr_idx = 0;
-	size_t iov_l_curr_offset = 0;
 	ssize_t iov_len;
+	size_t total_len = iov_iter_count(iter);
 
 	/*
 	 * Work out how many pages of struct pages we're going to need
@@ -310,24 +209,20 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
 		goto put_task_struct;
 	}
 
-	for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) {
+	for (i = 0; i < riovcnt && iov_iter_count(iter) && !rc; i++)
 		rc = process_vm_rw_single_vec(
 			(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
-			lvec, liovcnt, &iov_l_curr_idx, &iov_l_curr_offset,
-			process_pages, mm, task, vm_write, &bytes_copied_loop);
-		bytes_copied += bytes_copied_loop;
-		if (rc != 0) {
-			/* If we have managed to copy any data at all then
-			   we return the number of bytes copied. Otherwise
-			   we return the error code */
-			if (bytes_copied)
-				rc = bytes_copied;
-			goto put_mm;
-		}
-	}
+			iter, process_pages, mm, task, vm_write);
+
+	/* copied = space before - space after */
+	total_len -= iov_iter_count(iter);
+
+	/* If we have managed to copy any data at all then
+	   we return the number of bytes copied. Otherwise
+	   we return the error code */
+	if (total_len)
+		rc = total_len;
 
-	rc = bytes_copied;
-put_mm:
 	mmput(mm);
 
 put_task_struct:
@@ -363,6 +258,7 @@ static ssize_t process_vm_rw(pid_t pid,
 	struct iovec iovstack_r[UIO_FASTIOV];
 	struct iovec *iov_l = iovstack_l;
 	struct iovec *iov_r = iovstack_r;
+	struct iov_iter iter;
 	ssize_t rc;
 
 	if (flags != 0)
@@ -378,13 +274,14 @@ static ssize_t process_vm_rw(pid_t pid,
 	if (rc <= 0)
 		goto free_iovecs;
 
+	iov_iter_init(&iter, vm_write ? WRITE : READ, iov_l, liovcnt, rc);
+
 	rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV,
 				   iovstack_r, &iov_r);
 	if (rc <= 0)
 		goto free_iovecs;
 
-	rc = process_vm_rw_core(pid, iov_l, liovcnt, iov_r, riovcnt, flags,
-				vm_write);
+	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);
 
 free_iovecs:
 	if (iov_r != iovstack_r)
@@ -412,7 +309,7 @@ SYSCALL_DEFINE6(process_vm_writev, pid_t, pid,
 
 #ifdef CONFIG_COMPAT
 
-asmlinkage ssize_t
+static ssize_t
 compat_process_vm_rw(compat_pid_t pid,
 		     const struct compat_iovec __user *lvec,
 		     unsigned long liovcnt,
@@ -424,6 +321,7 @@ compat_process_vm_rw(compat_pid_t pid,
 	struct iovec iovstack_r[UIO_FASTIOV];
 	struct iovec *iov_l = iovstack_l;
 	struct iovec *iov_r = iovstack_r;
+	struct iov_iter iter;
 	ssize_t rc = -EFAULT;
 
 	if (flags != 0)
@@ -439,14 +337,14 @@ compat_process_vm_rw(compat_pid_t pid,
 						  &iov_l);
 	if (rc <= 0)
 		goto free_iovecs;
+	iov_iter_init(&iter, vm_write ? WRITE : READ, iov_l, liovcnt, rc);
 
 	rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt,
 					  UIO_FASTIOV, iovstack_r,
 					  &iov_r);
 	if (rc <= 0)
 		goto free_iovecs;
-	rc = process_vm_rw_core(pid, iov_l, liovcnt, iov_r, riovcnt, flags,
-			   vm_write);
+	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);
 
 free_iovecs:
 	if (iov_r != iovstack_r)
@@ -456,25 +354,23 @@ free_iovecs:
 	return rc;
 }
 
-asmlinkage ssize_t
-compat_sys_process_vm_readv(compat_pid_t pid,
-			    const struct compat_iovec __user *lvec,
-			    unsigned long liovcnt,
-			    const struct compat_iovec __user *rvec,
-			    unsigned long riovcnt,
-			    unsigned long flags)
+COMPAT_SYSCALL_DEFINE6(process_vm_readv, compat_pid_t, pid,
+		       const struct compat_iovec __user *, lvec,
+		       compat_ulong_t, liovcnt,
+		       const struct compat_iovec __user *, rvec,
+		       compat_ulong_t, riovcnt,
+		       compat_ulong_t, flags)
 {
 	return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
 				    riovcnt, flags, 0);
 }
 
-asmlinkage ssize_t
-compat_sys_process_vm_writev(compat_pid_t pid,
-			     const struct compat_iovec __user *lvec,
-			     unsigned long liovcnt,
-			     const struct compat_iovec __user *rvec,
-			     unsigned long riovcnt,
-			     unsigned long flags)
+COMPAT_SYSCALL_DEFINE6(process_vm_writev, compat_pid_t, pid,
+		       const struct compat_iovec __user *, lvec,
+		       compat_ulong_t, liovcnt,
+		       const struct compat_iovec __user *, rvec,
+		       compat_ulong_t, riovcnt,
+		       compat_ulong_t, flags)
 {
 	return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
 				    riovcnt, flags, 1);
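For context, the kernel code reworked above is driven from userspace through the process_vm_readv() and process_vm_writev() syscalls: the local iovec array (lvec) is what now becomes the iov_iter, while the remote array (rvec) describes address ranges in the other process. The sketch below is not part of the patch; it is a minimal userspace caller of process_vm_readv() via the glibc wrapper declared in <sys/uio.h>. The target PID and the remote address are placeholder command-line arguments that the user must obtain from the target process (for example, printed by the target or taken from a debugger), and the call requires the usual ptrace-style permission over that process.

/*
 * Hypothetical example, not from the patch: copy up to 128 bytes from
 * another process's memory into a local buffer with process_vm_readv().
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/uio.h>

int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s <pid> <remote-hex-addr>\n", argv[0]);
		return 1;
	}

	pid_t pid = (pid_t)strtol(argv[1], NULL, 10);
	unsigned long remote_addr = strtoul(argv[2], NULL, 16);

	char buf[128];
	/* Local side: where the data lands in this process. */
	struct iovec local = { .iov_base = buf, .iov_len = sizeof(buf) };
	/* Remote side: the address range in the target process. */
	struct iovec remote = { .iov_base = (void *)remote_addr,
				.iov_len = sizeof(buf) };

	ssize_t n = process_vm_readv(pid, &local, 1, &remote, 1, 0);
	if (n < 0) {
		perror("process_vm_readv");
		return 1;
	}
	printf("copied %zd bytes\n", n);
	return 0;
}

A write in the other direction uses process_vm_writev() with the same argument layout, which is the vm_write = 1 path in the kernel code above.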
