Diffstat (limited to 'kernel/fork.c')
-rw-r--r--	kernel/fork.c	65
1 file changed, 41 insertions(+), 24 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index ba0d1726132..b77fd559c78 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -76,6 +76,9 @@
 
 #include <trace/events/sched.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/task.h>
+
 /*
  * Protected counters by write_lock_irq(&tasklist_lock)
  */
@@ -162,7 +165,6 @@ static void account_kernel_stack(struct thread_info *ti, int account)
 
 void free_task(struct task_struct *tsk)
 {
-	prop_local_destroy_single(&tsk->dirties);
 	account_kernel_stack(tsk->stack, -1);
 	free_thread_info(tsk->stack);
 	rt_mutex_debug_task_free(tsk);
@@ -274,10 +276,6 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 
 	tsk->stack = ti;
 
-	err = prop_local_init_single(&tsk->dirties);
-	if (err)
-		goto out;
-
 	setup_thread_stack(tsk, orig);
 	clear_user_return_notifier(tsk);
 	clear_tsk_need_resched(tsk);
@@ -649,6 +647,26 @@ struct mm_struct *get_task_mm(struct task_struct *task)
 }
 EXPORT_SYMBOL_GPL(get_task_mm);
 
+struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
+{
+	struct mm_struct *mm;
+	int err;
+
+	err = mutex_lock_killable(&task->signal->cred_guard_mutex);
+	if (err)
+		return ERR_PTR(err);
+
+	mm = get_task_mm(task);
+	if (mm && mm != current->mm &&
+			!ptrace_may_access(task, mode)) {
+		mmput(mm);
+		mm = ERR_PTR(-EACCES);
+	}
+	mutex_unlock(&task->signal->cred_guard_mutex);
+
+	return mm;
+}
+
 /* Please note the differences between mmput and mm_release.
  * mmput is called whenever we stop holding onto a mm_struct,
  * error success whatever.
@@ -875,6 +893,7 @@ static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
 {
 #ifdef CONFIG_BLOCK
 	struct io_context *ioc = current->io_context;
+	struct io_context *new_ioc;
 
 	if (!ioc)
 		return 0;
@@ -886,11 +905,12 @@ static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
 		if (unlikely(!tsk->io_context))
 			return -ENOMEM;
 	} else if (ioprio_valid(ioc->ioprio)) {
-		tsk->io_context = alloc_io_context(GFP_KERNEL, -1);
-		if (unlikely(!tsk->io_context))
+		new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
+		if (unlikely(!new_ioc))
 			return -ENOMEM;
 
-		tsk->io_context->ioprio = ioc->ioprio;
+		new_ioc->ioprio = ioc->ioprio;
+		put_io_context(new_ioc);
 	}
 #endif
 	return 0;
@@ -977,7 +997,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	sched_autogroup_fork(sig);
 
 #ifdef CONFIG_CGROUPS
-	init_rwsem(&sig->threadgroup_fork_lock);
+	init_rwsem(&sig->group_rwsem);
 #endif
 
 	sig->oom_adj = current->signal->oom_adj;
@@ -997,7 +1017,6 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 	new_flags |= PF_FORKNOEXEC;
 	new_flags |= PF_STARTING;
 	p->flags = new_flags;
-	clear_freeze_flag(p);
 }
 
 SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
@@ -1028,8 +1047,8 @@ void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
  */
 static void posix_cpu_timers_init(struct task_struct *tsk)
 {
-	tsk->cputime_expires.prof_exp = cputime_zero;
-	tsk->cputime_expires.virt_exp = cputime_zero;
+	tsk->cputime_expires.prof_exp = 0;
+	tsk->cputime_expires.virt_exp = 0;
 	tsk->cputime_expires.sched_exp = 0;
 	INIT_LIST_HEAD(&tsk->cpu_timers[0]);
 	INIT_LIST_HEAD(&tsk->cpu_timers[1]);
@@ -1137,14 +1156,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
 	init_sigpending(&p->pending);
 
-	p->utime = cputime_zero;
-	p->stime = cputime_zero;
-	p->gtime = cputime_zero;
-	p->utimescaled = cputime_zero;
-	p->stimescaled = cputime_zero;
+	p->utime = p->stime = p->gtime = 0;
+	p->utimescaled = p->stimescaled = 0;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
-	p->prev_utime = cputime_zero;
-	p->prev_stime = cputime_zero;
+	p->prev_utime = p->prev_stime = 0;
 #endif
 #if defined(SPLIT_RSS_COUNTING)
 	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
@@ -1163,7 +1178,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->io_context = NULL;
 	p->audit_context = NULL;
 	if (clone_flags & CLONE_THREAD)
-		threadgroup_fork_read_lock(current);
+		threadgroup_change_begin(current);
 	cgroup_fork(p);
 #ifdef CONFIG_NUMA
 	p->mempolicy = mpol_dup(p->mempolicy);
@@ -1301,6 +1316,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
 	p->nr_dirtied = 0;
 	p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
+	p->dirty_paused_when = 0;
 
 	/*
 	 * Ok, make it visible to the rest of the system.
@@ -1378,8 +1394,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	proc_fork_connector(p);
 	cgroup_post_fork(p);
 	if (clone_flags & CLONE_THREAD)
-		threadgroup_fork_read_unlock(current);
+		threadgroup_change_end(current);
 	perf_event_fork(p);
+
+	trace_task_newtask(p, clone_flags);
+
 	return p;
 
 bad_fork_free_pid:
@@ -1413,7 +1432,7 @@ bad_fork_cleanup_policy:
 bad_fork_cleanup_cgroup:
 #endif
 	if (clone_flags & CLONE_THREAD)
-		threadgroup_fork_read_unlock(current);
+		threadgroup_change_end(current);
 	cgroup_exit(p, cgroup_callbacks_done);
 	delayacct_tsk_free(p);
 	module_put(task_thread_info(p)->exec_domain->module);
@@ -1528,8 +1547,6 @@ long do_fork(unsigned long clone_flags,
 			init_completion(&vfork);
 		}
 
-		audit_finish_fork(p);
-
 		/*
 		 * We set PF_STARTING at creation in case tracing wants to
 		 * use this to distinguish a fully live task from one that
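
Editor's note: the mm_access() helper introduced above gathers the get_task_mm() + ptrace_may_access() sequence under cred_guard_mutex so callers no longer open-code it. A minimal caller sketch follows; the function name example_grab_mm and the choice of PTRACE_MODE_READ are illustrative assumptions, not part of this diff.

#include <linux/err.h>		/* IS_ERR_OR_NULL() */
#include <linux/mm.h>		/* mmput() */
#include <linux/ptrace.h>	/* PTRACE_MODE_READ */
#include <linux/sched.h>	/* struct task_struct */

static struct mm_struct *example_grab_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	/*
	 * mm_access() takes cred_guard_mutex, pins the mm, and applies
	 * the ptrace access check in one place. It returns NULL when
	 * the task has no mm (e.g. a kernel thread), or an ERR_PTR()
	 * on access denial or a killable-wait interruption.
	 */
	mm = mm_access(task, PTRACE_MODE_READ);
	if (IS_ERR_OR_NULL(mm))
		return NULL;

	/* The caller now owns a reference and must mmput() when done. */
	return mm;
}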

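Editor's note: the trace_task_newtask() call added near the end of copy_process() is instantiated by the "#define CREATE_TRACE_POINTS" + "#include <trace/events/task.h>" pair at the top of the file: the header declares the event, and the one .c file that defines CREATE_TRACE_POINTS before including it emits the tracepoint bodies. The following is a reduced, illustrative sketch of that pattern, not the actual contents of include/trace/events/task.h (the real event records more fields).

#undef TRACE_SYSTEM
#define TRACE_SYSTEM task

#if !defined(_TRACE_TASK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_TASK_H

#include <linux/tracepoint.h>

TRACE_EVENT(task_newtask,

	TP_PROTO(struct task_struct *task, unsigned long clone_flags),

	TP_ARGS(task, clone_flags),

	/* Fields copied into the ring buffer when the event fires. */
	TP_STRUCT__entry(
		__field(pid_t, pid)
		__field(unsigned long, clone_flags)
	),

	TP_fast_assign(
		__entry->pid = task->pid;
		__entry->clone_flags = clone_flags;
	),

	TP_printk("pid=%d clone_flags=0x%lx",
		  __entry->pid, __entry->clone_flags)
);

#endif /* _TRACE_TASK_H */

/* This part must be outside the include guard. */
#include <trace/define_trace.h>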