| field | value | date |
|---|---|---|
| author | Arnd Bergmann <arnd@arndb.de> | 2012-03-15 16:11:36 +0000 |
| committer | Arnd Bergmann <arnd@arndb.de> | 2012-03-15 16:11:40 +0000 |
| commit | 86a30bece9ad4cc91c393a829a7b128291e0fb65 | |
| tree | 6cb3e6ad413d74118535f77436056c8d3cfae0eb /kernel | |
| parent | 243d58ec5792299fa212d05a4113c0ebac2df6a3 | |
| parent | a323f66439c04d1c3ae4dc20cc2d44d52ee43c9f | |
Merge branch 'fixes-non-critical' of git://github.com/hzhuang1/linux into next/maintainers
* 'fixes-non-critical' of git://github.com/hzhuang1/linux:
  MAINTAINERS: update MAINTAINERS email entry
  MAINTAINERS: update maintainer entry for pxa/hx4700
(update to v3.3-rc7)
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'kernel')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/fork.c | 60 |
| -rw-r--r-- | kernel/hung_task.c | 11 |
| -rw-r--r-- | kernel/irq/manage.c | 44 |
| -rw-r--r-- | kernel/kprobes.c | 12 |
| -rw-r--r-- | kernel/printk.c | 6 |
| -rw-r--r-- | kernel/sched/core.c | 4 |

6 files changed, 99 insertions, 38 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index e2cd3e2a5ae..26a7a6707fa 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -668,6 +668,38 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
 	return mm;
 }
 
+static void complete_vfork_done(struct task_struct *tsk)
+{
+	struct completion *vfork;
+
+	task_lock(tsk);
+	vfork = tsk->vfork_done;
+	if (likely(vfork)) {
+		tsk->vfork_done = NULL;
+		complete(vfork);
+	}
+	task_unlock(tsk);
+}
+
+static int wait_for_vfork_done(struct task_struct *child,
+				struct completion *vfork)
+{
+	int killed;
+
+	freezer_do_not_count();
+	killed = wait_for_completion_killable(vfork);
+	freezer_count();
+
+	if (killed) {
+		task_lock(child);
+		child->vfork_done = NULL;
+		task_unlock(child);
+	}
+
+	put_task_struct(child);
+	return killed;
+}
+
 /* Please note the differences between mmput and mm_release.
  * mmput is called whenever we stop holding onto a mm_struct,
  * error success whatever.
@@ -683,8 +715,6 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
  */
 void mm_release(struct task_struct *tsk, struct mm_struct *mm)
 {
-	struct completion *vfork_done = tsk->vfork_done;
-
 	/* Get rid of any futexes when releasing the mm */
 #ifdef CONFIG_FUTEX
 	if (unlikely(tsk->robust_list)) {
@@ -704,17 +734,15 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
 	/* Get rid of any cached register state */
 	deactivate_mm(tsk, mm);
 
-	/* notify parent sleeping on vfork() */
-	if (vfork_done) {
-		tsk->vfork_done = NULL;
-		complete(vfork_done);
-	}
+	if (tsk->vfork_done)
+		complete_vfork_done(tsk);
 
 	/*
 	 * If we're exiting normally, clear a user-space tid field if
 	 * requested.  We leave this alone when dying by signal, to leave
 	 * the value intact in a core dump, and to save the unnecessary
-	 * trouble otherwise.  Userland only wants this done for a sys_exit.
+	 * trouble, say, a killed vfork parent shouldn't touch this mm.
+	 * Userland only wants this done for a sys_exit.
 	 */
 	if (tsk->clear_child_tid) {
 		if (!(tsk->flags & PF_SIGNALED) &&
@@ -1018,7 +1046,6 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 
 	new_flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
 	new_flags |= PF_FORKNOEXEC;
-	new_flags |= PF_STARTING;
 	p->flags = new_flags;
 }
 
@@ -1548,16 +1575,9 @@ long do_fork(unsigned long clone_flags,
 		if (clone_flags & CLONE_VFORK) {
 			p->vfork_done = &vfork;
 			init_completion(&vfork);
+			get_task_struct(p);
 		}
 
-		/*
-		 * We set PF_STARTING at creation in case tracing wants to
-		 * use this to distinguish a fully live task from one that
-		 * hasn't finished SIGSTOP raising yet.  Now we clear it
-		 * and set the child going.
-		 */
-		p->flags &= ~PF_STARTING;
-
 		wake_up_new_task(p);
 
 		/* forking complete and child started to run, tell ptracer */
@@ -1565,10 +1585,8 @@ long do_fork(unsigned long clone_flags,
 			ptrace_event(trace, nr);
 
 		if (clone_flags & CLONE_VFORK) {
-			freezer_do_not_count();
-			wait_for_completion(&vfork);
-			freezer_count();
-			ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
+			if (!wait_for_vfork_done(p, &vfork))
+				ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
 		}
 	} else {
 		nr = PTR_ERR(p);
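The fork.c changes above replace the open-coded vfork completion with complete_vfork_done()/wait_for_vfork_done(), making the parent's wait killable and pinning the child with get_task_struct() so the completion cannot be signalled on a freed task; a killed parent clears child->vfork_done under task_lock(), so the exiting child skips complete() on a stale pointer. As a reminder of the user-visible contract this protects, here is a minimal userspace sketch (plain POSIX C, not kernel code): vfork() suspends the parent until the child calls _exit() or exec.

```c
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = vfork();

	if (pid < 0) {
		perror("vfork");
		return EXIT_FAILURE;
	}
	if (pid == 0) {
		/* Child: shares the parent's mm, so it may only exec or _exit(). */
		execlp("echo", "echo", "child ran", (char *)NULL);
		_exit(127);		/* only reached if exec failed */
	}
	/* Parent: resumes here only once the child has released the mm.
	 * The patch above makes the kernel-side wait killable, so a fatal
	 * signal can now break this block instead of hanging forever. */
	printf("parent resumed after child %d\n", (int)pid);
	waitpid(pid, NULL, 0);
	return EXIT_SUCCESS;
}
```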
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 2e48ec0c2e9..c21449f85a2 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -119,15 +119,20 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
  * For preemptible RCU it is sufficient to call rcu_read_unlock in order
  * to exit the grace period. For classic RCU, a reschedule is required.
  */
-static void rcu_lock_break(struct task_struct *g, struct task_struct *t)
+static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)
 {
+	bool can_cont;
+
 	get_task_struct(g);
 	get_task_struct(t);
 	rcu_read_unlock();
 	cond_resched();
 	rcu_read_lock();
+	can_cont = pid_alive(g) && pid_alive(t);
 	put_task_struct(t);
 	put_task_struct(g);
+
+	return can_cont;
 }
 
 /*
@@ -154,9 +159,7 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
 			goto unlock;
 		if (!--batch_count) {
 			batch_count = HUNG_TASK_BATCHING;
-			rcu_lock_break(g, t);
-			/* Exit if t or g was unhashed during refresh. */
-			if (t->state == TASK_DEAD || g->state == TASK_DEAD)
+			if (!rcu_lock_break(g, t))
 				goto unlock;
 		}
 		/* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 32313c08444..0f0d4704ddd 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -985,6 +985,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
 		/* add new interrupt at end of irq queue */
 		do {
+			/*
+			 * Or all existing action->thread_mask bits,
+			 * so we can find the next zero bit for this
+			 * new action.
+			 */
 			thread_mask |= old->thread_mask;
 			old_ptr = &old->next;
 			old = *old_ptr;
@@ -993,14 +998,41 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	}
 
 	/*
-	 * Setup the thread mask for this irqaction. Unlikely to have
-	 * 32 resp 64 irqs sharing one line, but who knows.
+	 * Setup the thread mask for this irqaction for ONESHOT. For
+	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
+	 * conditional in irq_wake_thread().
 	 */
-	if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
-		ret = -EBUSY;
-		goto out_mask;
+	if (new->flags & IRQF_ONESHOT) {
+		/*
+		 * Unlikely to have 32 resp 64 irqs sharing one line,
+		 * but who knows.
+		 */
+		if (thread_mask == ~0UL) {
+			ret = -EBUSY;
+			goto out_mask;
+		}
+		/*
+		 * The thread_mask for the action is or'ed to
+		 * desc->thread_active to indicate that the
+		 * IRQF_ONESHOT thread handler has been woken, but not
+		 * yet finished. The bit is cleared when a thread
+		 * completes. When all threads of a shared interrupt
+		 * line have completed desc->threads_active becomes
+		 * zero and the interrupt line is unmasked. See
+		 * handle.c:irq_wake_thread() for further information.
+		 *
+		 * If no thread is woken by primary (hard irq context)
+		 * interrupt handlers, then desc->threads_active is
+		 * also checked for zero to unmask the irq line in the
+		 * affected hard irq flow handlers
+		 * (handle_[fasteoi|level]_irq).
+		 *
+		 * The new action gets the first zero bit of
+		 * thread_mask assigned. See the loop above which or's
+		 * all existing action->thread_mask bits.
+		 */
+		new->thread_mask = 1 << ffz(thread_mask);
 	}
-	new->thread_mask = 1 << ffz(thread_mask);
 
 	if (!shared) {
 		init_waitqueue_head(&desc->wait_for_threads);
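The long comment added above amounts to a tiny bit allocator: each IRQF_ONESHOT action on a shared line claims the first zero bit of the or'ed thread_mask of all existing actions, which caps sharing at BITS_PER_LONG oneshot handlers (hence the -EBUSY when thread_mask == ~0UL). A standalone C sketch of that allocation; the ffz() below is a naive stand-in for the kernel helper and, like __setup_irq(), assumes the all-ones case was already rejected:

```c
#include <stdio.h>

/* Naive find-first-zero-bit; the caller must reject word == ~0UL first,
 * exactly as __setup_irq() returns -EBUSY before calling ffz(). */
static unsigned long ffz(unsigned long word)
{
	unsigned long bit = 0;

	while (word & 1UL) {
		word >>= 1;
		bit++;
	}
	return bit;
}

int main(void)
{
	unsigned long thread_mask = 0;	/* or of all existing action masks */
	int i;

	for (i = 0; i < 3; i++) {
		unsigned long new_mask = 1UL << ffz(thread_mask);

		printf("action %d gets thread_mask 0x%lx\n", i, new_mask);
		thread_mask |= new_mask;	/* accumulate, as successive registrations would */
	}
	return 0;
}
```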
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 9788c0ec6f4..c62b8546cc9 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1334,8 +1334,10 @@ int __kprobes register_kprobe(struct kprobe *p)
 	if (!kernel_text_address((unsigned long) p->addr) ||
 	    in_kprobes_functions((unsigned long) p->addr) ||
 	    ftrace_text_reserved(p->addr, p->addr) ||
-	    jump_label_text_reserved(p->addr, p->addr))
-		goto fail_with_jump_label;
+	    jump_label_text_reserved(p->addr, p->addr)) {
+		ret = -EINVAL;
+		goto cannot_probe;
+	}
 
 	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
 	p->flags &= KPROBE_FLAG_DISABLED;
@@ -1352,7 +1354,7 @@ int __kprobes register_kprobe(struct kprobe *p)
 		 * its code to prohibit unexpected unloading.
 		 */
 		if (unlikely(!try_module_get(probed_mod)))
-			goto fail_with_jump_label;
+			goto cannot_probe;
 
 		/*
 		 * If the module freed .init.text, we couldn't insert
@@ -1361,7 +1363,7 @@ int __kprobes register_kprobe(struct kprobe *p)
 		if (within_module_init((unsigned long)p->addr, probed_mod) &&
 		    probed_mod->state != MODULE_STATE_COMING) {
 			module_put(probed_mod);
-			goto fail_with_jump_label;
+			goto cannot_probe;
 		}
 		/* ret will be updated by following code */
 	}
@@ -1409,7 +1411,7 @@ out:
 
 	return ret;
 
-fail_with_jump_label:
+cannot_probe:
 	preempt_enable();
 	jump_label_unlock();
 	return ret;
diff --git a/kernel/printk.c b/kernel/printk.c
index 13c0a1143f4..32690a0b7a1 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -702,6 +702,9 @@ static bool printk_time = 0;
 #endif
 module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
 
+static bool always_kmsg_dump;
+module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);
+
 /* Check if we have any console registered that can be called early in boot. */
 static int have_callable_console(void)
 {
@@ -1732,6 +1735,9 @@ void kmsg_dump(enum kmsg_dump_reason reason)
 	unsigned long l1, l2;
 	unsigned long flags;
 
+	if ((reason > KMSG_DUMP_OOPS) && !always_kmsg_dump)
+		return;
+
 	/* Theoretically, the log could move on after we do this, but
 	   there's not a lot we can do about that. The new messages
 	   will overwrite the start of what we dump. */
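The printk.c hunks add a boot/runtime knob, always_kmsg_dump, plus an early return that skips kmsg_dump() for any reason ordered after KMSG_DUMP_OOPS in the reason enum (the reboot/halt/poweroff paths) unless the knob is set. A standalone sketch of that gating; the enum below is a trimmed, illustrative stand-in, not a copy of include/linux/kmsg_dump.h:

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative ordering only: error reasons first, shutdown paths after. */
enum kmsg_dump_reason {
	KMSG_DUMP_PANIC,
	KMSG_DUMP_OOPS,
	KMSG_DUMP_RESTART,
	KMSG_DUMP_HALT,
	KMSG_DUMP_POWEROFF,
};

static bool always_kmsg_dump;	/* stands in for the new module parameter */

static void kmsg_dump(enum kmsg_dump_reason reason)
{
	/* The check added by the patch: skip non-error dumps by default. */
	if ((reason > KMSG_DUMP_OOPS) && !always_kmsg_dump) {
		printf("reason %d: skipped\n", (int)reason);
		return;
	}
	printf("reason %d: dumped\n", (int)reason);
}

int main(void)
{
	kmsg_dump(KMSG_DUMP_OOPS);	/* dumped: it is an error path */
	kmsg_dump(KMSG_DUMP_RESTART);	/* skipped by default */
	always_kmsg_dump = true;
	kmsg_dump(KMSG_DUMP_RESTART);	/* dumped once the knob is set */
	return 0;
}
```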
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 33a0676ea74..b342f57879e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6728,7 +6728,7 @@ int __init sched_create_sysfs_power_savings_entries(struct device *dev)
 static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
 			     void *hcpu)
 {
-	switch (action) {
+	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_ONLINE:
 	case CPU_DOWN_FAILED:
 		cpuset_update_active_cpus();
@@ -6741,7 +6741,7 @@ static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
 			       void *hcpu)
 {
-	switch (action) {
+	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_DOWN_PREPARE:
 		cpuset_update_active_cpus();
 		return NOTIFY_OK;
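The sched/core.c fix matters because suspend/resume delivers hotplug events with CPU_TASKS_FROZEN or'ed into the action (CPU_ONLINE_FROZEN and friends), so a plain switch (action) never matches the bare case labels and cpusets were left stale across resume. A standalone sketch of the masking; the hypothetical cpuset_notifier() stands in for cpuset_cpu_active()/cpuset_cpu_inactive(), and the constants mirror include/linux/cpu.h but are reproduced here for illustration only:

```c
#include <stdio.h>

/* Values as in include/linux/cpu.h (reproduced for illustration). */
#define CPU_ONLINE		0x0002
#define CPU_TASKS_FROZEN	0x0010
#define CPU_ONLINE_FROZEN	(CPU_ONLINE | CPU_TASKS_FROZEN)

static void cpuset_notifier(unsigned long action)
{
	/* The fix: strip the FROZEN flag so resume-time events match too. */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		printf("action 0x%04lx -> cpuset updated\n", action);
		break;
	default:
		printf("action 0x%04lx -> ignored\n", action);
		break;
	}
}

int main(void)
{
	cpuset_notifier(CPU_ONLINE);		/* normal hotplug: matched before and after */
	cpuset_notifier(CPU_ONLINE_FROZEN);	/* resume path: only matched with the fix */
	return 0;
}
```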
