| author | Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | 2013-05-15 10:26:50 -0400 |
| committer | Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | 2013-05-15 10:26:50 -0400 |
| commit | 12e04ffcd93b25dfd726d46338c2ee7d23de556e (patch) | |
| tree | f91479a62805619168994fd3ee55e3ffa23fc24e /kernel/kthread.c | |
| parent | 9eff37a8713939f218ab8bf0dc93f1d67af7b8b4 (diff) | |
| parent | f722406faae2d073cc1d01063d1123c35425939e (diff) | |
Merge tag 'v3.10-rc1' into stable/for-linus-3.10
Linux 3.10-rc1
* tag 'v3.10-rc1': (12273 commits)
  Linux 3.10-rc1
  [SCSI] qla2xxx: Update firmware link in Kconfig file.
  [SCSI] iscsi class, qla4xxx: fix sess/conn refcounting when find fns are used
  [SCSI] sas: unify the pointlessly separated enums sas_dev_type and sas_device_type
  [SCSI] pm80xx: thermal, sas controller config and error handling update
  [SCSI] pm80xx: NCQ error handling changes
  [SCSI] pm80xx: WWN Modification for PM8081/88/89 controllers
  [SCSI] pm80xx: Changed module name and debug messages update
  [SCSI] pm80xx: Firmware flash memory free fix, with addition of new memory region for it
  [SCSI] pm80xx: SPC new firmware changes for device id 0x8081 alone
  [SCSI] pm80xx: Added SPCv/ve specific hardware functionalities and relevant changes in common files
  [SCSI] pm80xx: MSI-X implementation for using 64 interrupts
  [SCSI] pm80xx: Updated common functions common for SPC and SPCv/ve
  [SCSI] pm80xx: Multiple inbound/outbound queue configuration
  [SCSI] pm80xx: Added SPCv/ve specific ids, variables and modify for SPC
  [SCSI] lpfc: fix up Kconfig dependencies
  [SCSI] Handle MLQUEUE busy response in scsi_send_eh_cmnd
  dm cache: set config value
  dm cache: move config fns
  dm thin: generate event when metadata threshold passed
  ...
Diffstat (limited to 'kernel/kthread.c')
| -rw-r--r-- | kernel/kthread.c | 111 |
1 file changed, 67 insertions, 44 deletions
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 691dc2ef9ba..760e86df8c2 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -17,6 +17,7 @@
 #include <linux/slab.h>
 #include <linux/freezer.h>
 #include <linux/ptrace.h>
+#include <linux/uaccess.h>
 #include <trace/events/sched.h>
 
 static DEFINE_SPINLOCK(kthread_create_lock);
@@ -52,8 +53,21 @@ enum KTHREAD_BITS {
 	KTHREAD_IS_PARKED,
 };
 
-#define to_kthread(tsk)	\
-	container_of((tsk)->vfork_done, struct kthread, exited)
+#define __to_kthread(vfork)	\
+	container_of(vfork, struct kthread, exited)
+
+static inline struct kthread *to_kthread(struct task_struct *k)
+{
+	return __to_kthread(k->vfork_done);
+}
+
+static struct kthread *to_live_kthread(struct task_struct *k)
+{
+	struct completion *vfork = ACCESS_ONCE(k->vfork_done);
+	if (likely(vfork))
+		return __to_kthread(vfork);
+	return NULL;
+}
 
 /**
  * kthread_should_stop - should this kthread return now?
@@ -122,14 +136,32 @@ void *kthread_data(struct task_struct *task)
 	return to_kthread(task)->data;
 }
 
+/**
+ * probe_kthread_data - speculative version of kthread_data()
+ * @task: possible kthread task in question
+ *
+ * @task could be a kthread task.  Return the data value specified when it
+ * was created if accessible.  If @task isn't a kthread task or its data is
+ * inaccessible for any reason, %NULL is returned.  This function requires
+ * that @task itself is safe to dereference.
+ */
+void *probe_kthread_data(struct task_struct *task)
+{
+	struct kthread *kthread = to_kthread(task);
+	void *data = NULL;
+
+	probe_kernel_read(&data, &kthread->data, sizeof(data));
+	return data;
+}
+
 static void __kthread_parkme(struct kthread *self)
 {
-	__set_current_state(TASK_INTERRUPTIBLE);
+	__set_current_state(TASK_PARKED);
 	while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
 		if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
 			complete(&self->parked);
 		schedule();
-		__set_current_state(TASK_INTERRUPTIBLE);
+		__set_current_state(TASK_PARKED);
 	}
 	clear_bit(KTHREAD_IS_PARKED, &self->flags);
 	__set_current_state(TASK_RUNNING);
@@ -256,11 +288,16 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
 }
 EXPORT_SYMBOL(kthread_create_on_node);
 
-static void __kthread_bind(struct task_struct *p, unsigned int cpu)
+static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
 {
+	/* Must have done schedule() in kthread() before we set_task_cpu */
+	if (!wait_task_inactive(p, state)) {
+		WARN_ON(1);
+		return;
+	}
 	/* It's safe because the task is inactive. */
 	do_set_cpus_allowed(p, cpumask_of(cpu));
-	p->flags |= PF_THREAD_BOUND;
+	p->flags |= PF_NO_SETAFFINITY;
 }
 
 /**
@@ -274,12 +311,7 @@ static void __kthread_bind(struct task_struct *p, unsigned int cpu)
  */
 void kthread_bind(struct task_struct *p, unsigned int cpu)
 {
-	/* Must have done schedule() in kthread() before we set_task_cpu */
-	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
-		WARN_ON(1);
-		return;
-	}
-	__kthread_bind(p, cpu);
+	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(kthread_bind);
 
@@ -311,17 +343,20 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
 	return p;
 }
 
-static struct kthread *task_get_live_kthread(struct task_struct *k)
+static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
 {
-	struct kthread *kthread;
-
-	get_task_struct(k);
-	kthread = to_kthread(k);
-	/* It might have exited */
-	barrier();
-	if (k->vfork_done != NULL)
-		return kthread;
-	return NULL;
+	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+	/*
+	 * We clear the IS_PARKED bit here as we don't wait
+	 * until the task has left the park code. So if we'd
+	 * park before that happens we'd see the IS_PARKED bit
+	 * which might be about to be cleared.
+	 */
+	if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
+		if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
+			__kthread_bind(k, kthread->cpu, TASK_PARKED);
+		wake_up_state(k, TASK_PARKED);
+	}
 }
 
 /**
@@ -334,23 +369,10 @@ static struct kthread *task_get_live_kthread(struct task_struct *k)
  */
 void kthread_unpark(struct task_struct *k)
 {
-	struct kthread *kthread = task_get_live_kthread(k);
+	struct kthread *kthread = to_live_kthread(k);
 
-	if (kthread) {
-		clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
-		/*
-		 * We clear the IS_PARKED bit here as we don't wait
-		 * until the task has left the park code. So if we'd
-		 * park before that happens we'd see the IS_PARKED bit
-		 * which might be about to be cleared.
-		 */
-		if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
-			if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
-				__kthread_bind(k, kthread->cpu);
-			wake_up_process(k);
-		}
-	}
-	put_task_struct(k);
+	if (kthread)
+		__kthread_unpark(k, kthread);
 }
 
 /**
@@ -367,7 +389,7 @@ void kthread_unpark(struct task_struct *k)
  */
 int kthread_park(struct task_struct *k)
 {
-	struct kthread *kthread = task_get_live_kthread(k);
+	struct kthread *kthread = to_live_kthread(k);
 	int ret = -ENOSYS;
 
 	if (kthread) {
@@ -380,7 +402,6 @@ int kthread_park(struct task_struct *k)
 		}
 		ret = 0;
 	}
-	put_task_struct(k);
 	return ret;
 }
 
@@ -401,21 +422,23 @@ int kthread_park(struct task_struct *k)
  */
 int kthread_stop(struct task_struct *k)
 {
-	struct kthread *kthread = task_get_live_kthread(k);
+	struct kthread *kthread;
 	int ret;
 
 	trace_sched_kthread_stop(k);
+
+	get_task_struct(k);
+	kthread = to_live_kthread(k);
 	if (kthread) {
 		set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
-		clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+		__kthread_unpark(k, kthread);
 		wake_up_process(k);
 		wait_for_completion(&kthread->exited);
 	}
 	ret = k->exit_code;
-
 	put_task_struct(k);
-	trace_sched_kthread_stop_ret(ret);
 
+	trace_sched_kthread_stop_ret(ret);
 	return ret;
 }
 EXPORT_SYMBOL(kthread_stop);
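For context, the park/unpark machinery changed above is driven from inside the thread function via kthread_parkme(). Below is a minimal sketch of a kthread that cooperates with kthread_park()/kthread_unpark()/kthread_stop(); it is not part of this merge, and example_thread_fn plus the msleep() work placeholder are hypothetical, but kthread_should_stop(), kthread_should_park() and kthread_parkme() are the existing helpers from <linux/kthread.h> that this diff builds on.

```c
/*
 * Illustrative only -- not from this commit.  A kthread that honors both
 * stop and park requests.  With the change above, the parked thread sleeps
 * in TASK_PARKED, so only the explicit wake_up_state(k, TASK_PARKED) issued
 * by __kthread_unpark() brings it back out of the park loop.
 */
#include <linux/kthread.h>
#include <linux/delay.h>

static int example_thread_fn(void *data)	/* hypothetical thread function */
{
	while (!kthread_should_stop()) {
		if (kthread_should_park()) {
			/* Sleeps until kthread_unpark() (or kthread_stop()). */
			kthread_parkme();
			continue;
		}
		/* ... one unit of real work would go here ... */
		msleep(100);
	}
	return 0;
}
```

A caller would typically start such a thread with kthread_run(example_thread_fn, NULL, "example"), quiesce it with kthread_park(tsk) (for instance around CPU hotplug), resume it with kthread_unpark(tsk), and finally call kthread_stop(tsk). Note that after this series kthread_stop() calls __kthread_unpark() itself, so a still-parked thread is released and can observe KTHREAD_SHOULD_STOP and exit.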
