| author | Mark Brown <broonie@opensource.wolfsonmicro.com> | 2009-11-06 15:46:18 +0000 | 
|---|---|---|
| committer | Mark Brown <broonie@opensource.wolfsonmicro.com> | 2009-11-06 15:46:18 +0000 | 
| commit | 330f28f691e9b349e34adcaf82b273cf061bb491 (patch) | |
| tree | fca3bfe41eff25ef19f576cef1979c68f6521af5 /kernel/workqueue.c | |
| parent | fe3e78e073d25308756f38019956061153267769 (diff) | |
| parent | 6fc786d5034ed7ce2d43c459211137de6d99dd28 (diff) | |
Merge branch 'for-2.6.32' into for-2.6.33
Diffstat (limited to 'kernel/workqueue.c')
| -rw-r--r-- | kernel/workqueue.c | 39 | 
1 file changed, 37 insertions, 2 deletions
```diff
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index addfe2df93b..12328147132 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -640,6 +640,24 @@ int schedule_delayed_work(struct delayed_work *dwork,
 EXPORT_SYMBOL(schedule_delayed_work);
 
 /**
+ * flush_delayed_work - block until a dwork_struct's callback has terminated
+ * @dwork: the delayed work which is to be flushed
+ *
+ * Any timeout is cancelled, and any pending work is run immediately.
+ */
+void flush_delayed_work(struct delayed_work *dwork)
+{
+	if (del_timer_sync(&dwork->timer)) {
+		struct cpu_workqueue_struct *cwq;
+		cwq = wq_per_cpu(keventd_wq, get_cpu());
+		__queue_work(cwq, &dwork->work);
+		put_cpu();
+	}
+	flush_work(&dwork->work);
+}
+EXPORT_SYMBOL(flush_delayed_work);
+
+/**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
  * @cpu: cpu to use
  * @dwork: job to be done
@@ -667,21 +685,38 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
 int schedule_on_each_cpu(work_func_t func)
 {
 	int cpu;
+	int orig = -1;
 	struct work_struct *works;
 
 	works = alloc_percpu(struct work_struct);
 	if (!works)
 		return -ENOMEM;
 
+	/*
+	 * when running in keventd don't schedule a work item on itself.
+	 * Can just call directly because the work queue is already bound.
+	 * This also is faster.
+	 * Make this a generic parameter for other workqueues?
+	 */
+	if (current_is_keventd()) {
+		orig = raw_smp_processor_id();
+		INIT_WORK(per_cpu_ptr(works, orig), func);
+		func(per_cpu_ptr(works, orig));
+	}
+
 	get_online_cpus();
 	for_each_online_cpu(cpu) {
 		struct work_struct *work = per_cpu_ptr(works, cpu);
 
+		if (cpu == orig)
+			continue;
 		INIT_WORK(work, func);
 		schedule_work_on(cpu, work);
 	}
-	for_each_online_cpu(cpu)
-		flush_work(per_cpu_ptr(works, cpu));
+	for_each_online_cpu(cpu) {
+		if (cpu != orig)
+			flush_work(per_cpu_ptr(works, cpu));
+	}
 	put_online_cpus();
 	free_percpu(works);
 	return 0;
```
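This merge brings two changes into kernel/workqueue.c: a new exported helper, flush_delayed_work(), and a keventd short-circuit in schedule_on_each_cpu(). The short-circuit matters because the global workqueue's per-CPU threads execute work items one at a time: if schedule_on_each_cpu() is called from keventd itself and queues an item on its own CPU, the flush loop at the end would wait on an item that cannot run until the caller returns. Invoking the local CPU's callback directly avoids that, and is faster besides.

flush_delayed_work() is for code that batches deferred work but occasionally needs it completed right away, e.g. pushing cached register writes out before a suspend. Below is a minimal usage sketch; the mydev_* names and the 100 ms delay are hypothetical illustrations, not part of this commit.

```c
#include <linux/jiffies.h>
#include <linux/workqueue.h>

/* Hypothetical deferred-write handler; illustration only. */
static void mydev_sync_regs(struct work_struct *work)
{
	/* ... push cached register values out to the hardware ... */
}

static DECLARE_DELAYED_WORK(mydev_work, mydev_sync_regs);

/* Fast path: batch the hardware write instead of doing it inline. */
static void mydev_touch(void)
{
	schedule_delayed_work(&mydev_work, msecs_to_jiffies(100));
}

/* Suspend path: batched writes must reach the device before power-down. */
static int mydev_suspend(void)
{
	/*
	 * flush_delayed_work() cancels a still-pending timer, queues
	 * the work immediately in its place, and blocks until the
	 * callback has finished. Unlike cancel_delayed_work_sync(),
	 * pending work is run, not discarded.
	 */
	flush_delayed_work(&mydev_work);
	return 0;
}
```

Note the get_cpu()/put_cpu() pair in the implementation: it disables preemption across the re-queue, so the caller cannot migrate between picking the per-CPU queue of keventd_wq and inserting the work into it.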
