diff options
Diffstat (limited to 'kernel/workqueue.c')
| -rw-r--r-- | kernel/workqueue.c | 19 | 
1 file changed, 15 insertions, 4 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 4aa9f5bc6b2..ee8e29a2320 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -296,7 +296,7 @@ static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);  static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];  struct workqueue_struct *system_wq __read_mostly; -EXPORT_SYMBOL_GPL(system_wq); +EXPORT_SYMBOL(system_wq);  struct workqueue_struct *system_highpri_wq __read_mostly;  EXPORT_SYMBOL_GPL(system_highpri_wq);  struct workqueue_struct *system_long_wq __read_mostly; @@ -1411,7 +1411,7 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,  	local_irq_restore(flags);  	return ret;  } -EXPORT_SYMBOL_GPL(queue_work_on); +EXPORT_SYMBOL(queue_work_on);  void delayed_work_timer_fn(unsigned long __data)  { @@ -1485,7 +1485,7 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,  	local_irq_restore(flags);  	return ret;  } -EXPORT_SYMBOL_GPL(queue_delayed_work_on); +EXPORT_SYMBOL(queue_delayed_work_on);  /**   * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU @@ -2059,6 +2059,7 @@ static bool manage_workers(struct worker *worker)  	if (unlikely(!mutex_trylock(&pool->manager_mutex))) {  		spin_unlock_irq(&pool->lock);  		mutex_lock(&pool->manager_mutex); +		spin_lock_irq(&pool->lock);  		ret = true;  	} @@ -4311,6 +4312,12 @@ bool current_is_workqueue_rescuer(void)   * no synchronization around this function and the test result is   * unreliable and only useful as advisory hints or for debugging.   * + * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU. + * Note that both per-cpu and unbound workqueues may be associated with + * multiple pool_workqueues which have separate congested states.  A + * workqueue being congested on one CPU doesn't mean the workqueue is also + * congested on other CPUs / NUMA nodes. + *   * RETURNS:   * %true if congested, %false otherwise.   
*/ @@ -4321,6 +4328,9 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)  	rcu_read_lock_sched(); +	if (cpu == WORK_CPU_UNBOUND) +		cpu = smp_processor_id(); +  	if (!(wq->flags & WQ_UNBOUND))  		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);  	else @@ -4895,7 +4905,8 @@ static void __init wq_numa_init(void)  	BUG_ON(!tbl);  	for_each_node(node) -		BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL, node)); +		BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL, +				node_online(node) ? node : NUMA_NO_NODE));  	for_each_possible_cpu(cpu) {  		node = cpu_to_node(cpu);  | 
