 kernel/workqueue.c | 29 +++++++++++++++++++++++++++--
 1 file changed, 27 insertions(+), 2 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index d79a18d0c42..dc7b8458e27 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1326,6 +1326,15 @@ static void idle_worker_rebind(struct worker *worker)
 
 	/* we did our part, wait for rebind_workers() to finish up */
 	wait_event(gcwq->rebind_hold, !(worker->flags & WORKER_REBIND));
+
+	/*
+	 * rebind_workers() shouldn't finish until all workers passed the
+	 * above WORKER_REBIND wait. Tell it when done.
+	 */
+	spin_lock_irq(&worker->pool->gcwq->lock);
+	if (!--worker->idle_rebind->cnt)
+		complete(&worker->idle_rebind->done);
+	spin_unlock_irq(&worker->pool->gcwq->lock);
 }
 
 /*
@@ -1448,12 +1457,28 @@ retry:
 	 * be cleared inside idle_worker_rebind(). Clear and release.
 	 * Clearing %WORKER_REBIND from this foreign context is safe
 	 * because these workers are still guaranteed to be idle.
+	 *
+	 * We need to make sure all idle workers passed WORKER_REBIND wait
+	 * in idle_worker_rebind() before returning; otherwise, workers can
+	 * get stuck at the wait if hotplug cycle repeats.
 	 */
-	for_each_worker_pool(pool, gcwq)
-		list_for_each_entry(worker, &pool->idle_list, entry)
+	idle_rebind.cnt = 1;
+	INIT_COMPLETION(idle_rebind.done);
+
+	for_each_worker_pool(pool, gcwq) {
+		list_for_each_entry(worker, &pool->idle_list, entry) {
 			worker->flags &= ~WORKER_REBIND;
+			idle_rebind.cnt++;
+		}
+	}
 
 	wake_up_all(&gcwq->rebind_hold);
+
+	if (--idle_rebind.cnt) {
+		spin_unlock_irq(&gcwq->lock);
+		wait_for_completion(&idle_rebind.done);
+		spin_lock_irq(&gcwq->lock);
+	}
 }
 
 static struct worker *alloc_worker(void)
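
The scheme above is a counted-completion handshake: rebind_workers() holds one reference of its own, each idle worker it releases holds another, and whichever thread drops idle_rebind.cnt to zero completes the waiter. Below is a minimal userspace sketch of that pattern, assuming pthreads in place of gcwq->lock and struct completion; the names (put_ref, NR_WORKERS, etc.) are illustrative, not the kernel's.

#include <pthread.h>
#include <stdio.h>

#define NR_WORKERS 4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; /* stands in for gcwq->lock */
static pthread_cond_t done = PTHREAD_COND_INITIALIZER;   /* stands in for idle_rebind.done */
static int cnt;                                          /* stands in for idle_rebind.cnt */

/* Drop one reference; the thread that hits zero wakes the waiter. */
static void put_ref(void)
{
	pthread_mutex_lock(&lock);
	if (!--cnt)
		pthread_cond_signal(&done); /* analog of complete(&idle_rebind->done) */
	pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
	(void)arg;
	/* ... a real worker would do its rebind steps here ... */
	put_ref(); /* "I passed the WORKER_REBIND wait" */
	return NULL;
}

int main(void)
{
	pthread_t tid[NR_WORKERS];
	int i;

	/* One self-reference plus one per worker about to be released. */
	cnt = 1 + NR_WORKERS;

	for (i = 0; i < NR_WORKERS; i++)
		pthread_create(&tid[i], NULL, worker, NULL);

	/* Drop the self-reference; wait only if workers still hold refs. */
	pthread_mutex_lock(&lock);
	if (--cnt)
		while (cnt)
			pthread_cond_wait(&done, &lock);
	pthread_mutex_unlock(&lock);

	puts("all workers passed the handshake");

	for (i = 0; i < NR_WORKERS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}

The coordinator's extra self-reference is what makes the "if (--cnt)" fast path safe: when no workers were released, the count drops straight to zero and the wait is skipped entirely, mirroring how rebind_workers() avoids wait_for_completion() when the idle lists are empty.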