Diffstat (limited to 'kernel/padata.c')
-rw-r--r--  kernel/padata.c  103
1 file changed, 39 insertions(+), 64 deletions(-)
diff --git a/kernel/padata.c b/kernel/padata.c
index 751019415d2..161402f0b51 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -1,6 +1,8 @@
/*
* padata.c - generic interface to process data streams in parallel
*
+ * See Documentation/padata.txt for the API documentation.
+ *
* Copyright (C) 2008, 2009 secunet Security Networks AG
* Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
*
@@ -18,7 +20,7 @@
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
@@ -29,7 +31,6 @@
#include <linux/sysfs.h>
#include <linux/rcupdate.h>
-#define MAX_SEQ_NR (INT_MAX - NR_CPUS)
#define MAX_OBJ_NUM 1000
static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
@@ -43,18 +44,18 @@ static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
return target_cpu;
}
-static int padata_cpu_hash(struct padata_priv *padata)
+static int padata_cpu_hash(struct parallel_data *pd)
{
+ unsigned int seq_nr;
int cpu_index;
- struct parallel_data *pd;
-
- pd = padata->pd;
/*
* Hash the sequence numbers to the cpus by taking
* seq_nr mod. number of cpus in use.
*/
- cpu_index = padata->seq_nr % cpumask_weight(pd->cpumask.pcpu);
+
+ seq_nr = atomic_inc_return(&pd->seq_nr);
+ cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
return padata_index_to_cpu(pd, cpu_index);
}
@@ -111,7 +112,7 @@ int padata_do_parallel(struct padata_instance *pinst,
rcu_read_lock_bh();
- pd = rcu_dereference(pinst->pd);
+ pd = rcu_dereference_bh(pinst->pd);
err = -EINVAL;
if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
@@ -132,12 +133,7 @@ int padata_do_parallel(struct padata_instance *pinst,
padata->pd = pd;
padata->cb_cpu = cb_cpu;
- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
- atomic_set(&pd->seq_nr, -1);
-
- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
-
- target_cpu = padata_cpu_hash(padata);
+ target_cpu = padata_cpu_hash(pd);
queue = per_cpu_ptr(pd->pqueue, target_cpu);
spin_lock(&queue->parallel.lock);
@@ -173,8 +169,8 @@ EXPORT_SYMBOL(padata_do_parallel);
static struct padata_priv *padata_get_next(struct parallel_data *pd)
{
int cpu, num_cpus;
- int next_nr, next_index;
- struct padata_parallel_queue *queue, *next_queue;
+ unsigned int next_nr, next_index;
+ struct padata_parallel_queue *next_queue;
struct padata_priv *padata;
struct padata_list *reorder;
@@ -189,14 +185,6 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd)
cpu = padata_index_to_cpu(pd, next_index);
next_queue = per_cpu_ptr(pd->pqueue, cpu);
- if (unlikely(next_nr > pd->max_seq_nr)) {
- next_nr = next_nr - pd->max_seq_nr - 1;
- next_index = next_nr % num_cpus;
- cpu = padata_index_to_cpu(pd, next_index);
- next_queue = per_cpu_ptr(pd->pqueue, cpu);
- pd->processed = 0;
- }
-
padata = NULL;
reorder = &next_queue->reorder;
@@ -205,8 +193,6 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd)
padata = list_entry(reorder->list.next,
struct padata_priv, list);
- BUG_ON(next_nr != padata->seq_nr);
-
spin_lock(&reorder->lock);
list_del_init(&padata->list);
atomic_dec(&pd->reorder_objects);
@@ -217,8 +203,7 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd)
goto out;
}
- queue = per_cpu_ptr(pd->pqueue, smp_processor_id());
- if (queue->cpu_index == next_queue->cpu_index) {
+ if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
padata = ERR_PTR(-ENODATA);
goto out;
}
@@ -230,6 +215,7 @@ out:
static void padata_reorder(struct parallel_data *pd)
{
+ int cb_cpu;
struct padata_priv *padata;
struct padata_serial_queue *squeue;
struct padata_instance *pinst = pd->pinst;
@@ -262,7 +248,7 @@ static void padata_reorder(struct parallel_data *pd)
/*
* This cpu has to do the parallel processing of the next
* object. It's waiting in the cpu's parallelization queue,
- * so exit imediately.
+ * so exit immediately.
*/
if (PTR_ERR(padata) == -ENODATA) {
del_timer(&pd->timer);
@@ -270,13 +256,14 @@ static void padata_reorder(struct parallel_data *pd)
return;
}
- squeue = per_cpu_ptr(pd->squeue, padata->cb_cpu);
+ cb_cpu = padata->cb_cpu;
+ squeue = per_cpu_ptr(pd->squeue, cb_cpu);
spin_lock(&squeue->serial.lock);
list_add_tail(&padata->list, &squeue->serial.list);
spin_unlock(&squeue->serial.lock);
- queue_work_on(padata->cb_cpu, pinst->wq, &squeue->work);
+ queue_work_on(cb_cpu, pinst->wq, &squeue->work);
}
spin_unlock_bh(&pd->lock);
@@ -284,7 +271,7 @@ static void padata_reorder(struct parallel_data *pd)
/*
* The next object that needs serialization might have arrived to
* the reorder queues in the meantime, we will be called again
- * from the timer function if noone else cares for it.
+ * from the timer function if no one else cares for it.
*/
if (atomic_read(&pd->reorder_objects)
&& !(pinst->flags & PADATA_RESET))
@@ -367,13 +354,13 @@ static int padata_setup_cpumasks(struct parallel_data *pd,
if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
return -ENOMEM;
- cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_active_mask);
+ cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
free_cpumask_var(pd->cpumask.cbcpu);
return -ENOMEM;
}
- cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_active_mask);
+ cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);
return 0;
}
@@ -400,7 +387,7 @@ static void padata_init_squeues(struct parallel_data *pd)
/* Initialize all percpu queues used by parallel workers */
static void padata_init_pqueues(struct parallel_data *pd)
{
- int cpu_index, num_cpus, cpu;
+ int cpu_index, cpu;
struct padata_parallel_queue *pqueue;
cpu_index = 0;
@@ -415,9 +402,6 @@ static void padata_init_pqueues(struct parallel_data *pd)
INIT_WORK(&pqueue->work, padata_parallel_worker);
atomic_set(&pqueue->num_obj, 0);
}
-
- num_cpus = cpumask_weight(pd->cpumask.pcpu);
- pd->max_seq_nr = num_cpus ? (MAX_SEQ_NR / num_cpus) * num_cpus - 1 : 0;
}
/* Allocate and initialize the internal cpumask dependent resources. */
@@ -515,7 +499,7 @@ static void __padata_stop(struct padata_instance *pinst)
put_online_cpus();
}
-/* Replace the internal control stucture with a new one. */
+/* Replace the internal control structure with a new one. */
static void padata_replace(struct padata_instance *pinst,
struct parallel_data *pd_new)
{
@@ -580,7 +564,7 @@ EXPORT_SYMBOL(padata_unregister_cpumask_notifier);
static bool padata_validate_cpumask(struct padata_instance *pinst,
const struct cpumask *cpumask)
{
- if (!cpumask_intersects(cpumask, cpu_active_mask)) {
+ if (!cpumask_intersects(cpumask, cpu_online_mask)) {
pinst->flags |= PADATA_INVALID;
return false;
}
@@ -694,7 +678,7 @@ static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
struct parallel_data *pd;
- if (cpumask_test_cpu(cpu, cpu_active_mask)) {
+ if (cpumask_test_cpu(cpu, cpu_online_mask)) {
pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
pinst->cpumask.cbcpu);
if (!pd)
@@ -762,13 +746,16 @@ static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
return -ENOMEM;
padata_replace(pinst, pd);
+
+ cpumask_clear_cpu(cpu, pd->cpumask.cbcpu);
+ cpumask_clear_cpu(cpu, pd->cpumask.pcpu);
}
return 0;
}
/**
- * padata_remove_cpu - remove a cpu from the one or both(serial and paralell)
+ * padata_remove_cpu - remove a cpu from one or both (serial and parallel)
* padata cpumasks.
*
* @pinst: padata instance
@@ -858,6 +845,8 @@ static int padata_cpu_callback(struct notifier_block *nfb,
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
+ case CPU_DOWN_FAILED:
+ case CPU_DOWN_FAILED_FROZEN:
if (!pinst_has_cpu(pinst, cpu))
break;
mutex_lock(&pinst->lock);
@@ -869,6 +858,8 @@ static int padata_cpu_callback(struct notifier_block *nfb,
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
if (!pinst_has_cpu(pinst, cpu))
break;
mutex_lock(&pinst->lock);
@@ -877,22 +868,6 @@ static int padata_cpu_callback(struct notifier_block *nfb,
if (err)
return notifier_from_errno(err);
break;
-
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- if (!pinst_has_cpu(pinst, cpu))
- break;
- mutex_lock(&pinst->lock);
- __padata_remove_cpu(pinst, cpu);
- mutex_unlock(&pinst->lock);
-
- case CPU_DOWN_FAILED:
- case CPU_DOWN_FAILED_FROZEN:
- if (!pinst_has_cpu(pinst, cpu))
- break;
- mutex_lock(&pinst->lock);
- __padata_add_cpu(pinst, cpu);
- mutex_unlock(&pinst->lock);
}
return NOTIFY_OK;
@@ -1098,18 +1073,18 @@ struct padata_instance *padata_alloc(struct workqueue_struct *wq,
pinst->flags = 0;
-#ifdef CONFIG_HOTPLUG_CPU
- pinst->cpu_notifier.notifier_call = padata_cpu_callback;
- pinst->cpu_notifier.priority = 0;
- register_hotcpu_notifier(&pinst->cpu_notifier);
-#endif
-
put_online_cpus();
BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
kobject_init(&pinst->kobj, &padata_attr_type);
mutex_init(&pinst->lock);
+#ifdef CONFIG_HOTPLUG_CPU
+ pinst->cpu_notifier.notifier_call = padata_cpu_callback;
+ pinst->cpu_notifier.priority = 0;
+ register_hotcpu_notifier(&pinst->cpu_notifier);
+#endif
+
return pinst;
err_free_masks: