Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile                       |    1
-rw-r--r--  kernel/cgroup.c                       |   15
-rw-r--r--  kernel/exit.c                         |   14
-rw-r--r--  kernel/fork.c                         |    1
-rw-r--r--  kernel/kprobes.c                      |   34
-rw-r--r--  kernel/ksysfs.c                       |    8
-rw-r--r--  kernel/kthread.c                      |    2
-rw-r--r--  kernel/lockdep.c                      |   18
-rw-r--r--  kernel/notifier.c                     |    6
-rw-r--r--  kernel/padata.c                       |  690
-rw-r--r--  kernel/perf_event.c                   |  638
-rw-r--r--  kernel/pid.c                          |    2
-rw-r--r--  kernel/power/Kconfig                  |   19
-rw-r--r--  kernel/power/main.c                   |   31
-rw-r--r--  kernel/power/snapshot.c               |    4
-rw-r--r--  kernel/power/swap.c                   |    4
-rw-r--r--  kernel/power/swsusp.c                 |   58
-rw-r--r--  kernel/power/user.c                   |   23
-rw-r--r--  kernel/ptrace.c                       |   88
-rw-r--r--  kernel/rcupdate.c                     |   29
-rw-r--r--  kernel/rcutorture.c                   |   94
-rw-r--r--  kernel/rcutree.c                      |  268
-rw-r--r--  kernel/rcutree.h                      |   61
-rw-r--r--  kernel/rcutree_plugin.h               |  229
-rw-r--r--  kernel/rcutree_trace.c                |   14
-rw-r--r--  kernel/resource.c                     |   57
-rw-r--r--  kernel/sched.c                        | 2206
-rw-r--r--  kernel/sched_cpupri.c                 |    4
-rw-r--r--  kernel/sched_fair.c                   | 1699
-rw-r--r--  kernel/sched_idletask.c               |   23
-rw-r--r--  kernel/sched_rt.c                     |   54
-rw-r--r--  kernel/smp.c                          |    8
-rw-r--r--  kernel/srcu.c                         |   52
-rw-r--r--  kernel/sys.c                          |    7
-rw-r--r--  kernel/trace/Kconfig                  |   11
-rw-r--r--  kernel/trace/Makefile                 |    4
-rw-r--r--  kernel/trace/ftrace.c                 |  105
-rw-r--r--  kernel/trace/trace.c                  |  144
-rw-r--r--  kernel/trace/trace.h                  |    6
-rw-r--r--  kernel/trace/trace_branch.c           |   19
-rw-r--r--  kernel/trace/trace_event_profile.c    |   52
-rw-r--r--  kernel/trace/trace_events.c           |   81
-rw-r--r--  kernel/trace/trace_events_filter.c    |    4
-rw-r--r--  kernel/trace/trace_export.c           |   87
-rw-r--r--  kernel/trace/trace_functions_graph.c  |   78
-rw-r--r--  kernel/trace/trace_kprobe.c           |  304
-rw-r--r--  kernel/trace/trace_syscalls.c         |  189
-rw-r--r--  kernel/user.c                         |  305
48 files changed, 4280 insertions(+), 3570 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index 864ff75d65f..6aebdeb2aa3 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -100,6 +100,7 @@ obj-$(CONFIG_SLOW_WORK_DEBUG) += slow-work-debugfs.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o
+obj-$(CONFIG_PADATA) += padata.o
ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index aa3bee56644..4fd90e12977 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -23,6 +23,7 @@
*/
#include <linux/cgroup.h>
+#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/fs.h>
@@ -166,6 +167,20 @@ static DEFINE_SPINLOCK(hierarchy_id_lock);
*/
static int need_forkexit_callback __read_mostly;
+#ifdef CONFIG_PROVE_LOCKING
+int cgroup_lock_is_held(void)
+{
+ return lockdep_is_held(&cgroup_mutex);
+}
+#else /* #ifdef CONFIG_PROVE_LOCKING */
+int cgroup_lock_is_held(void)
+{
+ return mutex_is_locked(&cgroup_mutex);
+}
+#endif /* #else #ifdef CONFIG_PROVE_LOCKING */
+
+EXPORT_SYMBOL_GPL(cgroup_lock_is_held);
+
/* convenient tests for these bits */
inline int cgroup_is_removed(const struct cgroup *cgrp)
{
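
The new cgroup_lock_is_held() lets RCU-lockdep express "this pointer is safe under either rcu_read_lock() or cgroup_mutex". A minimal sketch of a dereference site using it, loosely modeled on the cgroup accessors of this era (the surrounding variable names are illustrative, not part of this patch):

    struct cgroup_subsys_state *css;

    /* Legal if rcu_read_lock() is held OR cgroup_mutex is held. */
    css = rcu_dereference_check(task->cgroups->subsys[subsys_id],
                                rcu_read_lock_held() ||
                                cgroup_lock_is_held());
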
diff --git a/kernel/exit.c b/kernel/exit.c
index 546774a31a6..45ed043b8bf 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -85,7 +85,9 @@ static void __exit_signal(struct task_struct *tsk)
BUG_ON(!sig);
BUG_ON(!atomic_read(&sig->count));
- sighand = rcu_dereference(tsk->sighand);
+ sighand = rcu_dereference_check(tsk->sighand,
+ rcu_read_lock_held() ||
+ lockdep_is_held(&tasklist_lock));
spin_lock(&sighand->siglock);
posix_cpu_timers_exit(tsk);
@@ -170,8 +172,10 @@ void release_task(struct task_struct * p)
repeat:
tracehook_prepare_release_task(p);
/* don't need to get the RCU readlock here - the process is dead and
- * can't be modifying its own credentials */
+ * can't be modifying its own credentials. But shut RCU-lockdep up */
+ rcu_read_lock();
atomic_dec(&__task_cred(p)->user->processes);
+ rcu_read_unlock();
proc_flush_task(p);
@@ -473,9 +477,11 @@ static void close_files(struct files_struct * files)
/*
* It is safe to dereference the fd table without RCU or
* ->file_lock because this is the last reference to the
- * files structure.
+ * files structure. But use RCU to shut RCU-lockdep up.
*/
+ rcu_read_lock();
fdt = files_fdtable(files);
+ rcu_read_unlock();
for (;;) {
unsigned long set;
i = j * __NFDBITS;
@@ -521,10 +527,12 @@ void put_files_struct(struct files_struct *files)
* at the end of the RCU grace period. Otherwise,
* you can free files immediately.
*/
+ rcu_read_lock();
fdt = files_fdtable(files);
if (fdt != &files->fdtab)
kmem_cache_free(files_cachep, files);
free_fdtable(fdt);
+ rcu_read_unlock();
}
}
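
The exit.c hunks show the two RCU-lockdep idioms this series introduces: assert the real protection with rcu_dereference_check(), or, where the access is safe for reasons lockdep cannot see (a dead task, a last reference), take rcu_read_lock() purely to document the access. A condensed sketch of both:

    /* 1) State the exact conditions that make the access legal. */
    sighand = rcu_dereference_check(tsk->sighand,
                                    rcu_read_lock_held() ||
                                    lockdep_is_held(&tasklist_lock));

    /* 2) Safe anyway (last reference), but wrap in RCU so the
     *    checker sees a protected access. */
    rcu_read_lock();
    fdt = files_fdtable(files);
    rcu_read_unlock();
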
diff --git a/kernel/fork.c b/kernel/fork.c
index f88bd984df3..17bbf093356 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -86,6 +86,7 @@ int max_threads; /* tunable limit on nr_threads */
DEFINE_PER_CPU(unsigned long, process_counts) = 0;
__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
+EXPORT_SYMBOL_GPL(tasklist_lock);
int nr_processes(void)
{
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index b7df302a020..ccec774c716 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -44,6 +44,7 @@
#include <linux/debugfs.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
+#include <linux/ftrace.h>
#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
@@ -93,6 +94,7 @@ static struct kprobe_blackpoint kprobe_blacklist[] = {
{"native_get_debugreg",},
{"irq_entries_start",},
{"common_interrupt",},
+ {"mcount",}, /* mcount can be called from everywhere */
{NULL} /* Terminator */
};
@@ -124,30 +126,6 @@ static LIST_HEAD(kprobe_insn_pages);
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);
-static int __kprobes check_safety(void)
-{
- int ret = 0;
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_FREEZER)
- ret = freeze_processes();
- if (ret == 0) {
- struct task_struct *p, *q;
- do_each_thread(p, q) {
- if (p != current && p->state == TASK_RUNNING &&
- p->pid != 0) {
- printk("Check failed: %s is running\n",p->comm);
- ret = -1;
- goto loop_end;
- }
- } while_each_thread(p, q);
- }
-loop_end:
- thaw_processes();
-#else
- synchronize_sched();
-#endif
- return ret;
-}
-
/**
* __get_insn_slot() - Find a slot on an executable page for an instruction.
* We allocate an executable page if there's no room on existing ones.
@@ -235,9 +213,8 @@ static int __kprobes collect_garbage_slots(void)
{
struct kprobe_insn_page *kip, *next;
- /* Ensure no-one is preepmted on the garbages */
- if (check_safety())
- return -EAGAIN;
+ /* Ensure no-one is interrupted on the garbages */
+ synchronize_sched();
list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) {
int i;
@@ -728,7 +705,8 @@ int __kprobes register_kprobe(struct kprobe *p)
preempt_disable();
if (!kernel_text_address((unsigned long) p->addr) ||
- in_kprobes_functions((unsigned long) p->addr)) {
+ in_kprobes_functions((unsigned long) p->addr) ||
+ ftrace_text_reserved(p->addr, p->addr)) {
preempt_enable();
return -EINVAL;
}
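
check_safety() froze every task to prove that none was preempted inside a garbage slot. The replacement leans on the invariant that instruction slots only execute with preemption disabled, so one synchronize_sched() suffices before freeing. A condensed sketch of the resulting pattern (the function names here are illustrative):

    /* executor side: slots run only inside a preempt-off region */
    preempt_disable();
    run_from_insn_slot(slot);
    preempt_enable();

    /* collector side: once this returns, every preempt-off region
     * that existed before the call has finished */
    synchronize_sched();
    free_garbage_slots();
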
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index 3feaf5a7451..6b1ccc3f020 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -197,16 +197,8 @@ static int __init ksysfs_init(void)
goto group_exit;
}
- /* create the /sys/kernel/uids/ directory */
- error = uids_sysfs_init();
- if (error)
- goto notes_exit;
-
return 0;
-notes_exit:
- if (notes_size > 0)
- sysfs_remove_bin_file(kernel_kobj, &notes_attr);
group_exit:
sysfs_remove_group(kernel_kobj, &kernel_attr_group);
kset_exit:
diff --git a/kernel/kthread.c b/kernel/kthread.c
index fbb6222fe7e..82ed0ea1519 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -101,7 +101,7 @@ static void create_kthread(struct kthread_create_info *create)
*
* Description: This helper function creates and names a kernel
* thread. The thread will be stopped: use wake_up_process() to start
- * it. See also kthread_run(), kthread_create_on_cpu().
+ * it. See also kthread_run().
*
* When woken, the thread will run @threadfn() with @data as its
* argument. @threadfn() can either call do_exit() directly if it is a
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index c62ec14609b..0c30d0455de 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -3809,3 +3809,21 @@ void lockdep_sys_exit(void)
lockdep_print_held_locks(curr);
}
}
+
+void lockdep_rcu_dereference(const char *file, const int line)
+{
+ struct task_struct *curr = current;
+
+ if (!debug_locks_off())
+ return;
+ printk("\n===================================================\n");
+ printk( "[ INFO: suspicious rcu_dereference_check() usage. ]\n");
+ printk( "---------------------------------------------------\n");
+ printk("%s:%d invoked rcu_dereference_check() without protection!\n",
+ file, line);
+ printk("\nother info that might help us debug this:\n\n");
+ lockdep_print_held_locks(curr);
+ printk("\nstack backtrace:\n");
+ dump_stack();
+}
+EXPORT_SYMBOL_GPL(lockdep_rcu_dereference);
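
lockdep_rcu_dereference() is the report that fires when an rcu_dereference_check() condition fails. For orientation, the checking macro (defined in rcupdate.h, not in this hunk) has roughly this shape in this series; treat it as an approximation, not a quote:

    #define rcu_dereference_check(p, c)                          \
    ({                                                           \
            if (debug_lockdep_rcu_enabled() && !(c))             \
                    lockdep_rcu_dereference(__FILE__, __LINE__); \
            rcu_dereference_raw(p);                              \
    })
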
diff --git a/kernel/notifier.c b/kernel/notifier.c
index acd24e7643e..2488ba7eb56 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -78,10 +78,10 @@ static int __kprobes notifier_call_chain(struct notifier_block **nl,
int ret = NOTIFY_DONE;
struct notifier_block *nb, *next_nb;
- nb = rcu_dereference(*nl);
+ nb = rcu_dereference_raw(*nl);
while (nb && nr_to_call) {
- next_nb = rcu_dereference(nb->next);
+ next_nb = rcu_dereference_raw(nb->next);
#ifdef CONFIG_DEBUG_NOTIFIERS
if (unlikely(!func_ptr_is_kernel_text(nb->notifier_call))) {
@@ -309,7 +309,7 @@ int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
* racy then it does not matter what the result of the test
* is, we re-check the list after having taken the lock anyway:
*/
- if (rcu_dereference(nh->head)) {
+ if (rcu_dereference_raw(nh->head)) {
down_read(&nh->rwsem);
ret = notifier_call_chain(&nh->head, val, v, nr_to_call,
nr_calls);
diff --git a/kernel/padata.c b/kernel/padata.c
new file mode 100644
index 00000000000..6f9bcb8313d
--- /dev/null
+++ b/kernel/padata.c
@@ -0,0 +1,690 @@
+/*
+ * padata.c - generic interface to process data streams in parallel
+ *
+ * Copyright (C) 2008, 2009 secunet Security Networks AG
+ * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/module.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/cpu.h>
+#include <linux/padata.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/rcupdate.h>
+
+#define MAX_SEQ_NR INT_MAX - NR_CPUS
+#define MAX_OBJ_NUM 10000 * NR_CPUS
+
+static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
+{
+ int cpu, target_cpu;
+
+ target_cpu = cpumask_first(pd->cpumask);
+ for (cpu = 0; cpu < cpu_index; cpu++)
+ target_cpu = cpumask_next(target_cpu, pd->cpumask);
+
+ return target_cpu;
+}
+
+static int padata_cpu_hash(struct padata_priv *padata)
+{
+ int cpu_index;
+ struct parallel_data *pd;
+
+ pd = padata->pd;
+
+ /*
+ * Hash the sequence numbers to the cpus by taking
+ * seq_nr mod. number of cpus in use.
+ */
+ cpu_index = padata->seq_nr % cpumask_weight(pd->cpumask);
+
+ return padata_index_to_cpu(pd, cpu_index);
+}
+
+static void padata_parallel_worker(struct work_struct *work)
+{
+ struct padata_queue *queue;
+ struct parallel_data *pd;
+ struct padata_instance *pinst;
+ LIST_HEAD(local_list);
+
+ local_bh_disable();
+ queue = container_of(work, struct padata_queue, pwork);
+ pd = queue->pd;
+ pinst = pd->pinst;
+
+ spin_lock(&queue->parallel.lock);
+ list_replace_init(&queue->parallel.list, &local_list);
+ spin_unlock(&queue->parallel.lock);
+
+ while (!list_empty(&local_list)) {
+ struct padata_priv *padata;
+
+ padata = list_entry(local_list.next,
+ struct padata_priv, list);
+
+ list_del_init(&padata->list);
+
+ padata->parallel(padata);
+ }
+
+ local_bh_enable();
+}
+
+/*
+ * padata_do_parallel - padata parallelization function
+ *
+ * @pinst: padata instance
+ * @padata: object to be parallelized
+ * @cb_cpu: cpu the serialization callback function will run on,
+ * must be in the cpumask of padata.
+ *
+ * The parallelization callback function will run with BHs off.
+ * Note: Every object which is parallelized by padata_do_parallel
+ * must be seen by padata_do_serial.
+ */
+int padata_do_parallel(struct padata_instance *pinst,
+ struct padata_priv *padata, int cb_cpu)
+{
+ int target_cpu, err;
+ struct padata_queue *queue;
+ struct parallel_data *pd;
+
+ rcu_read_lock_bh();
+
+ pd = rcu_dereference(pinst->pd);
+
+ err = 0;
+ if (!(pinst->flags & PADATA_INIT))
+ goto out;
+
+ err = -EBUSY;
+ if ((pinst->flags & PADATA_RESET))
+ goto out;
+
+ if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
+ goto out;
+
+ err = -EINVAL;
+ if (!cpumask_test_cpu(cb_cpu, pd->cpumask))
+ goto out;
+
+ err = -EINPROGRESS;
+ atomic_inc(&pd->refcnt);
+ padata->pd = pd;
+ padata->cb_cpu = cb_cpu;
+
+ if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
+ atomic_set(&pd->seq_nr, -1);
+
+ padata->seq_nr = atomic_inc_return(&pd->seq_nr);
+
+ target_cpu = padata_cpu_hash(padata);
+ queue = per_cpu_ptr(pd->queue, target_cpu);
+
+ spin_lock(&queue->parallel.lock);
+ list_add_tail(&padata->list, &queue->parallel.list);
+ spin_unlock(&queue->parallel.lock);
+
+ queue_work_on(target_cpu, pinst->wq, &queue->pwork);
+
+out:
+ rcu_read_unlock_bh();
+
+ return err;
+}
+EXPORT_SYMBOL(padata_do_parallel);
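
Note that a successful submission returns -EINPROGRESS rather than 0: the object now belongs to padata until its serial callback has run. A sketch of a caller, with the my_* names and the choice of cb_cpu purely illustrative:

    my_req->padata.parallel = my_parallel;  /* runs on some cpu, BHs off */
    my_req->padata.serial   = my_serial;    /* runs on cb_cpu, in order  */

    err = padata_do_parallel(pinst, &my_req->padata, cb_cpu);
    if (err != -EINPROGRESS)
            my_complete_synchronously(my_req, err); /* busy/stopped/bad cpu */
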
+
+static struct padata_priv *padata_get_next(struct parallel_data *pd)
+{
+ int cpu, num_cpus, empty, calc_seq_nr;
+ int seq_nr, next_nr, overrun, next_overrun;
+ struct padata_queue *queue, *next_queue;
+ struct padata_priv *padata;
+ struct padata_list *reorder;
+
+ empty = 0;
+ next_nr = -1;
+ next_overrun = 0;
+ next_queue = NULL;
+
+ num_cpus = cpumask_weight(pd->cpumask);
+
+ for_each_cpu(cpu, pd->cpumask) {
+ queue = per_cpu_ptr(pd->queue, cpu);
+ reorder = &queue->reorder;
+
+ /*
+ * Calculate the seq_nr of the object that should be
+ * next in this queue.
+ */
+ overrun = 0;
+ calc_seq_nr = (atomic_read(&queue->num_obj) * num_cpus)
+ + queue->cpu_index;
+
+ if (unlikely(calc_seq_nr > pd->max_seq_nr)) {
+ calc_seq_nr = calc_seq_nr - pd->max_seq_nr - 1;
+ overrun = 1;
+ }
+
+ if (!list_empty(&reorder->list)) {
+ padata = list_entry(reorder->list.next,
+ struct padata_priv, list);
+
+ seq_nr = padata->seq_nr;
+ BUG_ON(calc_seq_nr != seq_nr);
+ } else {
+ seq_nr = calc_seq_nr;
+ empty++;
+ }
+
+ if (next_nr < 0 || seq_nr < next_nr
+ || (next_overrun && !overrun)) {
+ next_nr = seq_nr;
+ next_overrun = overrun;
+ next_queue = queue;
+ }
+ }
+
+ padata = NULL;
+
+ if (empty == num_cpus)
+ goto out;
+
+ reorder = &next_queue->reorder;
+
+ if (!list_empty(&reorder->list)) {
+ padata = list_entry(reorder->list.next,
+ struct padata_priv, list);
+
+ if (unlikely(next_overrun)) {
+ for_each_cpu(cpu, pd->cpumask) {
+ queue = per_cpu_ptr(pd->queue, cpu);
+ atomic_set(&queue->num_obj, 0);
+ }
+ }
+
+ spin_lock(&reorder->lock);
+ list_del_init(&padata->list);
+ atomic_dec(&pd->reorder_objects);
+ spin_unlock(&reorder->lock);
+
+ atomic_inc(&next_queue->num_obj);
+
+ goto out;
+ }
+
+ if (next_nr % num_cpus == next_queue->cpu_index) {
+ padata = ERR_PTR(-ENODATA);
+ goto out;
+ }
+
+ padata = ERR_PTR(-EINPROGRESS);
+out:
+ return padata;
+}
+
+static void padata_reorder(struct parallel_data *pd)
+{
+ struct padata_priv *padata;
+ struct padata_queue *queue;
+ struct padata_instance *pinst = pd->pinst;
+
+try_again:
+ if (!spin_trylock_bh(&pd->lock))
+ goto out;
+
+ while (1) {
+ padata = padata_get_next(pd);
+
+ if (!padata || PTR_ERR(padata) == -EINPROGRESS)
+ break;
+
+ if (PTR_ERR(padata) == -ENODATA) {
+ spin_unlock_bh(&pd->lock);
+ goto out;
+ }
+
+ queue = per_cpu_ptr(pd->queue, padata->cb_cpu);
+
+ spin_lock(&queue->serial.lock);
+ list_add_tail(&padata->list, &queue->serial.list);
+ spin_unlock(&queue->serial.lock);
+
+ queue_work_on(padata->cb_cpu, pinst->wq, &queue->swork);
+ }
+
+ spin_unlock_bh(&pd->lock);
+
+ if (atomic_read(&pd->reorder_objects))
+ goto try_again;
+
+out:
+ return;
+}
+
+static void padata_serial_worker(struct work_struct *work)
+{
+ struct padata_queue *queue;
+ struct parallel_data *pd;
+ LIST_HEAD(local_list);
+
+ local_bh_disable();
+ queue = container_of(work, struct padata_queue, swork);
+ pd = queue->pd;
+
+ spin_lock(&queue->serial.lock);
+ list_replace_init(&queue->serial.list, &local_list);
+ spin_unlock(&queue->serial.lock);
+
+ while (!list_empty(&local_list)) {
+ struct padata_priv *padata;
+
+ padata = list_entry(local_list.next,
+ struct padata_priv, list);
+
+ list_del_init(&padata->list);
+
+ padata->serial(padata);
+ atomic_dec(&pd->refcnt);
+ }
+ local_bh_enable();
+}
+
+/*
+ * padata_do_serial - padata serialization function
+ *
+ * @padata: object to be serialized.
+ *
+ * padata_do_serial must be called for every parallelized object.
+ * The serialization callback function will run with BHs off.
+ */
+void padata_do_serial(struct padata_priv *padata)
+{
+ int cpu;
+ struct padata_queue *queue;
+ struct parallel_data *pd;
+
+ pd = padata->pd;
+
+ cpu = get_cpu();
+ queue = per_cpu_ptr(pd->queue, cpu);
+
+ spin_lock(&queue->reorder.lock);
+ atomic_inc(&pd->reorder_objects);
+ list_add_tail(&padata->list, &queue->reorder.list);
+ spin_unlock(&queue->reorder.lock);
+
+ put_cpu();
+
+ padata_reorder(pd);
+}
+EXPORT_SYMBOL(padata_do_serial);
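
Together the two entry points form the round trip: the parallel callback does the expensive work concurrently, then hands the object back so the serial callbacks run in original submission order. A sketch with illustrative names, embedding padata_priv in a private request as the API expects:

    struct my_request {
            struct padata_priv padata;  /* embedded, found via container_of */
            /* ... request data ... */
    };

    static void my_parallel(struct padata_priv *padata)
    {
            struct my_request *req =
                    container_of(padata, struct my_request, padata);

            do_expensive_work(req);     /* concurrent, BHs off */
            padata_do_serial(padata);   /* hand back for reordering */
    }

    static void my_serial(struct padata_priv *padata)
    {
            struct my_request *req =
                    container_of(padata, struct my_request, padata);

            complete_request(req);      /* strict submission order */
    }
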
+
+static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
+ const struct cpumask *cpumask)
+{
+ int cpu, cpu_index, num_cpus;
+ struct padata_queue *queue;
+ struct parallel_data *pd;
+
+ cpu_index = 0;
+
+ pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
+ if (!pd)
+ goto err;
+
+ pd->queue = alloc_percpu(struct padata_queue);
+ if (!pd->queue)
+ goto err_free_pd;
+
+ if (!alloc_cpumask_var(&pd->cpumask, GFP_KERNEL))
+ goto err_free_queue;
+
+ for_each_possible_cpu(cpu) {
+ queue = per_cpu_ptr(pd->queue, cpu);
+
+ queue->pd = pd;
+
+ if (cpumask_test_cpu(cpu, cpumask)
+ && cpumask_test_cpu(cpu, cpu_active_mask)) {
+ queue->cpu_index = cpu_index;
+ cpu_index++;
+ } else
+ queue->cpu_index = -1;
+
+ INIT_LIST_HEAD(&queue->reorder.list);
+ INIT_LIST_HEAD(&queue->parallel.list);
+ INIT_LIST_HEAD(&queue->serial.list);
+ spin_lock_init(&queue->reorder.lock);
+ spin_lock_init(&queue->parallel.lock);
+ spin_lock_init(&queue->serial.lock);
+
+ INIT_WORK(&queue->pwork, padata_parallel_worker);
+ INIT_WORK(&queue->swork, padata_serial_worker);
+ atomic_set(&queue->num_obj, 0);
+ }
+
+ cpumask_and(pd->cpumask, cpumask, cpu_active_mask);
+
+ num_cpus = cpumask_weight(pd->cpumask);
+ pd->max_seq_nr = (MAX_SEQ_NR / num_cpus) * num_cpus - 1;
+
+ atomic_set(&pd->seq_nr, -1);
+ atomic_set(&pd->reorder_objects, 0);
+ atomic_set(&pd->refcnt, 0);
+ pd->pinst = pinst;
+ spin_lock_init(&pd->lock);
+
+ return pd;
+
+err_free_queue:
+ free_percpu(pd->queue);
+err_free_pd:
+ kfree(pd);
+err:
+ return NULL;
+}
+
+static void padata_free_pd(struct parallel_data *pd)
+{
+ free_cpumask_var(pd->cpumask);
+ free_percpu(pd->queue);
+ kfree(pd);
+}
+
+static void padata_replace(struct padata_instance *pinst,
+ struct parallel_data *pd_new)
+{
+ struct parallel_data *pd_old = pinst->pd;
+
+ pinst->flags |= PADATA_RESET;
+
+ rcu_assign_pointer(pinst->pd, pd_new);
+
+ synchronize_rcu();
+
+ while (atomic_read(&pd_old->refcnt) != 0)
+ yield();
+
+ flush_workqueue(pinst->wq);
+
+ padata_free_pd(pd_old);
+
+ pinst->flags &= ~PADATA_RESET;
+}
+
+/*
+ * padata_set_cpumask - set the cpumask that padata should use
+ *
+ * @pinst: padata instance
+ * @cpumask: the cpumask to use
+ */
+int padata_set_cpumask(struct padata_instance *pinst,
+ cpumask_var_t cpumask)
+{
+ struct parallel_data *pd;
+ int err = 0;
+
+ might_sleep();
+
+ mutex_lock(&pinst->lock);
+
+ pd = padata_alloc_pd(pinst, cpumask);
+ if (!pd) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ cpumask_copy(pinst->cpumask, cpumask);
+
+ padata_replace(pinst, pd);
+
+out:
+ mutex_unlock(&pinst->lock);
+
+ return err;
+}
+EXPORT_SYMBOL(padata_set_cpumask);
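
A sketch of typical instance setup followed by a cpumask change, assuming the padata_alloc(cpumask, wq) constructor defined further down in this file; the workqueue name is illustrative:

    struct workqueue_struct *wq = create_workqueue("my_padata");
    struct padata_instance *pinst = padata_alloc(cpu_possible_mask, wq);
    cpumask_var_t mask;

    if (pinst && alloc_cpumask_var(&mask, GFP_KERNEL)) {
            cpumask_copy(mask, cpu_online_mask);
            cpumask_clear_cpu(0, mask);         /* e.g. keep cpu 0 free */
            padata_set_cpumask(pinst, mask);
            free_cpumask_var(mask);
            padata_start(pinst);
    }
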
+
+static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
+{
+ struct parallel_data *pd;
+
+ if (cpumask_test_cpu(cpu, cpu_active_mask)) {
+ pd = padata_alloc_pd(pinst, pinst->cpumask);
+ if (!pd)
+ return -ENOMEM;
+
+ padata_replace(pinst, pd);
+ }
+
+ return 0;
+}
+
+/*
+ * padata_add_cpu - add a cpu to the padata cpumask
+ *
+ * @pinst: padata instance
+ * @cpu: cpu to add
+ */
+int padata_add_cpu(struct padata_instance *pinst, int cpu)
+{
+ int err;
+
+ might_sleep();
+
+ mutex_lock(&pinst->lock);
+
+ cpumask_set_cpu(cpu, pinst->cpumask);
+ err = __padata_add_cpu(pinst, cpu);
+
+ mutex_unlock(&pinst->lock);
+
+ return err;
+}
+EXPORT_SYMBOL(padata_add_cpu);
+
+static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
+{
+ struct parallel_data *pd;
+
+ if (cpumask_test_cpu(cpu, cpu_online_mask)) {
+ pd = padata_alloc_pd(pinst, pinst->cpumask);
+ if (!pd)
+ return -ENOMEM;
+
+ padata_replace(pinst, pd);
+ }
+
+ return 0;
+}
+
+/*
+ * padata_remove_cpu - remove a cpu from the padata cpumask
+ *
+ * @pinst: padata instance
+ * @cpu: cpu to remove
+ */
+int padata_remove_cpu(struct padata_instance *pinst, int cpu)
+{
+ int err;
+
+ might_sleep();
+
+ mutex_lock(&pinst->lock);
+
+ cpumask_clear_cpu(cpu, pinst->cpumask);
+ err = __padata_remove_cpu(pinst, cpu);
+
+ mutex_unlock(&pinst->lock);
+
+ return err;
+}
+EXPORT_SYMBOL(padata_remove_cpu);
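
These two calls are what a user would wire into a CPU hotplug notifier so the instance tracks onlining and offlining CPUs. A simplified 2.6.33-era sketch with no error handling; my_pinst is illustrative:

    static int my_cpu_callback(struct notifier_block *nb,
                               unsigned long action, void *hcpu)
    {
            int cpu = (unsigned long)hcpu;

            switch (action) {
            case CPU_ONLINE:
                    padata_add_cpu(my_pinst, cpu);
                    break;
            case CPU_DOWN_PREPARE:
                    padata_remove_cpu(my_pinst, cpu);
                    break;
            }
            return NOTIFY_OK;
    }
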
+
+/*
+ * padata_start - start the parallel processing
+ *
+ * @pinst: padata instance to start
+ */
+void padata_start(struct padata_instance *pinst)
+{
+ might_sleep();
+
+ mutex_lock(&pinst->lock);