Diffstat (limited to 'kernel')
94 files changed, 5800 insertions, 3267 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 0b72d1a74be..0b5ff083fa2 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -10,8 +10,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \
 	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
 	    hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
 	    notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \
-	    async.o range.o
-obj-$(CONFIG_HAVE_EARLY_RES) += early_res.o
+	    async.o range.o jump_label.o
 obj-y += groups.o
 ifdef CONFIG_FUNCTION_TRACER
@@ -23,6 +22,7 @@ CFLAGS_REMOVE_rtmutex-debug.o = -pg
 CFLAGS_REMOVE_cgroup-debug.o = -pg
 CFLAGS_REMOVE_sched_clock.o = -pg
 CFLAGS_REMOVE_perf_event.o = -pg
+CFLAGS_REMOVE_irq_work.o = -pg
 endif
 obj-$(CONFIG_FREEZER) += freezer.o
@@ -86,6 +86,7 @@ obj-$(CONFIG_TREE_RCU) += rcutree.o
 obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o
 obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o
 obj-$(CONFIG_TINY_RCU) += rcutiny.o
+obj-$(CONFIG_TINY_PREEMPT_RCU) += rcutiny.o
 obj-$(CONFIG_RELAY) += relay.o
 obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
 obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
@@ -100,6 +101,7 @@ obj-$(CONFIG_TRACING) += trace/
 obj-$(CONFIG_X86_DS) += trace/
 obj-$(CONFIG_RING_BUFFER) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
+obj-$(CONFIG_IRQ_WORK) += irq_work.o
 obj-$(CONFIG_PERF_EVENTS) += perf_event.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
 obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index c9483d8f614..7b69b8d0313 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -52,7 +52,6 @@
 #include <linux/cgroupstats.h>
 #include <linux/hash.h>
 #include <linux/namei.h>
-#include <linux/smp_lock.h>
 #include <linux/pid_namespace.h>
 #include <linux/idr.h>
 #include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
@@ -138,7 +137,7 @@ struct css_id {
 	 * is called after synchronize_rcu(). But for safe use, css_is_removed()
 	 * css_tryget() should be used for avoiding race.
 	 */
-	struct cgroup_subsys_state *css;
+	struct cgroup_subsys_state __rcu *css;
 	/*
 	 * ID of this css.
 	 */
@@ -1222,7 +1221,6 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data)
 	struct cgroup *cgrp = &root->top_cgroup;
 	struct cgroup_sb_opts opts;
-	lock_kernel();
 	mutex_lock(&cgrp->dentry->d_inode->i_mutex);
 	mutex_lock(&cgroup_mutex);
@@ -1255,7 +1253,6 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data)
 	kfree(opts.name);
 	mutex_unlock(&cgroup_mutex);
 	mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
-	unlock_kernel();
 	return ret;
 }
@@ -1568,7 +1565,6 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
 out_err:
 	kfree(opts.release_agent);
 	kfree(opts.name);
-
 	return ret;
 }
diff --git a/kernel/compat.c b/kernel/compat.c
index e167efce842..c9e2ec0b34a 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -1126,3 +1126,24 @@ compat_sys_sysinfo(struct compat_sysinfo __user *info)
 	return 0;
 }
+
+/*
+ * Allocate user-space memory for the duration of a single system call,
+ * in order to marshall parameters inside a compat thunk.
+ */
+void __user *compat_alloc_user_space(unsigned long len)
+{
+	void __user *ptr;
+
+	/* If len would occupy more than half of the entire compat space... */
+	if (unlikely(len > (((compat_uptr_t)~0) >> 1)))
+		return NULL;
+
+	ptr = arch_compat_alloc_user_space(len);
+
+	if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
+		return NULL;
+
+	return ptr;
+}
+EXPORT_SYMBOL_GPL(compat_alloc_user_space);
diff --git a/kernel/configs.c b/kernel/configs.c
index abaee684ecb..b4066b44a99 100644
--- a/kernel/configs.c
+++ b/kernel/configs.c
@@ -66,6 +66,7 @@ ikconfig_read_current(struct file *file, char __user *buf,
 static const struct file_operations ikconfig_file_ops = {
 	.owner = THIS_MODULE,
 	.read = ikconfig_read_current,
+	.llseek = default_llseek,
 };
 static int __init ikconfig_init(void)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index b23c0979bbe..51b143e2a07 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1397,7 +1397,7 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont,
 	if (tsk->flags & PF_THREAD_BOUND)
 		return -EINVAL;
-	ret = security_task_setscheduler(tsk, 0, NULL);
+	ret = security_task_setscheduler(tsk);
 	if (ret)
 		return ret;
 	if (threadgroup) {
@@ -1405,7 +1405,7 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont,
 		rcu_read_lock();
 		list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
-			ret = security_task_setscheduler(c, 0, NULL);
+			ret = security_task_setscheduler(c);
 			if (ret) {
 				rcu_read_unlock();
 				return ret;
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index de407c78178..fec596da9bd 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -47,6 +47,7 @@
 #include <linux/pid.h>
 #include <linux/smp.h>
 #include <linux/mm.h>
+#include <linux/rcupdate.h>
 #include <asm/cacheflush.h>
 #include <asm/byteorder.h>
@@ -109,13 +110,15 @@ static struct kgdb_bkpt kgdb_break[KGDB_MAX_BREAKPOINTS] = {
  */
 atomic_t kgdb_active = ATOMIC_INIT(-1);
 EXPORT_SYMBOL_GPL(kgdb_active);
+static DEFINE_RAW_SPINLOCK(dbg_master_lock);
+static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
 /*
  * We use NR_CPUs not PERCPU, in case kgdb is used to debug early
  * bootup code (which might not have percpu set up yet):
  */
-static atomic_t passive_cpu_wait[NR_CPUS];
-static atomic_t cpu_in_kgdb[NR_CPUS];
+static atomic_t masters_in_kgdb;
+static atomic_t slaves_in_kgdb;
 static atomic_t kgdb_break_tasklet_var;
 atomic_t kgdb_setting_breakpoint;
@@ -457,26 +460,32 @@ static int kgdb_reenter_check(struct kgdb_state *ks)
 	return 1;
 }
-static void dbg_cpu_switch(int cpu, int next_cpu)
+static void dbg_touch_watchdogs(void)
 {
-	/* Mark the cpu we are switching away from as a slave when it
-	 * holds the kgdb_active token. This must be done so that the
-	 * that all the cpus wait in for the debug core will not enter
-	 * again as the master. */
-	if (cpu == atomic_read(&kgdb_active)) {
-		kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE;
-		kgdb_info[cpu].exception_state &= ~DCPU_WANT_MASTER;
-	}
-	kgdb_info[next_cpu].exception_state |= DCPU_NEXT_MASTER;
+	touch_softlockup_watchdog_sync();
+	clocksource_touch_watchdog();
+	rcu_cpu_stall_reset();
 }
-static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs)
+static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
+		int exception_state)
 {
 	unsigned long flags;
 	int sstep_tries = 100;
 	int error;
-	int i, cpu;
+	int cpu;
 	int trace_on = 0;
+	int online_cpus = num_online_cpus();
+
+	kgdb_info[ks->cpu].enter_kgdb++;
+	kgdb_info[ks->cpu].exception_state |= exception_state;
+
+	if (exception_state == DCPU_WANT_MASTER)
+		atomic_inc(&masters_in_kgdb);
+	else
+		atomic_inc(&slaves_in_kgdb);
+	kgdb_disable_hw_debug(ks->linux_regs);
+
 acquirelock:
 	/*
	 * Interrupts will be restored by the 'trap return' code, except when
@@ -489,14 +498,15 @@ acquirelock:
 	kgdb_info[cpu].task = current;
 	kgdb_info[cpu].ret_state = 0;
 	kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;
-	/*
-	 * Make sure the above info reaches the primary CPU before
-	 * our cpu_in_kgdb[] flag setting does:
-	 */
-	atomic_inc(&cpu_in_kgdb[cpu]);
-	if (exception_level == 1)
+	/* Make sure the above info reaches the primary CPU */
+	smp_mb();
+
+	if (exception_level == 1) {
+		if (raw_spin_trylock(&dbg_master_lock))
+			atomic_xchg(&kgdb_active, cpu);
 		goto cpu_master_loop;
+	}
 	/*
	 * CPU will loop if it is a slave or request to become a kgdb
@@ -508,10 +518,12 @@ cpu_loop:
 			kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
 			goto cpu_master_loop;
 		} else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
-			if (atomic_cmpxchg(&kgdb_active, -1, cpu) == cpu)
+			if (raw_spin_trylock(&dbg_master_lock)) {
+				atomic_xchg(&kgdb_active, cpu);
 				break;
+			}
 		} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
-			if (!atomic_read(&passive_cpu_wait[cpu]))
+			if (!raw_spin_is_locked(&dbg_slave_lock))
 				goto return_normal;
 		} else {
 return_normal:
@@ -522,9 +534,12 @@ return_normal:
 				arch_kgdb_ops.correct_hw_break();
 			if (trace_on)
 				tracing_on();
-			atomic_dec(&cpu_in_kgdb[cpu]);
-			touch_softlockup_watchdog_sync();
-			clocksource_touch_watchdog();
+			kgdb_info[cpu].exception_state &=
+				~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
+			kgdb_info[cpu].enter_kgdb--;
+			smp_mb__before_atomic_dec();
+			atomic_dec(&slaves_in_kgdb);
+			dbg_touch_watchdogs();
 			local_irq_restore(flags);
 			return 0;
 		}
@@ -541,8 +556,8 @@ return_normal:
 	    (kgdb_info[cpu].task &&
 	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
 		atomic_set(&kgdb_active, -1);
-		touch_softlockup_watchdog_sync();
-		clocksource_touch_watchdog();
+		raw_spin_unlock(&dbg_master_lock);
+		dbg_touch_watchdogs();
 		local_irq_restore(flags);
 		goto acquirelock;
@@ -563,16 +578,12 @@ return_normal:
 	if (dbg_io_ops->pre_exception)
 		dbg_io_ops->pre_exception();
-	kgdb_disable_hw_debug(ks->linux_regs);
-
 	/*
	 * Get the passive CPU lock which will hold all the non-primary
	 * CPU in a spin state while the debugger is active
	 */
-	if (!kgdb_single_step) {
-		for (i = 0; i < NR_CPUS; i++)
-			atomic_inc(&passive_cpu_wait[i]);
-	}
+	if (!kgdb_single_step)
+		raw_spin_lock(&dbg_slave_lock);
#ifdef CONFIG_SMP
 	/* Signal the other CPUs to enter kgdb_wait() */
@@ -583,10 +594,9 @@ return_normal:
 	/*
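Note: the kernel/debug/debug_core.c hunks above replace the per-CPU passive_cpu_wait[]/cpu_in_kgdb[] atomics with two raw spinlocks. Below is a condensed sketch of the resulting protocol; dbg_master_lock and dbg_slave_lock mirror the declarations added by the diff, while the helper names (dbg_try_become_master, dbg_park_slaves, dbg_slave_may_leave) are illustrative only and do not appear in the patch.

#include <linux/spinlock.h>

/* Illustrative copies of the two locks introduced by the diff. */
static DEFINE_RAW_SPINLOCK(dbg_master_lock);
static DEFINE_RAW_SPINLOCK(dbg_slave_lock);

/* Master election: of all CPUs that took the exception, exactly one wins
 * the trylock; the real code then records the winner with
 * atomic_xchg(&kgdb_active, cpu). The losers keep looping as slaves. */
static bool dbg_try_become_master(void)
{
	return raw_spin_trylock(&dbg_master_lock);
}

/* Master side: holding dbg_slave_lock keeps every slave CPU spinning in
 * its wait loop for as long as the debugger is active. */
static void dbg_park_slaves(void)
{
	raw_spin_lock(&dbg_slave_lock);
}

/* Slave side: a slave may resume normal execution only once the master
 * has released dbg_slave_lock (kgdb_cpu_enter polls this in its loop). */
static bool dbg_slave_may_leave(void)
{
	return !raw_spin_is_locked(&dbg_slave_lock);
}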
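Note: the kernel/compat.c hunk above adds a generic compat_alloc_user_space(), which compat syscall thunks use to reserve user-space scratch memory for a widened copy of 32-bit arguments, with the access_ok() check now folded in. A minimal caller sketch follows; the foo_args structs and the compat_sys_foo/sys_foo names are hypothetical and are not part of this diff.

#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/linkage.h>

/* Hypothetical 32-bit layout handed in by 32-bit userspace. */
struct foo_args32 {
	compat_uptr_t	buf;		/* 32-bit user pointer */
	compat_size_t	len;
};

/* Hypothetical native layout expected by the 64-bit syscall. */
struct foo_args {
	void __user	*buf;
	size_t		len;
};

/* Hypothetical native implementation this thunk forwards to. */
extern asmlinkage long sys_foo(struct foo_args __user *args);

asmlinkage long compat_sys_foo(struct foo_args32 __user *arg32)
{
	struct foo_args __user *arg;
	compat_uptr_t buf;
	compat_size_t len;

	/* Reserve writable scratch space in the user address space for the
	 * widened copy; with this patch the helper returns NULL on failure. */
	arg = compat_alloc_user_space(sizeof(*arg));
	if (!arg)
		return -EFAULT;

	if (get_user(buf, &arg32->buf) || get_user(len, &arg32->len))
		return -EFAULT;
	if (put_user(compat_ptr(buf), &arg->buf) || put_user(len, &arg->len))
		return -EFAULT;

	/* Hand the widened argument block to the native syscall path. */
	return sys_foo(arg);
}

On 64-bit architectures arch_compat_alloc_user_space() typically carves this scratch area out of the interrupted task's user stack, so the allocation lives only for the duration of the system call.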