Diffstat (limited to 'kernel')
58 files changed, 3881 insertions, 2596 deletions
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index 2251882daf5..44511d100ea 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -87,6 +87,9 @@ config ARCH_INLINE_WRITE_UNLOCK_IRQ
 config ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
 	bool
 
+config UNINLINE_SPIN_UNLOCK
+	bool
+
 #
 # lock_* functions are inlined when:
 #   - DEBUG_SPINLOCK=n and GENERIC_LOCKBREAK=n and ARCH_INLINE_*LOCK=y
@@ -103,100 +106,120 @@ config ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
 #   - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
 #
 
+if !DEBUG_SPINLOCK
+
 config INLINE_SPIN_TRYLOCK
-	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_TRYLOCK
+	def_bool y
+	depends on ARCH_INLINE_SPIN_TRYLOCK
 
 config INLINE_SPIN_TRYLOCK_BH
-	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_TRYLOCK_BH
+	def_bool y
+	depends on ARCH_INLINE_SPIN_TRYLOCK_BH
 
 config INLINE_SPIN_LOCK
-	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK
+	def_bool y
+	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK
 
 config INLINE_SPIN_LOCK_BH
-	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
-		 ARCH_INLINE_SPIN_LOCK_BH
+	def_bool y
+	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK_BH
 
 config INLINE_SPIN_LOCK_IRQ
-	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
-		 ARCH_INLINE_SPIN_LOCK_IRQ
+	def_bool y
+	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK_IRQ
 
 config INLINE_SPIN_LOCK_IRQSAVE
-	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
-		 ARCH_INLINE_SPIN_LOCK_IRQSAVE
-
-config UNINLINE_SPIN_UNLOCK
-	bool
+	def_bool y
+	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK_IRQSAVE
 
 config INLINE_SPIN_UNLOCK_BH
-	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_UNLOCK_BH
+	def_bool y
+	depends on ARCH_INLINE_SPIN_UNLOCK_BH
 
 config INLINE_SPIN_UNLOCK_IRQ
-	def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_SPIN_UNLOCK_BH)
+	def_bool y
+	depends on !PREEMPT || ARCH_INLINE_SPIN_UNLOCK_BH
 
 config INLINE_SPIN_UNLOCK_IRQRESTORE
-	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE
+	def_bool y
+	depends on ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE
 
 config INLINE_READ_TRYLOCK
-	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_TRYLOCK
+	def_bool y
+	depends on ARCH_INLINE_READ_TRYLOCK
 
 config INLINE_READ_LOCK
-	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK
+	def_bool y
+	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK
 
 config INLINE_READ_LOCK_BH
-	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
-		 ARCH_INLINE_READ_LOCK_BH
+	def_bool y
+	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK_BH
 
 config INLINE_READ_LOCK_IRQ
-	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
-		 ARCH_INLINE_READ_LOCK_IRQ
+	def_bool y
+	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK_IRQ
 
 config INLINE_READ_LOCK_IRQSAVE
-	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
-		 ARCH_INLINE_READ_LOCK_IRQSAVE
+	def_bool y
+	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK_IRQSAVE
 
 config INLINE_READ_UNLOCK
-	def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_READ_UNLOCK)
+	def_bool y
+	depends on !PREEMPT || ARCH_INLINE_READ_UNLOCK
 
 config INLINE_READ_UNLOCK_BH
-	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_UNLOCK_BH
+	def_bool y
+	depends on ARCH_INLINE_READ_UNLOCK_BH
 
 config INLINE_READ_UNLOCK_IRQ
-	def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_READ_UNLOCK_BH)
+	def_bool y
+	depends on !PREEMPT || ARCH_INLINE_READ_UNLOCK_BH
 
 config INLINE_READ_UNLOCK_IRQRESTORE
-	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_UNLOCK_IRQRESTORE
+	def_bool y
+	depends on ARCH_INLINE_READ_UNLOCK_IRQRESTORE
 
 config INLINE_WRITE_TRYLOCK
-	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_TRYLOCK
+	def_bool y
+	depends on ARCH_INLINE_WRITE_TRYLOCK
 
 config INLINE_WRITE_LOCK
-	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK
+	def_bool y
+	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK
 
 config INLINE_WRITE_LOCK_BH
-	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
-		 ARCH_INLINE_WRITE_LOCK_BH
+	def_bool y
+	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK_BH
 
 config INLINE_WRITE_LOCK_IRQ
-	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
-		 ARCH_INLINE_WRITE_LOCK_IRQ
+	def_bool y
+	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK_IRQ
 
 config INLINE_WRITE_LOCK_IRQSAVE
-	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
-		 ARCH_INLINE_WRITE_LOCK_IRQSAVE
+	def_bool y
+	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK_IRQSAVE
 
 config INLINE_WRITE_UNLOCK
-	def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_WRITE_UNLOCK)
+	def_bool y
+	depends on !PREEMPT || ARCH_INLINE_WRITE_UNLOCK
 
 config INLINE_WRITE_UNLOCK_BH
-	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_BH
+	def_bool y
+	depends on ARCH_INLINE_WRITE_UNLOCK_BH
 
 config INLINE_WRITE_UNLOCK_IRQ
-	def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_WRITE_UNLOCK_BH)
+	def_bool y
+	depends on !PREEMPT || ARCH_INLINE_WRITE_UNLOCK_BH
 
 config INLINE_WRITE_UNLOCK_IRQRESTORE
-	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
+	def_bool y
+	depends on ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
+
+endif
 
 config MUTEX_SPIN_ON_OWNER
-	def_bool SMP && !DEBUG_MUTEXES
+	def_bool y
+	depends on SMP && !DEBUG_MUTEXES
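The Kconfig.locks rewrite above is a mechanical refactoring: every "def_bool !DEBUG_SPINLOCK && <cond>" becomes "def_bool y" plus "depends on <cond>", with the common !DEBUG_SPINLOCK term hoisted into a single "if !DEBUG_SPINLOCK ... endif" block, so each INLINE_* symbol keeps exactly the same value as before. A standalone C-preprocessor sketch of the same factoring, with hypothetical macro names that are not part of the patch:

    #include <stdio.h>

    /* Before: each symbol repeats the common !DEBUG_SPINLOCK term. */
    #if !defined(DEBUG_SPINLOCK) && defined(ARCH_INLINE_SPIN_TRYLOCK)
    # define INLINE_SPIN_TRYLOCK_OLD 1
    #else
    # define INLINE_SPIN_TRYLOCK_OLD 0
    #endif

    /* After: the common term is written once, guarding the whole block. */
    #if !defined(DEBUG_SPINLOCK)
    # if defined(ARCH_INLINE_SPIN_TRYLOCK)
    #  define INLINE_SPIN_TRYLOCK_NEW 1
    # endif
    #endif
    #ifndef INLINE_SPIN_TRYLOCK_NEW
    # define INLINE_SPIN_TRYLOCK_NEW 0
    #endif

    int main(void)
    {
        /* The two forms agree for every combination of the inputs. */
        printf("old=%d new=%d\n",
               INLINE_SPIN_TRYLOCK_OLD, INLINE_SPIN_TRYLOCK_NEW);
        return 0;
    }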
diff --git a/kernel/Makefile b/kernel/Makefile
index c0cc67ad764..5404911eaee 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -10,7 +10,7 @@ obj-y     = fork.o exec_domain.o panic.o printk.o \
 	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
 	    hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
 	    notifier.o ksysfs.o cred.o \
-	    async.o range.o groups.o lglock.o
+	    async.o range.o groups.o lglock.o smpboot.o
 
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace debug files and internal ftrace files
@@ -46,7 +46,6 @@ obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
 obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
 obj-$(CONFIG_SMP) += smp.o
-obj-$(CONFIG_SMP) += smpboot.o
 ifneq ($(CONFIG_SMP),y)
 obj-y += up.o
 endif
@@ -98,7 +97,7 @@ obj-$(CONFIG_COMPAT_BINFMT_ELF) += elfcore.o
 obj-$(CONFIG_BINFMT_ELF_FDPIC) += elfcore.o
 obj-$(CONFIG_FUNCTION_TRACER) += trace/
 obj-$(CONFIG_TRACING) += trace/
-obj-$(CONFIG_X86_DS) += trace/
+obj-$(CONFIG_TRACE_CLOCK) += trace/
 obj-$(CONFIG_RING_BUFFER) += trace/
 obj-$(CONFIG_TRACEPOINTS) += trace/
 obj-$(CONFIG_IRQ_WORK) += irq_work.o
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 14d32588ccc..f560598807c 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -280,12 +280,13 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 				__func__, cpu);
 		goto out_release;
 	}
+	smpboot_park_threads(cpu);
 
 	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
 	if (err) {
 		/* CPU didn't die: tell everyone.  Can't complain. */
+		smpboot_unpark_threads(cpu);
 		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
-
 		goto out_release;
 	}
 	BUG_ON(cpu_online(cpu));
@@ -354,6 +355,10 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
 		goto out;
 	}
 
+	ret = smpboot_create_threads(cpu);
+	if (ret)
+		goto out;
+
 	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
 	if (ret) {
 		nr_calls--;
@@ -368,6 +373,9 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
 		goto out_notify;
 	BUG_ON(!cpu_online(cpu));
 
+	/* Wake the per cpu threads */
+	smpboot_unpark_threads(cpu);
+
 	/* Now call notifier in preparation. */
 	cpu_notify(CPU_ONLINE | mod, hcpu);
 
@@ -439,14 +447,6 @@ EXPORT_SYMBOL_GPL(cpu_up);
 #ifdef CONFIG_PM_SLEEP_SMP
 static cpumask_var_t frozen_cpus;
 
-void __weak arch_disable_nonboot_cpus_begin(void)
-{
-}
-
-void __weak arch_disable_nonboot_cpus_end(void)
-{
-}
-
 int disable_nonboot_cpus(void)
 {
 	int cpu, first_cpu, error = 0;
@@ -458,7 +458,6 @@ int disable_nonboot_cpus(void)
 	 * with the userspace trying to use the CPU hotplug at the same time
 	 */
 	cpumask_clear(frozen_cpus);
-	arch_disable_nonboot_cpus_begin();
 
 	printk("Disabling non-boot CPUs ...\n");
 	for_each_online_cpu(cpu) {
@@ -474,8 +473,6 @@ int disable_nonboot_cpus(void)
 		}
 	}
 
-	arch_disable_nonboot_cpus_end();
-
 	if (!error) {
 		BUG_ON(num_online_cpus() > 1);
 		/* Make sure the CPUs won't be enabled by someone else */
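The cpu.c hunks above wire the new smpboot infrastructure into hotplug: per-cpu threads are parked before take_cpu_down() and unparked again when the CPU comes back up or the down operation fails, rather than being destroyed and recreated. A minimal userspace sketch of the park/unpark idea using POSIX threads; all names here are hypothetical, and the kernel's smpboot threads rest on kthread parking, not this code:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool parked;

    static void *worker(void *arg)
    {
        (void)arg;
        for (int i = 0; i < 5; i++) {
            pthread_mutex_lock(&lock);
            while (parked)              /* park point: block, don't exit */
                pthread_cond_wait(&cond, &lock);
            pthread_mutex_unlock(&lock);
            printf("worker iteration %d\n", i);
            usleep(100 * 1000);
        }
        return NULL;
    }

    static void set_parked(bool p)
    {
        pthread_mutex_lock(&lock);
        parked = p;
        pthread_cond_broadcast(&cond);
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);
        usleep(150 * 1000);
        set_parked(true);   /* cf. smpboot_park_threads() on cpu down */
        usleep(300 * 1000);
        set_parked(false);  /* cf. smpboot_unpark_threads() on up/failure */
        pthread_join(t, NULL);
        return 0;
    }

The point of parking instead of destroying is that a failed or transient hot-unplug pays no thread setup/teardown cost and cannot fail on thread creation.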
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 98d4597f43d..c77206184b8 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -159,6 +159,11 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
 	int rctx;
 	struct perf_callchain_entry *entry;
 
+	int kernel = !event->attr.exclude_callchain_kernel;
+	int user   = !event->attr.exclude_callchain_user;
+
+	if (!kernel && !user)
+		return NULL;
 	entry = get_callchain_entry(&rctx);
 	if (rctx == -1)
 		return NULL;
@@ -169,24 +174,29 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
 
 	entry->nr = 0;
 
-	if (!user_mode(regs)) {
+	if (kernel && !user_mode(regs)) {
 		perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
 		perf_callchain_kernel(entry, regs);
-		if (current->mm)
-			regs = task_pt_regs(current);
-		else
-			regs = NULL;
 	}
 
-	if (regs) {
-		/*
-		 * Disallow cross-task user callchains.
-		 */
-		if (event->ctx->task && event->ctx->task != current)
-			goto exit_put;
-
-		perf_callchain_store(entry, PERF_CONTEXT_USER);
-		perf_callchain_user(entry, regs);
+	if (user) {
+		if (!user_mode(regs)) {
+			if (current->mm)
+				regs = task_pt_regs(current);
+			else
+				regs = NULL;
+		}
+
+		if (regs) {
+			/*
+			 * Disallow cross-task user callchains.
+			 */
+			if (event->ctx->task && event->ctx->task != current)
+				goto exit_put;
+
+			perf_callchain_store(entry, PERF_CONTEXT_USER);
+			perf_callchain_user(entry, regs);
+		}
 	}
 
 exit_put:
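The callchain.c change makes the kernel and user halves of a callchain independently skippable via the new exclude_callchain_kernel/exclude_callchain_user attribute bits, bailing out early when both are excluded. A compact userspace model of the resulting control flow; names are hypothetical and the pt_regs handling is elided:

    #include <stdbool.h>
    #include <stdio.h>

    struct attr { bool exclude_kernel; bool exclude_user; };

    static void capture(const struct attr *a, bool in_user_mode)
    {
        bool kernel = !a->exclude_kernel;
        bool user = !a->exclude_user;

        if (!kernel && !user)       /* nothing requested: no entry at all */
            return;
        if (kernel && !in_user_mode)
            printf("  dump kernel frames\n");
        if (user)                   /* after switching to the user regs */
            printf("  dump user frames\n");
    }

    int main(void)
    {
        struct attr both = { false, false };
        struct attr user_only = { true, false };

        printf("both, sampled in kernel mode:\n");
        capture(&both, false);
        printf("user only, sampled in kernel mode:\n");
        capture(&user_only, false);
        return 0;
    }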
diff --git a/kernel/events/core.c b/kernel/events/core.c
index b7935fcec7d..7b9df353ba1 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -36,6 +36,7 @@
 #include <linux/perf_event.h>
 #include <linux/ftrace_event.h>
 #include <linux/hw_breakpoint.h>
+#include <linux/mm_types.h>
 
 #include "internal.h"
 
@@ -1253,7 +1254,7 @@ retry:
 /*
  * Cross CPU call to disable a performance event
  */
-static int __perf_event_disable(void *info)
+int __perf_event_disable(void *info)
 {
 	struct perf_event *event = info;
 	struct perf_event_context *ctx = event->ctx;
@@ -2935,12 +2936,12 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);
 /*
  * Called when the last reference to the file is gone.
  */
-static int perf_release(struct inode *inode, struct file *file)
+static void put_event(struct perf_event *event)
 {
-	struct perf_event *event = file->private_data;
 	struct task_struct *owner;
 
-	file->private_data = NULL;
+	if (!atomic_long_dec_and_test(&event->refcount))
+		return;
 
 	rcu_read_lock();
 	owner = ACCESS_ONCE(event->owner);
@@ -2975,7 +2976,13 @@ static int perf_release(struct inode *inode, struct file *file)
 		put_task_struct(owner);
 	}
 
-	return perf_event_release_kernel(event);
+	perf_event_release_kernel(event);
+}
+
+static int perf_release(struct inode *inode, struct file *file)
+{
+	put_event(file->private_data);
+	return 0;
 }
 
 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
@@ -3227,7 +3234,7 @@ unlock:
 
 static const struct file_operations perf_fops;
 
-static struct perf_event *perf_fget_light(int fd, int *fput_needed)
+static struct file *perf_fget_light(int fd, int *fput_needed)
 {
 	struct file *file;
 
@@ -3241,7 +3248,7 @@ static struct perf_event *perf_fget_light(int fd, int *fput_needed)
 		return ERR_PTR(-EBADF);
 	}
 
-	return file->private_data;
+	return file;
 }
 
 static int perf_event_set_output(struct perf_event *event,
@@ -3273,19 +3280,21 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
 	case PERF_EVENT_IOC_SET_OUTPUT:
 	{
+		struct file *output_file = NULL;
 		struct perf_event *output_event = NULL;
 		int fput_needed = 0;
 		int ret;
 
 		if (arg != -1) {
-			output_event = perf_fget_light(arg, &fput_needed);
-			if (IS_ERR(output_event))
-				return PTR_ERR(output_event);
+			output_file = perf_fget_light(arg, &fput_needed);
+			if (IS_ERR(output_file))
+				return PTR_ERR(output_file);
+			output_event = output_file->private_data;
 		}
 
 		ret = perf_event_set_output(event, output_event);
 		if (output_event)
-			fput_light(output_event->filp, fput_needed);
+			fput_light(output_file, fput_needed);
 
 		return ret;
 	}
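The core.c changes above put perf_event lifetime behind a proper reference count: perf_release() now just drops the file's reference through put_event(), and only the last put actually runs the teardown in perf_event_release_kernel(). A minimal userspace analogue of that pattern with C11 atomics; the names are hypothetical:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct event {
        atomic_long refcount;
        /* ... payload ... */
    };

    static struct event *event_alloc(void)
    {
        struct event *e = malloc(sizeof(*e));

        atomic_init(&e->refcount, 1);   /* creator holds one reference */
        return e;
    }

    static void put_event(struct event *e)
    {
        /* Tear down only when the last reference is dropped. */
        if (atomic_fetch_sub(&e->refcount, 1) == 1) {
            printf("releasing event\n");
            free(e);
        }
    }

    int main(void)
    {
        struct event *e = event_alloc();

        atomic_fetch_add(&e->refcount, 1);  /* a second holder appears */
        put_event(e);                       /* one reference left: kept */
        put_event(e);                       /* last reference: freed */
        return 0;
    }

The companion perf_fget_light() change returns the struct file instead of its private_data, so the ioctl path can keep the file (and thus the event) pinned across perf_event_set_output().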
@@ -3756,6 +3765,132 @@ int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
 }
 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
 
+static void
+perf_output_sample_regs(struct perf_output_handle *handle,
+			struct pt_regs *regs, u64 mask)
+{
+	int bit;
+
+	for_each_set_bit(bit, (const unsigned long *) &mask,
+			 sizeof(mask) * BITS_PER_BYTE) {
+		u64 val;
+
+		val = perf_reg_value(regs, bit);
+		perf_output_put(handle, val);
+	}
+}
+
+static void perf_sample_regs_user(struct perf_regs_user *regs_user,
+				  struct pt_regs *regs)
+{
+	if (!user_mode(regs)) {
+		if (current->mm)
+			regs = task_pt_regs(current);
+		else
+			regs = NULL;
+	}
+
+	if (regs) {
+		regs_user->regs = regs;
+		regs_user->abi  = perf_reg_abi(current);
+	}
+}
+
+/*
+ * Get remaining task size from user stack pointer.
+ *
+ * It'd be better to take stack vma map and limit this more
+ * precisly, but there's no way to get it safely under interrupt,
+ * so using TASK_SIZE as limit.
+ */
+static u64 perf_ustack_task_size(struct pt_regs *regs)
+{
+	unsigned long addr = perf_user_stack_pointer(regs);
+
+	if (!addr || addr >= TASK_SIZE)
+		return 0;
+
+	return TASK_SIZE - addr;
+}
+
+static u16
+perf_sample_ustack_size(u16 stack_size, u16 header_size,
+			struct pt_regs *regs)
+{
+	u64 task_size;
+
+	/* No regs, no stack pointer, no dump. */
+	if (!regs)
+		return 0;
+
+	/*
+	 * Check if we fit in with the requested stack size into the:
+	 * - TASK_SIZE
+	 *   If we don't, we limit the size to the TASK_SIZE.
+	 *
+	 * - remaining sample size
+	 *   If we don't, we customize the stack size to
+	 *   fit in to the remaining sample size.
+	 */
+
+	task_size  = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
+	stack_size = min(stack_size, (u16) task_size);
+
+	/* Current header size plus static size and dynamic size. */
+	header_size += 2 * sizeof(u64);
+
+	/* Do we fit in with the current stack dump size? */
+	if ((u16) (header_size + stack_size) < header_size) {
+		/*
+		 * If we overflow the maximum size for the sample,
+		 * we customize the stack dump size to fit in.
+		 */
+		stack_size = USHRT_MAX - header_size - sizeof(u64);
+		stack_size = round_up(stack_size, sizeof(u64));
+	}
+
+	return stack_size;
+}
+
+static void
+perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
+			  struct pt_regs *regs)
+{
+	/* Case of a kernel thread, nothing to dump */
+	if (!regs) {
+		u64 size = 0;
+		perf_output_put(handle, size);
+	} else {
+		unsigned long sp;
+		unsigned int rem;
+		u64 dyn_size;
+
+		/*
+		 * We dump:
+		 * static size
+		 *   - the size requested by user or the best one we can fit
+		 *     in to the sample max size
+		 * data
+		 *   - user stack dump data
+		 * dynamic size
+		 *   - the actual dumped size
+		 */
+
+		/* Static size. */
+		perf_output_put(handle, dump_size);
+
+		/* Data. */
+		sp = perf_user_stack_pointer(regs);
+		rem = __output_copy_user(handle, (void *) sp, dump_size);
+		dyn_size = dump_size - rem;
+
+		perf_output_skip(handle, rem);
+
+		/* Dynamic size. */
+		perf_output_put(handle, dyn_size);
+	}
+}
+
 static void __perf_event_header__init_id(struct perf_event_header *header,
 					 struct perf_sample_data *data,
 					 struct perf_event *event)
@@ -4016,6 +4151,28 @@ void perf_output_sample(struct perf_output_handle *handle,
 			perf_output_put(handle, nr);
 		}
 	}
+
+	if (sample_type & PERF_SAMPLE_REGS_USER) {
+		u64 abi = data->regs_user.abi;
+
+		/*
+		 * If there are no regs to dump, notice it through
+		 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
+		 */
+		perf_output_put(handle, abi);
+
+		if (abi) {
+			u64 mask = event->attr.sample_regs_user;
+			perf_output_sample_regs(handle,
+						data->regs_user.regs,
+						mask);
+		}
+	}
+
+	if (sample_type & PERF_SAMPLE_STACK_USER)
+		perf_output_sample_ustack(handle,
+					  data->stack_user_size,
+					  data->regs_user.regs);
 }
 
 void perf_prepare_sample(struct perf_event_header *header,
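perf_sample_ustack_size() above clamps the user-requested stack dump against both the remaining user stack (bounded by TASK_SIZE) and the u16 sample-size budget, rounding to u64 units. A standalone model of the same arithmetic, with invented input values:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t clamp_ustack(uint16_t stack_size, uint16_t header_size,
                                 uint64_t task_size)
    {
        /* Never dump more than the task has left on its stack. */
        if (task_size < stack_size)
            stack_size = (uint16_t)task_size;

        /* The header grows by the static-size and dynamic-size u64 fields. */
        header_size += 2 * sizeof(uint64_t);

        /* u16 wrap-around means header + dump overflowed the budget. */
        if ((uint16_t)(header_size + stack_size) < header_size) {
            stack_size = UINT16_MAX - header_size - sizeof(uint64_t);
            stack_size = (uint16_t)((stack_size + 7) & ~7u); /* u64 align */
        }
        return stack_size;
    }

    int main(void)
    {
        printf("%u\n", clamp_ustack(512, 64, 1 << 20));     /* fits: 512 */
        printf("%u\n", clamp_ustack(65000, 1024, 1 << 20)); /* shrunk to fit */
        return 0;
    }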
@@ -4067,6 +4224,49 @@ void perf_prepare_sample(struct perf_event_header *header,
 		}
 		header->size += size;
 	}
+
+	if (sample_type & PERF_SAMPLE_REGS_USER) {
+		/* regs dump ABI info */
+		int size = sizeof(u64);
+
+		perf_sample_regs_user(&data->regs_user, regs);
+
+		if (data->regs_user.regs) {
+			u64 mask = event->attr.sample_regs_user;
+			size += hweight64(mask) * sizeof(u64);
+		}
+
+		header->size += size;
+	}
+
+	if (sample_type & PERF_SAMPLE_STACK_USER) {
+		/*
+		 * Either we need PERF_SAMPLE_STACK_USER bit to be allways
+		 * processed as the last one or have additional check added
+		 * in case new sample type is added, because we could eat
+		 * up the rest of the sample size.
+		 */
+		struct perf_regs_user *uregs = &data->regs_user;
+		u16 stack_size = event->attr.sample_stack_user;
+		u16 size = sizeof(u64);
+
+		if (!uregs->abi)
+			perf_sample_regs_user(uregs, regs);
+
+		stack_size = perf_sample_ustack_size(stack_size, header->size,
+						     uregs->regs);
+
+		/*
+		 * If there is something to dump, add space for the dump
+		 * itself and for the field that tells the dynamic size,
+		 * which is how many have been actually dumped.
+		 */
+		if (stack_size)
+			size += sizeof(u64) + stack_size;
+
+		data->stack_user_size = stack_size;
+		header->size += size;
+	}
 }
 
 static void perf_event_output(struct perf_event *event,
@@ -5950,6 +6150,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
 	mutex_init(&event->mmap_mutex);
 
+	atomic_long_set(&event->refcount, 1);
 	event->cpu		= cpu;
 	event->attr		= *attr;
 	event->group_leader	= group_leader;
@@ -6142,6 +6343,28 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
 			attr->branch_sample_type = mask;
 		}
 	}
+
+	if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
+		ret = perf_reg_validate(attr->sample_regs_user);
+		if (ret)
+			return ret;
+	}
+
+	if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
+		if (!arch_perf_have_user_stack_dump())
+			return -ENOSYS;
+
+		/*
+		 * We have __u32 type for the size, but so far
+		 * we can only use __u16 as ma