From 426d31071ac476ea62c62656b242930c17b58c00 Mon Sep 17 00:00:00 2001
From: Paul Bolle
Date: Sat, 7 Aug 2010 12:30:03 +0200
Subject: fix printk typo 'faild'

Signed-off-by: Paul Bolle
Signed-off-by: Jiri Kosina
---
 kernel/trace/trace_kprobe.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index f52b5f50299..58716e73e2a 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -490,7 +490,7 @@ static int register_trace_probe(struct trace_probe *tp)
 	}
 	ret = register_probe_event(tp);
 	if (ret) {
-		pr_warning("Faild to register probe event(%d)\n", ret);
+		pr_warning("Failed to register probe event(%d)\n", ret);
 		goto end;
 	}
-- 
cgit v1.2.3-70-g09d2


From f1b499f029c5dde85d46a8811353c62f29157541 Mon Sep 17 00:00:00 2001
From: John Kacur
Date: Thu, 5 Aug 2010 17:10:53 +0200
Subject: lockdep: Remove __debug_show_held_locks

There is no longer any functional difference between
__debug_show_held_locks() and debug_show_held_locks(),
so remove the former.

Signed-off-by: John Kacur
Cc: Peter Zijlstra
LKML-Reference: <1281021054-4228-1-git-send-email-jkacur@redhat.com>
Signed-off-by: Ingo Molnar
---
 include/linux/debug_locks.h | 5 -----
 kernel/hung_task.c          | 2 +-
 kernel/lockdep.c            | 8 +-------
 3 files changed, 2 insertions(+), 13 deletions(-)

(limited to 'kernel')

diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 29b3ce3f2a1..2833452ea01 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -49,7 +49,6 @@ struct task_struct;
 
 #ifdef CONFIG_LOCKDEP
 extern void debug_show_all_locks(void);
-extern void __debug_show_held_locks(struct task_struct *task);
 extern void debug_show_held_locks(struct task_struct *task);
 extern void debug_check_no_locks_freed(const void *from, unsigned long len);
 extern void debug_check_no_locks_held(struct task_struct *task);
@@ -58,10 +57,6 @@ static inline void debug_show_all_locks(void)
 {
 }
 
-static inline void __debug_show_held_locks(struct task_struct *task)
-{
-}
-
 static inline void debug_show_held_locks(struct task_struct *task)
 {
 }
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 0c642d51aac..bca94237955 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -98,7 +98,7 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
 	printk(KERN_ERR "\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
 			" disables this message.\n");
 	sched_show_task(t);
-	__debug_show_held_locks(t);
+	debug_show_held_locks(t);
 
 	touch_nmi_watchdog();
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index f2852a51023..84baa71cfda 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -3775,7 +3775,7 @@ EXPORT_SYMBOL_GPL(debug_show_all_locks);
 * Careful: only use this function if you are sure that
 * the task cannot run in parallel!
 */
-void __debug_show_held_locks(struct task_struct *task)
+void debug_show_held_locks(struct task_struct *task)
 {
 	if (unlikely(!debug_locks)) {
 		printk("INFO: lockdep is turned off.\n");
@@ -3783,12 +3783,6 @@ void __debug_show_held_locks(struct task_struct *task)
 	}
 	lockdep_print_held_locks(task);
 }
-EXPORT_SYMBOL_GPL(__debug_show_held_locks);
-
-void debug_show_held_locks(struct task_struct *task)
-{
-	__debug_show_held_locks(task);
-}
 EXPORT_SYMBOL_GPL(debug_show_held_locks);
 
 void lockdep_sys_exit(void)
-- 
cgit v1.2.3-70-g09d2


From 6a103b0d44e9f97dc430002cf3ac7a7defa3819f Mon Sep 17 00:00:00 2001
From: John Kacur
Date: Thu, 5 Aug 2010 17:10:54 +0200
Subject: lockup detector: Fix grammar by adding a missing "to" in the comments

This fixes a minor grammar problem in the comments in hung_task.c.

Signed-off-by: John Kacur
Cc: Peter Zijlstra
LKML-Reference: <1281021054-4228-2-git-send-email-jkacur@redhat.com>
Signed-off-by: Ingo Molnar
---
 kernel/hung_task.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index bca94237955..53ead174da2 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -111,7 +111,7 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
 * periodically exit the critical section and enter a new one.
 *
 * For preemptible RCU it is sufficient to call rcu_read_unlock in order
- * exit the grace period. For classic RCU, a reschedule is required.
+ * to exit the grace period. For classic RCU, a reschedule is required.
 */
 static void rcu_lock_break(struct task_struct *g, struct task_struct *t)
 {
-- 
cgit v1.2.3-70-g09d2


From 86397dc3ccfc0e17b7550d05eaf15fe91f6498dd Mon Sep 17 00:00:00 2001
From: Li Zefan
Date: Tue, 17 Aug 2010 13:53:06 +0800
Subject: tracing: Clean up seqfile code for format file

Remove the nasty hack that marks a pointer's LSB to distinguish common
fields from event fields. Replace it with a more sane approach.

Signed-off-by: Li Zefan
LKML-Reference: <4C6A23C2.9020606@cn.fujitsu.com>
Signed-off-by: Steven Rostedt
---
 kernel/trace/trace_events.c | 55 +++++++++++++++------------------------------
 1 file changed, 18 insertions(+), 37 deletions(-)

(limited to 'kernel')

diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 4c758f14632..398c0e8b332 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -600,21 +600,29 @@ out:
 
 enum {
 	FORMAT_HEADER		= 1,
-	FORMAT_PRINTFMT		= 2,
+	FORMAT_FIELD_SEPERATOR	= 2,
+	FORMAT_PRINTFMT		= 3,
 };
 
 static void *f_next(struct seq_file *m, void *v, loff_t *pos)
 {
 	struct ftrace_event_call *call = m->private;
 	struct ftrace_event_field *field;
-	struct list_head *head;
+	struct list_head *common_head = &ftrace_common_fields;
+	struct list_head *head = trace_get_fields(call);
 
 	(*pos)++;
 
 	switch ((unsigned long)v) {
 	case FORMAT_HEADER:
-		head = &ftrace_common_fields;
+		if (unlikely(list_empty(common_head)))
+			return NULL;
+
+		field = list_entry(common_head->prev,
+				   struct ftrace_event_field, link);
+		return field;
 
+	case FORMAT_FIELD_SEPERATOR:
 		if (unlikely(list_empty(head)))
 			return NULL;
@@ -626,31 +634,10 @@ static void *f_next(struct seq_file *m, void *v, loff_t *pos)
 		return NULL;
 	}
 
-	head = trace_get_fields(call);
-
-	/*
-	 * To separate common fields from event fields, the
-	 * LSB is set on the first event field. Clear it in case.
-	 */
-	v = (void *)((unsigned long)v & ~1L);
-
 	field = v;
-	/*
-	 * If this is a common field, and at the end of the list, then
-	 * continue with main list.
-	 */
-	if (field->link.prev == &ftrace_common_fields) {
-		if (unlikely(list_empty(head)))
-			return NULL;
-		field = list_entry(head->prev, struct ftrace_event_field, link);
-		/* Set the LSB to notify f_show to print an extra newline */
-		field = (struct ftrace_event_field *)
-			((unsigned long)field | 1);
-		return field;
-	}
-
-	/* If we are done tell f_show to print the format */
-	if (field->link.prev == head)
+	if (field->link.prev == common_head)
+		return (void *)FORMAT_FIELD_SEPERATOR;
+	else if (field->link.prev == head)
 		return (void *)FORMAT_PRINTFMT;
 
 	field = list_entry(field->link.prev, struct ftrace_event_field, link);
@@ -688,22 +675,16 @@ static int f_show(struct seq_file *m, void *v)
 		seq_printf(m, "format:\n");
 		return 0;
 
+	case FORMAT_FIELD_SEPERATOR:
+		seq_putc(m, '\n');
+		return 0;
+
 	case FORMAT_PRINTFMT:
 		seq_printf(m, "\nprint fmt: %s\n",
 			   call->print_fmt);
 		return 0;
 	}
 
-	/*
-	 * To separate common fields from event fields, the
-	 * LSB is set on the first event field. Clear it and
-	 * print a newline if it is set.
-	 */
-	if ((unsigned long)v & 1) {
-		seq_putc(m, '\n');
-		v = (void *)((unsigned long)v & ~1L);
-	}
-
 	field = v;
 
 	/*
-- 
cgit v1.2.3-70-g09d2


From 56962b4449af34070bb1994621ef4f0265eed4d8 Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Wed, 30 Jun 2010 23:03:51 +0200
Subject: perf: Generalize some arch callchain code

- Most archs use one callchain buffer per cpu, except x86, which needs
  to deal with NMIs. Provide a default perf_callchain_buffer()
  implementation that x86 overrides.

- Centralize all the kernel/user regs handling and invoke new arch
  handlers from there: perf_callchain_user() / perf_callchain_kernel().
  That avoids duplicating the user_mode() and current->mm checks in
  every arch.

- Invert some parameters in perf_callchain_*() helpers: entry to the
  left, regs to the right, following the traditional (dst, src) ordering.
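As a quick illustration of the resulting arch-side contract, a minimal
(hypothetical) implementation of one of the new hooks could look like the
sketch below; the hook name, the (entry, regs) ordering and
perf_callchain_store() come from this patch, while the regs->ip field and
the stack walk are x86-flavored placeholders:

	void perf_callchain_kernel(struct perf_callchain_entry *entry,
				   struct pt_regs *regs)
	{
		perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
		perf_callchain_store(entry, regs->ip);	/* arch program counter */
		/* ... walk the kernel stack, storing each return address ... */
	}

The generic layer now guarantees that regs is valid and in the right mode
before these hooks are called.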
Signed-off-by: Frederic Weisbecker
Acked-by: Paul Mackerras
Tested-by: Will Deacon
Cc: Ingo Molnar
Cc: Peter Zijlstra
Cc: Arnaldo Carvalho de Melo
Cc: Stephane Eranian
Cc: David Miller
Cc: Paul Mundt
Cc: Borislav Petkov
---
 arch/arm/kernel/perf_event.c         | 43 +++----------------------------
 arch/powerpc/kernel/perf_callchain.c | 49 +++++++++++-------------------------
 arch/sh/kernel/perf_callchain.c      | 37 ++-------------------------
 arch/sparc/kernel/perf_event.c       | 46 +++++++++++----------------------
 arch/x86/kernel/cpu/perf_event.c     | 45 ++++++---------------------------
 include/linux/perf_event.h           | 10 +++++++-
 kernel/perf_event.c                  | 40 +++++++++++++++++++++++++++--
 7 files changed, 90 insertions(+), 180 deletions(-)

(limited to 'kernel')

diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index a07c3b1955f..0e3bbdb1592 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -3044,17 +3044,13 @@ user_backtrace(struct frame_tail *tail,
 	return buftail.fp - 1;
 }
 
-static void
-perf_callchain_user(struct pt_regs *regs,
-		    struct perf_callchain_entry *entry)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
 	struct frame_tail *tail;
 
 	perf_callchain_store(entry, PERF_CONTEXT_USER);
 
-	if (!user_mode(regs))
-		regs = task_pt_regs(current);
-
 	tail = (struct frame_tail *)regs->ARM_fp - 1;
 
 	while (tail && !((unsigned long)tail & 0x3))
@@ -3075,9 +3071,8 @@ callchain_trace(struct stackframe *fr,
 	return 0;
 }
 
-static void
-perf_callchain_kernel(struct pt_regs *regs,
-		      struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
 	struct stackframe fr;
 
@@ -3088,33 +3083,3 @@ perf_callchain_kernel(struct pt_regs *regs,
 	fr.pc = regs->ARM_pc;
 	walk_stackframe(&fr, callchain_trace, entry);
 }
-
-static void
-perf_do_callchain(struct pt_regs *regs,
-		  struct perf_callchain_entry *entry)
-{
-	int is_user;
-
-	if (!regs)
-		return;
-
-	is_user = user_mode(regs);
-
-	if (!is_user)
-		perf_callchain_kernel(regs, entry);
-
-	if (current->mm)
-		perf_callchain_user(regs, entry);
-}
-
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
-
-struct perf_callchain_entry *
-perf_callchain(struct pt_regs *regs)
-{
-	struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
-
-	entry->nr = 0;
-	perf_do_callchain(regs, entry);
-	return entry;
-}
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c
index a286c2e5a3e..f7a85ede840 100644
--- a/arch/powerpc/kernel/perf_callchain.c
+++ b/arch/powerpc/kernel/perf_callchain.c
@@ -46,8 +46,8 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
 	return 0;
 }
 
-static void perf_callchain_kernel(struct pt_regs *regs,
-				  struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
 	unsigned long sp, next_sp;
 	unsigned long next_ip;
@@ -221,8 +221,8 @@ static int sane_signal_64_frame(unsigned long sp)
 		puc == (unsigned long) &sf->uc;
 }
 
-static void perf_callchain_user_64(struct pt_regs *regs,
-				   struct perf_callchain_entry *entry)
+static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+				   struct pt_regs *regs)
 {
 	unsigned long sp, next_sp;
 	unsigned long next_ip;
@@ -303,8 +303,8 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
 	return __get_user_inatomic(*ret, ptr);
 }
 
-static inline void perf_callchain_user_64(struct pt_regs *regs,
-					  struct perf_callchain_entry *entry)
+static inline
+void perf_callchain_user_64(struct perf_callchain_entry *entry,
+			    struct pt_regs *regs)
 {
 }
@@ -423,8 +423,8 @@ static unsigned int __user *signal_frame_32_regs(unsigned int sp,
 	return mctx->mc_gregs;
 }
 
-static void perf_callchain_user_32(struct pt_regs *regs,
-				   struct perf_callchain_entry *entry)
+static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+				   struct pt_regs *regs)
 {
 	unsigned int sp, next_sp;
 	unsigned int next_ip;
@@ -471,32 +471,11 @@ static void perf_callchain_user_32(struct pt_regs *regs,
 	}
 }
 
-/*
- * Since we can't get PMU interrupts inside a PMU interrupt handler,
- * we don't need separate irq and nmi entries here.
- */
-static DEFINE_PER_CPU(struct perf_callchain_entry, cpu_perf_callchain);
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
-	struct perf_callchain_entry *entry = &__get_cpu_var(cpu_perf_callchain);
-
-	entry->nr = 0;
-
-	if (!user_mode(regs)) {
-		perf_callchain_kernel(regs, entry);
-		if (current->mm)
-			regs = task_pt_regs(current);
-		else
-			regs = NULL;
-	}
-
-	if (regs) {
-		if (current_is_64bit())
-			perf_callchain_user_64(regs, entry);
-		else
-			perf_callchain_user_32(regs, entry);
-	}
-
-	return entry;
+	if (current_is_64bit())
+		perf_callchain_user_64(entry, regs);
+	else
+		perf_callchain_user_32(entry, regs);
 }
diff --git a/arch/sh/kernel/perf_callchain.c b/arch/sh/kernel/perf_callchain.c
index 00143f3dd19..ef076a91292 100644
--- a/arch/sh/kernel/perf_callchain.c
+++ b/arch/sh/kernel/perf_callchain.c
@@ -44,44 +44,11 @@ static const struct stacktrace_ops callchain_ops = {
 	.address = callchain_address,
 };
 
-static void
-perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
 	perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
 	perf_callchain_store(entry, regs->pc);
 
 	unwind_stack(NULL, regs, NULL, &callchain_ops, entry);
 }
-
-static void
-perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
-{
-	int is_user;
-
-	if (!regs)
-		return;
-
-	is_user = user_mode(regs);
-
-	/*
-	 * Only the kernel side is implemented for now.
-	 */
-	if (!is_user)
-		perf_callchain_kernel(regs, entry);
-}
-
-/*
- * No need for separate IRQ and NMI entries.
- */
-static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
-{
-	struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
-
-	entry->nr = 0;
-
-	perf_do_callchain(regs, entry);
-
-	return entry;
-}
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 2a95a907986..460162d74ab 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1283,14 +1283,16 @@ void __init init_hw_perf_events(void)
 	register_die_notifier(&perf_event_nmi_notifier);
 }
 
-static void perf_callchain_kernel(struct pt_regs *regs,
-				  struct perf_callchain_entry *entry)
+void perf_callchain_kernel(struct perf_callchain_entry *entry,
+			   struct pt_regs *regs)
 {
 	unsigned long ksp, fp;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	int graph = 0;
 #endif
 
+	stack_trace_flush();
+
 	perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
 	perf_callchain_store(entry, regs->tpc);
 
@@ -1330,8 +1332,8 @@ static void perf_callchain_kernel(struct pt_regs *regs,
 	} while (entry->nr < PERF_MAX_STACK_DEPTH);
 }
 
-static void perf_callchain_user_64(struct pt_regs *regs,
-				   struct perf_callchain_entry *entry)
+static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+				   struct pt_regs *regs)
 {
 	unsigned long ufp;
 
@@ -1353,8 +1355,8 @@ static void perf_callchain_user_64(struct pt_regs *regs,
 	} while (entry->nr < PERF_MAX_STACK_DEPTH);
 }
 
-static void perf_callchain_user_32(struct pt_regs *regs,
-				   struct perf_callchain_entry *entry)
+static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+				   struct pt_regs *regs)
 {
 	unsigned long ufp;
 
@@ -1376,30 +1378,12 @@ static void perf_callchain_user_32(struct pt_regs *regs,
 	} while (entry->nr < PERF_MAX_STACK_DEPTH);
 }
 
-/* Like powerpc we can't get PMU interrupts within the PMU handler,
- * so no need for separate NMI and IRQ chains as on x86.
- */
-static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
-	struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
-
-	entry->nr = 0;
-	if (!user_mode(regs)) {
-		stack_trace_flush();
-		perf_callchain_kernel(regs, entry);
-		if (current->mm)
-			regs = task_pt_regs(current);
-		else
-			regs = NULL;
-	}
-	if (regs) {
-		flushw_user();
-		if (test_thread_flag(TIF_32BIT))
-			perf_callchain_user_32(regs, entry);
-		else
-			perf_callchain_user_64(regs, entry);
-	}
-	return entry;
+	flushw_user();
+	if (test_thread_flag(TIF_32BIT))
+		perf_callchain_user_32(entry, regs);
+	else
+		perf_callchain_user_64(entry, regs);
 }
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 8af28caeafc..39f8421b86e 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1571,9 +1571,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
 * callchain support
 */
 
-
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
+static DEFINE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry_nmi);
 
 
 static void
@@ -1607,8 +1605,8 @@ static const struct stacktrace_ops backtrace_ops = {
 	.walk_stack		= print_context_stack_bp,
 };
 
-static void
-perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
 	perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
 	perf_callchain_store(entry, regs->ip);
@@ -1653,14 +1651,12 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 }
 #endif
 
-static void
-perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
 	struct stack_frame frame;
 	const void __user *fp;
 
-	if (!user_mode(regs))
-		regs = task_pt_regs(current);
 
 	fp = (void __user *)regs->bp;
 
@@ -1687,42 +1683,17 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 	}
 }
 
-static void
-perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
-{
-	int is_user;
-
-	if (!regs)
-		return;
-
-	is_user = user_mode(regs);
-
-	if (!is_user)
-		perf_callchain_kernel(regs, entry);
-
-	if (current->mm)
-		perf_callchain_user(regs, entry);
-}
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+struct perf_callchain_entry *perf_callchain_buffer(void)
 {
-	struct perf_callchain_entry *entry;
-
 	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
 		/* TODO: We don't support guest os callchain now */
 		return NULL;
 	}
 
 	if (in_nmi())
-		entry = &__get_cpu_var(pmc_nmi_entry);
-	else
-		entry = &__get_cpu_var(pmc_irq_entry);
-
-	entry->nr = 0;
-
-	perf_do_callchain(regs, entry);
+		return &__get_cpu_var(perf_callchain_entry_nmi);
 
-	return entry;
+	return &__get_cpu_var(perf_callchain_entry);
 }
 
 unsigned long perf_instruction_pointer(struct pt_regs *regs)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 358880404b4..4db61dded38 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -976,7 +976,15 @@ extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks
 extern void perf_event_comm(struct task_struct *tsk);
 extern void perf_event_fork(struct task_struct *tsk);
 
-extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
+/* Callchains */
+DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
+
+extern void perf_callchain_user(struct perf_callchain_entry *entry,
+				struct pt_regs *regs);
+extern void perf_callchain_kernel(struct perf_callchain_entry *entry,
+				  struct pt_regs *regs);
+extern struct perf_callchain_entry *perf_callchain_buffer(void);
+
 
 static inline void
 perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index c772a3d4000..02efde6c879 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -2937,13 +2937,49 @@ void perf_event_do_pending(void)
 	__perf_pending_run();
 }
 
+DEFINE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
+
 /*
 * Callchain support -- arch specific
 */
 
-__weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+__weak struct perf_callchain_entry *perf_callchain_buffer(void)
 {
-	return NULL;
+	return &__get_cpu_var(perf_callchain_entry);
+}
+
+__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
+				  struct pt_regs *regs)
+{
+}
+
+__weak void perf_callchain_user(struct perf_callchain_entry *entry,
+				struct pt_regs *regs)
+{
+}
+
+static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+{
+	struct perf_callchain_entry *entry;
+
+	entry = perf_callchain_buffer();
+	if (!entry)
+		return NULL;
+
+	entry->nr = 0;
+
+	if (!user_mode(regs)) {
+		perf_callchain_kernel(entry, regs);
+		if (current->mm)
+			regs = task_pt_regs(current);
+		else
+			regs = NULL;
+	}
+
+	if (regs)
+		perf_callchain_user(entry, regs);
+
+	return entry;
 }
-- 
cgit v1.2.3-70-g09d2


From f72c1a931e311bb7780fee19e41a89ac42cab50e Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Thu, 1 Jul 2010 02:31:21 +0200
Subject: perf: Factorize callchain context handling

Store the kernel and user context markers from the generic layer
instead of in each arch; this consolidates some repetitive code.
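The resulting generic dispatch (a condensed sketch mirroring the
kernel/perf_event.c hunk below) stores each context marker exactly once
before invoking the arch hook, so the per-arch walkers only record real
addresses:

	if (!user_mode(regs)) {
		perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
		perf_callchain_kernel(entry, regs);
		regs = current->mm ? task_pt_regs(current) : NULL;
	}

	if (regs) {
		perf_callchain_store(entry, PERF_CONTEXT_USER);
		perf_callchain_user(entry, regs);
	}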
Signed-off-by: Frederic Weisbecker
Acked-by: Paul Mackerras
Tested-by: Will Deacon
Cc: Ingo Molnar
Cc: Peter Zijlstra
Cc: Arnaldo Carvalho de Melo
Cc: Stephane Eranian
Cc: David Miller
Cc: Paul Mundt
Cc: Borislav Petkov
---
 arch/arm/kernel/perf_event.c         | 2 --
 arch/powerpc/kernel/perf_callchain.c | 3 ---
 arch/sh/kernel/perf_callchain.c      | 1 -
 arch/sparc/kernel/perf_event.c       | 3 ---
 arch/x86/kernel/cpu/perf_event.c     | 2 --
 kernel/perf_event.c                  | 5 ++++-
 6 files changed, 4 insertions(+), 12 deletions(-)

(limited to 'kernel')

diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 0e3bbdb1592..64ca8c3ab94 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -3049,7 +3049,6 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
 	struct frame_tail *tail;
 
-	perf_callchain_store(entry, PERF_CONTEXT_USER);
 
 	tail = (struct frame_tail *)regs->ARM_fp - 1;
 
@@ -3076,7 +3075,6 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
 	struct stackframe fr;
 
-	perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
 	fr.fp = regs->ARM_fp;
 	fr.sp = regs->ARM_sp;
 	fr.lr = regs->ARM_lr;
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c
index f7a85ede840..d05ae4204bb 100644
--- a/arch/powerpc/kernel/perf_callchain.c
+++ b/arch/powerpc/kernel/perf_callchain.c
@@ -57,7 +57,6 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 	lr = regs->link;
 	sp = regs->gpr[1];
-	perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
 	perf_callchain_store(entry, regs->nip);
 
 	if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
@@ -234,7 +233,6 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
 	next_ip = regs->nip;
 	lr = regs->link;
 	sp = regs->gpr[1];
-	perf_callchain_store(entry, PERF_CONTEXT_USER);
 	perf_callchain_store(entry, next_ip);
 
 	for (;;) {
@@ -435,7 +433,6 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
 	next_ip = regs->nip;
 	lr = regs->link;
 	sp = regs->gpr[1];
-	perf_callchain_store(entry, PERF_CONTEXT_USER);
 	perf_callchain_store(entry, next_ip);
 
 	while (entry->nr < PERF_MAX_STACK_DEPTH) {
diff --git a/arch/sh/kernel/perf_callchain.c b/arch/sh/kernel/perf_callchain.c
index ef076a91292..d5ca1ef50fa 100644
--- a/arch/sh/kernel/perf_callchain.c
+++ b/arch/sh/kernel/perf_callchain.c
@@ -47,7 +47,6 @@ static const struct stacktrace_ops callchain_ops = {
 void
 perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
-	perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
 	perf_callchain_store(entry, regs->pc);
 
 	unwind_stack(NULL, regs, NULL, &callchain_ops, entry);
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 460162d74ab..4bc40293857 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1293,7 +1293,6 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
 
 	stack_trace_flush();
 
-	perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
 	perf_callchain_store(entry, regs->tpc);
 
 	ksp = regs->u_regs[UREG_I6];
@@ -1337,7 +1336,6 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
 {
 	unsigned long ufp;
 
-	perf_callchain_store(entry, PERF_CONTEXT_USER);
 	perf_callchain_store(entry, regs->tpc);
 
@@ -1360,7 +1358,6 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
 {
 	unsigned long ufp;
 
-	perf_callchain_store(entry, PERF_CONTEXT_USER);
 	perf_callchain_store(entry, regs->tpc);
 	ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 39f8421b86e..a3c922288cc 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1608,7 +1608,6 @@ static const struct stacktrace_ops backtrace_ops = {
 void
 perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
-	perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
 	perf_callchain_store(entry, regs->ip);
 
 	dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
@@ -1660,7 +1659,6 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 
 	fp = (void __user *)regs->bp;
 
-	perf_callchain_store(entry, PERF_CONTEXT_USER);
 	perf_callchain_store(entry, regs->ip);
 
 	if (perf_callchain_user32(regs, entry))
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 02efde6c879..615d024894c 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -2969,6 +2969,7 @@ static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 	entry->nr = 0;
 
 	if (!user_mode(regs)) {
+		perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
 		perf_callchain_kernel(entry, regs);
 		if (current->mm)
 			regs = task_pt_regs(current);
@@ -2976,8 +2977,10 @@ static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 			regs = NULL;
 	}
 
-	if (regs)
+	if (regs) {
+		perf_callchain_store(entry, PERF_CONTEXT_USER);
 		perf_callchain_user(entry, regs);
+	}
 
 	return entry;
 }
-- 
cgit v1.2.3-70-g09d2


From 927c7a9e92c4f69097a6e9e086d11fc2f8a5b40b Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Thu, 1 Jul 2010 16:20:36 +0200
Subject: perf: Fix race in callchains

Now that software events don't have interrupts disabled anymore in
the event path, callchains can nest on any context. So separating
the nmi context from the other contexts in two buffers has become
racy.

Fix this by providing one buffer per nesting level. Given the size
of the callchain entries (2040 bytes * 4), we now need to allocate
them dynamically.

v2: Fixed put_callchain_entry call after recursion.
    Fix the type of the recursion counter, it must be an array.

v3: Use a manual per cpu allocation (temporary solution until NMIs
    can safely access vmalloc'ed memory).
    Do a better separation between callchain reference tracking and
    allocation. Make the "put" path lockless for non-release cases.

v4: Protect the callchain buffers with rcu.

v5: Make the cpu buffer allocations node affine.
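To see the race being fixed, consider the old two-buffer scheme as a
sketch (the buffer name is from the pre-series x86 code; the interleaving
is illustrative):

	/* Old scheme (sketch): all non-NMI contexts shared one per-cpu buffer. */
	entry = &__get_cpu_var(pmc_irq_entry);
	entry->nr = 0;			/* task context starts a callchain... */
	/* ... a software event fires from an irq or softirq on the same cpu,
	 * picks the same buffer and resets entry->nr, corrupting the
	 * half-built callchain of the interrupted task context. */

With one perf_callchain_entry per cpu per nesting level (task, softirq,
hardirq, nmi), each level fills its own slot and the recursion counters
catch same-level reentrancy.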
Signed-off-by: Frederic Weisbecker
Tested-by: Will Deacon
Cc: Ingo Molnar
Cc: Peter Zijlstra
Cc: Arnaldo Carvalho de Melo
Cc: Paul Mackerras
Cc: Stephane Eranian
Cc: Paul Mundt
Cc: David Miller
Cc: Borislav Petkov
---
 arch/x86/kernel/cpu/perf_event.c |  22 ++-
 include/linux/perf_event.h       |   1 -
 kernel/perf_event.c              | 298 ++++++++++++++++++++++++++++++---------
 3 files changed, 238 insertions(+), 83 deletions(-)

(limited to 'kernel')

diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index a3c922288cc..8e91cf34a9c 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1608,6 +1608,11 @@ static const struct stacktrace_ops backtrace_ops = {
 void
 perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
+	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+		/* TODO: We don't support guest os callchain now */
+		return;
+	}
+
 	perf_callchain_store(entry, regs->ip);
 
 	dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
@@ -1656,6 +1661,10 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 	struct stack_frame frame;
 	const void __user *fp;
 
+	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+		/* TODO: We don't support guest os callchain now */
+		return;
+	}
 
 	fp = (void __user *)regs->bp;
 
@@ -1687,19 +1690,6 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 	}
 }
 
-struct perf_callchain_entry *perf_callchain_buffer(void)
-{
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
-		/* TODO: We don't support guest os callchain now */
-		return NULL;
-	}
-
-	if (in_nmi())
-		return &__get_cpu_var(perf_callchain_entry_nmi);
-
-	return &__get_cpu_var(perf_callchain_entry);
-}
-
 unsigned long perf_instruction_pointer(struct pt_regs *regs)
 {
 	unsigned long ip;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 4db61dded38..d7e8ea69086 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -983,7 +983,6 @@ extern void perf_callchain_user(struct perf_callchain_entry *entry,
 				struct pt_regs *regs);
 extern void perf_callchain_kernel(struct perf_callchain_entry *entry,
 				  struct pt_regs *regs);
-extern struct perf_callchain_entry *perf_callchain_buffer(void);
 
 
 static inline void
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 615d024894c..75ab8a2df6b 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1763,6 +1763,216 @@ static u64 perf_event_read(struct perf_event *event)
 	return perf_event_count(event);
 }
 
+/*
+ * Callchain support
+ */
+
+struct callchain_cpus_entries {
+	struct rcu_head			rcu_head;
+	struct perf_callchain_entry	*cpu_entries[0];
+};
+
+static DEFINE_PER_CPU(int, callchain_recursion[4]);
+static atomic_t nr_callchain_events;
+static DEFINE_MUTEX(callchain_mutex);
+struct callchain_cpus_entries *callchain_cpus_entries;
+
+
+__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
+				  struct pt_regs *regs)
+{
+}
+
+__weak void perf_callchain_user(struct perf_callchain_entry *entry,
+				struct pt_regs *regs)
+{
+}
+
+static void release_callchain_buffers_rcu(struct rcu_head *head)
+{
+	struct callchain_cpus_entries *entries;
+	int cpu;
+
+	entries = container_of(head, struct callchain_cpus_entries, rcu_head);
+
+	for_each_possible_cpu(cpu)
+		kfree(entries->cpu_entries[cpu]);
+
+	kfree(entries);
+}
+
+static void release_callchain_buffers(void)
+{
+	struct callchain_cpus_entries *entries;
+
+	entries = callchain_cpus_entries;
+	rcu_assign_pointer(callchain_cpus_entries, NULL);
+	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
+}
+
+static int alloc_callchain_buffers(void)
+{
+	int cpu;
+	int size;
+	struct callchain_cpus_entries *entries;
+
+	/*
+	 * We can't use the percpu allocation API for data that can be
+	 * accessed from NMI. Use a temporary manual per cpu allocation
+	 * until that gets sorted out.
+	 */
+	size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
+		num_possible_cpus();
+
+	entries = kzalloc(size, GFP_KERNEL);
+	if (!entries)
+		return -ENOMEM;
+
+	size = sizeof(struct perf_callchain_entry) * 4;
+
+	for_each_possible_cpu(cpu) {
+		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
+							 cpu_to_node(cpu));
+		if (!entries->cpu_entries[cpu])
+			goto fail;
+	}
+
+	rcu_assign_pointer(callchain_cpus_entries, entries);
+
+	return 0;
+
+fail:
+	for_each_possible_cpu(cpu)
+		kfree(entries->cpu_entries[cpu]);
+	kfree(entries);
+
+	return -ENOMEM;
+}
+
+static int get_callchain_buffers(void)
+{
+	int err = 0;
+	int count;
+
+	mutex_lock(&callchain_mutex);
+
+	count = atomic_inc_return(&nr_callchain_events);
+	if (WARN_ON_ONCE(count < 1)) {
+		err = -EINVAL;
+		goto exit;
+	}
+
+	if (count > 1) {
+		/* If the allocation failed, give up */
+		if (!callchain_cpus_entries)
+			err = -ENOMEM;
+		goto exit;
+	}
+
+	err = alloc_callchain_buffers();
+	if (err)
+		release_callchain_buffers();
+exit:
+	mutex_unlock(&callchain_mutex);
+
+	return err;
+}
+
+static void put_callchain_buffers(void)
+{
+	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
+		release_callchain_buffers();
+		mutex_unlock(&callchain_mutex);
+	}
+}
+
+static int get_recursion_context(int *recursion)
+{
+	int rctx;
+
+	if (in_nmi())
+		rctx = 3;
+	else if (in_irq())
+		rctx = 2;
+	else if (in_softirq())
+		rctx = 1;
+	else
+		rctx = 0;
+
+	if (recursion[rctx])
+		return -1;
+
+	recursion[rctx]++;
+	barrier();
+
+	return rctx;
+}
+
+static inline void put_recursion_context(int *recursion, int rctx)
+{
+	barrier();
+	recursion[rctx]--;
+}
+
+static struct perf_callchain_entry *get_callchain_entry(int *rctx)
+{
+	int cpu;
+	struct callchain_cpus_entries *entries;
+
+	*rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
+	if (*rctx == -1)
+		return NULL;
+
+	entries = rcu_dereference(callchain_cpus_entries);
+	if (!entries)
+		return NULL;
+
+	cpu = smp_processor_id();
+
+	return &entries->cpu_entries[cpu][*rctx];
+}
+
+static void
+put_callchain_entry(int rctx)
+{
+	put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
+}
+
+static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+{
+	int rctx;
+	struct perf_callchain_entry *entry;
+
+
+	entry = get_callchain_entry(&rctx);
+	if (rctx == -1)
+		return NULL;
+
+	if (!entry)
+		goto exit_put;
+
+	entry->nr = 0;
+
+	if (!user_mode(regs)) {
+		perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
+		perf_callchain_kernel(entry, regs);
+		if (current->mm)
+			regs = task_pt_regs(current);
+		else
+			regs = NULL;
+	}
+
+	if (regs) {
+		perf_callchain_store(entry, PERF_CONTEXT_USER);
+		perf_callchain_user(entry, regs);
+	}
+
+exit_put:
+	put_callchain_entry(rctx);
+
+	return entry;
+}
+
 /*
 * Initialize the perf_event context in a task_struct:
 */
@@ -1895,6 +2105,8 @@ static void free_event(struct perf_event *event)
 			atomic_dec(&nr_comm_events);
 		if (event->attr.task)
 			atomic_dec(&nr_task_events);
+		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
+			put_callchain_buffers();
 	}
 
 	if (event->buffer) {
@@ -2937,55 +3149,6 @@ void perf_event_do_pending(void)
 {
 	__perf_pending_run();
 }
 
-DEFINE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
-
-/*
- * Callchain support -- arch specific
- */
-
-__weak struct perf_callchain_entry *perf_callchain_buffer(void)
-{
-	return &__get_cpu_var(perf_callchain_entry);
-}
-
-__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
-				  struct pt_regs *regs)
-{
-}
-
-__weak void perf_callchain_user(struct perf_callchain_entry *entry,
-				struct pt_regs *regs)
-{
-}
-
-static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
-{
-	struct perf_callchain_entry *entry;
-
-	entry = perf_callchain_buffer();
-	if (!entry)
-		return NULL;
-
-	entry->nr = 0;
-
-	if (!user_mode(regs)) {
-		perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
-		perf_callchain_kernel(entry, regs);
-		if (current->mm)
-			regs = task_pt_regs(current);
-		else
-			regs = NULL;
-	}
-
-	if (regs) {
-		perf_callchain_store(entry, PERF_CONTEXT_USER);
-		perf_callchain_user(entry, regs);
-	}
-
-	return entry;
-}
-
-
 /*
 * We assume there is only KVM supporting the callbacks.
 * Later on, we might change it to a list if there is
@@ -3480,14 +3643,20 @@ static void perf_event_output(struct perf_event *event, int nmi,
 	struct perf_output_handle handle;
 	struct perf_event_header header;
 
+	/* protect the callchain buffers */
+	rcu_read_lock();
+
 	perf_prepare_sample(&header, data, event, regs);
 
 	if (perf_output_begin(&handle, event, header.size, nmi, 1))
-		return;
+		goto exit;
 
 	perf_output_sample(&handle, &header, data, event);
 
 	perf_output_end(&handle);
+
+exit:
+	rcu_read_unlock();
 }
 
 /*
@@ -4243,32 +4412,16 @@ end:
 int perf_swevent_get_recursion_context(void)
 {
 	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-	int rctx;
 
-	if (in_nmi())
-		rctx = 3;
-	else if (in_irq())
-		rctx = 2;
-	else if (in_softirq())
-		rctx = 1;
-	else
-		rctx = 0;
-
-	if (cpuctx->recursion[rctx])
-		return -1;
-
-	cpuctx->recursion[rctx]++;
-	barrier();
-
-	return rctx;
+	return get_recursion_context(cpuctx->recursion);
 }
 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
 
 void inline perf_swevent_put_recursion_context(int rctx)
 {
 	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-	barrier();
-	cpuctx->recursion[rctx]--;
+
+	put_recursion_context(cpuctx->recursion, rctx);
 }
 
 void __perf_sw_event(u32 event_id, u64 nr, int nmi,
@@ -4968,6 +5121,13 @@ done:
 			atomic_inc(&nr_comm_events);
 		if (event->attr.task)
 			atomic_inc(&nr_task_events);
+		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
+			err = get_callchain_buffers();
+			if (err) {
+				free_event(event);
+				return ERR_PTR(err);
+			}
+		}
 	}
 
 	return event;
-- 
cgit v1.2.3-70-g09d2


From 7ae07ea3a48d30689ee037cb136bc21f0b37d8ae Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Sat, 14 Aug 2010 20:45:13 +0200
Subject: perf: Humanize the number of contexts

Instead of hardcoding the number of contexts for the recursion
barriers, define a cpp constant to make the code more
self-explanatory.
Signed-off-by: Frederic Weisbecker
Cc: Ingo Molnar
Cc: Peter Zijlstra
Cc: Arnaldo Carvalho de Melo
Cc: Paul Mackerras
Cc: Stephane Eranian
---
 include/linux/perf_event.h      | 14 ++++++++------
 kernel/perf_event.c             |  4 ++--
 kernel/trace/trace_event_perf.c |  8 ++++----
 3 files changed, 14 insertions(+), 12 deletions(-)

(limited to 'kernel')

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index d7e8ea69086..ae6fa605092 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -808,6 +808,12 @@ struct perf_event_context {
 	struct rcu_head			rcu_head;
 };
 
+/*
+ * Number of contexts where an event can trigger:
+ *	task, softirq, hardirq, nmi.
+ */
+#define PERF_NR_CONTEXTS	4
+
 /**
 * struct perf_event_cpu_context - per cpu event context structure
 */
@@ -821,12 +827,8 @@ struct perf_cpu_context {
 	struct mutex			hlist_mutex;
 	int				hlist_refcount;
 
-	/*
-	 * Recursion avoidance:
-	 *
-	 * task, softirq, irq, nmi context
-	 */
-	int				recursion[4];
+	/* Recursion avoidance in each contexts */
+	int				recursion[PERF_NR_CONTEXTS];
 };
 
 struct perf_output_handle {
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 75ab8a2df6b..f416aef242c 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1772,7 +1772,7 @@ struct callchain_cpus_entries {
 	struct perf_callchain_entry	*cpu_entries[0];
 };
 
-static DEFINE_PER_CPU(int, callchain_recursion[4]);
+static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
 static atomic_t nr_callchain_events;
 static DEFINE_MUTEX(callchain_mutex);
 struct callchain_cpus_entries *callchain_cpus_entries;
@@ -1828,7 +1828,7 @@ static int alloc_callchain_buffers(void)
 	if (!entries)
 		return -ENOMEM;
 
-	size = sizeof(struct perf_callchain_entry) * 4;
+	size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
 
 	for_each_possible_cpu(cpu) {
 		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
 							 cpu_to_node(cpu));
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 000e6e85b44..db2eae2efcf 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -9,7 +9,7 @@
 #include
 #include "trace.h"
 
-static char *perf_trace_buf[4];
+static char *perf_trace_buf[PERF_NR_CONTEXTS];
 
 /*
 * Force it to be aligned to unsigned long to avoid misaligned accesses
@@ -45,7 +45,7 @@ static int perf_trace_event_init(struct ftrace_event_call *tp_event,
 		char *buf;
 		int i;
 
-		for (i = 0; i < 4; i++) {
+		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
 			buf = (char *)alloc_percpu(perf_trace_t);
 			if (!buf)
 				goto fail;
@@ -65,7 +65,7 @@ fail:
 	if (!total_ref_count) {
 		int i;
 
-		for (i = 0; i < 4; i++) {
+		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
 			free_percpu(perf_trace_buf[i]);
 			perf_trace_buf[i] = NULL;
 		}
@@ -140,7 +140,7 @@ void perf_trace_destroy(struct perf_event *p_event)
 	tp_event->perf_events = NULL;
 
 	if (!--total_ref_count) {
-		for (i = 0; i < 4; i++) {
+		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
 			free_percpu(perf_trace_buf[i]);
 			perf_trace_buf[i] = NULL;
 		}
-- 
cgit v1.2.3-70-g09d2


From 6016ee13db518ab1cd0cbf43fc2ad5712021e338 Mon Sep 17 00:00:00 2001
From: Namhyung Kim
Date: Wed, 11 Aug 2010 12:47:59 +0900
Subject: perf, tracing: add missing __percpu markups

ftrace_event_call->perf_events, perf_trace_buf, fgraph_data->cpu_data
and some local variables are percpu pointers missing __percpu markups.
Add them.
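As a hypothetical example of what the markup buys (names are
illustrative, not from the patch): a pointer obtained from alloc_percpu()
lives in the percpu address space and must go through the per-cpu
accessors, which sparse can now verify:

	struct hlist_head __percpu *heads = alloc_percpu(struct hlist_head);
	struct hlist_head *head;

	head = this_cpu_ptr(heads);	/* ok: accessor strips the annotation */
	head = heads;			/* sparse warns: wrong address space */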
Signed-off-by: Namhyung Kim
Acked-by: Tejun Heo
Cc: Steven Rostedt
Cc: Ingo Molnar
Cc: Peter Zijlstra
Cc: Arnaldo Carvalho de Melo
Cc: Paul Mackerras
Cc: Stephane Eranian
LKML-Reference: <1281498479-28551-1-git-send-email-namhyung@gmail.com>
Signed-off-by: Frederic Weisbecker
---
 include/linux/ftrace_event.h         |  4 ++--
 kernel/trace/trace_event_perf.c      | 15 ++++++++-------
 kernel/trace/trace_functions_graph.c |  2 +-
 3 files changed, 11 insertions(+), 10 deletions(-)

(limited to 'kernel')

diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 02b8b24f8f5..5f8ad7bec63 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -191,8 +191,8 @@ struct ftrace_event_call {
 	unsigned int		flags;
 
 #ifdef CONFIG_PERF_EVENTS
-	int			perf_refcount;
-	struct hlist_head	*perf_events;
+	int				perf_refcount;
+	struct hlist_head __percpu	*perf_events;
 #endif
 };
 
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index db2eae2efcf..92f5477a006 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -9,7 +9,7 @@
 #include
 #include "trace.h"
 
-static char *perf_trace_buf[PERF_NR_CONTEXTS];
+static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];
 
 /*
 * Force it to be aligned to unsigned long to avoid misaligned accesses
@@ -24,7 +24,7 @@ static int total_ref_count;
 static int perf_trace_event_init(struct ftrace_event_call *tp_event,
 				 struct perf_event *p_event)
 {
-	struct hlist_head *list;
+	struct hlist_head __percpu *list;
 	int ret = -ENOMEM;
 	int cpu;
 
@@ -42,11 +42,11 @@ static int perf_trace_event_init(struct ftrace_event_call *tp_event,
 	tp_event->perf_events = list;
 
 	if (!total_ref_count) {
-		char *buf;
+		char __percpu *buf;
 		int i;
 
 		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
-			buf = (char *)alloc_percpu(perf_trace_t);
+			buf = (char __percpu *)alloc_percpu(perf_trace_t);
 			if (!buf)
 				goto fail;
 
@@ -102,13 +102,14 @@ int perf_trace_init(struct perf_event *p_event)
 int perf_trace_enable(struct perf_event *p_event)
 {
 	struct ftrace_event_call *tp_event = p_event->tp_event;
+	struct hlist_head __percpu *pcpu_list;
 	struct hlist_head *list;
 
-	list = tp_event->perf_events;
-	if (WARN_ON_ONCE(!list))
+	pcpu_list = tp_event->perf_events;
+	if (WARN_ON_ONCE(!pcpu_list))
 		return -EINVAL;
 
-	list = this_cpu_ptr(list);
+	list = this_cpu_ptr(pcpu_list);
 	hlist_add_head_rcu(&p_event->hlist_entry, list);
 
 	return 0;
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 6bff2362578..fcb5a542cd2 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -23,7 +23,7 @@ struct fgraph_cpu_data {
 };
 
 struct fgraph_data {
-	struct fgraph_cpu_data *cpu_data;
+	struct fgraph_cpu_data __percpu *cpu_data;
 
 	/* Place to preserve last processed entry. */
 	struct ftrace_graph_ent_entry	ent;
-- 
cgit v1.2.3-70-g09d2


From ca5ecddfa8fcbd948c95530e7e817cee9fb43a3d Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Wed, 28 Apr 2010 14:39:09 -0700
Subject: rcu: define __rcu address space modifier for sparse

This commit provides definitions for the __rcu annotation defined
earlier. This annotation permits sparse to check for correct use of
RCU-protected pointers. If a pointer that is annotated with __rcu is
accessed directly (as opposed to via rcu_dereference(),
rcu_assign_pointer(), or one of their variants), sparse can be made
to complain. To enable such complaints, use the new default-disabled
CONFIG_SPARSE_RCU_POINTER kernel configuration option.
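A hypothetical example of what sparse then catches, assuming
CONFIG_SPARSE_RCU_POINTER=y (struct foo and gp are illustrative):

	struct foo __rcu *gp;		/* __rcu-annotated global pointer */
	struct foo *p;

	rcu_read_lock();
	p = rcu_dereference(gp);	/* ok: accessor does the __force cast */
	rcu_read_unlock();

	p = gp;				/* sparse complains: address space mismatch */
	rcu_assign_pointer(gp, p);	/* ok for updaters */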
Please note that these sparse complaints are intended to be a debugging
aid, -not- a code-style-enforcement mechanism. There are special
rcu_dereference_protected() and rcu_access_pointer() accessors for use
when RCU read-side protection is not required, for example, when no
other CPU has access to the data structure in question or while the
current CPU holds the update-side lock.

This patch also updates a number of docbook comments that were showing
their age.

Signed-off-by: Arnd Bergmann
Signed-off-by: Paul E. McKenney
Cc: Christopher Li
Reviewed-by: Josh Triplett
---
 include/linux/compiler.h |   4 +
 include/linux/rcupdate.h | 352 ++++++++++++++++++++++++++++-------------------
 include/linux/srcu.h     |  27 +++-
 kernel/rcupdate.c        |   6 +-
 lib/Kconfig.debug        |  13 ++
 5 files changed, 257 insertions(+), 145 deletions(-)

(limited to 'kernel')

diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index c1a62c56a66..320d6c94ff8 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -16,7 +16,11 @@
 # define __release(x)	__context__(x,-1)
 # define __cond_lock(x,c)	((c) ? ({ __acquire(x); 1; }) : 0)
 # define __percpu	__attribute__((noderef, address_space(3)))
+#ifdef CONFIG_SPARSE_RCU_POINTER
+# define __rcu		__attribute__((noderef, address_space(4)))
+#else
 # define __rcu
+#endif
 extern void __chk_user_ptr(const volatile void __user *);
 extern void __chk_io_ptr(const volatile void __iomem *);
 #else
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 9fbc54a2585..b973dea2d6b 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -41,6 +41,7 @@
 #include
 #include
 #include
+#include
 
 #ifdef CONFIG_RCU_TORTURE_TEST
 extern int rcutorture_runnable; /* for sysctl */
@@ -120,14 +121,15 @@ extern struct lockdep_map rcu_sched_lock_map;
 extern int debug_lockdep_rcu_enabled(void);
 
 /**
- * rcu_read_lock_held - might we be in RCU read-side critical section?
+ * rcu_read_lock_held() - might we be in RCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
- * prove otherwise.
+ * prove otherwise.  This is useful for debug checks in functions that
+ * require that they be called within an RCU read-side critical section.
 *
- * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
+ * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 */
 static inline int rcu_read_lock_held(void)
@@ -144,14 +146,16 @@ static inline int rcu_read_lock_held(void)
 extern int rcu_read_lock_bh_held(void);
 
 /**
- * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section?
+ * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 * RCU-sched read-side critical section.  In absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
- * read-side critical section.
+ * read-side critical section.  This is useful for debug checks in functions
+ * that required that they be called within an RCU-sched read-side
+ * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
@@ -220,41 +224,155 @@ extern int rcu_my_thread_group_empty(void);
 		} \
 	} while (0)
 
+#else /* #ifdef CONFIG_PROVE_RCU */
+
+#define __do_rcu_dereference_check(c) do { } while (0)
+
+#endif /* #else #ifdef CONFIG_PROVE_RCU */
+
+/*
+ * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
+ * and rcu_assign_pointer().  Some of these could be folded into their
+ * callers, but they are left separate in order to ease introduction of
+ * multiple flavors of pointers to match the multiple flavors of RCU
+ * (e.g., __rcu_bh, __rcu_sched, and __srcu), should this make sense in
+ * the future.
+ */
+#define __rcu_access_pointer(p, space) \
+	({ \
+		typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \
+		(void) (((typeof (*p) space *)p) == p); \
+		((typeof(*p) __force __kernel *)(_________p1)); \
+	})
+#define __rcu_dereference_check(p, c, space) \
+	({ \
+		typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \
+		__do_rcu_dereference_check(c); \
+		(void) (((typeof (*p) space *)p) == p); \
+		smp_read_barrier_depends(); \
+		((typeof(*p) __force __kernel *)(_________p1)); \
+	})
+#define __rcu_dereference_protected(p, c, space) \
+	({ \
+		__do_rcu_dereference_check(c); \
+		(void) (((typeof (*p) space *)p) == p); \
+		((typeof(*p) __force __kernel *)(p)); \
+	})
+
+#define __rcu_dereference_index_check(p, c) \
+	({ \
+		typeof(p) _________p1 = ACCESS_ONCE(p); \
+		__do_rcu_dereference_check(c); \
+		smp_read_barrier_depends(); \
+		(_________p1); \
+	})
+#define __rcu_assign_pointer(p, v, space) \
+	({ \
+		if (!__builtin_constant_p(v) || \
+		    ((v) != NULL)) \
+			smp_wmb(); \
+		(p) = (typeof(*v) __force space *)(v); \
+	})
+
+
+/**
+ * rcu_access_pointer() - fetch RCU pointer with no dereferencing
+ * @p: The pointer to read
+ *
+ * Return the value of the specified RCU-protected pointer, but omit the
+ * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
+ * when the value of this pointer is accessed, but the pointer is not
+ * dereferenced, for example, when testing an RCU-protected pointer against
+ * NULL.  Although rcu_access_pointer() may also be used in cases where
+ * update-side locks prevent the value of the pointer from changing, you
+ * should instead use rcu_dereference_protected() for this use case.
+ */
+#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
+
 /**
- * rcu_dereference_check - rcu_dereference with debug checking
+ * rcu_dereference_check() - rcu_dereference with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Do an rcu_dereference(), but check that the conditions under which the
- * dereference will take place are correct. Typically the conditions indicate
- * the various locking conditions that should be held at that point. The check
- * should return true if the conditions are satisfied.
+ * dereference will take place are correct.  Typically the conditions
+ * indicate the various locking conditions that should be held at that
+ * point.  The check should return true if the conditions are satisfied.
+ * An implicit check for being in an RCU read-side critical section
+ * (rcu_read_lock()) is included.
 *
 * For example:
 *
- * bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() ||
- *					 lockdep_is_held(&foo->lock));
+ * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock));
 *
 * could be used to indicate to lockdep that foo->bar may only be dereferenced
- * if either the RCU read lock is held, or that the lock required to replace
+ * if either rcu_read_lock() is held, or that the lock required to replace
 * the bar struct at foo->bar is held.
 *
 * Note that the list of conditions may also include indications of when a lock
 * need not be held, for example during initialisation or destruction of the
 * target struct:
 *
- * bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() ||
- *					 lockdep_is_held(&foo->lock) ||
+ * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) ||
 *					 atomic_read(&foo->usage) == 0);
+ *
+ * Inserts memory barriers on architectures that require them
+ * (currently only the Alpha), prevents the compiler from refetching
+ * (and from merging fetches), and, more importantly, documents exactly
+ * which pointers are protected by RCU and checks that the pointer is
+ * annotated as __rcu.
 */
 #define rcu_dereference_check(p, c) \
-	({ \
-		__do_rcu_dereference_check(c); \
-		rcu_dereference_raw(p); \
-	})
+	__rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu)
+
+/**
+ * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * This is the RCU-bh counterpart to rcu_dereference_check().
+ */
+#define rcu_dereference_bh_check(p, c) \
+	__rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu)
 
 /**
- * rcu_dereference_protected - fetch RCU pointer when updates prevented
+ * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * This is the RCU-sched counterpart to rcu_dereference_check().
+ */
+#define rcu_dereference_sched_check(p, c) \
+	__rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \
+				__rcu)
+
+#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/
+
+/**
+ * rcu_dereference_index_check() - rcu_dereference for indices with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * Similar to rcu_dereference_check(), but omits the sparse checking.
+ * This allows rcu_dereference_index_check() to be used on integers,
+ * which can then be used as array indices.  Attempting to use
+ * rcu_dereference_check() on an integer will give compiler warnings
+ * because the sparse address-space mechanism relies on dereferencing
+ * the RCU-protected pointer.  Dereferencing integers is not something
+ * that even gcc will put up with.
+ *
+ * Note that this function does not implicitly check for RCU read-side
+ * critical sections.  If this function gains lots of uses, it might
+ * make sense to provide versions for each flavor of RCU, but it does
+ * not make sense as of early 2010.
+ */
+#define rcu_dereference_index_check(p, c) \
+	__rcu_dereference_index_check((p), (c))
+
+/**
+ * rcu_dereference_protected() - fetch RCU pointer when updates prevented
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
 *
 * Return the value of the specified RCU-protected pointer, but omit
 * both the smp_read_barrier_depends() and the ACCESS_ONCE().  This
@@ -263,35 +381,61 @@ extern int rcu_my_thread_group_empty(void);
 * prevent the compiler from repeating this reference or combining it
 * with other references, so it should not be used without protection
 * of appropriate locks.
+ *
+ * This function is only for update-side use.  Using this function
+ * when protected only by rcu_read_lock() will result in infrequent
+ * but very ugly failures.
 */
 #define rcu_dereference_protected(p, c) \
-	({ \
-		__do_rcu_dereference_check(c); \
-		(p); \
-	})
+	__rcu_dereference_protected((p), (c), __rcu)
 
-#else /* #ifdef CONFIG_PROVE_RCU */
+/**
+ * rcu_dereference_bh_protected() - fetch RCU-bh pointer when updates prevented
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * This is the RCU-bh counterpart to rcu_dereference_protected().
+ */
+#define rcu_dereference_bh_protected(p, c) \
+	__rcu_dereference_protected((p), (c), __rcu)
 
-#define rcu_dereference_check(p, c)	rcu_dereference_raw(p)
-#define rcu_dereference_protected(p, c) (p)
+/**
+ * rcu_dereference_sched_protected() - fetch RCU-sched pointer when updates prevented
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * This is the RCU-sched counterpart to rcu_dereference_protected().
+ */
+#define rcu_dereference_sched_protected(p, c) \
+	__rcu_dereference_protected((p), (c), __rcu)
 
-#endif /* #else #ifdef CONFIG_PROVE_RCU */
 
 /**
- * rcu_access_pointer - fetch RCU pointer with no dereferencing
+ * rcu_dereference() - fetch RCU-protected pointer for dereferencing
+ * @p: The pointer to read, prior to dereferencing
 *
- * Return the value of the specified RCU-protected pointer, but omit the
- * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
- * when the value of this pointer is accessed, but the pointer is not
- * dereferenced, for example, when testing an RCU-protected pointer against
- * NULL.  This may also be used in cases where update-side locks prevent
- * the value of the pointer from changing, but rcu_dereference_protected()
- * is a lighter-weight primitive for this use case.
+ * This is a simple wrapper around rcu_dereference_check().
 */
-#define rcu_access_pointer(p)	ACCESS_ONCE(p)
+#define rcu_dereference(p) rcu_dereference_check(p, 0)
 
 /**
- * rcu_read_lock - mark the beginning of an RCU read-side critical section.
+ * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Makes rcu_dereference_check() do the dirty work.
+ */
+#define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)
+
+/**
+ * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Makes rcu_dereference_check() do the dirty work.
+ */ +#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0) + +/** + * rcu_read_lock() - mark the beginning of an RCU read-side critical section * * When synchronize_rcu() is invoked on one CPU while other CPUs * are within RCU read-side critical sections, then the @@ -337,7 +481,7 @@ static inline void rcu_read_lock(void) */ /** - * rcu_read_unlock - marks the end of an RCU read-side critical section. + * rcu_read_unlock() - marks the end of an RCU read-side critical section. * * See rcu_read_lock() for more information. */ @@ -349,15 +493,16 @@ static inline void rcu_read_unlock(void) } /** - * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section + * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section * * This is equivalent of rcu_read_lock(), but to be used when updates - * are being done using call_rcu_bh(). Since call_rcu_bh() callbacks - * consider completion of a softirq handler to be a quiescent state, - * a process in RCU read-side critical section must be protected by - * disabling softirqs. Read-side critical sections in interrupt context - * can use just rcu_read_lock(). - * + * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since + * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a + * softirq handler to be a quiescent state, a process in RCU read-side + * critical section must be protected by disabling softirqs. Read-side + * critical sections in interrupt context can use just rcu_read_lock(), + * though this should at least be commented to avoid confusing people + * reading the code. */ static inline void rcu_read_lock_bh(void) { @@ -379,13 +524,12 @@ static inline void rcu_read_unlock_bh(void) } /** - * rcu_read_lock_sched - mark the beginning of a RCU-classic critical section + * rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section * - * Should be used with either - * - synchronize_sched() - * or - * - call_rcu_sched() and rcu_barrier_sched() - * on the write-side to insure proper synchronization. + * This is equivalent of rcu_read_lock(), but to be used when updates + * are being done using call_rcu_sched() or synchronize_rcu_sched(). + * Read-side critical sections can also be introduced by anything that + * disables preemption, including local_irq_disable() and friends. */ static inline void rcu_read_lock_sched(void) { @@ -420,54 +564,14 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) preempt_enable_notrace(); } - /** - * rcu_dereference_raw - fetch an RCU-protected pointer + * rcu_assign_pointer() - assign to RCU-protected pointer + * @p: pointer to assign to + * @v: value to assign (publish) * - * The caller must be within some flavor of RCU read-side critical - * section, or must be otherwise preventing the pointer from changing, - * for example, by holding an appropriate lock. This pointer may later - * be safely dereferenced. It is the caller's responsibility to have - * done the right thing, as this primitive does no checking of any kind. - * - * Inserts memory barriers on architectures that require them - * (currently only the Alpha), and, more importantly, documents - * exactly which pointers are protected by RCU. - */ -#define rcu_dereference_raw(p) ({ \ - typeof(p) _________p1 = ACCESS_ONCE(p); \ - smp_read_barrier_depends(); \ - (_________p1); \ - }) - -/** - * rcu_dereference - fetch an RCU-protected pointer, checking for RCU - * - * Makes rcu_dereference_check() do the dirty work. 
- */ -#define rcu_dereference(p) \ - rcu_dereference_check(p, rcu_read_lock_held()) - -/** - * rcu_dereference_bh - fetch an RCU-protected pointer, checking for RCU-bh - * - * Makes rcu_dereference_check() do the dirty work. - */ -#define rcu_dereference_bh(p) \ - rcu_dereference_check(p, rcu_read_lock_bh_held()) - -/** - * rcu_dereference_sched - fetch RCU-protected pointer, checking for RCU-sched - * - * Makes rcu_dereference_check() do the dirty work. - */ -#define rcu_dereference_sched(p) \ - rcu_dereference_check(p, rcu_read_lock_sched_held()) - -/** - * rcu_assign_pointer - assign (publicize) a pointer to a newly - * initialized structure that will be dereferenced by RCU read-side - * critical sections. Returns the value assigned. + * Assigns the specified value to the specified RCU-protected + * pointer, ensuring that any concurrent RCU readers will see + * any prior initialization. Returns the value assigned. * * Inserts memory barriers on architectures that require them * (pretty much all of them other than x86), and also prevents @@ -476,14 +580,17 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) * call documents which pointers will be dereferenced by RCU read-side * code. */ - #define rcu_assign_pointer(p, v) \ - ({ \ - if (!__builtin_constant_p(v) || \ - ((v) != NULL)) \ - smp_wmb(); \ - (p) = (v); \ - }) + __rcu_assign_pointer((p), (v), __rcu) + +/** + * RCU_INIT_POINTER() - initialize an RCU protected pointer + * + * Initialize an RCU-protected pointer in such a way to avoid RCU-lockdep + * splats. + */ +#define RCU_INIT_POINTER(p, v) \ + p = (typeof(*v) __force __rcu *)(v) /* Infrastructure to implement the synchronize_() primitives. */ @@ -495,7 +602,7 @@ struct rcu_synchronize { extern void wakeme_after_rcu(struct rcu_head *head); /** - * call_rcu - Queue an RCU callback for invocation after a grace period. + * call_rcu() - Queue an RCU callback for invocation after a grace period. * @head: structure to be used for queueing the RCU updates. * @func: actual update function to be invoked after the grace period * @@ -509,7 +616,7 @@ extern void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *head)); /** - * call_rcu_bh - Queue an RCU for invocation after a quicker grace period. + * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period. * @head: structure to be used for queueing the RCU updates. * @func: actual update function to be invoked after the grace period * @@ -566,37 +673,4 @@ static inline void debug_rcu_head_unqueue(struct rcu_head *head) } #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ -#ifndef CONFIG_PROVE_RCU -#define __do_rcu_dereference_check(c) do { } while (0) -#endif /* #ifdef CONFIG_PROVE_RCU */ - -#define __rcu_dereference_index_check(p, c) \ - ({ \ - typeof(p) _________p1 = ACCESS_ONCE(p); \ - __do_rcu_dereference_check(c); \ - smp_read_barrier_depends(); \ - (_________p1); \ - }) - -/** - * rcu_dereference_index_check() - rcu_dereference for indices with debug checking - * @p: The pointer to read, prior to dereferencing - * @c: The conditions under which the dereference will take place - * - * Similar to rcu_dereference_check(), but omits the sparse checking. - * This allows rcu_dereference_index_check() to be used on integers, - * which can then be used as array indices. Attempting to use - * rcu_dereference_check() on an integer will give compiler warnings - * because the sparse address-space mechanism relies on dereferencing - * the RCU-protected pointer. 
Dereferencing integers is not something - * that even gcc will put up with. - * - * Note that this function does not implicitly check for RCU read-side - * critical sections. If this function gains lots of uses, it might - * make sense to provide versions for each flavor of RCU, but it does - * not make sense as of early 2010. - */ -#define rcu_dereference_index_check(p, c) \ - __rcu_dereference_index_check((p), (c)) - #endif /* __LINUX_RCUPDATE_H */ diff --git a/include/linux/srcu.h b/include/linux/srcu.h index 4d5d2f546db..6f456a720ff 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -108,12 +108,31 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp) #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ /** - * srcu_dereference - fetch SRCU-protected pointer with checking + * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing + * @p: the pointer to fetch and protect for later dereferencing + * @sp: pointer to the srcu_struct, which is used to check that we + * really are in an SRCU read-side critical section. + * @c: condition to check for update-side use * - * Makes rcu_dereference_check() do the dirty work. + * If PROVE_RCU is enabled, invoking this outside of an RCU read-side + * critical section will result in an RCU-lockdep splat, unless @c evaluates + * to 1. The @c argument will normally be a logical expression containing + * lockdep_is_held() calls. */ -#define srcu_dereference(p, sp) \ - rcu_dereference_check(p, srcu_read_lock_held(sp)) +#define srcu_dereference_check(p, sp, c) \ + __rcu_dereference_check((p), srcu_read_lock_held(sp) || (c), __rcu) + +/** + * srcu_dereference - fetch SRCU-protected pointer for later dereferencing + * @p: the pointer to fetch and protect for later dereferencing + * @sp: pointer to the srcu_struct, which is used to check that we + * really are in an SRCU read-side critical section. + * + * Makes rcu_dereference_check() do the dirty work. If PROVE_RCU + * is enabled, invoking this outside of an RCU read-side critical + * section will result in an RCU-lockdep splat. + */ +#define srcu_dereference(p, sp) srcu_dereference_check((p), (sp), 0) /** * srcu_read_lock - register a new reader for an SRCU-protected structure. diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index 4d169835fb3..6c79e851521 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c @@ -73,12 +73,14 @@ int debug_lockdep_rcu_enabled(void) EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled); /** - * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section? + * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section? * * Check for bottom half being disabled, which covers both the * CONFIG_PROVE_RCU and not cases. Note that if someone uses * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled) - * will show the situation. + * will show the situation. This is useful for debug checks in functions + * that require that they be called within an RCU read-side critical + * section. * * Check debug_lockdep_rcu_enabled() to prevent false positives during boot. */ diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 1b4afd2e6ca..12465f2ef76 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -539,6 +539,19 @@ config PROVE_RCU_REPEATEDLY disabling, allowing multiple RCU-lockdep warnings to be printed on a single reboot. 
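For orientation, a minimal sketch of how the srcu_dereference() family updated above is typically used; the srcu_struct, the protected pointer, and do_something() are invented names, not part of these patches:

	int idx;
	struct foo *p;

	idx = srcu_read_lock(&my_srcu);			/* enter the SRCU read-side section */
	p = srcu_dereference(my_foo_ptr, &my_srcu);	/* fetch with lockdep checking */
	if (p)
		do_something(p);
	srcu_read_unlock(&my_srcu, idx);		/* return the cookie from srcu_read_lock() */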
+config SPARSE_RCU_POINTER + bool "RCU debugging: sparse-based checks for pointer usage" + default n + help + This feature enables the __rcu sparse annotation for + RCU-protected pointers. This annotation will cause sparse + to flag any non-RCU use of annotated pointers. This can be + helpful when debugging RCU usage. Please note that this feature + is not intended to enforce code cleanliness; it is instead merely + a debugging aid. + + Say Y to make sparse flag questionable use of RCU-protected pointers. + Say N if you are unsure. config LOCKDEP -- cgit v1.2.3-70-g09d2 From 67bdbffd696f29a0b68aa8daa285783a06651583 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 25 Feb 2010 16:55:13 +0100 Subject: rculist: avoid __rcu annotations This avoids warnings from missing __rcu annotations in the rculist implementation, making it possible to use the same lists in both RCU and non-RCU cases. We can add rculist annotations later, together with lockdep support for rculist, which is missing as well, but that may involve changing all the users. Signed-off-by: Arnd Bergmann Signed-off-by: Paul E. McKenney Cc: Pavel Emelyanov Cc: Sukadev Bhattiprolu Reviewed-by: Josh Triplett --- include/linux/rculist.h | 53 +++++++++++++++++++++++++++---------------- include/linux/rculist_nulls.h | 16 +++++++++---- kernel/pid.c | 2 +- 3 files changed, 46 insertions(+), 25 deletions(-) (limited to 'kernel') diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 4ec3b38ce9c..c10b1050dbe 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -9,6 +9,12 @@ #include <linux/list.h> #include <linux/rcupdate.h> +/* + * return the ->next pointer of a list_head in an rcu safe + * way, we must not access it directly + */ +#define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next))) + /* * Insert a new entry between two known consecutive entries. * @@ -20,7 +26,7 @@ static inline void __list_add_rcu(struct list_head *new, { new->next = next; new->prev = prev; - rcu_assign_pointer(prev->next, new); + rcu_assign_pointer(list_next_rcu(prev), new); next->prev = new; } @@ -138,7 +144,7 @@ static inline void list_replace_rcu(struct list_head *old, { new->next = old->next; new->prev = old->prev; - rcu_assign_pointer(new->prev->next, new); + rcu_assign_pointer(list_next_rcu(new->prev), new); new->next->prev = new; old->prev = LIST_POISON2; } @@ -193,7 +199,7 @@ static inline void list_splice_init_rcu(struct list_head *list, */ last->next = at; - rcu_assign_pointer(head->next, first); + rcu_assign_pointer(list_next_rcu(head), first); first->prev = head; at->prev = last; } @@ -208,7 +214,9 @@ static inline void list_splice_init_rcu(struct list_head *list, * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
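 *
 * A minimal sketch; the list head, structure, and member names are
 * invented for illustration:
 *
 *	struct foo *first;
 *
 *	rcu_read_lock();
 *	first = list_entry_rcu(foo_list.next, struct foo, list);
 *	if (&first->list != &foo_list)	/* list might be empty */
 *		do_something(first);
 *	rcu_read_unlock();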
*/ #define list_entry_rcu(ptr, type, member) \ - container_of(rcu_dereference_raw(ptr), type, member) + ({typeof (*ptr) __rcu *__ptr = (typeof (*ptr) __rcu __force *)ptr; \ + container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \ + }) /** * list_first_entry_rcu - get the first element from a list @@ -225,9 +233,9 @@ static inline void list_splice_init_rcu(struct list_head *list, list_entry_rcu((ptr)->next, type, member) #define __list_for_each_rcu(pos, head) \ - for (pos = rcu_dereference_raw((head)->next); \ + for (pos = rcu_dereference_raw(list_next_rcu(head)); \ pos != (head); \ - pos = rcu_dereference_raw(pos->next)) + pos = rcu_dereference_raw(list_next_rcu((pos))) /** * list_for_each_entry_rcu - iterate over rcu list of given type @@ -257,9 +265,9 @@ static inline void list_splice_init_rcu(struct list_head *list, * as long as the traversal is guarded by rcu_read_lock(). */ #define list_for_each_continue_rcu(pos, head) \ - for ((pos) = rcu_dereference_raw((pos)->next); \ + for ((pos) = rcu_dereference_raw(list_next_rcu(pos)); \ prefetch((pos)->next), (pos) != (head); \ - (pos) = rcu_dereference_raw((pos)->next)) + (pos) = rcu_dereference_raw(list_next_rcu(pos))) /** * list_for_each_entry_continue_rcu - continue iteration over list of given type @@ -314,12 +322,19 @@ static inline void hlist_replace_rcu(struct hlist_node *old, new->next = next; new->pprev = old->pprev; - rcu_assign_pointer(*new->pprev, new); + rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new); if (next) new->next->pprev = &new->next; old->pprev = LIST_POISON2; } +/* + * return the first or the next element in an RCU protected hlist + */ +#define hlist_first_rcu(head) (*((struct hlist_node __rcu **)(&(head)->first))) +#define hlist_next_rcu(node) (*((struct hlist_node __rcu **)(&(node)->next))) +#define hlist_pprev_rcu(node) (*((struct hlist_node __rcu **)((node)->pprev))) + /** * hlist_add_head_rcu * @n: the element to add to the hash list. @@ -346,7 +361,7 @@ static inline void hlist_add_head_rcu(struct hlist_node *n, n->next = first; n->pprev = &h->first; - rcu_assign_pointer(h->first, n); + rcu_assign_pointer(hlist_first_rcu(h), n); if (first) first->pprev = &n->next; } @@ -374,7 +389,7 @@ static inline void hlist_add_before_rcu(struct hlist_node *n, { n->pprev = next->pprev; n->next = next; - rcu_assign_pointer(*(n->pprev), n); + rcu_assign_pointer(hlist_pprev_rcu(n), n); next->pprev = &n->next; } @@ -401,15 +416,15 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, { n->next = prev->next; n->pprev = &prev->next; - rcu_assign_pointer(prev->next, n); + rcu_assign_pointer(hlist_next_rcu(prev), n); if (n->next) n->next->pprev = &n->next; } -#define __hlist_for_each_rcu(pos, head) \ - for (pos = rcu_dereference((head)->first); \ - pos && ({ prefetch(pos->next); 1; }); \ - pos = rcu_dereference(pos->next)) +#define __hlist_for_each_rcu(pos, head) \ + for (pos = rcu_dereference(hlist_first_rcu(head)); \ + pos && ({ prefetch(pos->next); 1; }); \ + pos = rcu_dereference(hlist_next_rcu(pos))) /** * hlist_for_each_entry_rcu - iterate over rcu list of given type @@ -422,11 +437,11 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, * the _rcu list-mutation primitives such as hlist_add_head_rcu() * as long as the traversal is guarded by rcu_read_lock(). 
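 *
 * A minimal sketch; the hash table, structure, and key names are
 * invented. Note the four-argument form of this era, which threads a
 * separate struct hlist_node cursor through the loop:
 *
 *	struct foo *f;
 *	struct hlist_node *n;
 *
 *	rcu_read_lock();
 *	hlist_for_each_entry_rcu(f, n, &foo_table[hash], hnode)
 *		if (f->key == key)
 *			do_something(f);
 *	rcu_read_unlock();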
*/ -#define hlist_for_each_entry_rcu(tpos, pos, head, member) \ - for (pos = rcu_dereference_raw((head)->first); \ +#define hlist_for_each_entry_rcu(tpos, pos, head, member) \ + for (pos = rcu_dereference_raw(hlist_first_rcu(head)); \ pos && ({ prefetch(pos->next); 1; }) && \ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ - pos = rcu_dereference_raw(pos->next)) + pos = rcu_dereference_raw(hlist_next_rcu(pos))) /** * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h index b70ffe53cb9..2ae13714828 100644 --- a/include/linux/rculist_nulls.h +++ b/include/linux/rculist_nulls.h @@ -37,6 +37,12 @@ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n) } } +#define hlist_nulls_first_rcu(head) \ + (*((struct hlist_nulls_node __rcu __force **)&(head)->first)) + +#define hlist_nulls_next_rcu(node) \ + (*((struct hlist_nulls_node __rcu __force **)&(node)->next)) + /** * hlist_nulls_del_rcu - deletes entry from hash list without re-initialization * @n: the element to delete from the hash list. @@ -88,7 +94,7 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n, n->next = first; n->pprev = &h->first; - rcu_assign_pointer(h->first, n); + rcu_assign_pointer(hlist_nulls_first_rcu(h), n); if (!is_a_nulls(first)) first->pprev = &n->next; } @@ -100,11 +106,11 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n, * @member: the name of the hlist_nulls_node within the struct. * */ -#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \ - for (pos = rcu_dereference_raw((head)->first); \ - (!is_a_nulls(pos)) && \ +#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \ + for (pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \ + (!is_a_nulls(pos)) && \ ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \ - pos = rcu_dereference_raw(pos->next)) + pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos))) #endif #endif diff --git a/kernel/pid.c b/kernel/pid.c index d55c6fb8d08..0f90c2f713f 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -401,7 +401,7 @@ struct task_struct *pid_task(struct pid *pid, enum pid_type type) struct task_struct *result = NULL; if (pid) { struct hlist_node *first; - first = rcu_dereference_check(pid->tasks[type].first, + first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]), rcu_read_lock_held() || lockdep_tasklist_lock_is_held()); if (first) -- cgit v1.2.3-70-g09d2 From 2c392b8c3450ceb69ba1b93cb0cddb3998fb8cdc Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 24 Feb 2010 19:41:39 +0100 Subject: cgroups: __rcu annotations Signed-off-by: Arnd Bergmann Signed-off-by: Paul E. 
McKenney Acked-by: Paul Menage Cc: Li Zefan Reviewed-by: Josh Triplett --- include/linux/cgroup.h | 4 ++-- include/linux/sched.h | 2 +- kernel/cgroup.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index ed3e92e41c6..3cb7d04308c 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -75,7 +75,7 @@ struct cgroup_subsys_state { unsigned long flags; /* ID for this css, if possible */ - struct css_id *id; + struct css_id __rcu *id; }; /* bits in struct cgroup_subsys_state flags field */ @@ -205,7 +205,7 @@ struct cgroup { struct list_head children; /* my children */ struct cgroup *parent; /* my parent */ - struct dentry *dentry; /* cgroup fs entry, RCU protected */ + struct dentry __rcu *dentry; /* cgroup fs entry, RCU protected */ /* Private pointers for each registered subsystem */ struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; diff --git a/include/linux/sched.h b/include/linux/sched.h index 1e2a6db2d7d..bbffd087476 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1418,7 +1418,7 @@ struct task_struct { #endif #ifdef CONFIG_CGROUPS /* Control Group info protected by css_set_lock */ - struct css_set *cgroups; + struct css_set __rcu *cgroups; /* cg_list protected by css_set_lock and tsk->alloc_lock */ struct list_head cg_list; #endif diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 192f88c5b0f..e5c5497a7dc 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -138,7 +138,7 @@ struct css_id { * is called after synchronize_rcu(). But for safe use, css_is_removed() * css_tryget() should be used for avoiding race. */ - struct cgroup_subsys_state *css; + struct cgroup_subsys_state __rcu *css; /* * ID of this css. */ -- cgit v1.2.3-70-g09d2 From e546f485e1d7520ca0200502cdcc11b503f4805c Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Mon, 21 Jun 2010 16:57:42 +0800 Subject: rcutorture: add random preemption Add random preemption to help us torture preemptible RCU. srcu_read_delay() also calls rcu_read_delay() for shorter delays. Added comment to preempt_schedule() call indicating that no quiescent states happen if preemption is disabled. Signed-off-by: Lai Jiangshan Signed-off-by: Paul E. McKenney Reviewed-by: Josh Triplett --- kernel/rcutorture.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'kernel') diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c index 2e2726d790b..729710273dc 100644 --- a/kernel/rcutorture.c +++ b/kernel/rcutorture.c @@ -303,6 +303,10 @@ static void rcu_read_delay(struct rcu_random_state *rrsp) mdelay(longdelay_ms); if (!(rcu_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) udelay(shortdelay_us); +#ifdef CONFIG_PREEMPT + if (!preempt_count() && !(rcu_random(rrsp) % (nrealreaders * 20000))) + preempt_schedule(); /* No QS if preempt_disable() in effect */ +#endif } static void rcu_torture_read_unlock(int idx) __releases(RCU) @@ -536,6 +540,8 @@ static void srcu_read_delay(struct rcu_random_state *rrsp) delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick); if (!delay) schedule_timeout_interruptible(longdelay); + else + rcu_read_delay(rrsp); } static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl) -- cgit v1.2.3-70-g09d2 From 394f99a9007d4274f7076bb8553ab0ff9707688b Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Mon, 28 Jun 2010 16:25:04 +0800 Subject: rcu: simplify the usage of percpu data &percpu_data is compatible with allocated percpu data.
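As a minimal illustration of that compatibility (the variable names below are invented, not taken from this patch), statically defined and dynamically allocated per-CPU data are reached through the same accessors:

	DEFINE_PER_CPU(struct rcu_data, my_static_rdp);	/* static per-CPU variable */
	struct rcu_data __percpu *my_dyn_rdp = alloc_percpu(struct rcu_data);

	struct rcu_data *rdp;

	rdp = per_cpu_ptr(&my_static_rdp, cpu);	/* static data, a given CPU */
	rdp = per_cpu_ptr(my_dyn_rdp, cpu);	/* dynamic data, a given CPU */
	rdp = this_cpu_ptr(my_dyn_rdp);		/* dynamic data, the current CPU */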
And we use it and remove the "->rda[NR_CPUS]" array, saving significant storage on systems with large numbers of CPUs. This does add an additional level of indirection and thus an additional cache line referenced, but because ->rda is not used on the read side, this is OK. Signed-off-by: Lai Jiangshan Reviewed-by: Tejun Heo Signed-off-by: Paul E. McKenney Reviewed-by: Josh Triplett --- kernel/rcutree.c | 42 +++++++++++++++--------------------------- kernel/rcutree.h | 2 +- kernel/rcutree_plugin.h | 4 ++-- kernel/rcutree_trace.c | 2 +- 4 files changed, 19 insertions(+), 31 deletions(-) (limited to 'kernel') diff --git a/kernel/rcutree.c b/kernel/rcutree.c index d5bc43976c5..5b1c3c231ba 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -712,7 +712,7 @@ static void rcu_start_gp(struct rcu_state *rsp, unsigned long flags) __releases(rcu_get_root(rsp)->lock) { - struct rcu_data *rdp = rsp->rda[smp_processor_id()]; + struct rcu_data *rdp = this_cpu_ptr(rsp->rda); struct rcu_node *rnp = rcu_get_root(rsp); if (!cpu_needs_another_gp(rsp, rdp) || rsp->fqs_active) { @@ -960,7 +960,7 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp) static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp) { int i; - struct rcu_data *rdp = rsp->rda[smp_processor_id()]; + struct rcu_data *rdp = this_cpu_ptr(rsp->rda); if (rdp->nxtlist == NULL) return; /* irqs disabled, so comparison is stable. */ @@ -984,7 +984,7 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp) struct rcu_data *rdp; raw_spin_lock_irqsave(&rsp->onofflock, flags); - rdp = rsp->rda[smp_processor_id()]; + rdp = this_cpu_ptr(rsp->rda); if (rsp->orphan_cbs_list == NULL) { raw_spin_unlock_irqrestore(&rsp->onofflock, flags); return; @@ -1007,7 +1007,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) unsigned long flags; unsigned long mask; int need_report = 0; - struct rcu_data *rdp = rsp->rda[cpu]; + struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); struct rcu_node *rnp; /* Exclude any attempts to start a new grace period. */ @@ -1226,7 +1226,8 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *)) cpu = rnp->grplo; bit = 1; for (; cpu <= rnp->grphi; cpu++, bit <<= 1) { - if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu])) + if ((rnp->qsmask & bit) != 0 && + f(per_cpu_ptr(rsp->rda, cpu))) mask |= bit; } if (mask != 0) { @@ -1402,7 +1403,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), * a quiescent state betweentimes. */ local_irq_save(flags); - rdp = rsp->rda[smp_processor_id()]; + rdp = this_cpu_ptr(rsp->rda); rcu_process_gp_end(rsp, rdp); check_for_new_grace_period(rsp, rdp); @@ -1701,7 +1702,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) { unsigned long flags; int i; - struct rcu_data *rdp = rsp->rda[cpu]; + struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); struct rcu_node *rnp = rcu_get_root(rsp); /* Set up local state, ensuring consistent view of global state. */ @@ -1729,7 +1730,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable) { unsigned long flags; unsigned long mask; - struct rcu_data *rdp = rsp->rda[cpu]; + struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); struct rcu_node *rnp = rcu_get_root(rsp); /* Set up local state, ensuring consistent view of global state. */ @@ -1865,7 +1866,8 @@ static void __init rcu_init_levelspread(struct rcu_state *rsp) /* * Helper function for rcu_init() that initializes one rcu_state structure. 
*/ -static void __init rcu_init_one(struct rcu_state *rsp) +static void __init rcu_init_one(struct rcu_state *rsp, + struct rcu_data __percpu *rda) { static char *buf[] = { "rcu_node_level_0", "rcu_node_level_1", @@ -1918,37 +1920,23 @@ static void __init rcu_init_one(struct rcu_state *rsp) } } + rsp->rda = rda; rnp = rsp->level[NUM_RCU_LVLS - 1]; for_each_possible_cpu(i) { while (i > rnp->grphi) rnp++; - rsp->rda[i]->mynode = rnp; + per_cpu_ptr(rsp->rda, i)->mynode = rnp; rcu_boot_init_percpu_data(i, rsp); } } -/* - * Helper macro for __rcu_init() and __rcu_init_preempt(). To be used - * nowhere else! Assigns leaf node pointers into each CPU's rcu_data - * structure. - */ -#define RCU_INIT_FLAVOR(rsp, rcu_data) \ -do { \ - int i; \ - \ - for_each_possible_cpu(i) { \ - (rsp)->rda[i] = &per_cpu(rcu_data, i); \ - } \ - rcu_init_one(rsp); \ -} while (0) - void __init rcu_init(void) { int cpu; rcu_bootup_announce(); - RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data); - RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data); + rcu_init_one(&rcu_sched_state, &rcu_sched_data); + rcu_init_one(&rcu_bh_state, &rcu_bh_data); __rcu_init_preempt(); open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); diff --git a/kernel/rcutree.h b/kernel/rcutree.h index 14c040b18ed..5ce197e8779 100644 --- a/kernel/rcutree.h +++ b/kernel/rcutree.h @@ -283,7 +283,7 @@ struct rcu_state { struct rcu_node *level[NUM_RCU_LVLS]; /* Hierarchy levels. */ u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */ u8 levelspread[NUM_RCU_LVLS]; /* kids/node in each level. */ - struct rcu_data *rda[NR_CPUS]; /* array of rdp pointers. */ + struct rcu_data __percpu *rda; /* pointer of percu rcu_data. */ /* The following fields are guarded by the root rcu_node's lock. */ diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index 0e4f420245d..9906f85c778 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h @@ -154,7 +154,7 @@ static void rcu_preempt_note_context_switch(int cpu) (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) { /* Possibly blocking in an RCU read-side critical section. */ - rdp = rcu_preempt_state.rda[cpu]; + rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu); rnp = rdp->mynode; raw_spin_lock_irqsave(&rnp->lock, flags); t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED; @@ -771,7 +771,7 @@ static void rcu_preempt_send_cbs_to_orphanage(void) */ static void __init __rcu_init_preempt(void) { - RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data); + rcu_init_one(&rcu_preempt_state, &rcu_preempt_data); } /* diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c index 36c95b45738..458e032a3a3 100644 --- a/kernel/rcutree_trace.c +++ b/kernel/rcutree_trace.c @@ -262,7 +262,7 @@ static void print_rcu_pendings(struct seq_file *m, struct rcu_state *rsp) struct rcu_data *rdp; for_each_possible_cpu(cpu) { - rdp = rsp->rda[cpu]; + rdp = per_cpu_ptr(rsp->rda, cpu); if (rdp->beenonline) print_one_rcu_pending(m, rdp); } -- cgit v1.2.3-70-g09d2 From 4221a9918e38b7494cee341dda7b7b4bb8c04bde Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Sat, 26 Jun 2010 01:08:19 +0900 Subject: Add RCU check for find_task_by_vpid(). find_task_by_vpid() says "Must be called under rcu_read_lock().". But due to commit 3120438 "rcu: Disable lockdep checking in RCU list-traversal primitives", we are currently unable to catch "find_task_by_vpid() with tasklist_lock held but RCU lock not held" errors due to the RCU-lockdep checks being suppressed in the RCU variants of the struct list_head traversals. 
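For context, the calling convention that this check enforces looks roughly as follows; the pid variable and the task pinning around the lookup are illustrative, not part of the patch:

	struct task_struct *task;

	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);	/* pin the task before leaving the RCU section */
	rcu_read_unlock();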
This commit therefore places an explicit check for being in an RCU read-side critical section in find_task_by_pid_ns(). =================================================== [ INFO: suspicious rcu_dereference_check() usage. ] --------------------------------------------------- kernel/pid.c:386 invoked rcu_dereference_check() without protection! other info that might help us debug this: rcu_scheduler_active = 1, debug_locks = 1 1 lock held by rc.sysinit/1102: #0: (tasklist_lock){.+.+..}, at: [] sys_setpgid+0x40/0x160 stack backtrace: Pid: 1102, comm: rc.sysinit Not tainted 2.6.35-rc3-dirty #1 Call Trace: [] lockdep_rcu_dereference+0x94/0xb0 [] find_task_by_pid_ns+0x6d/0x70 [] find_task_by_vpid+0x18/0x20 [] sys_setpgid+0x47/0x160 [] sysenter_do_call+0x12/0x36 Commit updated to use a new rcu_lockdep_assert() exported API rather than the old internal __do_rcu_dereference_check(). Signed-off-by: Tetsuo Handa Signed-off-by: Paul E. McKenney Reviewed-by: Josh Triplett --- include/linux/rcupdate.h | 14 +++++++++----- kernel/pid.c | 1 + 2 files changed, 10 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index b973dea2d6b..b124bc6a75a 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -215,7 +215,11 @@ static inline int rcu_read_lock_sched_held(void) extern int rcu_my_thread_group_empty(void); -#define __do_rcu_dereference_check(c) \ +/** + * rcu_lockdep_assert - emit lockdep splat if specified condition not met + * @c: condition to check + */ +#define rcu_lockdep_assert(c) \ do { \ static bool __warned; \ if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \ @@ -226,7 +230,7 @@ extern int rcu_my_thread_group_empty(void); #else /* #ifdef CONFIG_PROVE_RCU */ -#define __do_rcu_dereference_check(c) do { } while (0) +#define rcu_lockdep_assert(c) do { } while (0) #endif /* #else #ifdef CONFIG_PROVE_RCU */ @@ -247,14 +251,14 @@ extern int rcu_my_thread_group_empty(void); #define __rcu_dereference_check(p, c, space) \ ({ \ typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \ - __do_rcu_dereference_check(c); \ + rcu_lockdep_assert(c); \ (void) (((typeof (*p) space *)p) == p); \ smp_read_barrier_depends(); \ ((typeof(*p) __force __kernel *)(_________p1)); \ }) #define __rcu_dereference_protected(p, c, space) \ ({ \ - __do_rcu_dereference_check(c); \ + rcu_lockdep_assert(c); \ (void) (((typeof (*p) space *)p) == p); \ ((typeof(*p) __force __kernel *)(p)); \ }) @@ -262,7 +266,7 @@ extern int rcu_my_thread_group_empty(void); #define __rcu_dereference_index_check(p, c) \ ({ \ typeof(p) _________p1 = ACCESS_ONCE(p); \ - __do_rcu_dereference_check(c); \ + rcu_lockdep_assert(c); \ smp_read_barrier_depends(); \ (_________p1); \ }) diff --git a/kernel/pid.c b/kernel/pid.c index 0f90c2f713f..39b65b69584 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -416,6 +416,7 @@ EXPORT_SYMBOL(pid_task); */ struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns) { + rcu_lockdep_assert(rcu_read_lock_held()); return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID); } -- cgit v1.2.3-70-g09d2 From b163760e37047781b37c412cde54d146ac4b651f Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 2 Jun 2010 16:21:38 -0700 Subject: rcu: make CPU stall warning timeout configurable Also set the default to 60 seconds, up from the previous hard-coded timeout of 10 seconds. This allows people who care to set short timeouts, while preventing people with unusual configurations (make randconfig!!!)
from being bothered with spurious CPU stall warnings. Signed-off-by: Paul E. McKenney Reviewed-by: Josh Triplett --- kernel/rcutree.h | 5 +++-- lib/Kconfig.debug | 11 +++++++++++ 2 files changed, 14 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/rcutree.h b/kernel/rcutree.h index 5ce197e8779..183ebf40531 100644 --- a/kernel/rcutree.h +++ b/kernel/rcutree.h @@ -254,9 +254,10 @@ struct rcu_data { #define RCU_STALL_DELAY_DELTA 0 #endif -#define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ + RCU_STALL_DELAY_DELTA) +#define RCU_SECONDS_TILL_STALL_CHECK (CONFIG_RCU_CPU_STALL_TIMEOUT * HZ + \ + RCU_STALL_DELAY_DELTA) /* for rsp->jiffies_stall */ -#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ + RCU_STALL_DELAY_DELTA) +#define RCU_SECONDS_TILL_STALL_RECHECK (3 * RCU_SECONDS_TILL_STALL_CHECK + 30) /* for rsp->jiffies_stall */ #define RCU_STALL_RAT_DELAY 2 /* Allow other CPUs time */ /* to take at least one */ diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 12465f2ef76..b5005b76210 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -845,6 +845,17 @@ config RCU_CPU_STALL_DETECTOR Say Y if you are unsure. +config RCU_CPU_STALL_TIMEOUT + int "RCU CPU stall timeout in seconds" + depends on RCU_CPU_STALL_DETECTOR + range 3 300 + default 60 + help + If a given RCU grace period extends more than the specified + number of seconds, a CPU stall warning is printed. If the + RCU grace period persists, additional CPU stall warnings are + printed at more widely spaced intervals. + config RCU_CPU_STALL_VERBOSE bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR" depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU -- cgit v1.2.3-70-g09d2 From 742734eea0cf5314cde5945963ed964be167bd84 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 30 Jun 2010 11:43:52 -0700 Subject: rcu: add boot parameter to suppress RCU CPU stall warning messages Although the RCU CPU stall warning messages are a very good way to alert people to a problem, once alerted, it is sometimes helpful to shut them off in order to avoid obscuring other messages that might be being used to track down the problem. Although you can rebuild the kernel with CONFIG_RCU_CPU_STALL_DETECTOR=n, this is sometimes inconvenient. This commit therefore adds a boot parameter named "rcu_cpu_stall_suppress" that shuts these messages off without requiring a rebuild (though a reboot might be needed for those not brave enough to patch their kernel while it is running). This message-suppression was already in place for the panic case, so this commit need only rename the variable and export it via module_param(). Signed-off-by: Paul E. 
McKenney --- kernel/rcutree.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/rcutree.c b/kernel/rcutree.c index 5b1c3c231ba..f3d5906cbc2 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -143,6 +143,11 @@ module_param(blimit, int, 0); module_param(qhimark, int, 0); module_param(qlowmark, int, 0); +#ifdef CONFIG_RCU_CPU_STALL_DETECTOR +int rcu_cpu_stall_suppress __read_mostly; +module_param(rcu_cpu_stall_suppress, int, 0); +#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ + static void force_quiescent_state(struct rcu_state *rsp, int relaxed); static int rcu_pending(int cpu); @@ -450,7 +455,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) #ifdef CONFIG_RCU_CPU_STALL_DETECTOR -int rcu_cpu_stall_panicking __read_mostly; +int rcu_cpu_stall_suppress __read_mostly; static void record_gp_stall_check_time(struct rcu_state *rsp) { @@ -530,7 +535,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) long delta; struct rcu_node *rnp; - if (rcu_cpu_stall_panicking) + if (rcu_cpu_stall_suppress) return; delta = jiffies - rsp->jiffies_stall; rnp = rdp->mynode; @@ -548,7 +553,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr) { - rcu_cpu_stall_panicking = 1; + rcu_cpu_stall_suppress = 1; return NOTIFY_DONE; } -- cgit v1.2.3-70-g09d2 From 77d8485a8b5416c615b6acd95f01bfcacd7d81ff Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 8 Jul 2010 17:38:59 -0700 Subject: rcu: improve kerneldoc for rcu_read_lock(), call_rcu(), and synchronize_rcu() Make it explicit that new RCU read-side critical sections that start after call_rcu() and synchronize_rcu() start might still be running after the end of the relevant grace period. Signed-off-by: Paul E. McKenney Reviewed-by: Josh Triplett --- include/linux/rcupdate.h | 16 +++++++++------- kernel/rcutree_plugin.h | 8 +++++--- 2 files changed, 14 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index b124bc6a75a..3e1b6625553 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -450,7 +450,7 @@ extern int rcu_my_thread_group_empty(void); * until after the all the other CPUs exit their critical sections. * * Note, however, that RCU callbacks are permitted to run concurrently - * with RCU read-side critical sections. One way that this can happen + * with new RCU read-side critical sections. One way that this can happen * is via the following sequence of events: (1) CPU 0 enters an RCU * read-side critical section, (2) CPU 1 invokes call_rcu() to register * an RCU callback, (3) CPU 0 exits the RCU read-side critical section, @@ -608,11 +608,13 @@ extern void wakeme_after_rcu(struct rcu_head *head); /** * call_rcu() - Queue an RCU callback for invocation after a grace period. * @head: structure to be used for queueing the RCU updates. - * @func: actual update function to be invoked after the grace period + * @func: actual callback function to be invoked after the grace period * - * The update function will be invoked some time after a full grace - * period elapses, in other words after all currently executing RCU - * read-side critical sections have completed. RCU read-side critical + * The callback function will be invoked some time after a full grace + * period elapses, in other words after all pre-existing RCU read-side + * critical sections have completed. 
However, the callback function + * might well execute concurrently with RCU read-side critical sections + * that started after call_rcu() was invoked. RCU read-side critical * sections are delimited by rcu_read_lock() and rcu_read_unlock(), * and may be nested. */ @@ -622,9 +624,9 @@ extern void call_rcu(struct rcu_head *head, /** * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period. * @head: structure to be used for queueing the RCU updates. - * @func: actual update function to be invoked after the grace period + * @func: actual callback function to be invoked after the grace period * - * The update function will be invoked some time after a full grace + * The callback function will be invoked some time after a full grace * period elapses, in other words after all currently executing RCU * read-side critical sections have completed. call_rcu_bh() assumes * that the read-side critical sections end on completion of a softirq diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index 9906f85c778..63bb7714fde 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h @@ -546,9 +546,11 @@ EXPORT_SYMBOL_GPL(call_rcu); * * Control will return to the caller some time after a full grace * period has elapsed, in other words after all currently executing RCU - * read-side critical sections have completed. RCU read-side critical - * sections are delimited by rcu_read_lock() and rcu_read_unlock(), - * and may be nested. + * read-side critical sections have completed. Note, however, that + * upon return from synchronize_rcu(), the caller might well be executing + * concurrently with new RCU read-side critical sections that began while + * synchronize_rcu() was waiting. RCU read-side critical sections are + * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested. */ void synchronize_rcu(void) { -- cgit v1.2.3-70-g09d2 From f2e0dd7090eddef427ab9d9f81de122244cded51 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 14 Jul 2010 14:38:30 -0700 Subject: rcu: allow RCU CPU stall warning messages to be controlled in /sys Set the permissions of the rcu_cpu_stall_suppress to 644 to enable RCU CPU stall warnings to be enabled and disabled at runtime via sysfs. Suggested-by: Josh Triplett Signed-off-by: Paul E. McKenney --- kernel/rcutree.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/rcutree.c b/kernel/rcutree.c index f3d5906cbc2..5d910beefff 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -145,7 +145,7 @@ module_param(qlowmark, int, 0); #ifdef CONFIG_RCU_CPU_STALL_DETECTOR int rcu_cpu_stall_suppress __read_mostly; -module_param(rcu_cpu_stall_suppress, int, 0); +module_param(rcu_cpu_stall_suppress, int, 0644); #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ static void force_quiescent_state(struct rcu_state *rsp, int relaxed); -- cgit v1.2.3-70-g09d2 From 910b1b7e19a292ff685001caf1bf1a9775b771a1 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 21 Jul 2010 08:05:56 -0700 Subject: rcu: Allow RCU CPU stall warnings to be off at boot, but manually enablable Currently, if RCU CPU stall warnings are enabled, they are enabled immediately upon boot. They can be manually disabled via /sys (and also re-enabled via /sys), and are automatically disabled upon panic. However, some users need RCU CPU stalls to be disabled at boot time, but to be enabled without rebuilding/rebooting. 
For example, someone running a real-time application in production might not want the additional latency of RCU CPU stall detection in normal operation, but might need to enable it at any point for fault isolation purposes. This commit therefore provides a new CONFIG_RCU_CPU_STALL_DETECTOR_RUNNABLE kernel configuration parameter that maintains the current behavior (enable at boot) by default, but allows a kernel to be configured with RCU CPU stall detection built into the kernel, but disabled at boot time. Requested-by: Clark Williams Requested-by: John Kacur Signed-off-by: Paul E. McKenney --- kernel/rcutree.c | 2 +- kernel/rcutree.h | 6 ++++++ lib/Kconfig.debug | 13 +++++++++++++ 3 files changed, 20 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/rcutree.c b/kernel/rcutree.c index 5d910beefff..5aab7dabd0d 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -144,7 +144,7 @@ module_param(qhimark, int, 0); module_param(qlowmark, int, 0); #ifdef CONFIG_RCU_CPU_STALL_DETECTOR -int rcu_cpu_stall_suppress __read_mostly; +int rcu_cpu_stall_suppress __read_mostly = RCU_CPU_STALL_SUPPRESS_INIT; module_param(rcu_cpu_stall_suppress, int, 0644); #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ diff --git a/kernel/rcutree.h b/kernel/rcutree.h index 183ebf40531..bb4d08695c4 100644 --- a/kernel/rcutree.h +++ b/kernel/rcutree.h @@ -264,6 +264,12 @@ struct rcu_data { /* scheduling clock irq */ /* before ratting on them. */ +#ifdef CONFIG_RCU_CPU_STALL_DETECTOR_RUNNABLE +#define RCU_CPU_STALL_SUPPRESS_INIT 0 +#else +#define RCU_CPU_STALL_SUPPRESS_INIT 1 +#endif + #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index b5005b76210..52c2172dff1 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -856,6 +856,19 @@ config RCU_CPU_STALL_TIMEOUT RCU grace period persists, additional CPU stall warnings are printed at more widely spaced intervals. +config RCU_CPU_STALL_DETECTOR_RUNNABLE + bool "RCU CPU stall checking starts automatically at boot" + depends on RCU_CPU_STALL_DETECTOR + default y + help + If set, start checking for RCU CPU stalls immediately on + boot. Otherwise, RCU CPU stall checking must be manually + enabled. + + Say Y if you are unsure. + + Say N if you wish to suppress RCU CPU stall checking during boot. + config RCU_CPU_STALL_VERBOSE bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR" depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU -- cgit v1.2.3-70-g09d2 From 277b199800ac90811ac86d215063df1984f51619 Mon Sep 17 00:00:00 2001 From: Lin Ming Date: Fri, 20 Aug 2010 11:03:51 +0800 Subject: lockup_detector: Make callback function static watchdog_overflow_callback() is only used in kernel/watchdog.c. 
Signed-off-by: Lin Ming Cc: Peter Zijlstra Cc: Don Zickus LKML-Reference: <1282273431.16443.32.camel@minggr.sh.intel.com> Signed-off-by: Ingo Molnar --- kernel/watchdog.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 613bc1f0461..b60e2a869bb 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -202,7 +202,7 @@ static struct perf_event_attr wd_hw_attr = { }; /* Callback function for perf event subsystem */ -void watchdog_overflow_callback(struct perf_event *event, int nmi, +static void watchdog_overflow_callback(struct perf_event *event, int nmi, struct perf_sample_data *data, struct pt_regs *regs) { -- cgit v1.2.3-70-g09d2 From a57eb940d130477a799dfb24a570ee04979c0f7f Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 29 Jun 2010 16:49:16 -0700 Subject: rcu: Add a TINY_PREEMPT_RCU MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Implement a small-memory-footprint uniprocessor-only implementation of preemptible RCU. This implementation uses but a single blocked-tasks list rather than the combinatorial number used per leaf rcu_node by TREE_PREEMPT_RCU, which reduces memory consumption and greatly simplifies processing. This version also takes advantage of uniprocessor execution to accelerate grace periods in the case where there are no readers. The general design is otherwise broadly similar to that of TREE_PREEMPT_RCU. This implementation is a step towards having RCU implementation driven off of the SMP and PREEMPT kernel configuration variables, which can happen once this implementation has accumulated sufficient experience. Removed ACCESS_ONCE() from __rcu_read_unlock() and added barrier() as suggested by Steve Rostedt in order to avoid the compiler-reordering issue noted by Mathieu Desnoyers (http://lkml.org/lkml/2010/8/16/183). As can be seen below, CONFIG_TINY_PREEMPT_RCU represents almost 5Kbyte savings compared to CONFIG_TREE_PREEMPT_RCU. Of course, for non-real-time workloads, CONFIG_TINY_RCU is even better. CONFIG_TREE_PREEMPT_RCU text data bss dec filename 13 0 0 13 kernel/rcupdate.o 6170 825 28 7023 kernel/rcutree.o ---- 7026 Total CONFIG_TINY_PREEMPT_RCU text data bss dec filename 13 0 0 13 kernel/rcupdate.o 2081 81 8 2170 kernel/rcutiny.o ---- 2183 Total CONFIG_TINY_RCU (non-preemptible) text data bss dec filename 13 0 0 13 kernel/rcupdate.o 719 25 0 744 kernel/rcutiny.o --- 757 Total Requested-by: Loïc Minier Signed-off-by: Paul E. 
McKenney --- include/linux/hardirq.h | 2 +- include/linux/init_task.h | 10 +- include/linux/rcupdate.h | 3 +- include/linux/rcutiny.h | 126 +++++++--- include/linux/rcutree.h | 2 + include/linux/sched.h | 10 +- init/Kconfig | 16 +- kernel/Makefile | 1 + kernel/rcutiny.c | 33 ++- kernel/rcutiny_plugin.h | 582 +++++++++++++++++++++++++++++++++++++++++++++- 10 files changed, 717 insertions(+), 68 deletions(-) (limited to 'kernel') diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index d5b387669da..1f4517d55b1 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -139,7 +139,7 @@ static inline void account_system_vtime(struct task_struct *tsk) #endif #if defined(CONFIG_NO_HZ) -#if defined(CONFIG_TINY_RCU) +#if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU) extern void rcu_enter_nohz(void); extern void rcu_exit_nohz(void); diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 6460fc65ed6..2fea6c8ef6b 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -82,11 +82,17 @@ extern struct group_info init_groups; # define CAP_INIT_BSET CAP_FULL_SET #ifdef CONFIG_TREE_PREEMPT_RCU +#define INIT_TASK_RCU_TREE_PREEMPT() \ + .rcu_blocked_node = NULL, +#else +#define INIT_TASK_RCU_TREE_PREEMPT(tsk) +#endif +#ifdef CONFIG_PREEMPT_RCU #define INIT_TASK_RCU_PREEMPT(tsk) \ .rcu_read_lock_nesting = 0, \ .rcu_read_unlock_special = 0, \ - .rcu_blocked_node = NULL, \ - .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), + .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \ + INIT_TASK_RCU_TREE_PREEMPT() #else #define INIT_TASK_RCU_PREEMPT(tsk) #endif diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 27b44b3e302..24b89664938 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -58,7 +58,6 @@ struct rcu_head { }; /* Exported common interfaces */ -extern void rcu_barrier(void); extern void rcu_barrier_bh(void); extern void rcu_barrier_sched(void); extern void synchronize_sched_expedited(void); @@ -69,7 +68,7 @@ extern void rcu_init(void); #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) #include -#elif defined(CONFIG_TINY_RCU) +#elif defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU) #include #else #error "Unknown RCU implementation specified to kernel configuration" diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index e2e893144a8..4cc5eba4161 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -29,66 +29,51 @@ void rcu_sched_qs(int cpu); void rcu_bh_qs(int cpu); -static inline void rcu_note_context_switch(int cpu) -{ - rcu_sched_qs(cpu); -} +#ifdef CONFIG_TINY_RCU #define __rcu_read_lock() preempt_disable() #define __rcu_read_unlock() preempt_enable() +#else /* #ifdef CONFIG_TINY_RCU */ +void __rcu_read_lock(void); +void __rcu_read_unlock(void); +#endif /* #else #ifdef CONFIG_TINY_RCU */ #define __rcu_read_lock_bh() local_bh_disable() #define __rcu_read_unlock_bh() local_bh_enable() -#define call_rcu_sched call_rcu +extern void call_rcu_sched(struct rcu_head *head, + void (*func)(struct rcu_head *rcu)); #define rcu_init_sched() do { } while (0) -extern void rcu_check_callbacks(int cpu, int user); -static inline int rcu_needs_cpu(int cpu) -{ - return 0; -} +extern void synchronize_sched(void); -/* - * Return the number of grace periods. - */ -static inline long rcu_batches_completed(void) -{ - return 0; -} +#ifdef CONFIG_TINY_RCU -/* - * Return the number of bottom-half grace periods. 
- */ -static inline long rcu_batches_completed_bh(void) -{ - return 0; -} +#define call_rcu call_rcu_sched -static inline void rcu_force_quiescent_state(void) +static inline void synchronize_rcu(void) { + synchronize_sched(); } -static inline void rcu_bh_force_quiescent_state(void) +static inline void synchronize_rcu_expedited(void) { + synchronize_sched(); /* Only one CPU, so pretty fast anyway!!! */ } -static inline void rcu_sched_force_quiescent_state(void) +static inline void rcu_barrier(void) { + rcu_barrier_sched(); /* Only one CPU, so only one list of callbacks! */ } -extern void synchronize_sched(void); +#else /* #ifdef CONFIG_TINY_RCU */ -static inline void synchronize_rcu(void) -{ - synchronize_sched(); -} +void synchronize_rcu(void); +void rcu_barrier(void); +void synchronize_rcu_expedited(void); -static inline void synchronize_rcu_bh(void) -{ - synchronize_sched(); -} +#endif /* #else #ifdef CONFIG_TINY_RCU */ -static inline void synchronize_rcu_expedited(void) +static inline void synchronize_rcu_bh(void) { synchronize_sched(); } @@ -117,15 +102,82 @@ static inline void rcu_exit_nohz(void) #endif /* #else #ifdef CONFIG_NO_HZ */ +#ifdef CONFIG_TINY_RCU + +static inline void rcu_preempt_note_context_switch(void) +{ +} + static inline void exit_rcu(void) { } +static inline int rcu_needs_cpu(int cpu) +{ + return 0; +} + static inline int rcu_preempt_depth(void) { return 0; } +#else /* #ifdef CONFIG_TINY_RCU */ + +void rcu_preempt_note_context_switch(void); +extern void exit_rcu(void); +int rcu_preempt_needs_cpu(void); + +static inline int rcu_needs_cpu(int cpu) +{ + return rcu_preempt_needs_cpu(); +} + +/* + * Defined as macro as it is a very low level header + * included from areas that don't even know about current + * FIXME: combine with include/linux/rcutree.h into rcupdate.h. + */ +#define rcu_preempt_depth() (current->rcu_read_lock_nesting) + +#endif /* #else #ifdef CONFIG_TINY_RCU */ + +static inline void rcu_note_context_switch(int cpu) +{ + rcu_sched_qs(cpu); + rcu_preempt_note_context_switch(); +} + +extern void rcu_check_callbacks(int cpu, int user); + +/* + * Return the number of grace periods. + */ +static inline long rcu_batches_completed(void) +{ + return 0; +} + +/* + * Return the number of bottom-half grace periods. 
+ */ +static inline long rcu_batches_completed_bh(void) +{ + return 0; +} + +static inline void rcu_force_quiescent_state(void) +{ +} + +static inline void rcu_bh_force_quiescent_state(void) +{ +} + +static inline void rcu_sched_force_quiescent_state(void) +{ +} + #ifdef CONFIG_DEBUG_LOCK_ALLOC extern int rcu_scheduler_active __read_mostly; diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index c0ed1c056f2..c13b85dd22b 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -95,6 +95,8 @@ static inline void synchronize_rcu_bh_expedited(void) synchronize_sched_expedited(); } +extern void rcu_barrier(void); + extern void rcu_check_callbacks(int cpu, int user); extern long rcu_batches_completed(void); diff --git a/include/linux/sched.h b/include/linux/sched.h index 2c756666c11..e18473f0eb7 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1202,11 +1202,13 @@ struct task_struct { unsigned int policy; cpumask_t cpus_allowed; -#ifdef CONFIG_TREE_PREEMPT_RCU +#ifdef CONFIG_PREEMPT_RCU int rcu_read_lock_nesting; char rcu_read_unlock_special; - struct rcu_node *rcu_blocked_node; struct list_head rcu_node_entry; +#endif /* #ifdef CONFIG_PREEMPT_RCU */ +#ifdef CONFIG_TREE_PREEMPT_RCU + struct rcu_node *rcu_blocked_node; #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) @@ -1740,7 +1742,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) #define used_math() tsk_used_math(current) -#ifdef CONFIG_TREE_PREEMPT_RCU +#ifdef CONFIG_PREEMPT_RCU #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */ #define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */ @@ -1749,7 +1751,9 @@ static inline void rcu_copy_process(struct task_struct *p) { p->rcu_read_lock_nesting = 0; p->rcu_read_unlock_special = 0; +#ifdef CONFIG_TREE_PREEMPT_RCU p->rcu_blocked_node = NULL; +#endif INIT_LIST_HEAD(&p->rcu_node_entry); } diff --git a/init/Kconfig b/init/Kconfig index dbc08baad77..a619a1ac7f4 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -348,7 +348,7 @@ config TREE_RCU smaller systems. config TREE_PREEMPT_RCU - bool "Preemptable tree-based hierarchical RCU" + bool "Preemptible tree-based hierarchical RCU" depends on PREEMPT help This option selects the RCU implementation that is @@ -366,8 +366,22 @@ config TINY_RCU is not required. This option greatly reduces the memory footprint of RCU. +config TINY_PREEMPT_RCU + bool "Preemptible UP-only small-memory-footprint RCU" + depends on !SMP && PREEMPT + help + This option selects the RCU implementation that is designed + for real-time UP systems. This option greatly reduces the + memory footprint of RCU. + endchoice +config PREEMPT_RCU + def_bool ( TREE_PREEMPT_RCU || TINY_PREEMPT_RCU ) + help + This option enables preemptible-RCU code that is common between + the TREE_PREEMPT_RCU and TINY_PREEMPT_RCU implementations. 
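To illustrate the read-side semantics these preemptible-RCU implementations share, consider a minimal sketch in which gp is an invented __rcu pointer and do_something() an invented function: read-side critical sections may nest, and the reader may be preempted without ending the critical section:

	rcu_read_lock();		/* rcu_preempt_depth() == 1 */
	rcu_read_lock();		/* nesting is legal: depth == 2 */
	do_something(rcu_dereference(gp));
	/* preemption here does not end the critical section */
	rcu_read_unlock();
	rcu_read_unlock();		/* depth returns to 0 */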
+ config RCU_TRACE bool "Enable tracing for RCU" depends on TREE_RCU || TREE_PREEMPT_RCU diff --git a/kernel/Makefile b/kernel/Makefile index 0b72d1a74be..17046b6e7c9 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -86,6 +86,7 @@ obj-$(CONFIG_TREE_RCU) += rcutree.o obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o obj-$(CONFIG_TINY_RCU) += rcutiny.o +obj-$(CONFIG_TINY_PREEMPT_RCU) += rcutiny.o obj-$(CONFIG_RELAY) += relay.o obj-$(CONFIG_SYSCTL) += utsname_sysctl.o obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c index 196ec02f8be..d806735342a 100644 --- a/kernel/rcutiny.c +++ b/kernel/rcutiny.c @@ -59,6 +59,14 @@ int rcu_scheduler_active __read_mostly; EXPORT_SYMBOL_GPL(rcu_scheduler_active); #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ +/* Forward declarations for rcutiny_plugin.h. */ +static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp); +static void __call_rcu(struct rcu_head *head, + void (*func)(struct rcu_head *rcu), + struct rcu_ctrlblk *rcp); + +#include "rcutiny_plugin.h" + #ifdef CONFIG_NO_HZ static long rcu_dynticks_nesting = 1; @@ -140,6 +148,7 @@ void rcu_check_callbacks(int cpu, int user) rcu_sched_qs(cpu); else if (!in_softirq()) rcu_bh_qs(cpu); + rcu_preempt_check_callbacks(); } /* @@ -162,6 +171,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp) *rcp->donetail = NULL; if (rcp->curtail == rcp->donetail) rcp->curtail = &rcp->rcucblist; + rcu_preempt_remove_callbacks(rcp); rcp->donetail = &rcp->rcucblist; local_irq_restore(flags); @@ -182,6 +192,7 @@ static void rcu_process_callbacks(struct softirq_action *unused) { __rcu_process_callbacks(&rcu_sched_ctrlblk); __rcu_process_callbacks(&rcu_bh_ctrlblk); + rcu_preempt_process_callbacks(); } /* @@ -223,15 +234,15 @@ static void __call_rcu(struct rcu_head *head, } /* - * Post an RCU callback to be invoked after the end of an RCU grace + * Post an RCU callback to be invoked after the end of an RCU-sched grace * period. But since we have but one CPU, that would be after any * quiescent state. */ -void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) +void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) { __call_rcu(head, func, &rcu_sched_ctrlblk); } -EXPORT_SYMBOL_GPL(call_rcu); +EXPORT_SYMBOL_GPL(call_rcu_sched); /* * Post an RCU bottom-half callback to be invoked after any subsequent @@ -243,20 +254,6 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) } EXPORT_SYMBOL_GPL(call_rcu_bh); -void rcu_barrier(void) -{ - struct rcu_synchronize rcu; - - init_rcu_head_on_stack(&rcu.head); - init_completion(&rcu.completion); - /* Will wake me after RCU finished. */ - call_rcu(&rcu.head, wakeme_after_rcu); - /* Wait for it. */ - wait_for_completion(&rcu.completion); - destroy_rcu_head_on_stack(&rcu.head); -} -EXPORT_SYMBOL_GPL(rcu_barrier); - void rcu_barrier_bh(void) { struct rcu_synchronize rcu; @@ -289,5 +286,3 @@ void __init rcu_init(void) { open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); } - -#include "rcutiny_plugin.h" diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h index d223a92bc74..e6bc1b447c6 100644 --- a/kernel/rcutiny_plugin.h +++ b/kernel/rcutiny_plugin.h @@ -1,7 +1,7 @@ /* - * Read-Copy Update mechanism for mutual exclusion (tree-based version) + * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition * Internal non-public definitions that provide either classic - * or preemptable semantics. 
+ * or preemptible semantics. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -17,11 +17,587 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * - * Copyright IBM Corporation, 2009 + * Copyright (c) 2010 Linaro * * Author: Paul E. McKenney */ +#ifdef CONFIG_TINY_PREEMPT_RCU + +#include + +/* FIXME: merge with definitions in kernel/rcutree.h. */ +#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) +#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)) + +/* Global control variables for preemptible RCU. */ +struct rcu_preempt_ctrlblk { + struct rcu_ctrlblk rcb; /* curtail: ->next ptr of last CB for GP. */ + struct rcu_head **nexttail; + /* Tasks blocked in a preemptible RCU */ + /* read-side critical section while a */ + /* preemptible-RCU grace period is in */ + /* progress must wait for a later grace */ + /* period. This pointer points to the */ + /* ->next pointer of the last task that */ + /* must wait for a later grace period, or */ + /* to &->rcb.rcucblist if there is no */ + /* such task. */ + struct list_head blkd_tasks; + /* Tasks blocked in RCU read-side critical */ + /* section. Tasks are placed at the head */ + /* of this list and age towards the tail. */ + struct list_head *gp_tasks; + /* Pointer to the first task blocking the */ + /* current grace period, or NULL if there */ + /* is no such task. */ + struct list_head *exp_tasks; + /* Pointer to first task blocking the */ + /* current expedited grace period, or NULL */ + /* if there is no such task. If there */ + /* is no current expedited grace period, */ + /* then there cannot be any such task. */ + u8 gpnum; /* Current grace period. */ + u8 gpcpu; /* Last grace period blocked by the CPU. */ + u8 completed; /* Last grace period completed. */ + /* If all three are equal, RCU is idle. */ +}; + +static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = { + .rcb.donetail = &rcu_preempt_ctrlblk.rcb.rcucblist, + .rcb.curtail = &rcu_preempt_ctrlblk.rcb.rcucblist, + .nexttail = &rcu_preempt_ctrlblk.rcb.rcucblist, + .blkd_tasks = LIST_HEAD_INIT(rcu_preempt_ctrlblk.blkd_tasks), +}; + +static int rcu_preempted_readers_exp(void); +static void rcu_report_exp_done(void); + +/* + * Return true if the CPU has not yet responded to the current grace period. + */ +static int rcu_cpu_cur_gp(void) +{ + return rcu_preempt_ctrlblk.gpcpu != rcu_preempt_ctrlblk.gpnum; +} + +/* + * Check for a running RCU reader. Because there is only one CPU, + * there can be but one running RCU reader at a time. ;-) + */ +static int rcu_preempt_running_reader(void) +{ + return current->rcu_read_lock_nesting; +} + +/* + * Check for preempted RCU readers blocking any grace period. + * If the caller needs a reliable answer, it must disable hard irqs. + */ +static int rcu_preempt_blocked_readers_any(void) +{ + return !list_empty(&rcu_preempt_ctrlblk.blkd_tasks); +} + +/* + * Check for preempted RCU readers blocking the current grace period. + * If the caller needs a reliable answer, it must disable hard irqs. + */ +static int rcu_preempt_blocked_readers_cgp(void) +{ + return rcu_preempt_ctrlblk.gp_tasks != NULL; +} + +/* + * Return true if another preemptible-RCU grace period is needed. + */ +static int rcu_preempt_needs_another_gp(void) +{ + return *rcu_preempt_ctrlblk.rcb.curtail != NULL; +} + +/* + * Return true if a preemptible-RCU grace period is in progress.
+ * The caller must disable hardirqs. + */ +static int rcu_preempt_gp_in_progress(void) +{ + return rcu_preempt_ctrlblk.completed != rcu_preempt_ctrlblk.gpnum; +} + +/* + * Record a preemptible-RCU quiescent state for the specified CPU. Note + * that this just means that the task currently running on the CPU is + * in a quiescent state. There might be any number of tasks blocked + * while in an RCU read-side critical section. + * + * Unlike the other rcu_*_qs() functions, callers to this function + * must disable irqs in order to protect the assignment to + * ->rcu_read_unlock_special. + * + * Because this is a single-CPU implementation, the only way a grace + * period can end is if the CPU is in a quiescent state. The reason is + * that a blocked preemptible-RCU reader can exit its critical section + * only if the CPU is running it at the time. Therefore, when the + * last task blocking the current grace period exits its RCU read-side + * critical section, neither the CPU nor blocked tasks will be stopping + * the current grace period. (In contrast, SMP implementations + * might have CPUs running in RCU read-side critical sections that + * block later grace periods -- but this is not possible given only + * one CPU.) + */ +static void rcu_preempt_cpu_qs(void) +{ + /* Record both CPU and task as having responded to current GP. */ + rcu_preempt_ctrlblk.gpcpu = rcu_preempt_ctrlblk.gpnum; + current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; + + /* + * If there is no GP, or if blocked readers are still blocking GP, + * then there is nothing more to do. + */ + if (!rcu_preempt_gp_in_progress() || rcu_preempt_blocked_readers_cgp()) + return; + + /* Advance callbacks. */ + rcu_preempt_ctrlblk.completed = rcu_preempt_ctrlblk.gpnum; + rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.rcb.curtail; + rcu_preempt_ctrlblk.rcb.curtail = rcu_preempt_ctrlblk.nexttail; + + /* If there are no blocked readers, next GP is done instantly. */ + if (!rcu_preempt_blocked_readers_any()) + rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.nexttail; + + /* If there are done callbacks, make RCU_SOFTIRQ process them. */ + if (*rcu_preempt_ctrlblk.rcb.donetail != NULL) + raise_softirq(RCU_SOFTIRQ); +} + +/* + * Start a new RCU grace period if warranted. Hard irqs must be disabled. + */ +static void rcu_preempt_start_gp(void) +{ + if (!rcu_preempt_gp_in_progress() && rcu_preempt_needs_another_gp()) { + + /* Official start of GP. */ + rcu_preempt_ctrlblk.gpnum++; + + /* Any blocked RCU readers block new GP. */ + if (rcu_preempt_blocked_readers_any()) + rcu_preempt_ctrlblk.gp_tasks = + rcu_preempt_ctrlblk.blkd_tasks.next; + + /* If there is no running reader, CPU is done with GP. */ + if (!rcu_preempt_running_reader()) + rcu_preempt_cpu_qs(); + } +} + +/* + * We have entered the scheduler, and the current task might soon be + * context-switched away from. If this task is in an RCU read-side + * critical section, we will no longer be able to rely on the CPU to + * record that fact, so we enqueue the task on the blkd_tasks list. + * If the task started after the current grace period began, as recorded + * by ->gpcpu, we enqueue at the beginning of the list. Otherwise + * before the element referenced by ->gp_tasks (or at the tail if + * ->gp_tasks is NULL) and point ->gp_tasks at the newly added element. + * The task will dequeue itself when it exits the outermost enclosing + * RCU read-side critical section. 
Therefore, the current grace period + * cannot be permitted to complete until the ->gp_tasks pointer becomes + * NULL. + * + * Caller must disable preemption. + */ +void rcu_preempt_note_context_switch(void) +{ + struct task_struct *t = current; + unsigned long flags; + + local_irq_save(flags); /* must exclude scheduler_tick(). */ + if (rcu_preempt_running_reader() && + (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) { + + /* Possibly blocking in an RCU read-side critical section. */ + t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED; + + /* + * If this CPU has already checked in, then this task + * will hold up the next grace period rather than the + * current grace period. Queue the task accordingly. + * If the task is queued for the current grace period + * (i.e., this CPU has not yet passed through a quiescent + * state for the current grace period), then as long + * as that task remains queued, the current grace period + * cannot end. + */ + list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks); + if (rcu_cpu_cur_gp()) + rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry; + } + + /* + * Either we were not in an RCU read-side critical section to + * begin with, or we have now recorded that critical section + * globally. Either way, we can now note a quiescent state + * for this CPU. Again, if we were in an RCU read-side critical + * section, and if that critical section was blocking the current + * grace period, then the fact that the task has been enqueued + * means that current grace period continues to be blocked. + */ + rcu_preempt_cpu_qs(); + local_irq_restore(flags); +} + +/* + * Tiny-preemptible RCU implementation for rcu_read_lock(). + * Just increment ->rcu_read_lock_nesting, shared state will be updated + * if we block. + */ +void __rcu_read_lock(void) +{ + current->rcu_read_lock_nesting++; + barrier(); /* needed if we ever invoke rcu_read_lock in rcutiny.c */ +} +EXPORT_SYMBOL_GPL(__rcu_read_lock); + +/* + * Handle special cases during rcu_read_unlock(), such as needing to + * notify RCU core processing or task having blocked during the RCU + * read-side critical section. + */ +static void rcu_read_unlock_special(struct task_struct *t) +{ + int empty; + int empty_exp; + unsigned long flags; + struct list_head *np; + int special; + + /* + * NMI handlers cannot block and cannot safely manipulate state. + * They therefore cannot possibly be special, so just leave. + */ + if (in_nmi()) + return; + + local_irq_save(flags); + + /* + * If RCU core is waiting for this CPU to exit critical section, + * let it know that we have done so. + */ + special = t->rcu_read_unlock_special; + if (special & RCU_READ_UNLOCK_NEED_QS) + rcu_preempt_cpu_qs(); + + /* Hardware IRQ handlers cannot block. */ + if (in_irq()) { + local_irq_restore(flags); + return; + } + + /* Clean up if blocked during RCU read-side critical section. */ + if (special & RCU_READ_UNLOCK_BLOCKED) { + t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED; + + /* + * Remove this task from the ->blkd_tasks list and adjust + * any pointers that might have been referencing it. 
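+ * Concretely, np below is the list successor (or NULL if this + * task was the last one queued), and ->gp_tasks and ->exp_tasks + * are advanced to np if they pointed at this task's entry.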
+ */ + empty = !rcu_preempt_blocked_readers_cgp(); + empty_exp = rcu_preempt_ctrlblk.exp_tasks == NULL; + np = t->rcu_node_entry.next; + if (np == &rcu_preempt_ctrlblk.blkd_tasks) + np = NULL; + list_del(&t->rcu_node_entry); + if (&t->rcu_node_entry == rcu_preempt_ctrlblk.gp_tasks) + rcu_preempt_ctrlblk.gp_tasks = np; + if (&t->rcu_node_entry == rcu_preempt_ctrlblk.exp_tasks) + rcu_preempt_ctrlblk.exp_tasks = np; + INIT_LIST_HEAD(&t->rcu_node_entry); + + /* + * If this was the last task on the current list, and if + * we aren't waiting on the CPU, report the quiescent state + * and start a new grace period if needed. + */ + if (!empty && !rcu_preempt_blocked_readers_cgp()) { + rcu_preempt_cpu_qs(); + rcu_preempt_start_gp(); + } + + /* + * If this was the last task on the expedited lists, + * then we need to wake up the waiting task. + */ + if (!empty_exp && rcu_preempt_ctrlblk.exp_tasks == NULL) + rcu_report_exp_done(); + } + local_irq_restore(flags); +} + +/* + * Tiny-preemptible RCU implementation for rcu_read_unlock(). + * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost + * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then + * invoke rcu_read_unlock_special() to clean up after a context switch + * in an RCU read-side critical section and other special cases. + */ +void __rcu_read_unlock(void) +{ + struct task_struct *t = current; + + barrier(); /* needed if we ever invoke rcu_read_unlock in rcutiny.c */ + --t->rcu_read_lock_nesting; + barrier(); /* decrement before load of ->rcu_read_unlock_special */ + if (t->rcu_read_lock_nesting == 0 && + unlikely(ACCESS_ONCE(t->rcu_read_unlock_special))) + rcu_read_unlock_special(t); +#ifdef CONFIG_PROVE_LOCKING + WARN_ON_ONCE(t->rcu_read_lock_nesting < 0); +#endif /* #ifdef CONFIG_PROVE_LOCKING */ +} +EXPORT_SYMBOL_GPL(__rcu_read_unlock); + +/* + * Check for a quiescent state from the current CPU. When a task blocks, + * the task is recorded in the rcu_preempt_ctrlblk structure, which is + * checked elsewhere. This is called from the scheduling-clock interrupt. + * + * Caller must disable hard irqs. + */ +static void rcu_preempt_check_callbacks(void) +{ + struct task_struct *t = current; + + if (!rcu_preempt_running_reader() && rcu_preempt_gp_in_progress()) + rcu_preempt_cpu_qs(); + if (&rcu_preempt_ctrlblk.rcb.rcucblist != + rcu_preempt_ctrlblk.rcb.donetail) + raise_softirq(RCU_SOFTIRQ); + if (rcu_preempt_gp_in_progress() && rcu_preempt_running_reader()) + t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS; +} + +/* + * TINY_PREEMPT_RCU has an extra callback-list tail pointer to + * update, so this is invoked from __rcu_process_callbacks() to + * handle that case. Of course, it is invoked for all flavors of + * RCU, but RCU callbacks can appear only on one of the lists, and + * neither ->nexttail nor ->donetail can possibly be NULL, so there + * is no need for an explicit check. + */ +static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp) +{ + if (rcu_preempt_ctrlblk.nexttail == rcp->donetail) + rcu_preempt_ctrlblk.nexttail = &rcp->rcucblist; +} + +/* + * Process callbacks for preemptible RCU. + */ +static void rcu_preempt_process_callbacks(void) +{ + __rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb); +} + +/* + * Queue a preemptible-RCU callback for invocation after a grace period.
+ */ +void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) +{ + unsigned long flags; + + debug_rcu_head_queue(head); + head->func = func; + head->next = NULL; + + local_irq_save(flags); + *rcu_preempt_ctrlblk.nexttail = head; + rcu_preempt_ctrlblk.nexttail = &head->next; + rcu_preempt_start_gp(); /* checks to see if GP needed. */ + local_irq_restore(flags); +} +EXPORT_SYMBOL_GPL(call_rcu); + +void rcu_barrier(void) +{ + struct rcu_synchronize rcu; + + init_rcu_head_on_stack(&rcu.head); + init_completion(&rcu.completion); + /* Will wake me after RCU finished. */ + call_rcu(&rcu.head, wakeme_after_rcu); + /* Wait for it. */ + wait_for_completion(&rcu.completion); + destroy_rcu_head_on_stack(&rcu.head); +} +EXPORT_SYMBOL_GPL(rcu_barrier); + +/* + * synchronize_rcu - wait until a grace period has elapsed. + * + * Control will return to the caller some time after a full grace + * period has elapsed, in other words after all currently executing RCU + * read-side critical sections have completed. RCU read-side critical + * sections are delimited by rcu_read_lock() and rcu_read_unlock(), + * and may be nested. + */ +void synchronize_rcu(void) +{ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + if (!rcu_scheduler_active) + return; +#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + + WARN_ON_ONCE(rcu_preempt_running_reader()); + if (!rcu_preempt_blocked_readers_any()) + return; + + /* Once we get past the fastpath checks, same code as rcu_barrier(). */ + rcu_barrier(); +} +EXPORT_SYMBOL_GPL(synchronize_rcu); + +static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq); +static unsigned long sync_rcu_preempt_exp_count; +static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex); + +/* + * Return non-zero if there are any tasks in RCU read-side critical + * sections blocking the current preemptible-RCU expedited grace period. + * If there is no preemptible-RCU expedited grace period currently in + * progress, returns zero unconditionally. + */ +static int rcu_preempted_readers_exp(void) +{ + return rcu_preempt_ctrlblk.exp_tasks != NULL; +} + +/* + * Report the exit from RCU read-side critical section for the last task + * that queued itself during or before the current expedited preemptible-RCU + * grace period. + */ +static void rcu_report_exp_done(void) +{ + wake_up(&sync_rcu_preempt_exp_wq); +} + +/* + * Wait for an rcu-preempt grace period, but expedite it. The basic idea + * is to rely on the fact that there is but one CPU, and that it is + * illegal for a task to invoke synchronize_rcu_expedited() while in a + * preemptible-RCU read-side critical section. Therefore, any such + * critical sections must correspond to blocked tasks, which must therefore + * be on the ->blkd_tasks list. So just record the current head of the + * list in the ->exp_tasks pointer, and wait for all tasks including and + * after the task pointed to by ->exp_tasks to drain. + */ +void synchronize_rcu_expedited(void) +{ + unsigned long flags; + struct rcu_preempt_ctrlblk *rpcp = &rcu_preempt_ctrlblk; + unsigned long snap; + + barrier(); /* ensure prior action seen before grace period. */ + + WARN_ON_ONCE(rcu_preempt_running_reader()); + + /* + * Acquire lock so that there is only one preemptible RCU grace + * period in flight. Of course, if someone does the expedited + * grace period for us while we are acquiring the lock, just leave. + */ + snap = sync_rcu_preempt_exp_count + 1; + mutex_lock(&sync_rcu_preempt_exp_mutex); + if (ULONG_CMP_LT(snap, sync_rcu_preempt_exp_count)) + goto unlock_mb_ret; /* Others did our work for us.
*/ + + local_irq_save(flags); + + /* + * All RCU readers have to already be on blkd_tasks because + * we cannot legally be executing in an RCU read-side critical + * section. + */ + + /* Snapshot current head of ->blkd_tasks list. */ + rpcp->exp_tasks = rpcp->blkd_tasks.next; + if (rpcp->exp_tasks == &rpcp->blkd_tasks) + rpcp->exp_tasks = NULL; + local_irq_restore(flags); + + /* Wait for tail of ->blkd_tasks list to drain. */ + if (rcu_preempted_readers_exp()) + wait_event(sync_rcu_preempt_exp_wq, + !rcu_preempted_readers_exp()); + + /* Clean up and exit. */ + barrier(); /* ensure expedited GP seen before counter increment. */ + sync_rcu_preempt_exp_count++; +unlock_mb_ret: + mutex_unlock(&sync_rcu_preempt_exp_mutex); + barrier(); /* ensure subsequent action seen after grace period. */ +} +EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); + +/* + * Does preemptible RCU need the CPU to stay out of dynticks mode? + */ +int rcu_preempt_needs_cpu(void) +{ + if (!rcu_preempt_running_reader()) + rcu_preempt_cpu_qs(); + return rcu_preempt_ctrlblk.rcb.rcucblist != NULL; +} + +/* + * Check for a task exiting while in a preemptible-RCU read-side + * critical section, clean up if so. No need to issue warnings, + * as debug_check_no_locks_held() already does this if lockdep + * is enabled. + */ +void exit_rcu(void) +{ + struct task_struct *t = current; + + if (t->rcu_read_lock_nesting == 0) + return; + t->rcu_read_lock_nesting = 1; + rcu_read_unlock(); +} + +#else /* #ifdef CONFIG_TINY_PREEMPT_RCU */ + +/* + * Because preemptible RCU does not exist, it never has any callbacks + * to check. + */ +static void rcu_preempt_check_callbacks(void) +{ +} + +/* + * Because preemptible RCU does not exist, it never has any callbacks + * to remove. + */ +static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp) +{ +} + +/* + * Because preemptible RCU does not exist, it never has any callbacks + * to process. + */ +static void rcu_preempt_process_callbacks(void) +{ +} + +#endif /* #else #ifdef CONFIG_TINY_PREEMPT_RCU */ + #ifdef CONFIG_DEBUG_LOCK_ALLOC #include -- cgit v1.2.3-70-g09d2 From 8cdd32a918350430483751feaae1c19cef816f69 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 9 Aug 2010 14:23:03 -0700 Subject: rcu: refer RCU CPU stall-warning victims to stallwarn.txt There is some documentation on RCU CPU stall warnings contained in Documentation/RCU/stallwarn.txt, but it will not be apparent to someone who runs into such a warning while under time pressure. This commit therefore adds comments preceding the printk()s pointing out the location of this documentation. Signed-off-by: Paul E. McKenney --- kernel/rcutree.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/rcutree.c b/kernel/rcutree.c index 5aab7dabd0d..ff214118e4b 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -487,8 +487,11 @@ static void print_other_cpu_stall(struct rcu_state *rsp) rcu_print_task_stall(rnp); raw_spin_unlock_irqrestore(&rnp->lock, flags); - /* OK, time to rat on our buddy... - + /* + * OK, time to rat on our buddy... + * See Documentation/RCU/stallwarn.txt for info on how to debug + * RCU CPU stall warnings. + */ printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks: {", rsp->name); rcu_for_each_leaf_node(rsp, rnp) { @@ -517,6 +520,11 @@ static void print_cpu_stall(struct rcu_state *rsp) unsigned long flags; struct rcu_node *rnp = rcu_get_root(rsp); + /* + * OK, time to rat on ourselves...
+ * See Documentation/RCU/stallwarn.txt for info on how to debug + * RCU CPU stall warnings. + */ printk(KERN_ERR "INFO: %s detected stall on CPU %d (t=%lu jiffies)\n", rsp->name, smp_processor_id(), jiffies - rsp->gp_start); trigger_all_cpu_backtrace(); -- cgit v1.2.3-70-g09d2 From 53d84e004d5e8c018be395c4330dc72fd60bd13e Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 10 Aug 2010 14:28:53 -0700 Subject: rcu: permit suppressing current grace period's CPU stall warnings When using a kernel debugger, a long sojourn in the debugger can get you lots of RCU CPU stall warnings once you resume. This might not be helpful, especially if you are using the system console. This patch therefore allows RCU CPU stall warnings to be suppressed, but only for the duration of the current set of grace periods. This differs from Jason's original patch in that it adds support for tiny RCU and preemptible RCU, and uses a slightly different method for suppressing the RCU CPU stall warning messages. Signed-off-by: Jason Wessel Signed-off-by: Paul E. McKenney Tested-by: Jason Wessel --- include/linux/rcutiny.h | 4 ++++ include/linux/rcutree.h | 1 + kernel/rcutree.c | 20 ++++++++++++++++++++ kernel/rcutree.h | 1 + kernel/rcutree_plugin.h | 18 ++++++++++++++++++ 5 files changed, 44 insertions(+) (limited to 'kernel') diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 4cc5eba4161..3fa179784e1 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -178,6 +178,10 @@ static inline void rcu_sched_force_quiescent_state(void) { } +static inline void rcu_cpu_stall_reset(void) +{ +} + #ifdef CONFIG_DEBUG_LOCK_ALLOC extern int rcu_scheduler_active __read_mostly; diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index c13b85dd22b..0726809497b 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -36,6 +36,7 @@ extern void rcu_sched_qs(int cpu); extern void rcu_bh_qs(int cpu); extern void rcu_note_context_switch(int cpu); extern int rcu_needs_cpu(int cpu); +extern void rcu_cpu_stall_reset(void); #ifdef CONFIG_TREE_PREEMPT_RCU diff --git a/kernel/rcutree.c b/kernel/rcutree.c index ff214118e4b..42140a860bb 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -565,6 +565,22 @@ static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr) return NOTIFY_DONE; } +/** + * rcu_cpu_stall_reset - prevent further stall warnings in current grace period + * + * Set the stall-warning timeout way off into the future, thus preventing + * any RCU CPU stall-warning messages from appearing in the current set of + * RCU grace periods. + * + * The caller must disable hard irqs. 
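+ * The motivating caller is a kernel-debugger resume path, as noted + * in the changelog above.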
+ */ +void rcu_cpu_stall_reset(void) +{ + rcu_sched_state.jiffies_stall = jiffies + ULONG_MAX / 2; + rcu_bh_state.jiffies_stall = jiffies + ULONG_MAX / 2; + rcu_preempt_stall_reset(); +} + static struct notifier_block rcu_panic_block = { .notifier_call = rcu_panic, }; @@ -584,6 +600,10 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) { } +void rcu_cpu_stall_reset(void) +{ +} + static void __init check_cpu_stall_init(void) { } diff --git a/kernel/rcutree.h b/kernel/rcutree.h index bb4d08695c4..7abd439a757 100644 --- a/kernel/rcutree.h +++ b/kernel/rcutree.h @@ -372,6 +372,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, #ifdef CONFIG_RCU_CPU_STALL_DETECTOR static void rcu_print_detail_task_stall(struct rcu_state *rsp); static void rcu_print_task_stall(struct rcu_node *rnp); +static void rcu_preempt_stall_reset(void); #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); #ifdef CONFIG_HOTPLUG_CPU diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index 63bb7714fde..561410f70d4 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h @@ -417,6 +417,16 @@ static void rcu_print_task_stall(struct rcu_node *rnp) } } +/* + * Suppress preemptible RCU's CPU stall warnings by pushing the + * time of the next stall-warning message comfortably far into the + * future. + */ +static void rcu_preempt_stall_reset(void) +{ + rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2; +} + #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ /* @@ -867,6 +877,14 @@ static void rcu_print_task_stall(struct rcu_node *rnp) { } +/* + * Because preemptible RCU does not exist, there is no need to suppress + * its CPU stall warnings. + */ +static void rcu_preempt_stall_reset(void) +{ +} + #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ /* -- cgit v1.2.3-70-g09d2 From a3dc3fb161f9b4066c0fce22db72638af8baf83b Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 13 Aug 2010 16:16:25 -0700 Subject: rcu: repair code-duplication FIXMEs Combine the duplicate definitions of ULONG_CMP_GE(), ULONG_CMP_LT(), and rcu_preempt_depth() into include/linux/rcupdate.h. Signed-off-by: Paul E. McKenney --- include/linux/rcupdate.h | 15 +++++++++++++++ include/linux/rcutiny.h | 7 ------- include/linux/rcutree.h | 6 ------ kernel/rcutiny_plugin.h | 4 ---- kernel/rcutree.h | 3 --- 5 files changed, 15 insertions(+), 20 deletions(-) (limited to 'kernel') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index d7af96ef6fc..325bad7bbca 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -47,6 +47,9 @@ extern int rcutorture_runnable; /* for sysctl */ #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ +#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) +#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)) + /** * struct rcu_head - callback structure for use with RCU * @next: next update requests in a list @@ -66,6 +69,18 @@ extern int sched_expedited_torture_stats(char *page); /* Internal to kernel */ extern void rcu_init(void); +#ifdef CONFIG_PREEMPT_RCU + +/* + * Defined as a macro as it is a very low level header included from + * areas that don't even know about current. This gives the rcu_read_lock() + * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other + * types of kernel builds, the rcu_read_lock() nesting depth is unknowable. 
+ */ +#define rcu_preempt_depth() (current->rcu_read_lock_nesting) + +#endif /* #ifdef CONFIG_PREEMPT_RCU */ + #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) #include #elif defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU) diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 3fa179784e1..c6b11dc5ba0 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -133,13 +133,6 @@ static inline int rcu_needs_cpu(int cpu) return rcu_preempt_needs_cpu(); } -/* - * Defined as macro as it is a very low level header - * included from areas that don't even know about current - * FIXME: combine with include/linux/rcutree.h into rcupdate.h. - */ -#define rcu_preempt_depth() (current->rcu_read_lock_nesting) - #endif /* #else #ifdef CONFIG_TINY_RCU */ static inline void rcu_note_context_switch(int cpu) diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 0726809497b..54a20c11f98 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -45,12 +45,6 @@ extern void __rcu_read_unlock(void); extern void synchronize_rcu(void); extern void exit_rcu(void); -/* - * Defined as macro as it is a very low level header - * included from areas that don't even know about current - */ -#define rcu_preempt_depth() (current->rcu_read_lock_nesting) - #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ static inline void __rcu_read_lock(void) diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h index e6bc1b447c6..c5bea1137dc 100644 --- a/kernel/rcutiny_plugin.h +++ b/kernel/rcutiny_plugin.h @@ -26,10 +26,6 @@ #include -/* FIXME: merge with definitions in kernel/rcutree.h. */ -#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) -#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)) - /* Global control variables for preemptible RCU. */ struct rcu_preempt_ctrlblk { struct rcu_ctrlblk rcb; /* curtail: ->next ptr of last CB for GP. */ diff --git a/kernel/rcutree.h b/kernel/rcutree.h index 7abd439a757..7918ba61873 100644 --- a/kernel/rcutree.h +++ b/kernel/rcutree.h @@ -272,9 +272,6 @@ struct rcu_data { #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ -#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) -#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)) - /* * RCU global state, including node hierarchy. This hierarchy is * represented in "heap" form in a dense array. The root (first level) -- cgit v1.2.3-70-g09d2 From 7b0b759b65247cbc66384a912be9acf8d4800636 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 17 Aug 2010 14:18:46 -0700 Subject: rcu: combine duplicate code, courtesy of CONFIG_PREEMPT_RCU The CONFIG_PREEMPT_RCU kernel configuration parameter was recently re-introduced, but as an indication of the type of RCU (preemptible vs. non-preemptible) instead of as selecting a given implementation. This commit uses CONFIG_PREEMPT_RCU to combine duplicate code from include/linux/rcutiny.h and include/linux/rcutree.h into include/linux/rcupdate.h. This commit also combines a few other pieces of duplicate code that have accumulated. Signed-off-by: Paul E. 
McKenney --- include/linux/rcupdate.h | 75 ++++++++++++++++++++++++++++++++++++++++++++++-- include/linux/rcutiny.h | 51 -------------------------------- include/linux/rcutree.h | 50 -------------------------------- kernel/rcutree_plugin.h | 9 ------ 4 files changed, 72 insertions(+), 113 deletions(-) (limited to 'kernel') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 325bad7bbca..89414d67d96 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -61,16 +61,30 @@ struct rcu_head { }; /* Exported common interfaces */ +extern void call_rcu_sched(struct rcu_head *head, + void (*func)(struct rcu_head *rcu)); +extern void synchronize_sched(void); extern void rcu_barrier_bh(void); extern void rcu_barrier_sched(void); extern void synchronize_sched_expedited(void); extern int sched_expedited_torture_stats(char *page); -/* Internal to kernel */ -extern void rcu_init(void); +static inline void __rcu_read_lock_bh(void) +{ + local_bh_disable(); +} + +static inline void __rcu_read_unlock_bh(void) +{ + local_bh_enable(); +} #ifdef CONFIG_PREEMPT_RCU +extern void __rcu_read_lock(void); +extern void __rcu_read_unlock(void); +void synchronize_rcu(void); + /* * Defined as a macro as it is a very low level header included from * areas that don't even know about current. This gives the rcu_read_lock() @@ -79,7 +93,53 @@ extern void rcu_init(void); */ #define rcu_preempt_depth() (current->rcu_read_lock_nesting) -#endif /* #ifdef CONFIG_PREEMPT_RCU */ +#else /* #ifdef CONFIG_PREEMPT_RCU */ + +static inline void __rcu_read_lock(void) +{ + preempt_disable(); +} + +static inline void __rcu_read_unlock(void) +{ + preempt_enable(); +} + +static inline void synchronize_rcu(void) +{ + synchronize_sched(); +} + +static inline int rcu_preempt_depth(void) +{ + return 0; +} + +#endif /* #else #ifdef CONFIG_PREEMPT_RCU */ + +/* Internal to kernel */ +extern void rcu_init(void); +extern void rcu_sched_qs(int cpu); +extern void rcu_bh_qs(int cpu); +extern void rcu_check_callbacks(int cpu, int user); +struct notifier_block; + +#ifdef CONFIG_NO_HZ + +extern void rcu_enter_nohz(void); +extern void rcu_exit_nohz(void); + +#else /* #ifdef CONFIG_NO_HZ */ + +static inline void rcu_enter_nohz(void) +{ +} + +static inline void rcu_exit_nohz(void) +{ +} + +#endif /* #else #ifdef CONFIG_NO_HZ */ #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) #include @@ -626,6 +686,8 @@ struct rcu_synchronize { extern void wakeme_after_rcu(struct rcu_head *head); +#ifdef CONFIG_PREEMPT_RCU + /** * call_rcu() - Queue an RCU callback for invocation after a grace period. * @head: structure to be used for queueing the RCU updates. @@ -642,6 +704,13 @@ extern void wakeme_after_rcu(struct rcu_head *head); extern void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *head)); +#else /* #ifdef CONFIG_PREEMPT_RCU */ + +/* In classic RCU, call_rcu() is just call_rcu_sched(). */ +#define call_rcu call_rcu_sched + +#endif /* #else #ifdef CONFIG_PREEMPT_RCU */ + /** * call_rcu_bh() - Queue an RCU callback for invocation after a quicker grace period. * @head: structure to be used for queueing the RCU updates.
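For orientation, a hedged sketch of typical usage of the call_rcu() interface consolidated above; struct foo and both helpers are hypothetical names, not part of this patch:

struct foo {
	int data;
	struct rcu_head rcu;
};

static void foo_reclaim(struct rcu_head *head)
{
	struct foo *fp = container_of(head, struct foo, rcu);

	kfree(fp);
}

static void foo_retire(struct foo *fp)
{
	/*
	 * On CONFIG_PREEMPT_RCU kernels this queues fp on the
	 * preemptible-RCU callback list; otherwise the macro above
	 * maps the call to call_rcu_sched().
	 */
	call_rcu(&fp->rcu, foo_reclaim);
}

Readers bracket their accesses with rcu_read_lock()/rcu_read_unlock(), so foo_reclaim() runs only after every reader that might still hold a reference to fp has finished.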
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index c6b11dc5ba0..13877cb93a6 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -27,34 +27,10 @@ #include -void rcu_sched_qs(int cpu); -void rcu_bh_qs(int cpu); - -#ifdef CONFIG_TINY_RCU -#define __rcu_read_lock() preempt_disable() -#define __rcu_read_unlock() preempt_enable() -#else /* #ifdef CONFIG_TINY_RCU */ -void __rcu_read_lock(void); -void __rcu_read_unlock(void); -#endif /* #else #ifdef CONFIG_TINY_RCU */ -#define __rcu_read_lock_bh() local_bh_disable() -#define __rcu_read_unlock_bh() local_bh_enable() -extern void call_rcu_sched(struct rcu_head *head, - void (*func)(struct rcu_head *rcu)); - #define rcu_init_sched() do { } while (0) -extern void synchronize_sched(void); - #ifdef CONFIG_TINY_RCU -#define call_rcu call_rcu_sched - -static inline void synchronize_rcu(void) -{ - synchronize_sched(); -} - static inline void synchronize_rcu_expedited(void) { synchronize_sched(); /* Only one CPU, so pretty fast anyway!!! */ @@ -67,7 +43,6 @@ static inline void rcu_barrier(void) #else /* #ifdef CONFIG_TINY_RCU */ -void synchronize_rcu(void); void rcu_barrier(void); void synchronize_rcu_expedited(void); @@ -83,25 +58,6 @@ static inline void synchronize_rcu_bh_expedited(void) synchronize_sched(); } -struct notifier_block; - -#ifdef CONFIG_NO_HZ - -extern void rcu_enter_nohz(void); -extern void rcu_exit_nohz(void); - -#else /* #ifdef CONFIG_NO_HZ */ - -static inline void rcu_enter_nohz(void) -{ -} - -static inline void rcu_exit_nohz(void) -{ -} - -#endif /* #else #ifdef CONFIG_NO_HZ */ - #ifdef CONFIG_TINY_RCU static inline void rcu_preempt_note_context_switch(void) @@ -117,11 +73,6 @@ static inline int rcu_needs_cpu(int cpu) return 0; } -static inline int rcu_preempt_depth(void) -{ - return 0; -} - #else /* #ifdef CONFIG_TINY_RCU */ void rcu_preempt_note_context_switch(void); @@ -141,8 +92,6 @@ static inline void rcu_note_context_switch(int cpu) rcu_preempt_note_context_switch(); } -extern void rcu_check_callbacks(int cpu, int user); - /* * Return the number of grace periods. 
*/ diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 54a20c11f98..95518e62879 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -30,59 +30,23 @@ #ifndef __LINUX_RCUTREE_H #define __LINUX_RCUTREE_H -struct notifier_block; - -extern void rcu_sched_qs(int cpu); -extern void rcu_bh_qs(int cpu); extern void rcu_note_context_switch(int cpu); extern int rcu_needs_cpu(int cpu); extern void rcu_cpu_stall_reset(void); #ifdef CONFIG_TREE_PREEMPT_RCU -extern void __rcu_read_lock(void); -extern void __rcu_read_unlock(void); -extern void synchronize_rcu(void); extern void exit_rcu(void); #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ -static inline void __rcu_read_lock(void) -{ - preempt_disable(); -} - -static inline void __rcu_read_unlock(void) -{ - preempt_enable(); -} - -#define synchronize_rcu synchronize_sched - static inline void exit_rcu(void) { } -static inline int rcu_preempt_depth(void) -{ - return 0; -} - #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ -static inline void __rcu_read_lock_bh(void) -{ - local_bh_disable(); -} -static inline void __rcu_read_unlock_bh(void) -{ - local_bh_enable(); -} - -extern void call_rcu_sched(struct rcu_head *head, - void (*func)(struct rcu_head *rcu)); extern void synchronize_rcu_bh(void); -extern void synchronize_sched(void); extern void synchronize_rcu_expedited(void); static inline void synchronize_rcu_bh_expedited(void) @@ -92,8 +56,6 @@ static inline void synchronize_rcu_bh_expedited(void) extern void rcu_barrier(void); -extern void rcu_check_callbacks(int cpu, int user); - extern long rcu_batches_completed(void); extern long rcu_batches_completed_bh(void); extern long rcu_batches_completed_sched(void); @@ -101,18 +63,6 @@ extern void rcu_force_quiescent_state(void); extern void rcu_bh_force_quiescent_state(void); extern void rcu_sched_force_quiescent_state(void); -#ifdef CONFIG_NO_HZ -void rcu_enter_nohz(void); -void rcu_exit_nohz(void); -#else /* CONFIG_NO_HZ */ -static inline void rcu_enter_nohz(void) -{ -} -static inline void rcu_exit_nohz(void) -{ -} -#endif /* CONFIG_NO_HZ */ - /* A context switch is a grace period for RCU-sched and RCU-bh. */ static inline int rcu_blocking_is_gp(void) { diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index 561410f70d4..87f60f06b18 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h @@ -938,15 +938,6 @@ static void rcu_preempt_process_callbacks(void) { } -/* - * In classic RCU, call_rcu() is just call_rcu_sched(). - */ -void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) -{ - call_rcu_sched(head, func); -} -EXPORT_SYMBOL_GPL(call_rcu); - /* * Wait for an rcu-preempt grace period, but make it happen quickly. * But because preemptible RCU does not exist, map to rcu-sched. -- cgit v1.2.3-70-g09d2 From 80dcf60e6b97c7363971e7a0a788d8484d35f8a6 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 19 Aug 2010 16:57:45 -0700 Subject: rcu: apply TINY_PREEMPT_RCU read-side speedup to TREE_PREEMPT_RCU Replace one of the ACCESS_ONCE() calls in each of __rcu_read_lock() and __rcu_read_unlock() with barrier() as suggested by Steve Rostedt in order to avoid the potential compiler-optimization-induced bug noted by Mathieu Desnoyers. Located-by: Mathieu Desnoyers Suggested-by: Steven Rostedt Signed-off-by: Paul E.
McKenney --- kernel/rcutree_plugin.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index 87f60f06b18..e9e0bc74ff3 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h @@ -201,7 +201,7 @@ static void rcu_preempt_note_context_switch(int cpu) */ void __rcu_read_lock(void) { - ACCESS_ONCE(current->rcu_read_lock_nesting)++; + current->rcu_read_lock_nesting++; barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */ } EXPORT_SYMBOL_GPL(__rcu_read_lock); @@ -344,7 +344,9 @@ void __rcu_read_unlock(void) struct task_struct *t = current; barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */ - if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 && + --t->rcu_read_lock_nesting; + barrier(); /* decrement before load of ->rcu_read_unlock_special */ + if (t->rcu_read_lock_nesting == 0 && unlikely(ACCESS_ONCE(t->rcu_read_unlock_special))) rcu_read_unlock_special(t); #ifdef CONFIG_PROVE_LOCKING -- cgit v1.2.3-70-g09d2 From dd7c4d89730a1be2c1d361a8ae1f0fe9465ccf9c Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 27 Aug 2010 10:51:17 -0700 Subject: rcu: performance fixes to TINY_PREEMPT_RCU callback checking This commit tightens up checks in rcu_preempt_check_callbacks() to avoid unnecessary special handling at rcu_read_unlock() time. Signed-off-by: Paul E. McKenney --- kernel/rcutiny_plugin.h | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h index c5bea1137dc..6ceca4f745f 100644 --- a/kernel/rcutiny_plugin.h +++ b/kernel/rcutiny_plugin.h @@ -72,7 +72,7 @@ static void rcu_report_exp_done(void); /* * Return true if the CPU has not yet responded to the current grace period. */ -static int rcu_cpu_cur_gp(void) +static int rcu_cpu_blocking_cur_gp(void) { return rcu_preempt_ctrlblk.gpcpu != rcu_preempt_ctrlblk.gpnum; } @@ -229,7 +229,7 @@ void rcu_preempt_note_context_switch(void) * cannot end. */ list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks); - if (rcu_cpu_cur_gp()) + if (rcu_cpu_blocking_cur_gp()) rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry; } @@ -368,12 +368,16 @@ static void rcu_preempt_check_callbacks(void) { struct task_struct *t = current; - if (!rcu_preempt_running_reader() && rcu_preempt_gp_in_progress()) + if (rcu_preempt_gp_in_progress() && + (!rcu_preempt_running_reader() || + !rcu_cpu_blocking_cur_gp())) rcu_preempt_cpu_qs(); if (&rcu_preempt_ctrlblk.rcb.rcucblist != rcu_preempt_ctrlblk.rcb.donetail) raise_softirq(RCU_SOFTIRQ); - if (rcu_preempt_gp_in_progress() && rcu_preempt_running_reader()) + if (rcu_preempt_gp_in_progress() && + rcu_cpu_blocking_cur_gp() && + rcu_preempt_running_reader()) t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS; } -- cgit v1.2.3-70-g09d2 From a587d2daebcd2bc159d4348b6a7b028950a6d803 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:18 -0700 Subject: x86: Remove not used early_res code and some functions in e820.c that are not used anymore Signed-off-by: Yinghai Lu Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/e820.h | 14 -- arch/x86/kernel/e820.c | 52 ---- include/linux/early_res.h | 23 -- kernel/Makefile | 1 - kernel/early_res.c | 590 -------------------------------------------- 5 files changed, 680 deletions(-) delete mode 100644 include/linux/early_res.h delete mode 100644 kernel/early_res.c (limited to 'kernel') diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h index 388fed29146..718646384e0 100644 --- a/arch/x86/include/asm/e820.h +++ b/arch/x86/include/asm/e820.h @@ -112,31 +112,17 @@ static inline void early_memtest(unsigned long start, unsigned long end) } #endif -extern unsigned long end_user_pfn; - -extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align); -extern u64 find_e820_area_size(u64 start, u64 *sizep, u64 align); -extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align); - extern unsigned long e820_end_of_ram_pfn(void); extern unsigned long e820_end_of_low_ram_pfn(void); -extern void e820_register_active_regions(int nid, unsigned long start_pfn, - unsigned long end_pfn); -extern u64 e820_hole_size(u64 start, u64 end); - extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align); void memblock_x86_fill(void); - extern void finish_e820_parsing(void); extern void e820_reserve_resources(void); extern void e820_reserve_resources_late(void); extern void setup_memory_map(void); extern char *default_machine_specific_memory_setup(void); -void reserve_early(u64 start, u64 end, char *name); -void free_early(u64 start, u64 end); - /* * Returns true iff the specified range [s,e) is completely contained inside * the ISA region. diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index a9221d18a5e..d5fd89462d7 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -738,32 +738,6 @@ static int __init e820_mark_nvs_memory(void) core_initcall(e820_mark_nvs_memory); #endif -/* - * Find a free area with specified alignment in a specific range. - */ -u64 __init find_e820_area(u64 start, u64 end, u64 size, u64 align) -{ - u64 mem = memblock_find_in_range(start, end, size, align); - - if (mem == MEMBLOCK_ERROR) - return -1ULL; - - return mem; -} - -/* - * Find next free range after *start - */ -u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align) -{ - u64 mem = memblock_x86_find_in_range_size(start, sizep, align); - - if (mem == MEMBLOCK_ERROR) - return -1ULL; - - return mem; -} - /* * pre allocated 4k and reserved it in memblock and e820_saved */ @@ -856,32 +830,6 @@ unsigned long __init e820_end_of_low_ram_pfn(void) return e820_end_pfn(1UL<<(32 - PAGE_SHIFT), E820_RAM); } -/* Walk the e820 map and register active regions within a node */ -void __init e820_register_active_regions(int nid, unsigned long start_pfn, - unsigned long last_pfn) -{ - memblock_x86_register_active_regions(nid, start_pfn, last_pfn); -} - -/* - * Find the hole size (in bytes) in the memory range.
- * @start: starting address of the memory range to scan - * @end: ending address of the memory range to scan - */ -u64 __init e820_hole_size(u64 start, u64 end) -{ - return memblock_x86_hole_size(start, end); -} - -void reserve_early(u64 start, u64 end, char *name) -{ - memblock_x86_reserve_range(start, end, name); -} -void free_early(u64 start, u64 end) -{ - memblock_x86_free_range(start, end); -} - static void early_panic(char *msg) { early_printk(msg); diff --git a/include/linux/early_res.h b/include/linux/early_res.h deleted file mode 100644 index 29c09f57a13..00000000000 --- a/include/linux/early_res.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef _LINUX_EARLY_RES_H -#define _LINUX_EARLY_RES_H -#ifdef __KERNEL__ - -extern void reserve_early(u64 start, u64 end, char *name); -extern void reserve_early_overlap_ok(u64 start, u64 end, char *name); -extern void free_early(u64 start, u64 end); -void free_early_partial(u64 start, u64 end); -extern void early_res_to_bootmem(u64 start, u64 end); - -void reserve_early_without_check(u64 start, u64 end, char *name); -u64 find_early_area(u64 ei_start, u64 ei_last, u64 start, u64 end, - u64 size, u64 align); -u64 find_early_area_size(u64 ei_start, u64 ei_last, u64 start, - u64 *sizep, u64 align); -u64 find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align); -u64 get_max_mapped(void); -#include -int get_free_all_memory_range(struct range **rangep, int nodeid); - -#endif /* __KERNEL__ */ - -#endif /* _LINUX_EARLY_RES_H */ diff --git a/kernel/Makefile b/kernel/Makefile index 057472fbc27..80e61c3e44a 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -11,7 +11,6 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \ hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \ async.o range.o -obj-$(CONFIG_HAVE_EARLY_RES) += early_res.o obj-y += groups.o ifdef CONFIG_FUNCTION_TRACER diff --git a/kernel/early_res.c b/kernel/early_res.c deleted file mode 100644 index 7bfae887f21..00000000000 --- a/kernel/early_res.c +++ /dev/null @@ -1,590 +0,0 @@ -/* - * early_res, could be used to replace bootmem - */ -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * Early reserved memory areas. - */ -/* - * need to make sure this one is big enough before - * find_fw_memmap_area could be used - */ -#define MAX_EARLY_RES_X 32 - -struct early_res { - u64 start, end; - char name[15]; - char overlap_ok; -}; -static struct early_res early_res_x[MAX_EARLY_RES_X] __initdata; - -static int max_early_res __initdata = MAX_EARLY_RES_X; -static struct early_res *early_res __initdata = &early_res_x[0]; -static int early_res_count __initdata; - -static int __init find_overlapped_early(u64 start, u64 end) -{ - int i; - struct early_res *r; - - for (i = 0; i < max_early_res && early_res[i].end; i++) { - r = &early_res[i]; - if (end > r->start && start < r->end) - break; - } - - return i; -} - -/* - * Drop the i-th range from the early reservation map, - * by copying any higher ranges down one over it, and - * clearing what had been the last slot.
- */ -static void __init drop_range(int i) -{ - int j; - - for (j = i + 1; j < max_early_res && early_res[j].end; j++) - ; - - memmove(&early_res[i], &early_res[i + 1], - (j - 1 - i) * sizeof(struct early_res)); - - early_res[j - 1].end = 0; - early_res_count--; -} - -static void __init drop_range_partial(int i, u64 start, u64 end) -{ - u64 common_start, common_end; - u64 old_start, old_end; - - old_start = early_res[i].start; - old_end = early_res[i].end; - common_start = max(old_start, start); - common_end = min(old_end, end); - - /* no overlap ? */ - if (common_start >= common_end) - return; - - if (old_start < common_start) { - /* make head segment */ - early_res[i].end = common_start; - if (old_end > common_end) { - char name[15]; - - /* - * Save a local copy of the name, since the - * early_res array could get resized inside - * reserve_early_without_check() -> - * __check_and_double_early_res(), which would - * make the current name pointer invalid. - */ - strncpy(name, early_res[i].name, - sizeof(early_res[i].name) - 1); - /* add another for left over on tail */ - reserve_early_without_check(common_end, old_end, name); - } - return; - } else { - if (old_end > common_end) { - /* reuse the entry for tail left */ - early_res[i].start = common_end; - return; - } - /* all covered */ - drop_range(i); - } -} - -/* - * Split any existing ranges that: - * 1) are marked 'overlap_ok', and - * 2) overlap with the stated range [start, end) - * into whatever portion (if any) of the existing range is entirely - * below or entirely above the stated range. Drop the portion - * of the existing range that overlaps with the stated range, - * which will allow the caller of this routine to then add that - * stated range without conflicting with any existing range. - */ -static void __init drop_overlaps_that_are_ok(u64 start, u64 end) -{ - int i; - struct early_res *r; - u64 lower_start, lower_end; - u64 upper_start, upper_end; - char name[15]; - - for (i = 0; i < max_early_res && early_res[i].end; i++) { - r = &early_res[i]; - - /* Continue past non-overlapping ranges */ - if (end <= r->start || start >= r->end) - continue; - - /* - * Leave non-ok overlaps as is; let caller - * panic "Overlapping early reservations" - * when it hits this overlap. - */ - if (!r->overlap_ok) - return; - - /* - * We have an ok overlap. We will drop it from the early - * reservation map, and add back in any non-overlapping - * portions (lower or upper) as separate, overlap_ok, - * non-overlapping ranges. - */ - - /* 1. Note any non-overlapping (lower or upper) ranges. */ - strncpy(name, r->name, sizeof(name) - 1); - - lower_start = lower_end = 0; - upper_start = upper_end = 0; - if (r->start < start) { - lower_start = r->start; - lower_end = start; - } - if (r->end > end) { - upper_start = end; - upper_end = r->end; - } - - /* 2. Drop the original ok overlapping range */ - drop_range(i); - - i--; /* resume for-loop on copied down entry */ - - /* 3. Add back in any non-overlapping ranges. */ - if (lower_end) - reserve_early_overlap_ok(lower_start, lower_end, name); - if (upper_end) - reserve_early_overlap_ok(upper_start, upper_end, name); - } -} - -static void __init __reserve_early(u64 start, u64 end, char *name, - int overlap_ok) -{ - int i; - struct early_res *r; - - i = find_overlapped_early(start, end); - if (i >= max_early_res) - panic("Too many early reservations"); - r = &early_res[i]; - if (r->end) - panic("Overlapping early reservations " - "%llx-%llx %s to %llx-%llx %s\n", - start, end - 1, name ? 
name : "", r->start, - r->end - 1, r->name); - r->start = start; - r->end = end; - r->overlap_ok = overlap_ok; - if (name) - strncpy(r->name, name, sizeof(r->name) - 1); - early_res_count++; -} - -/* - * A few early reservtations come here. - * - * The 'overlap_ok' in the name of this routine does -not- mean it - * is ok for these reservations to overlap an earlier reservation. - * Rather it means that it is ok for subsequent reservations to - * overlap this one. - * - * Use this entry point to reserve early ranges when you are doing - * so out of "Paranoia", reserving perhaps more memory than you need, - * just in case, and don't mind a subsequent overlapping reservation - * that is known to be needed. - * - * The drop_overlaps_that_are_ok() call here isn't really needed. - * It would be needed if we had two colliding 'overlap_ok' - * reservations, so that the second such would not panic on the - * overlap with the first. We don't have any such as of this - * writing, but might as well tolerate such if it happens in - * the future. - */ -void __init reserve_early_overlap_ok(u64 start, u64 end, char *name) -{ - drop_overlaps_that_are_ok(start, end); - __reserve_early(start, end, name, 1); -} - -static void __init __check_and_double_early_res(u64 ex_start, u64 ex_end) -{ - u64 start, end, size, mem; - struct early_res *new; - - /* do we have enough slots left ? */ - if ((max_early_res - early_res_count) > max(max_early_res/8, 2)) - return; - - /* double it */ - mem = -1ULL; - size = sizeof(struct early_res) * max_early_res * 2; - if (early_res == early_res_x) - start = 0; - else - start = early_res[0].end; - end = ex_start; - if (start + size < end) - mem = find_fw_memmap_area(start, end, size, - sizeof(struct early_res)); - if (mem == -1ULL) { - start = ex_end; - end = get_max_mapped(); - if (start + size < end) - mem = find_fw_memmap_area(start, end, size, - sizeof(struct early_res)); - } - if (mem == -1ULL) - panic("can not find more space for early_res array"); - - new = __va(mem); - /* save the first one for own */ - new[0].start = mem; - new[0].end = mem + size; - new[0].overlap_ok = 0; - /* copy old to new */ - if (early_res == early_res_x) { - memcpy(&new[1], &early_res[0], - sizeof(struct early_res) * max_early_res); - memset(&new[max_early_res+1], 0, - sizeof(struct early_res) * (max_early_res - 1)); - early_res_count++; - } else { - memcpy(&new[1], &early_res[1], - sizeof(struct early_res) * (max_early_res - 1)); - memset(&new[max_early_res], 0, - sizeof(struct early_res) * max_early_res); - } - memset(&early_res[0], 0, sizeof(struct early_res) * max_early_res); - early_res = new; - max_early_res *= 2; - printk(KERN_DEBUG "early_res array is doubled to %d at [%llx - %llx]\n", - max_early_res, mem, mem + size - 1); -} - -/* - * Most early reservations come here. - * - * We first have drop_overlaps_that_are_ok() drop any pre-existing - * 'overlap_ok' ranges, so that we can then reserve this memory - * range without risk of panic'ing on an overlapping overlap_ok - * early reservation. 
- */ -void __init reserve_early(u64 start, u64 end, char *name) -{ - if (start >= end) - return; - - __check_and_double_early_res(start, end); - - drop_overlaps_that_are_ok(start, end); - __reserve_early(start, end, name, 0); -} - -void __init reserve_early_without_check(u64 start, u64 end, char *name) -{ - struct early_res *r; - - if (start >= end) - return; - - __check_and_double_early_res(start, end); - - r = &early_res[early_res_count]; - - r->start = start; - r->end = end; - r->overlap_ok = 0; - if (name) - strncpy(r->name, name, sizeof(r->name) - 1); - early_res_count++; -} - -void __init free_early(u64 start, u64 end) -{ - struct early_res *r; - int i; - - kmemleak_free_part(__va(start), end - start); - - i = find_overlapped_early(start, end); - r = &early_res[i]; - if (i >= max_early_res || r->end != end || r->start != start) - panic("free_early on not reserved area: %llx-%llx!", - start, end - 1); - - drop_range(i); -} - -void __init free_early_partial(u64 start, u64 end) -{ - struct early_res *r; - int i; - - kmemleak_free_part(__va(start), end - start); - - if (start == end) - return; - - if (WARN_ONCE(start > end, " wrong range [%#llx, %#llx]\n", start, end)) - return; - -try_next: - i = find_overlapped_early(start, end); - if (i >= max_early_res) - return; - - r = &early_res[i]; - /* hole ? */ - if (r->end >= end && r->start <= start) { - drop_range_partial(i, start, end); - return; - } - - drop_range_partial(i, start, end); - goto try_next; -} - -#ifdef CONFIG_NO_BOOTMEM -static void __init subtract_early_res(struct range *range, int az) -{ - int i, count; - u64 final_start, final_end; - int idx = 0; - - count = 0; - for (i = 0; i < max_early_res && early_res[i].end; i++) - count++; - - /* need to skip first one ?*/ - if (early_res != early_res_x) - idx = 1; - -#define DEBUG_PRINT_EARLY_RES 1 - -#if DEBUG_PRINT_EARLY_RES - printk(KERN_INFO "Subtract (%d early reservations)\n", count); -#endif - for (i = idx; i < count; i++) { - struct early_res *r = &early_res[i]; -#if DEBUG_PRINT_EARLY_RES - printk(KERN_INFO " #%d [%010llx - %010llx] %15s\n", i, - r->start, r->end, r->name); -#endif - final_start = PFN_DOWN(r->start); - final_end = PFN_UP(r->end); - if (final_start >= final_end) - continue; - subtract_range(range, az, final_start, final_end); - } - -} - -int __init get_free_all_memory_range(struct range **rangep, int nodeid) -{ - int i, count; - u64 start = 0, end; - u64 size; - u64 mem; - struct range *range; - int nr_range; - - count = 0; - for (i = 0; i < max_early_res && early_res[i].end; i++) - count++; - - count *= 2; - - size = sizeof(struct range) * count; - end = get_max_mapped(); -#ifdef MAX_DMA32_PFN - if (end > (MAX_DMA32_PFN << PAGE_SHIFT)) - start = MAX_DMA32_PFN << PAGE_SHIFT; -#endif - mem = find_fw_memmap_area(start, end, size, sizeof(struct range)); - if (mem == -1ULL) - panic("can not find more space for range free"); - - range = __va(mem); - /* use early_node_map[] and early_res to get range array at first */ - memset(range, 0, size); - nr_range = 0; - - /* need to go over early_node_map to find out good range for node */ - nr_range = add_from_early_node_map(range, count, nr_range, nodeid); -#ifdef CONFIG_X86_32 - subtract_range(range, count, max_low_pfn, -1ULL); -#endif - subtract_early_res(range, count); - nr_range = clean_sort_range(range, count); - - /* need to clear it ? 
*/ - if (nodeid == MAX_NUMNODES) { - memset(&early_res[0], 0, - sizeof(struct early_res) * max_early_res); - early_res = NULL; - max_early_res = 0; - } - - *rangep = range; - return nr_range; -} -#else -void __init early_res_to_bootmem(u64 start, u64 end) -{ - int i, count; - u64 final_start, final_end; - int idx = 0; - - count = 0; - for (i = 0; i < max_early_res && early_res[i].end; i++) - count++; - - /* need to skip first one ?*/ - if (early_res != early_res_x) - idx = 1; - - printk(KERN_INFO "(%d/%d early reservations) ==> bootmem [%010llx - %010llx]\n", - count - idx, max_early_res, start, end); - for (i = idx; i < count; i++) { - struct early_res *r = &early_res[i]; - printk(KERN_INFO " #%d [%010llx - %010llx] %16s", i, - r->start, r->end, r->name); - final_start = max(start, r->start); - final_end = min(end, r->end); - if (final_start >= final_end) { - printk(KERN_CONT "\n"); - continue; - } - printk(KERN_CONT " ==> [%010llx - %010llx]\n", - final_start, final_end); - reserve_bootmem_generic(final_start, final_end - final_start, - BOOTMEM_DEFAULT); - } - /* clear them */ - memset(&early_res[0], 0, sizeof(struct early_res) * max_early_res); - early_res = NULL; - max_early_res = 0; - early_res_count = 0; -} -#endif - -/* Check for already reserved areas */ -static inline int __init bad_addr(u64 *addrp, u64 size, u64 align) -{ - int i; - u64 addr = *addrp; - int changed = 0; - struct early_res *r; -again: - i = find_overlapped_early(addr, addr + size); - r = &early_res[i]; - if (i < max_early_res && r->end) { - *addrp = addr = round_up(r->end, align); - changed = 1; - goto again; - } - return changed; -} - -/* Check for already reserved areas */ -static inline int __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align) -{ - int i; - u64 addr = *addrp, last; - u64 size = *sizep; - int changed = 0; -again: - last = addr + size; - for (i = 0; i < max_early_res && early_res[i].end; i++) { - struct early_res *r = &early_res[i]; - if (last > r->start && addr < r->start) { - size = r->start - addr; - changed = 1; - goto again; - } - if (last > r->end && addr < r->end) { - addr = round_up(r->end, align); - size = last - addr; - changed = 1; - goto again; - } - if (last <= r->end && addr >= r->start) { - (*sizep)++; - return 0; - } - } - if (changed) { - *addrp = addr; - *sizep = size; - } - return changed; -} - -/* - * Find a free area with specified alignment in a specific range. 
- * Only the area between start and end that lies in an active range from - * early_node_map is considered, so it is known to be usable RAM. - */ -u64 __init find_early_area(u64 ei_start, u64 ei_last, u64 start, u64 end, - u64 size, u64 align) -{ - u64 addr, last; - - addr = round_up(ei_start, align); - if (addr < start) - addr = round_up(start, align); - if (addr >= ei_last) - goto out; - while (bad_addr(&addr, size, align) && addr+size <= ei_last) - ; - last = addr + size; - if (last > ei_last) - goto out; - if (last > end) - goto out; - - return addr; - -out: - return -1ULL; -} - -u64 __init find_early_area_size(u64 ei_start, u64 ei_last, u64 start, - u64 *sizep, u64 align) -{ - u64 addr, last; - - addr = round_up(ei_start, align); - if (addr < start) - addr = round_up(start, align); - if (addr >= ei_last) - goto out; - *sizep = ei_last - addr; - while (bad_addr_size(&addr, sizep, align) && addr + *sizep <= ei_last) - ; - last = addr + *sizep; - if (last > ei_last) - goto out; - - return addr; - -out: - return -1ULL; -} -- cgit v1.2.3-70-g09d2 From eac243355a99d6b9d41bbeba4fc83e7f735485f9 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Tue, 31 Aug 2010 23:00:08 -0400 Subject: lockup_detector: Convert cpu notifier to return encapsulated errno value Since commit e6bde73b07edeb703d4c89c1daabc09c303de11f ("cpu-hotplug: return better errno on cpu hotplug failure"), a cpu notifier can return an encapsulated errno value, resulting in more meaningful error codes for CPU hotplug failures. This converts the lockup_detector's cpu notifier to return encapsulated errno values as well. Signed-off-by: Akinobu Mita Cc: peterz@infradead.org Cc: gorcunov@gmail.com Cc: fweisbec@gmail.com LKML-Reference: <1283310009-22168-3-git-send-email-dzickus@redhat.com> Signed-off-by: Don Zickus Signed-off-by: Ingo Molnar --- kernel/watchdog.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 0d53c8e853b..501cb6edca4 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -371,7 +371,7 @@ static int watchdog_nmi_enable(int cpu) } printk(KERN_ERR "NMI watchdog failed to create perf event on cpu%i: %p\n", cpu, event); - return -1; + return PTR_ERR(event); /* success path */ out_save: @@ -415,17 +415,19 @@ static int watchdog_prepare_cpu(int cpu) static int watchdog_enable(int cpu) { struct task_struct *p = per_cpu(softlockup_watchdog, cpu); + int err; /* enable the perf event */ - if (watchdog_nmi_enable(cpu) != 0) - return -1; + err = watchdog_nmi_enable(cpu); + if (err) + return err; /* create the watchdog thread */ if (!p) { p = kthread_create(watchdog, (void *)(unsigned long)cpu, "watchdog/%d", cpu); if (IS_ERR(p)) { printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu); - return -1; + return PTR_ERR(p); } kthread_bind(p, cpu); per_cpu(watchdog_touch_ts, cpu) = 0; @@ -519,17 +521,16 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { int hotcpu = (unsigned long)hcpu; + int err = 0; switch (action) { case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: - if (watchdog_prepare_cpu(hotcpu)) - return NOTIFY_BAD; + err = watchdog_prepare_cpu(hotcpu); break; case CPU_ONLINE: case CPU_ONLINE_FROZEN: - if (watchdog_enable(hotcpu)) - return NOTIFY_BAD; + err = watchdog_enable(hotcpu); break; #ifdef CONFIG_HOTPLUG_CPU case CPU_UP_CANCELED: @@ -542,7 +543,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) break; #endif /* CONFIG_HOTPLUG_CPU */ } - return NOTIFY_OK; + 
return notifier_from_errno(err); } static struct notifier_block __cpuinitdata cpu_nfb = { @@ -558,7 +559,7 @@ static int __init spawn_watchdog_task(void) return 0; err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); - WARN_ON(err == NOTIFY_BAD); + WARN_ON(notifier_to_errno(err)); cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); register_cpu_notifier(&cpu_nfb); -- cgit v1.2.3-70-g09d2 From 14416c35b6b9975c9593d7ecc8382d1ecaa0b598 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Tue, 31 Aug 2010 23:00:09 -0400 Subject: lockup_detector: Remove unused panic_notifier The panic notifier in lockup_detector just sets did_panic to 1. But did_panic is not used anywhere, so we can just remove it. Signed-off-by: Akinobu Mita Cc: peterz@infradead.org Cc: gorcunov@gmail.com Cc: fweisbec@gmail.com LKML-Reference: <1283310009-22168-4-git-send-email-dzickus@redhat.com> Signed-off-by: Don Zickus Signed-off-by: Ingo Molnar --- kernel/watchdog.c | 15 --------------- 1 file changed, 15 deletions(-) (limited to 'kernel') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 501cb6edca4..5b1ee4f4ca0 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -43,7 +43,6 @@ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved); static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); #endif -static int __read_mostly did_panic; static int __initdata no_watchdog; @@ -180,18 +179,6 @@ static int is_softlockup(unsigned long touch_ts) return 0; } -static int -watchdog_panic(struct notifier_block *this, unsigned long event, void *ptr) -{ - did_panic = 1; - - return NOTIFY_DONE; -} - -static struct notifier_block panic_block = { - .notifier_call = watchdog_panic, -}; - #ifdef CONFIG_HARDLOCKUP_DETECTOR static struct perf_event_attr wd_hw_attr = { .type = PERF_TYPE_HARDWARE, @@ -564,8 +551,6 @@ static int __init spawn_watchdog_task(void) cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); register_cpu_notifier(&cpu_nfb); - atomic_notifier_chain_register(&panic_notifier_list, &panic_block); - return 0; } early_initcall(spawn_watchdog_task); -- cgit v1.2.3-70-g09d2 From f6195aa09e618d712f52bf4fa33b5293820eb93d Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 1 Sep 2010 12:23:12 -0400 Subject: ring-buffer: Place duplicate expression into a single function While discussing the strictness of the 80 character limit on the Kernel Summit Discussion mailing list, I showed examples where I broke that limit slightly with some algorithms. While discussing with John Linville what looked better, I realized that two of the 80-character offenders were an identical expression. As a cleanup, this patch moves the identical expression into its own helper function, which is used instead. As a side effect, the offending code is now under the 80 character limit. :-) This cleanup also changes the expression from (A - B) - C to A - (B + C). This makes the code look a little nicer too. Cc: John W. 
Linville Signed-off-by: Steven Rostedt --- kernel/trace/ring_buffer.c | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 19cccc3c302..ef27017caa5 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -2606,6 +2606,19 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu) } EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu); +/* + * The total entries in the ring buffer is the running counter + * of entries entered into the ring buffer, minus the sum of + * the entries read from the ring buffer and the number of + * entries that were overwritten. + */ +static inline unsigned long +rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) +{ + return local_read(&cpu_buffer->entries) - + (local_read(&cpu_buffer->overrun) + cpu_buffer->read); +} + /** * ring_buffer_entries_cpu - get the number of entries in a cpu buffer * @buffer: The ring buffer @@ -2614,16 +2627,13 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu); unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu) { struct ring_buffer_per_cpu *cpu_buffer; - unsigned long ret; if (!cpumask_test_cpu(cpu, buffer->cpumask)) return 0; cpu_buffer = buffer->buffers[cpu]; - ret = (local_read(&cpu_buffer->entries) - local_read(&cpu_buffer->overrun)) - - cpu_buffer->read; - return ret; + return rb_num_of_entries(cpu_buffer); } EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu); @@ -2684,8 +2694,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer) /* if you care about this being correct, lock the buffer */ for_each_buffer_cpu(buffer, cpu) { cpu_buffer = buffer->buffers[cpu]; - entries += (local_read(&cpu_buffer->entries) - - local_read(&cpu_buffer->overrun)) - cpu_buffer->read; + entries += rb_num_of_entries(cpu_buffer); } return entries; -- cgit v1.2.3-70-g09d2 From 09bfafac3e237415cc4b6adde49f9f28b3a42659 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 10 Aug 2010 19:32:37 +0100 Subject: ARM: 6314/1: ftrace: allow build without frame pointers on ARM With a new enough GCC, ARM function tracing can be supported without the need for frame pointers. This is essential for Thumb-2 support, since frame pointers aren't available then. Acked-by: Catalin Marinas Acked-by: Steven Rostedt Signed-off-by: Rabin Vincent Signed-off-by: Russell King --- arch/arm/Kconfig.debug | 5 +++++ arch/arm/kernel/armksyms.c | 2 ++ arch/arm/kernel/entry-common.S | 14 ++++++++++++++ kernel/trace/Kconfig | 2 +- 4 files changed, 22 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index 91344af75f3..4dbce538fec 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -27,6 +27,11 @@ config ARM_UNWIND the performance is not affected. Currently, this feature only works with EABI compilers. If unsure say Y. 
+config OLD_MCOUNT + bool + depends on FUNCTION_TRACER && FRAME_POINTER + default y + config DEBUG_USER bool "Verbose user fault messages" help diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index 8214bfebfac..e5e1e538767 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -165,6 +165,8 @@ EXPORT_SYMBOL(_find_next_bit_be); #endif #ifdef CONFIG_FUNCTION_TRACER +#ifdef CONFIG_OLD_MCOUNT EXPORT_SYMBOL(mcount); +#endif EXPORT_SYMBOL(__gnu_mcount_nc); #endif diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index f05a35a5969..6805a7216bf 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -128,6 +128,13 @@ ENDPROC(ret_from_fork) * allows it to be clobbered in subroutines and doesn't use it to hold * parameters.) */ + +#ifndef CONFIG_OLD_MCOUNT +#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4)) +#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0. +#endif +#endif + #ifdef CONFIG_DYNAMIC_FTRACE ENTRY(mcount) stmdb sp!, {r0-r3, lr} @@ -173,6 +180,12 @@ gnu_trace: ldmia sp!, {r0-r3, ip, lr} mov pc, ip +#ifdef CONFIG_OLD_MCOUNT +/* + * This is under an ifdef in order to force link-time errors for people trying + * to build with !FRAME_POINTER with a GCC which doesn't use the new-style + * mcount. + */ ENTRY(mcount) stmdb sp!, {r0-r3, lr} ldr r0, =ftrace_trace_function @@ -191,6 +204,7 @@ trace: mov pc, r2 ldr lr, [fp, #-4] @ restore lr ldmia sp!, {r0-r3, pc} +#endif #endif /* CONFIG_DYNAMIC_FTRACE */ diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 538501c6ea5..6329d063b5e 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -121,7 +121,7 @@ if FTRACE config FUNCTION_TRACER bool "Kernel Function Tracer" depends on HAVE_FUNCTION_TRACER - select FRAME_POINTER + select FRAME_POINTER if (!ARM_UNWIND) select KALLSYMS select GENERIC_TRACER select CONTEXT_SWITCH_TRACER -- cgit v1.2.3-70-g09d2 From 81a294c44e973dc7182e4733421b7cb2daba3c29 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 30 Aug 2010 09:52:50 -0700 Subject: rcu: fix _oddness handling of verbose stall warnings CONFIG_RCU_CPU_STALL_VERBOSE depends on CONFIG_TREE_PREEMPT_RCU, but rcu_bootup_announce_oddness() complains if CONFIG_RCU_CPU_STALL_VERBOSE is not set even in the case of CONFIG_TREE_RCU. This commit therefore fixes rcu_bootup_announce_oddness() to avoid insisting on impossibilities. Reported-by: Guy Martin Signed-off-by: Paul E. McKenney --- kernel/rcutree_plugin.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index e9e0bc74ff3..71a4147473f 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h @@ -57,7 +57,7 @@ static void __init rcu_bootup_announce_oddness(void) printk(KERN_INFO "\tRCU-based detection of stalled CPUs is disabled.\n"); #endif -#ifndef CONFIG_RCU_CPU_STALL_VERBOSE +#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE) printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n"); #endif #if NUM_RCU_LVL_4 != 0 -- cgit v1.2.3-70-g09d2 From ed2d372c0738386b8a184a6a6bea9c16df6ffb68 Mon Sep 17 00:00:00 2001 From: Christian Dietrich Date: Mon, 6 Sep 2010 16:37:05 +0200 Subject: sched: Remove unnecessary #ifdef CONFIG_SMP The CONFIG_SMP ifdef isn't necessary at this point, because it is checked in an outer ifdef level already and has no effect here. Cleanup only, no functional effect. 
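As a minimal sketch of the pattern being removed (a hypothetical struct, not the scheduler code itself), an inner guard that merely repeats the enclosing condition can never change what gets compiled:

#ifdef CONFIG_SMP
struct root_domain_example {		/* hypothetical name, for illustration */
	atomic_t refcount;
#ifdef CONFIG_SMP	/* redundant: this block is already under CONFIG_SMP */
	struct cpupri cpupri;
#endif
};
#endif /* CONFIG_SMP */

Either way the member is compiled in exactly when CONFIG_SMP is set, so dropping the inner #ifdef and labelling the outer #endif is a no-op for every possible .config.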
Signed-off-by: Christian Dietrich Cc: vamos-dev@i4.informatik.uni-erlangen.de Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Tejun Heo LKML-Reference: <7a3a39ef3f765a4473cb026b1f204059568a7098.1283782701.git.qy03fugy@stud.informatik.uni-erlangen.de> Signed-off-by: Ingo Molnar --- kernel/sched.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 09b574e7f4d..8eef8e5512d 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -426,9 +426,7 @@ struct root_domain { */ cpumask_var_t rto_mask; atomic_t rto_count; -#ifdef CONFIG_SMP struct cpupri cpupri; -#endif }; /* @@ -437,7 +435,7 @@ struct root_domain { */ static struct root_domain def_root_domain; -#endif +#endif /* CONFIG_SMP */ /* * This is the main, per-CPU runqueue data structure. -- cgit v1.2.3-70-g09d2 From f269893c575167447cc9f6d1867e639fb5b6f0c5 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 31 Aug 2010 10:28:15 +0200 Subject: sched: Merge cpu_to_core_group functions Merge and simplify the two cpu_to_core_group variants so that the resulting function follows the same pattern like cpu_to_phys_group. Signed-off-by: Heiko Carstens Signed-off-by: Peter Zijlstra LKML-Reference: <20100831082843.953617555@de.ibm.com> Signed-off-by: Ingo Molnar --- kernel/sched.c | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 8eef8e5512d..1a0c084b1cf 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -6552,31 +6552,23 @@ cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map, #ifdef CONFIG_SCHED_MC static DEFINE_PER_CPU(struct static_sched_domain, core_domains); static DEFINE_PER_CPU(struct static_sched_group, sched_group_core); -#endif /* CONFIG_SCHED_MC */ -#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) static int cpu_to_core_group(int cpu, const struct cpumask *cpu_map, struct sched_group **sg, struct cpumask *mask) { int group; - +#ifdef CONFIG_SCHED_SMT cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map); group = cpumask_first(mask); +#else + group = cpu; +#endif if (sg) *sg = &per_cpu(sched_group_core, group).sg; return group; } -#elif defined(CONFIG_SCHED_MC) -static int -cpu_to_core_group(int cpu, const struct cpumask *cpu_map, - struct sched_group **sg, struct cpumask *unused) -{ - if (sg) - *sg = &per_cpu(sched_group_core, cpu).sg; - return cpu; -} -#endif +#endif /* CONFIG_SCHED_MC */ static DEFINE_PER_CPU(struct static_sched_domain, phys_domains); static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys); -- cgit v1.2.3-70-g09d2 From 01a08546af311c065f34727787dd0cc8dc0c216f Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 31 Aug 2010 10:28:16 +0200 Subject: sched: Add book scheduling domain On top of the SMT and MC scheduling domains this adds the BOOK scheduling domain. This is useful for NUMA like machines which do not have an interface which tells which piece of memory is attached to which node or where the hardware performs striping. 
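An architecture opts in by selecting CONFIG_SCHED_BOOK and supplying a cpu_book_mask() plus an SD_BOOK_INIT initializer in its asm/topology.h. The following is only a sketch of that glue, assuming a hypothetical cpu_book_map[] filled in from firmware topology data; the tuning values are illustrative and not taken from this patch:

/* sketch for asm/topology.h; cpu_book_map[] is an assumed per-arch array */
extern cpumask_t cpu_book_map[NR_CPUS];

static inline const struct cpumask *cpu_book_mask(int cpu)
{
	return &cpu_book_map[cpu];	/* CPUs sharing this cpu's book */
}

#define SD_BOOK_INIT (struct sched_domain) {		\
	.min_interval		= 8,			\
	.max_interval		= 256,			\
	.busy_factor		= 64,			\
	.imbalance_pct		= 125,			\
	.flags			= SD_LOAD_BALANCE	\
				| SD_BALANCE_NEWIDLE	\
				| SD_BALANCE_EXEC,	\
	.balance_interval	= 64,			\
}

With that in place, the domain hierarchy built below becomes SMT -> MC -> BOOK -> CPU, each level's span contained in the next.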
Signed-off-by: Heiko Carstens Signed-off-by: Peter Zijlstra LKML-Reference: <20100831082844.253053798@de.ibm.com> Signed-off-by: Ingo Molnar --- include/linux/sched.h | 1 + include/linux/topology.h | 6 ++++ kernel/sched.c | 77 ++++++++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 82 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/include/linux/sched.h b/include/linux/sched.h index 1e2a6db2d7d..b51c53c285b 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -875,6 +875,7 @@ enum sched_domain_level { SD_LV_NONE = 0, SD_LV_SIBLING, SD_LV_MC, + SD_LV_BOOK, SD_LV_CPU, SD_LV_NODE, SD_LV_ALLNODES, diff --git a/include/linux/topology.h b/include/linux/topology.h index 64e084ff5e5..b91a40e847d 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -201,6 +201,12 @@ int arch_update_cpu_topology(void); .balance_interval = 64, \ } +#ifdef CONFIG_SCHED_BOOK +#ifndef SD_BOOK_INIT +#error Please define an appropriate SD_BOOK_INIT in include/asm/topology.h!!! +#endif +#endif /* CONFIG_SCHED_BOOK */ + #ifdef CONFIG_NUMA #ifndef SD_NODE_INIT #error Please define an appropriate SD_NODE_INIT in include/asm/topology.h!!! diff --git a/kernel/sched.c b/kernel/sched.c index 1a0c084b1cf..26f83e2f153 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -6506,6 +6506,7 @@ struct s_data { cpumask_var_t nodemask; cpumask_var_t this_sibling_map; cpumask_var_t this_core_map; + cpumask_var_t this_book_map; cpumask_var_t send_covered; cpumask_var_t tmpmask; struct sched_group **sched_group_nodes; @@ -6517,6 +6518,7 @@ enum s_alloc { sa_rootdomain, sa_tmpmask, sa_send_covered, + sa_this_book_map, sa_this_core_map, sa_this_sibling_map, sa_nodemask, @@ -6570,6 +6572,31 @@ cpu_to_core_group(int cpu, const struct cpumask *cpu_map, } #endif /* CONFIG_SCHED_MC */ +/* + * book sched-domains: + */ +#ifdef CONFIG_SCHED_BOOK +static DEFINE_PER_CPU(struct static_sched_domain, book_domains); +static DEFINE_PER_CPU(struct static_sched_group, sched_group_book); + +static int +cpu_to_book_group(int cpu, const struct cpumask *cpu_map, + struct sched_group **sg, struct cpumask *mask) +{ + int group = cpu; +#ifdef CONFIG_SCHED_MC + cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map); + group = cpumask_first(mask); +#elif defined(CONFIG_SCHED_SMT) + cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map); + group = cpumask_first(mask); +#endif + if (sg) + *sg = &per_cpu(sched_group_book, group).sg; + return group; +} +#endif /* CONFIG_SCHED_BOOK */ + static DEFINE_PER_CPU(struct static_sched_domain, phys_domains); static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys); @@ -6578,7 +6605,10 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map, struct sched_group **sg, struct cpumask *mask) { int group; -#ifdef CONFIG_SCHED_MC +#ifdef CONFIG_SCHED_BOOK + cpumask_and(mask, cpu_book_mask(cpu), cpu_map); + group = cpumask_first(mask); +#elif defined(CONFIG_SCHED_MC) cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map); group = cpumask_first(mask); #elif defined(CONFIG_SCHED_SMT) @@ -6839,6 +6869,9 @@ SD_INIT_FUNC(CPU) #ifdef CONFIG_SCHED_MC SD_INIT_FUNC(MC) #endif +#ifdef CONFIG_SCHED_BOOK + SD_INIT_FUNC(BOOK) +#endif static int default_relax_domain_level = -1; @@ -6888,6 +6921,8 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what, free_cpumask_var(d->tmpmask); /* fall through */ case sa_send_covered: free_cpumask_var(d->send_covered); /* fall through */ + case sa_this_book_map: + free_cpumask_var(d->this_book_map); /* fall through */ case 
sa_this_core_map: free_cpumask_var(d->this_core_map); /* fall through */ case sa_this_sibling_map: @@ -6934,8 +6969,10 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d, return sa_nodemask; if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL)) return sa_this_sibling_map; - if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL)) + if (!alloc_cpumask_var(&d->this_book_map, GFP_KERNEL)) return sa_this_core_map; + if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL)) + return sa_this_book_map; if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL)) return sa_send_covered; d->rd = alloc_rootdomain(); @@ -6993,6 +7030,23 @@ static struct sched_domain *__build_cpu_sched_domain(struct s_data *d, return sd; } +static struct sched_domain *__build_book_sched_domain(struct s_data *d, + const struct cpumask *cpu_map, struct sched_domain_attr *attr, + struct sched_domain *parent, int i) +{ + struct sched_domain *sd = parent; +#ifdef CONFIG_SCHED_BOOK + sd = &per_cpu(book_domains, i).sd; + SD_INIT(sd, BOOK); + set_domain_attribute(sd, attr); + cpumask_and(sched_domain_span(sd), cpu_map, cpu_book_mask(i)); + sd->parent = parent; + parent->child = sd; + cpu_to_book_group(i, cpu_map, &sd->groups, d->tmpmask); +#endif + return sd; +} + static struct sched_domain *__build_mc_sched_domain(struct s_data *d, const struct cpumask *cpu_map, struct sched_domain_attr *attr, struct sched_domain *parent, int i) @@ -7049,6 +7103,15 @@ static void build_sched_groups(struct s_data *d, enum sched_domain_level l, &cpu_to_core_group, d->send_covered, d->tmpmask); break; +#endif +#ifdef CONFIG_SCHED_BOOK + case SD_LV_BOOK: /* set up book groups */ + cpumask_and(d->this_book_map, cpu_map, cpu_book_mask(cpu)); + if (cpu == cpumask_first(d->this_book_map)) + init_sched_build_groups(d->this_book_map, cpu_map, + &cpu_to_book_group, + d->send_covered, d->tmpmask); + break; #endif case SD_LV_CPU: /* set up physical groups */ cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map); @@ -7097,12 +7160,14 @@ static int __build_sched_domains(const struct cpumask *cpu_map, sd = __build_numa_sched_domains(&d, cpu_map, attr, i); sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i); + sd = __build_book_sched_domain(&d, cpu_map, attr, sd, i); sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i); sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i); } for_each_cpu(i, cpu_map) { build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i); + build_sched_groups(&d, SD_LV_BOOK, cpu_map, i); build_sched_groups(&d, SD_LV_MC, cpu_map, i); } @@ -7133,6 +7198,12 @@ static int __build_sched_domains(const struct cpumask *cpu_map, init_sched_groups_power(i, sd); } #endif +#ifdef CONFIG_SCHED_BOOK + for_each_cpu(i, cpu_map) { + sd = &per_cpu(book_domains, i).sd; + init_sched_groups_power(i, sd); + } +#endif for_each_cpu(i, cpu_map) { sd = &per_cpu(phys_domains, i).sd; @@ -7158,6 +7229,8 @@ static int __build_sched_domains(const struct cpumask *cpu_map, sd = &per_cpu(cpu_domains, i).sd; #elif defined(CONFIG_SCHED_MC) sd = &per_cpu(core_domains, i).sd; +#elif defined(CONFIG_SCHED_BOOK) + sd = &per_cpu(book_domains, i).sd; #else sd = &per_cpu(phys_domains, i).sd; #endif -- cgit v1.2.3-70-g09d2 From 51b0fe39549a04858001922919ab355dee9bdfcf Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 11 Jun 2010 13:35:57 +0200 Subject: perf: Deconstify struct pmu sed -ie 's/const struct pmu\>/struct pmu/g' `git grep -l "const struct pmu\>"` Signed-off-by: Peter Zijlstra Cc: paulus Cc: stephane eranian Cc: Robert Richter Cc: Will Deacon Cc: Paul Mundt 
Cc: Frederic Weisbecker Cc: Cyrill Gorcunov Cc: Lin Ming Cc: Yanmin Cc: Deng-Cheng Zhu Cc: David Miller Cc: Michael Cree LKML-Reference: Signed-off-by: Ingo Molnar --- arch/alpha/kernel/perf_event.c | 4 ++-- arch/arm/kernel/perf_event.c | 2 +- arch/powerpc/kernel/perf_event.c | 8 ++++---- arch/powerpc/kernel/perf_event_fsl_emb.c | 2 +- arch/sh/kernel/perf_event.c | 4 ++-- arch/sparc/kernel/perf_event.c | 10 +++++----- arch/x86/kernel/cpu/perf_event.c | 14 +++++++------- include/linux/perf_event.h | 10 +++++----- kernel/perf_event.c | 26 +++++++++++++------------- 9 files changed, 40 insertions(+), 40 deletions(-) (limited to 'kernel') diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c index 51c39fa4169..56fa4159038 100644 --- a/arch/alpha/kernel/perf_event.c +++ b/arch/alpha/kernel/perf_event.c @@ -642,7 +642,7 @@ static int __hw_perf_event_init(struct perf_event *event) return 0; } -static const struct pmu pmu = { +static struct pmu pmu = { .enable = alpha_pmu_enable, .disable = alpha_pmu_disable, .read = alpha_pmu_read, @@ -653,7 +653,7 @@ static const struct pmu pmu = { /* * Main entry point to initialise a HW performance event. */ -const struct pmu *hw_perf_event_init(struct perf_event *event) +struct pmu *hw_perf_event_init(struct perf_event *event) { int err; diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 64ca8c3ab94..0671e92c511 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -491,7 +491,7 @@ __hw_perf_event_init(struct perf_event *event) return err; } -const struct pmu * +struct pmu * hw_perf_event_init(struct perf_event *event) { int err = 0; diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index d301a30445e..5f78681ad90 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c @@ -857,7 +857,7 @@ static void power_pmu_unthrottle(struct perf_event *event) * Set the flag to make pmu::enable() not perform the * schedulability test, it will be performed at commit time */ -void power_pmu_start_txn(const struct pmu *pmu) +void power_pmu_start_txn(struct pmu *pmu) { struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); @@ -870,7 +870,7 @@ void power_pmu_start_txn(const struct pmu *pmu) * Clear the flag and pmu::enable() will perform the * schedulability test. 
*/ -void power_pmu_cancel_txn(const struct pmu *pmu) +void power_pmu_cancel_txn(struct pmu *pmu) { struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); @@ -882,7 +882,7 @@ void power_pmu_cancel_txn(const struct pmu *pmu) * Perform the group schedulability test as a whole * Return 0 if success */ -int power_pmu_commit_txn(const struct pmu *pmu) +int power_pmu_commit_txn(struct pmu *pmu) { struct cpu_hw_events *cpuhw; long i, n; @@ -1014,7 +1014,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp) return 0; } -const struct pmu *hw_perf_event_init(struct perf_event *event) +struct pmu *hw_perf_event_init(struct perf_event *event) { u64 ev; unsigned long flags; diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c index 1ba45471ae4..d7619b5e7a6 100644 --- a/arch/powerpc/kernel/perf_event_fsl_emb.c +++ b/arch/powerpc/kernel/perf_event_fsl_emb.c @@ -428,7 +428,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp) return 0; } -const struct pmu *hw_perf_event_init(struct perf_event *event) +struct pmu *hw_perf_event_init(struct perf_event *event) { u64 ev; struct perf_event *events[MAX_HWEVENTS]; diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c index 7a3dc356725..395572c94c6 100644 --- a/arch/sh/kernel/perf_event.c +++ b/arch/sh/kernel/perf_event.c @@ -257,13 +257,13 @@ static void sh_pmu_read(struct perf_event *event) sh_perf_event_update(event, &event->hw, event->hw.idx); } -static const struct pmu pmu = { +static struct pmu pmu = { .enable = sh_pmu_enable, .disable = sh_pmu_disable, .read = sh_pmu_read, }; -const struct pmu *hw_perf_event_init(struct perf_event *event) +struct pmu *hw_perf_event_init(struct perf_event *event) { int err = __hw_perf_event_init(event); if (unlikely(err)) { diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 4bc40293857..481b894a501 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -1099,7 +1099,7 @@ static int __hw_perf_event_init(struct perf_event *event) * Set the flag to make pmu::enable() not perform the * schedulability test, it will be performed at commit time */ -static void sparc_pmu_start_txn(const struct pmu *pmu) +static void sparc_pmu_start_txn(struct pmu *pmu) { struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); @@ -1111,7 +1111,7 @@ static void sparc_pmu_start_txn(const struct pmu *pmu) * Clear the flag and pmu::enable() will perform the * schedulability test. 
*/ -static void sparc_pmu_cancel_txn(const struct pmu *pmu) +static void sparc_pmu_cancel_txn(struct pmu *pmu) { struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); @@ -1123,7 +1123,7 @@ static void sparc_pmu_cancel_txn(const struct pmu *pmu) * Perform the group schedulability test as a whole * Return 0 if success */ -static int sparc_pmu_commit_txn(const struct pmu *pmu) +static int sparc_pmu_commit_txn(struct pmu *pmu) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); int n; @@ -1142,7 +1142,7 @@ static int sparc_pmu_commit_txn(const struct pmu *pmu) return 0; } -static const struct pmu pmu = { +static struct pmu pmu = { .enable = sparc_pmu_enable, .disable = sparc_pmu_disable, .read = sparc_pmu_read, @@ -1152,7 +1152,7 @@ static const struct pmu pmu = { .commit_txn = sparc_pmu_commit_txn, }; -const struct pmu *hw_perf_event_init(struct perf_event *event) +struct pmu *hw_perf_event_init(struct perf_event *event) { int err = __hw_perf_event_init(event); diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index de6569c04cd..fdd97f2e996 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -618,7 +618,7 @@ static void x86_pmu_enable_all(int added) } } -static const struct pmu pmu; +static struct pmu pmu; static inline int is_x86_event(struct perf_event *event) { @@ -1427,7 +1427,7 @@ static inline void x86_pmu_read(struct perf_event *event) * Set the flag to make pmu::enable() not perform the * schedulability test, it will be performed at commit time */ -static void x86_pmu_start_txn(const struct pmu *pmu) +static void x86_pmu_start_txn(struct pmu *pmu) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); @@ -1440,7 +1440,7 @@ static void x86_pmu_start_txn(const struct pmu *pmu) * Clear the flag and pmu::enable() will perform the * schedulability test. */ -static void x86_pmu_cancel_txn(const struct pmu *pmu) +static void x86_pmu_cancel_txn(struct pmu *pmu) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); @@ -1457,7 +1457,7 @@ static void x86_pmu_cancel_txn(const struct pmu *pmu) * Perform the group schedulability test as a whole * Return 0 if success */ -static int x86_pmu_commit_txn(const struct pmu *pmu) +static int x86_pmu_commit_txn(struct pmu *pmu) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); int assign[X86_PMC_IDX_MAX]; @@ -1483,7 +1483,7 @@ static int x86_pmu_commit_txn(const struct pmu *pmu) return 0; } -static const struct pmu pmu = { +static struct pmu pmu = { .enable = x86_pmu_enable, .disable = x86_pmu_disable, .start = x86_pmu_start, @@ -1569,9 +1569,9 @@ out: return ret; } -const struct pmu *hw_perf_event_init(struct perf_event *event) +struct pmu *hw_perf_event_init(struct perf_event *event) { - const struct pmu *tmp; + struct pmu *tmp; int err; err = __hw_perf_event_init(event); diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 000610c4de7..09d048b5211 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -578,19 +578,19 @@ struct pmu { * Start the transaction, after this ->enable() doesn't need * to do schedulability tests. */ - void (*start_txn) (const struct pmu *pmu); + void (*start_txn) (struct pmu *pmu); /* * If ->start_txn() disabled the ->enable() schedulability test * then ->commit_txn() is required to perform one. On success * the transaction is closed. On error the transaction is kept * open until ->cancel_txn() is called. 
*/ - int (*commit_txn) (const struct pmu *pmu); + int (*commit_txn) (struct pmu *pmu); /* * Will cancel the transaction, assumes ->disable() is called for * each successfull ->enable() during the transaction. */ - void (*cancel_txn) (const struct pmu *pmu); + void (*cancel_txn) (struct pmu *pmu); }; /** @@ -669,7 +669,7 @@ struct perf_event { int nr_siblings; int group_flags; struct perf_event *group_leader; - const struct pmu *pmu; + struct pmu *pmu; enum perf_event_active_state state; unsigned int attach_state; @@ -849,7 +849,7 @@ struct perf_output_handle { */ extern int perf_max_events; -extern const struct pmu *hw_perf_event_init(struct perf_event *event); +extern struct pmu *hw_perf_event_init(struct perf_event *event); extern void perf_event_task_sched_in(struct task_struct *task); extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next); diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 2d74f31220a..fb46fd13f31 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -75,7 +75,7 @@ static DEFINE_SPINLOCK(perf_resource_lock); /* * Architecture provided APIs - weak aliases: */ -extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event) +extern __weak struct pmu *hw_perf_event_init(struct perf_event *event) { return NULL; } @@ -691,7 +691,7 @@ group_sched_in(struct perf_event *group_event, struct perf_event_context *ctx) { struct perf_event *event, *partial_group = NULL; - const struct pmu *pmu = group_event->pmu; + struct pmu *pmu = group_event->pmu; bool txn = false; if (group_event->state == PERF_EVENT_STATE_OFF) @@ -4501,7 +4501,7 @@ static int perf_swevent_int(struct perf_event *event) return 0; } -static const struct pmu perf_ops_generic = { +static struct pmu perf_ops_generic = { .enable = perf_swevent_enable, .disable = perf_swevent_disable, .start = perf_swevent_int, @@ -4614,7 +4614,7 @@ static void cpu_clock_perf_event_read(struct perf_event *event) cpu_clock_perf_event_update(event); } -static const struct pmu perf_ops_cpu_clock = { +static struct pmu perf_ops_cpu_clock = { .enable = cpu_clock_perf_event_enable, .disable = cpu_clock_perf_event_disable, .read = cpu_clock_perf_event_read, @@ -4671,7 +4671,7 @@ static void task_clock_perf_event_read(struct perf_event *event) task_clock_perf_event_update(event, time); } -static const struct pmu perf_ops_task_clock = { +static struct pmu perf_ops_task_clock = { .enable = task_clock_perf_event_enable, .disable = task_clock_perf_event_disable, .read = task_clock_perf_event_read, @@ -4785,7 +4785,7 @@ static int swevent_hlist_get(struct perf_event *event) #ifdef CONFIG_EVENT_TRACING -static const struct pmu perf_ops_tracepoint = { +static struct pmu perf_ops_tracepoint = { .enable = perf_trace_enable, .disable = perf_trace_disable, .start = perf_swevent_int, @@ -4849,7 +4849,7 @@ static void tp_perf_event_destroy(struct perf_event *event) perf_trace_destroy(event); } -static const struct pmu *tp_perf_event_init(struct perf_event *event) +static struct pmu *tp_perf_event_init(struct perf_event *event) { int err; @@ -4896,7 +4896,7 @@ static void perf_event_free_filter(struct perf_event *event) #else -static const struct pmu *tp_perf_event_init(struct perf_event *event) +static struct pmu *tp_perf_event_init(struct perf_event *event) { return NULL; } @@ -4918,7 +4918,7 @@ static void bp_perf_event_destroy(struct perf_event *event) release_bp_slot(event); } -static const struct pmu *bp_perf_event_init(struct perf_event *bp) +static struct pmu 
*bp_perf_event_init(struct perf_event *bp) { int err; @@ -4942,7 +4942,7 @@ void perf_bp_event(struct perf_event *bp, void *data) perf_swevent_add(bp, 1, 1, &sample, regs); } #else -static const struct pmu *bp_perf_event_init(struct perf_event *bp) +static struct pmu *bp_perf_event_init(struct perf_event *bp) { return NULL; } @@ -4964,9 +4964,9 @@ static void sw_perf_event_destroy(struct perf_event *event) swevent_hlist_put(event); } -static const struct pmu *sw_perf_event_init(struct perf_event *event) +static struct pmu *sw_perf_event_init(struct perf_event *event) { - const struct pmu *pmu = NULL; + struct pmu *pmu = NULL; u64 event_id = event->attr.config; /* @@ -5028,7 +5028,7 @@ perf_event_alloc(struct perf_event_attr *attr, perf_overflow_handler_t overflow_handler, gfp_t gfpflags) { - const struct pmu *pmu; + struct pmu *pmu; struct perf_event *event; struct hw_perf_event *hwc; long err; -- cgit v1.2.3-70-g09d2 From b0a873ebbf87bf38bf70b5e39a7cadc96099fa13 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 11 Jun 2010 13:35:08 +0200 Subject: perf: Register PMU implementations Simple registration interface for struct pmu, this provides the infrastructure for removing all the weak functions. Signed-off-by: Peter Zijlstra Cc: paulus Cc: stephane eranian Cc: Robert Richter Cc: Will Deacon Cc: Paul Mundt Cc: Frederic Weisbecker Cc: Cyrill Gorcunov Cc: Lin Ming Cc: Yanmin Cc: Deng-Cheng Zhu Cc: David Miller Cc: Michael Cree LKML-Reference: Signed-off-by: Ingo Molnar --- arch/alpha/kernel/perf_event.c | 37 +- arch/arm/kernel/perf_event.c | 38 +- arch/powerpc/kernel/perf_event.c | 46 +-- arch/powerpc/kernel/perf_event_fsl_emb.c | 37 +- arch/sh/kernel/perf_event.c | 35 +- arch/sparc/kernel/perf_event.c | 29 +- arch/x86/kernel/cpu/perf_event.c | 45 ++- include/linux/perf_event.h | 10 +- kernel/hw_breakpoint.c | 35 +- kernel/perf_event.c | 588 +++++++++++++++---------------- 10 files changed, 488 insertions(+), 412 deletions(-) (limited to 'kernel') diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c index 56fa4159038..19660b5c298 100644 --- a/arch/alpha/kernel/perf_event.c +++ b/arch/alpha/kernel/perf_event.c @@ -642,34 +642,39 @@ static int __hw_perf_event_init(struct perf_event *event) return 0; } -static struct pmu pmu = { - .enable = alpha_pmu_enable, - .disable = alpha_pmu_disable, - .read = alpha_pmu_read, - .unthrottle = alpha_pmu_unthrottle, -}; - - /* * Main entry point to initialise a HW performance event. */ -struct pmu *hw_perf_event_init(struct perf_event *event) +static int alpha_pmu_event_init(struct perf_event *event) { int err; + switch (event->attr.type) { + case PERF_TYPE_RAW: + case PERF_TYPE_HARDWARE: + case PERF_TYPE_HW_CACHE: + break; + + default: + return -ENOENT; + } + if (!alpha_pmu) - return ERR_PTR(-ENODEV); + return -ENODEV; /* Do the real initialisation work. */ err = __hw_perf_event_init(event); - if (err) - return ERR_PTR(err); - - return &pmu; + return err; } - +static struct pmu pmu = { + .event_init = alpha_pmu_event_init, + .enable = alpha_pmu_enable, + .disable = alpha_pmu_disable, + .read = alpha_pmu_read, + .unthrottle = alpha_pmu_unthrottle, +}; /* * Main entry point - enable HW performance counters. 
@@ -838,5 +843,7 @@ void __init init_hw_perf_events(void) /* And set up PMU specification */ alpha_pmu = &ev67_pmu; perf_max_events = alpha_pmu->num_pmcs; + + perf_pmu_register(&pmu); } diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 0671e92c511..f62f9db35db 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -306,12 +306,7 @@ out: return err; } -static struct pmu pmu = { - .enable = armpmu_enable, - .disable = armpmu_disable, - .unthrottle = armpmu_unthrottle, - .read = armpmu_read, -}; +static struct pmu pmu; static int validate_event(struct cpu_hw_events *cpuc, @@ -491,20 +486,29 @@ __hw_perf_event_init(struct perf_event *event) return err; } -struct pmu * -hw_perf_event_init(struct perf_event *event) +static int armpmu_event_init(struct perf_event *event) { int err = 0; + switch (event->attr.type) { + case PERF_TYPE_RAW: + case PERF_TYPE_HARDWARE: + case PERF_TYPE_HW_CACHE: + break; + + default: + return -ENOENT; + } + if (!armpmu) - return ERR_PTR(-ENODEV); + return -ENODEV; event->destroy = hw_perf_event_destroy; if (!atomic_inc_not_zero(&active_events)) { if (atomic_read(&active_events) > perf_max_events) { atomic_dec(&active_events); - return ERR_PTR(-ENOSPC); + return -ENOSPC; } mutex_lock(&pmu_reserve_mutex); @@ -518,15 +522,23 @@ hw_perf_event_init(struct perf_event *event) } if (err) - return ERR_PTR(err); + return err; err = __hw_perf_event_init(event); if (err) hw_perf_event_destroy(event); - return err ? ERR_PTR(err) : &pmu; + return err; } +static struct pmu pmu = { + .event_init = armpmu_event_init, + .enable = armpmu_enable, + .disable = armpmu_disable, + .unthrottle = armpmu_unthrottle, + .read = armpmu_read, +}; + void hw_perf_enable(void) { @@ -2994,6 +3006,8 @@ init_hw_perf_events(void) perf_max_events = -1; } + perf_pmu_register(&pmu); + return 0; } arch_initcall(init_hw_perf_events); diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index 5f78681ad90..19131b2614b 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c @@ -904,16 +904,6 @@ int power_pmu_commit_txn(struct pmu *pmu) return 0; } -struct pmu power_pmu = { - .enable = power_pmu_enable, - .disable = power_pmu_disable, - .read = power_pmu_read, - .unthrottle = power_pmu_unthrottle, - .start_txn = power_pmu_start_txn, - .cancel_txn = power_pmu_cancel_txn, - .commit_txn = power_pmu_commit_txn, -}; - /* * Return 1 if we might be able to put event on a limited PMC, * or 0 if not. 
@@ -1014,7 +1004,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp) return 0; } -struct pmu *hw_perf_event_init(struct perf_event *event) +static int power_pmu_event_init(struct perf_event *event) { u64 ev; unsigned long flags; @@ -1026,25 +1016,27 @@ struct pmu *hw_perf_event_init(struct perf_event *event) struct cpu_hw_events *cpuhw; if (!ppmu) - return ERR_PTR(-ENXIO); + return -ENOENT; + switch (event->attr.type) { case PERF_TYPE_HARDWARE: ev = event->attr.config; if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) - return ERR_PTR(-EOPNOTSUPP); + return -EOPNOTSUPP; ev = ppmu->generic_events[ev]; break; case PERF_TYPE_HW_CACHE: err = hw_perf_cache_event(event->attr.config, &ev); if (err) - return ERR_PTR(err); + return err; break; case PERF_TYPE_RAW: ev = event->attr.config; break; default: - return ERR_PTR(-EINVAL); + return -ENOENT; } + event->hw.config_base = ev; event->hw.idx = 0; @@ -1081,7 +1073,7 @@ struct pmu *hw_perf_event_init(struct perf_event *event) */ ev = normal_pmc_alternative(ev, flags); if (!ev) - return ERR_PTR(-EINVAL); + return -EINVAL; } } @@ -1095,19 +1087,19 @@ struct pmu *hw_perf_event_init(struct perf_event *event) n = collect_events(event->group_leader, ppmu->n_counter - 1, ctrs, events, cflags); if (n < 0) - return ERR_PTR(-EINVAL); + return -EINVAL; } events[n] = ev; ctrs[n] = event; cflags[n] = flags; if (check_excludes(ctrs, cflags, n, 1)) - return ERR_PTR(-EINVAL); + return -EINVAL; cpuhw = &get_cpu_var(cpu_hw_events); err = power_check_constraints(cpuhw, events, cflags, n + 1); put_cpu_var(cpu_hw_events); if (err) - return ERR_PTR(-EINVAL); + return -EINVAL; event->hw.config = events[n]; event->hw.event_base = cflags[n]; @@ -1132,11 +1124,20 @@ struct pmu *hw_perf_event_init(struct perf_event *event) } event->destroy = hw_perf_event_destroy; - if (err) - return ERR_PTR(err); - return &power_pmu; + return err; } +struct pmu power_pmu = { + .event_init = power_pmu_event_init, + .enable = power_pmu_enable, + .disable = power_pmu_disable, + .read = power_pmu_read, + .unthrottle = power_pmu_unthrottle, + .start_txn = power_pmu_start_txn, + .cancel_txn = power_pmu_cancel_txn, + .commit_txn = power_pmu_commit_txn, +}; + /* * A counter has overflowed; update its count and record * things if requested. Note that interrupts are hard-disabled @@ -1342,6 +1343,7 @@ int register_power_pmu(struct power_pmu *pmu) freeze_events_kernel = MMCR0_FCHV; #endif /* CONFIG_PPC64 */ + perf_pmu_register(&power_pmu); perf_cpu_notifier(power_pmu_notifier); return 0; diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c index d7619b5e7a6..ea6a804e43f 100644 --- a/arch/powerpc/kernel/perf_event_fsl_emb.c +++ b/arch/powerpc/kernel/perf_event_fsl_emb.c @@ -378,13 +378,6 @@ static void fsl_emb_pmu_unthrottle(struct perf_event *event) local_irq_restore(flags); } -static struct pmu fsl_emb_pmu = { - .enable = fsl_emb_pmu_enable, - .disable = fsl_emb_pmu_disable, - .read = fsl_emb_pmu_read, - .unthrottle = fsl_emb_pmu_unthrottle, -}; - /* * Release the PMU if this is the last perf_event. 
*/ @@ -428,7 +421,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp) return 0; } -struct pmu *hw_perf_event_init(struct perf_event *event) +static int fsl_emb_pmu_event_init(struct perf_event *event) { u64 ev; struct perf_event *events[MAX_HWEVENTS]; @@ -441,14 +434,14 @@ struct pmu *hw_perf_event_init(struct perf_event *event) case PERF_TYPE_HARDWARE: ev = event->attr.config; if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) - return ERR_PTR(-EOPNOTSUPP); + return -EOPNOTSUPP; ev = ppmu->generic_events[ev]; break; case PERF_TYPE_HW_CACHE: err = hw_perf_cache_event(event->attr.config, &ev); if (err) - return ERR_PTR(err); + return err; break; case PERF_TYPE_RAW: @@ -456,12 +449,12 @@ struct pmu *hw_perf_event_init(struct perf_event *event) break; default: - return ERR_PTR(-EINVAL); + return -ENOENT; } event->hw.config = ppmu->xlate_event(ev); if (!(event->hw.config & FSL_EMB_EVENT_VALID)) - return ERR_PTR(-EINVAL); + return -EINVAL; /* * If this is in a group, check if it can go on with all the @@ -473,7 +466,7 @@ struct pmu *hw_perf_event_init(struct perf_event *event) n = collect_events(event->group_leader, ppmu->n_counter - 1, events); if (n < 0) - return ERR_PTR(-EINVAL); + return -EINVAL; } if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) { @@ -484,7 +477,7 @@ struct pmu *hw_perf_event_init(struct perf_event *event) } if (num_restricted >= ppmu->n_restricted) - return ERR_PTR(-EINVAL); + return -EINVAL; } event->hw.idx = -1; @@ -497,7 +490,7 @@ struct pmu *hw_perf_event_init(struct perf_event *event) if (event->attr.exclude_kernel) event->hw.config_base |= PMLCA_FCS; if (event->attr.exclude_idle) - return ERR_PTR(-ENOTSUPP); + return -ENOTSUPP; event->hw.last_period = event->hw.sample_period; local64_set(&event->hw.period_left, event->hw.last_period); @@ -523,11 +516,17 @@ struct pmu *hw_perf_event_init(struct perf_event *event) } event->destroy = hw_perf_event_destroy; - if (err) - return ERR_PTR(err); - return &fsl_emb_pmu; + return err; } +static struct pmu fsl_emb_pmu = { + .event_init = fsl_emb_pmu_event_init, + .enable = fsl_emb_pmu_enable, + .disable = fsl_emb_pmu_disable, + .read = fsl_emb_pmu_read, + .unthrottle = fsl_emb_pmu_unthrottle, +}; + /* * A counter has overflowed; update its count and record * things if requested. 
Note that interrupts are hard-disabled @@ -651,5 +650,7 @@ int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu) pr_info("%s performance monitor hardware support registered\n", pmu->name); + perf_pmu_register(&fsl_emb_pmu); + return 0; } diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c index 395572c94c6..8cb206597e0 100644 --- a/arch/sh/kernel/perf_event.c +++ b/arch/sh/kernel/perf_event.c @@ -257,26 +257,38 @@ static void sh_pmu_read(struct perf_event *event) sh_perf_event_update(event, &event->hw, event->hw.idx); } -static struct pmu pmu = { - .enable = sh_pmu_enable, - .disable = sh_pmu_disable, - .read = sh_pmu_read, -}; - -struct pmu *hw_perf_event_init(struct perf_event *event) +static int sh_pmu_event_init(struct perf_event *event) { - int err = __hw_perf_event_init(event); + int err; + + switch (event->attr.type) { + case PERF_TYPE_RAW: + case PERF_TYPE_HW_CACHE: + case PERF_TYPE_HARDWARE: + err = __hw_perf_event_init(event); + break; + + default: + return -ENOENT; + } + if (unlikely(err)) { if (event->destroy) event->destroy(event); - return ERR_PTR(err); } - return &pmu; + return err; } +static struct pmu pmu = { + .event_init = sh_pmu_event_init, + .enable = sh_pmu_enable, + .disable = sh_pmu_disable, + .read = sh_pmu_read, +}; + static void sh_pmu_setup(int cpu) -{ + struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); memset(cpuhw, 0, sizeof(struct cpu_hw_events)); @@ -325,6 +337,7 @@ int __cpuinit register_sh_pmu(struct sh_pmu *pmu) WARN_ON(pmu->num_events > MAX_HWEVENTS); + perf_pmu_register(&pmu); perf_cpu_notifier(sh_pmu_notifier); return 0; } diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 481b894a501..bed4327f5a7 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -1025,7 +1025,7 @@ out: return ret; } -static int __hw_perf_event_init(struct perf_event *event) +static int sparc_pmu_event_init(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; struct perf_event *evts[MAX_HWEVENTS]; @@ -1038,17 +1038,27 @@ static int __hw_perf_event_init(struct perf_event *event) if (atomic_read(&nmi_active) < 0) return -ENODEV; - if (attr->type == PERF_TYPE_HARDWARE) { + switch (attr->type) { + case PERF_TYPE_HARDWARE: if (attr->config >= sparc_pmu->max_events) return -EINVAL; pmap = sparc_pmu->event_map(attr->config); - } else if (attr->type == PERF_TYPE_HW_CACHE) { + break; + + case PERF_TYPE_HW_CACHE: pmap = sparc_map_cache_event(attr->config); if (IS_ERR(pmap)) return PTR_ERR(pmap); - } else + break; + + case PERF_TYPE_RAW: return -EOPNOTSUPP; + default: + return -ENOENT; + + } + /* We save the enable bits in the config_base. */ hwc->config_base = sparc_pmu->irq_bit; if (!attr->exclude_user) @@ -1143,6 +1153,7 @@ static int sparc_pmu_commit_txn(struct pmu *pmu) } static struct pmu pmu = { + .event_init = sparc_pmu_event_init, .enable = sparc_pmu_enable, .disable = sparc_pmu_disable, .read = sparc_pmu_read, @@ -1152,15 +1163,6 @@ static struct pmu pmu = { .commit_txn = sparc_pmu_commit_txn, }; -struct pmu *hw_perf_event_init(struct perf_event *event) -{ - int err = __hw_perf_event_init(event); - - if (err) - return ERR_PTR(err); - return &pmu; -} - void perf_event_print_debug(void) { unsigned long flags; @@ -1280,6 +1282,7 @@ void __init init_hw_perf_events(void) /* All sparc64 PMUs currently have 2 events. 
*/ perf_max_events = 2; + perf_pmu_register(&pmu); register_die_notifier(&perf_event_nmi_notifier); } diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index fdd97f2e996..2c89264ee79 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -530,7 +530,7 @@ static int x86_pmu_hw_config(struct perf_event *event) /* * Setup the hardware configuration for a given attr_type */ -static int __hw_perf_event_init(struct perf_event *event) +static int __x86_pmu_event_init(struct perf_event *event) { int err; @@ -1414,6 +1414,7 @@ void __init init_hw_perf_events(void) pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed); pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl); + perf_pmu_register(&pmu); perf_cpu_notifier(x86_pmu_notifier); } @@ -1483,18 +1484,6 @@ static int x86_pmu_commit_txn(struct pmu *pmu) return 0; } -static struct pmu pmu = { - .enable = x86_pmu_enable, - .disable = x86_pmu_disable, - .start = x86_pmu_start, - .stop = x86_pmu_stop, - .read = x86_pmu_read, - .unthrottle = x86_pmu_unthrottle, - .start_txn = x86_pmu_start_txn, - .cancel_txn = x86_pmu_cancel_txn, - .commit_txn = x86_pmu_commit_txn, -}; - /* * validate that we can schedule this event */ @@ -1569,12 +1558,22 @@ out: return ret; } -struct pmu *hw_perf_event_init(struct perf_event *event) +int x86_pmu_event_init(struct perf_event *event) { struct pmu *tmp; int err; - err = __hw_perf_event_init(event); + switch (event->attr.type) { + case PERF_TYPE_RAW: + case PERF_TYPE_HARDWARE: + case PERF_TYPE_HW_CACHE: + break; + + default: + return -ENOENT; + } + + err = __x86_pmu_event_init(event); if (!err) { /* * we temporarily connect event to its pmu @@ -1594,12 +1593,24 @@ struct pmu *hw_perf_event_init(struct perf_event *event) if (err) { if (event->destroy) event->destroy(event); - return ERR_PTR(err); } - return &pmu; + return err; } +static struct pmu pmu = { + .event_init = x86_pmu_event_init, + .enable = x86_pmu_enable, + .disable = x86_pmu_disable, + .start = x86_pmu_start, + .stop = x86_pmu_stop, + .read = x86_pmu_read, + .unthrottle = x86_pmu_unthrottle, + .start_txn = x86_pmu_start_txn, + .cancel_txn = x86_pmu_cancel_txn, + .commit_txn = x86_pmu_commit_txn, +}; + /* * callchain support */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 09d048b5211..ab72f56eb37 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -561,6 +561,13 @@ struct perf_event; * struct pmu - generic performance monitoring unit */ struct pmu { + struct list_head entry; + + /* + * Should return -ENOENT when the @event doesn't match this pmu + */ + int (*event_init) (struct perf_event *event); + int (*enable) (struct perf_event *event); void (*disable) (struct perf_event *event); int (*start) (struct perf_event *event); @@ -849,7 +856,8 @@ struct perf_output_handle { */ extern int perf_max_events; -extern struct pmu *hw_perf_event_init(struct perf_event *event); +extern int perf_pmu_register(struct pmu *pmu); +extern void perf_pmu_unregister(struct pmu *pmu); extern void perf_event_task_sched_in(struct task_struct *task); extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next); diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c index d71a987fd2b..e9c5cfa1fd2 100644 --- a/kernel/hw_breakpoint.c +++ b/kernel/hw_breakpoint.c @@ -565,6 +565,34 @@ static struct notifier_block hw_breakpoint_exceptions_nb = { .priority = 0x7fffffff }; +static void bp_perf_event_destroy(struct 
perf_event *event) +{ + release_bp_slot(event); +} + +static int hw_breakpoint_event_init(struct perf_event *bp) +{ + int err; + + if (bp->attr.type != PERF_TYPE_BREAKPOINT) + return -ENOENT; + + err = register_perf_hw_breakpoint(bp); + if (err) + return err; + + bp->destroy = bp_perf_event_destroy; + + return 0; +} + +static struct pmu perf_breakpoint = { + .event_init = hw_breakpoint_event_init, + .enable = arch_install_hw_breakpoint, + .disable = arch_uninstall_hw_breakpoint, + .read = hw_breakpoint_pmu_read, +}; + static int __init init_hw_breakpoint(void) { unsigned int **task_bp_pinned; @@ -586,6 +614,8 @@ static int __init init_hw_breakpoint(void) constraints_initialized = 1; + perf_pmu_register(&perf_breakpoint); + return register_die_notifier(&hw_breakpoint_exceptions_nb); err_alloc: @@ -601,8 +631,3 @@ static int __init init_hw_breakpoint(void) core_initcall(init_hw_breakpoint); -struct pmu perf_ops_bp = { - .enable = arch_install_hw_breakpoint, - .disable = arch_uninstall_hw_breakpoint, - .read = hw_breakpoint_pmu_read, -}; diff --git a/kernel/perf_event.c b/kernel/perf_event.c index fb46fd13f31..288ce43de57 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -31,7 +31,6 @@ #include #include #include -#include #include @@ -72,14 +71,6 @@ static atomic64_t perf_event_id; */ static DEFINE_SPINLOCK(perf_resource_lock); -/* - * Architecture provided APIs - weak aliases: - */ -extern __weak struct pmu *hw_perf_event_init(struct perf_event *event) -{ - return NULL; -} - void __weak hw_perf_disable(void) { barrier(); } void __weak hw_perf_enable(void) { barrier(); } @@ -4501,182 +4492,6 @@ static int perf_swevent_int(struct perf_event *event) return 0; } -static struct pmu perf_ops_generic = { - .enable = perf_swevent_enable, - .disable = perf_swevent_disable, - .start = perf_swevent_int, - .stop = perf_swevent_void, - .read = perf_swevent_read, - .unthrottle = perf_swevent_void, /* hwc->interrupts already reset */ -}; - -/* - * hrtimer based swevent callback - */ - -static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) -{ - enum hrtimer_restart ret = HRTIMER_RESTART; - struct perf_sample_data data; - struct pt_regs *regs; - struct perf_event *event; - u64 period; - - event = container_of(hrtimer, struct perf_event, hw.hrtimer); - event->pmu->read(event); - - perf_sample_data_init(&data, 0); - data.period = event->hw.last_period; - regs = get_irq_regs(); - - if (regs && !perf_exclude_event(event, regs)) { - if (!(event->attr.exclude_idle && current->pid == 0)) - if (perf_event_overflow(event, 0, &data, regs)) - ret = HRTIMER_NORESTART; - } - - period = max_t(u64, 10000, event->hw.sample_period); - hrtimer_forward_now(hrtimer, ns_to_ktime(period)); - - return ret; -} - -static void perf_swevent_start_hrtimer(struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - - hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - hwc->hrtimer.function = perf_swevent_hrtimer; - if (hwc->sample_period) { - u64 period; - - if (hwc->remaining) { - if (hwc->remaining < 0) - period = 10000; - else - period = hwc->remaining; - hwc->remaining = 0; - } else { - period = max_t(u64, 10000, hwc->sample_period); - } - __hrtimer_start_range_ns(&hwc->hrtimer, - ns_to_ktime(period), 0, - HRTIMER_MODE_REL, 0); - } -} - -static void perf_swevent_cancel_hrtimer(struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - - if (hwc->sample_period) { - ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); - hwc->remaining = 
ktime_to_ns(remaining); - - hrtimer_cancel(&hwc->hrtimer); - } -} - -/* - * Software event: cpu wall time clock - */ - -static void cpu_clock_perf_event_update(struct perf_event *event) -{ - int cpu = raw_smp_processor_id(); - s64 prev; - u64 now; - - now = cpu_clock(cpu); - prev = local64_xchg(&event->hw.prev_count, now); - local64_add(now - prev, &event->count); -} - -static int cpu_clock_perf_event_enable(struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - int cpu = raw_smp_processor_id(); - - local64_set(&hwc->prev_count, cpu_clock(cpu)); - perf_swevent_start_hrtimer(event); - - return 0; -} - -static void cpu_clock_perf_event_disable(struct perf_event *event) -{ - perf_swevent_cancel_hrtimer(event); - cpu_clock_perf_event_update(event); -} - -static void cpu_clock_perf_event_read(struct perf_event *event) -{ - cpu_clock_perf_event_update(event); -} - -static struct pmu perf_ops_cpu_clock = { - .enable = cpu_clock_perf_event_enable, - .disable = cpu_clock_perf_event_disable, - .read = cpu_clock_perf_event_read, -}; - -/* - * Software event: task time clock - */ - -static void task_clock_perf_event_update(struct perf_event *event, u64 now) -{ - u64 prev; - s64 delta; - - prev = local64_xchg(&event->hw.prev_count, now); - delta = now - prev; - local64_add(delta, &event->count); -} - -static int task_clock_perf_event_enable(struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - u64 now; - - now = event->ctx->time; - - local64_set(&hwc->prev_count, now); - - perf_swevent_start_hrtimer(event); - - return 0; -} - -static void task_clock_perf_event_disable(struct perf_event *event) -{ - perf_swevent_cancel_hrtimer(event); - task_clock_perf_event_update(event, event->ctx->time); - -} - -static void task_clock_perf_event_read(struct perf_event *event) -{ - u64 time; - - if (!in_nmi()) { - update_context_time(event->ctx); - time = event->ctx->time; - } else { - u64 now = perf_clock(); - u64 delta = now - event->ctx->timestamp; - time = event->ctx->time + delta; - } - - task_clock_perf_event_update(event, time); -} - -static struct pmu perf_ops_task_clock = { - .enable = task_clock_perf_event_enable, - .disable = task_clock_perf_event_disable, - .read = task_clock_perf_event_read, -}; - /* Deref the hlist from the update side */ static inline struct swevent_hlist * swevent_hlist_deref(struct perf_cpu_context *cpuctx) @@ -4783,17 +4598,63 @@ static int swevent_hlist_get(struct perf_event *event) return err; } -#ifdef CONFIG_EVENT_TRACING +atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; -static struct pmu perf_ops_tracepoint = { - .enable = perf_trace_enable, - .disable = perf_trace_disable, +static void sw_perf_event_destroy(struct perf_event *event) +{ + u64 event_id = event->attr.config; + + WARN_ON(event->parent); + + atomic_dec(&perf_swevent_enabled[event_id]); + swevent_hlist_put(event); +} + +static int perf_swevent_init(struct perf_event *event) +{ + int event_id = event->attr.config; + + if (event->attr.type != PERF_TYPE_SOFTWARE) + return -ENOENT; + + switch (event_id) { + case PERF_COUNT_SW_CPU_CLOCK: + case PERF_COUNT_SW_TASK_CLOCK: + return -ENOENT; + + default: + break; + } + + if (event_id > PERF_COUNT_SW_MAX) + return -ENOENT; + + if (!event->parent) { + int err; + + err = swevent_hlist_get(event); + if (err) + return err; + + atomic_inc(&perf_swevent_enabled[event_id]); + event->destroy = sw_perf_event_destroy; + } + + return 0; +} + +static struct pmu perf_swevent = { + .event_init = perf_swevent_init, + .enable = perf_swevent_enable, + 
.disable = perf_swevent_disable, .start = perf_swevent_int, .stop = perf_swevent_void, .read = perf_swevent_read, - .unthrottle = perf_swevent_void, + .unthrottle = perf_swevent_void, /* hwc->interrupts already reset */ }; +#ifdef CONFIG_EVENT_TRACING + static int perf_tp_filter_match(struct perf_event *event, struct perf_sample_data *data) { @@ -4849,10 +4710,13 @@ static void tp_perf_event_destroy(struct perf_event *event) perf_trace_destroy(event); } -static struct pmu *tp_perf_event_init(struct perf_event *event) +static int perf_tp_event_init(struct perf_event *event) { int err; + if (event->attr.type != PERF_TYPE_TRACEPOINT) + return -ENOENT; + /* * Raw tracepoint data is a severe data leak, only allow root to * have these. @@ -4860,15 +4724,30 @@ static struct pmu *tp_perf_event_init(struct perf_event *event) if ((event->attr.sample_type & PERF_SAMPLE_RAW) && perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN)) - return ERR_PTR(-EPERM); + return -EPERM; err = perf_trace_init(event); if (err) - return NULL; + return err; event->destroy = tp_perf_event_destroy; - return &perf_ops_tracepoint; + return 0; +} + +static struct pmu perf_tracepoint = { + .event_init = perf_tp_event_init, + .enable = perf_trace_enable, + .disable = perf_trace_disable, + .start = perf_swevent_int, + .stop = perf_swevent_void, + .read = perf_swevent_read, + .unthrottle = perf_swevent_void, +}; + +static inline void perf_tp_register(void) +{ + perf_pmu_register(&perf_tracepoint); } static int perf_event_set_filter(struct perf_event *event, void __user *arg) @@ -4896,9 +4775,8 @@ static void perf_event_free_filter(struct perf_event *event) #else -static struct pmu *tp_perf_event_init(struct perf_event *event) +static inline void perf_tp_register(void) { - return NULL; } static int perf_event_set_filter(struct perf_event *event, void __user *arg) @@ -4913,105 +4791,247 @@ static void perf_event_free_filter(struct perf_event *event) #endif /* CONFIG_EVENT_TRACING */ #ifdef CONFIG_HAVE_HW_BREAKPOINT -static void bp_perf_event_destroy(struct perf_event *event) +void perf_bp_event(struct perf_event *bp, void *data) { - release_bp_slot(event); + struct perf_sample_data sample; + struct pt_regs *regs = data; + + perf_sample_data_init(&sample, bp->attr.bp_addr); + + if (!perf_exclude_event(bp, regs)) + perf_swevent_add(bp, 1, 1, &sample, regs); } +#endif + +/* + * hrtimer based swevent callback + */ -static struct pmu *bp_perf_event_init(struct perf_event *bp) +static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) { - int err; + enum hrtimer_restart ret = HRTIMER_RESTART; + struct perf_sample_data data; + struct pt_regs *regs; + struct perf_event *event; + u64 period; - err = register_perf_hw_breakpoint(bp); - if (err) - return ERR_PTR(err); + event = container_of(hrtimer, struct perf_event, hw.hrtimer); + event->pmu->read(event); + + perf_sample_data_init(&data, 0); + data.period = event->hw.last_period; + regs = get_irq_regs(); + + if (regs && !perf_exclude_event(event, regs)) { + if (!(event->attr.exclude_idle && current->pid == 0)) + if (perf_event_overflow(event, 0, &data, regs)) + ret = HRTIMER_NORESTART; + } - bp->destroy = bp_perf_event_destroy; + period = max_t(u64, 10000, event->hw.sample_period); + hrtimer_forward_now(hrtimer, ns_to_ktime(period)); - return &perf_ops_bp; + return ret; } -void perf_bp_event(struct perf_event *bp, void *data) +static void perf_swevent_start_hrtimer(struct perf_event *event) { - struct perf_sample_data sample; - struct pt_regs *regs = data; + struct 
hw_perf_event *hwc = &event->hw; - perf_sample_data_init(&sample, bp->attr.bp_addr); + hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hwc->hrtimer.function = perf_swevent_hrtimer; + if (hwc->sample_period) { + u64 period; - if (!perf_exclude_event(bp, regs)) - perf_swevent_add(bp, 1, 1, &sample, regs); + if (hwc->remaining) { + if (hwc->remaining < 0) + period = 10000; + else + period = hwc->remaining; + hwc->remaining = 0; + } else { + period = max_t(u64, 10000, hwc->sample_period); + } + __hrtimer_start_range_ns(&hwc->hrtimer, + ns_to_ktime(period), 0, + HRTIMER_MODE_REL, 0); + } } -#else -static struct pmu *bp_perf_event_init(struct perf_event *bp) + +static void perf_swevent_cancel_hrtimer(struct perf_event *event) { - return NULL; + struct hw_perf_event *hwc = &event->hw; + + if (hwc->sample_period) { + ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); + hwc->remaining = ktime_to_ns(remaining); + + hrtimer_cancel(&hwc->hrtimer); + } } -void perf_bp_event(struct perf_event *bp, void *regs) +/* + * Software event: cpu wall time clock + */ + +static void cpu_clock_event_update(struct perf_event *event) { + int cpu = raw_smp_processor_id(); + s64 prev; + u64 now; + + now = cpu_clock(cpu); + prev = local64_xchg(&event->hw.prev_count, now); + local64_add(now - prev, &event->count); } -#endif -atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; +static int cpu_clock_event_enable(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + int cpu = raw_smp_processor_id(); -static void sw_perf_event_destroy(struct perf_event *event) + local64_set(&hwc->prev_count, cpu_clock(cpu)); + perf_swevent_start_hrtimer(event); + + return 0; +} + +static void cpu_clock_event_disable(struct perf_event *event) { - u64 event_id = event->attr.config; + perf_swevent_cancel_hrtimer(event); + cpu_clock_event_update(event); +} - WARN_ON(event->parent); +static void cpu_clock_event_read(struct perf_event *event) +{ + cpu_clock_event_update(event); +} - atomic_dec(&perf_swevent_enabled[event_id]); - swevent_hlist_put(event); +static int cpu_clock_event_init(struct perf_event *event) +{ + if (event->attr.type != PERF_TYPE_SOFTWARE) + return -ENOENT; + + if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) + return -ENOENT; + + return 0; } -static struct pmu *sw_perf_event_init(struct perf_event *event) +static struct pmu perf_cpu_clock = { + .event_init = cpu_clock_event_init, + .enable = cpu_clock_event_enable, + .disable = cpu_clock_event_disable, + .read = cpu_clock_event_read, +}; + +/* + * Software event: task time clock + */ + +static void task_clock_event_update(struct perf_event *event, u64 now) { - struct pmu *pmu = NULL; - u64 event_id = event->attr.config; + u64 prev; + s64 delta; - /* - * Software events (currently) can't in general distinguish - * between user, kernel and hypervisor events. - * However, context switches and cpu migrations are considered - * to be kernel events, and page faults are never hypervisor - * events. - */ - switch (event_id) { - case PERF_COUNT_SW_CPU_CLOCK: - pmu = &perf_ops_cpu_clock; + prev = local64_xchg(&event->hw.prev_count, now); + delta = now - prev; + local64_add(delta, &event->count); +} - break; - case PERF_COUNT_SW_TASK_CLOCK: - /* - * If the user instantiates this as a per-cpu event, - * use the cpu_clock event instead. 
- */ - if (event->ctx->task) - pmu = &perf_ops_task_clock; - else - pmu = &perf_ops_cpu_clock; +static int task_clock_event_enable(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + u64 now; - break; - case PERF_COUNT_SW_PAGE_FAULTS: - case PERF_COUNT_SW_PAGE_FAULTS_MIN: - case PERF_COUNT_SW_PAGE_FAULTS_MAJ: - case PERF_COUNT_SW_CONTEXT_SWITCHES: - case PERF_COUNT_SW_CPU_MIGRATIONS: - case PERF_COUNT_SW_ALIGNMENT_FAULTS: - case PERF_COUNT_SW_EMULATION_FAULTS: - if (!event->parent) { - int err; - - err = swevent_hlist_get(event); - if (err) - return ERR_PTR(err); + now = event->ctx->time; - atomic_inc(&perf_swevent_enabled[event_id]); - event->destroy = sw_perf_event_destroy; + local64_set(&hwc->prev_count, now); + + perf_swevent_start_hrtimer(event); + + return 0; +} + +static void task_clock_event_disable(struct perf_event *event) +{ + perf_swevent_cancel_hrtimer(event); + task_clock_event_update(event, event->ctx->time); + +} + +static void task_clock_event_read(struct perf_event *event) +{ + u64 time; + + if (!in_nmi()) { + update_context_time(event->ctx); + time = event->ctx->time; + } else { + u64 now = perf_clock(); + u64 delta = now - event->ctx->timestamp; + time = event->ctx->time + delta; + } + + task_clock_event_update(event, time); +} + +static int task_clock_event_init(struct perf_event *event) +{ + if (event->attr.type != PERF_TYPE_SOFTWARE) + return -ENOENT; + + if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) + return -ENOENT; + + return 0; +} + +static struct pmu perf_task_clock = { + .event_init = task_clock_event_init, + .enable = task_clock_event_enable, + .disable = task_clock_event_disable, + .read = task_clock_event_read, +}; + +static LIST_HEAD(pmus); +static DEFINE_MUTEX(pmus_lock); +static struct srcu_struct pmus_srcu; + +int perf_pmu_register(struct pmu *pmu) +{ + mutex_lock(&pmus_lock); + list_add_rcu(&pmu->entry, &pmus); + mutex_unlock(&pmus_lock); + + return 0; +} + +void perf_pmu_unregister(struct pmu *pmu) +{ + mutex_lock(&pmus_lock); + list_del_rcu(&pmu->entry); + mutex_unlock(&pmus_lock); + + synchronize_srcu(&pmus_srcu); +} + +struct pmu *perf_init_event(struct perf_event *event) +{ + struct pmu *pmu = NULL; + int idx; + + idx = srcu_read_lock(&pmus_srcu); + list_for_each_entry_rcu(pmu, &pmus, entry) { + int ret = pmu->event_init(event); + if (!ret) + break; + if (ret != -ENOENT) { + pmu = ERR_PTR(ret); + break; } - pmu = &perf_ops_generic; - break; } + srcu_read_unlock(&pmus_srcu, idx); return pmu; } @@ -5092,29 +5112,8 @@ perf_event_alloc(struct perf_event_attr *attr, if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) goto done; - switch (attr->type) { - case PERF_TYPE_RAW: - case PERF_TYPE_HARDWARE: - case PERF_TYPE_HW_CACHE: - pmu = hw_perf_event_init(event); - break; - - case PERF_TYPE_SOFTWARE: - pmu = sw_perf_event_init(event); - break; - - case PERF_TYPE_TRACEPOINT: - pmu = tp_perf_event_init(event); - break; + pmu = perf_init_event(event); - case PERF_TYPE_BREAKPOINT: - pmu = bp_perf_event_init(event); - break; - - - default: - break; - } done: err = 0; if (!pmu) @@ -5979,22 +5978,15 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) return NOTIFY_OK; } -/* - * This has to have a higher priority than migration_notifier in sched.c. 
- */ -static struct notifier_block __cpuinitdata perf_cpu_nb = { - .notifier_call = perf_cpu_notify, - .priority = 20, -}; - void __init perf_event_init(void) { perf_event_init_all_cpus(); - perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE, - (void *)(long)smp_processor_id()); - perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE, - (void *)(long)smp_processor_id()); - register_cpu_notifier(&perf_cpu_nb); + init_srcu_struct(&pmus_srcu); + perf_pmu_register(&perf_swevent); + perf_pmu_register(&perf_cpu_clock); + perf_pmu_register(&perf_task_clock); + perf_tp_register(); + perf_cpu_notifier(perf_cpu_notify); } static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, -- cgit v1.2.3-70-g09d2 From 9ed6060d286b1eb55974d09080f442f809408c42 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 11 Jun 2010 17:36:35 +0200 Subject: perf: Unindent labels Fixup random annoying style bits. Signed-off-by: Peter Zijlstra Cc: paulus LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 43 ++++++++++++++++++++++++------------------- 1 file changed, 24 insertions(+), 19 deletions(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 288ce43de57..149ca18371b 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -147,7 +147,7 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags) struct perf_event_context *ctx; rcu_read_lock(); - retry: +retry: ctx = rcu_dereference(task->perf_event_ctxp); if (ctx) { /* @@ -619,7 +619,7 @@ void perf_event_disable(struct perf_event *event) return; } - retry: +retry: task_oncpu_function_call(task, __perf_event_disable, event); raw_spin_lock_irq(&ctx->lock); @@ -849,7 +849,7 @@ static void __perf_install_in_context(void *info) if (!err && !ctx->task && cpuctx->max_pertask) cpuctx->max_pertask--; - unlock: +unlock: perf_enable(); raw_spin_unlock(&ctx->lock); @@ -922,10 +922,12 @@ static void __perf_event_mark_enabled(struct perf_event *event, event->state = PERF_EVENT_STATE_INACTIVE; event->tstamp_enabled = ctx->time - event->total_time_enabled; - list_for_each_entry(sub, &event->sibling_list, group_entry) - if (sub->state >= PERF_EVENT_STATE_INACTIVE) + list_for_each_entry(sub, &event->sibling_list, group_entry) { + if (sub->state >= PERF_EVENT_STATE_INACTIVE) { sub->tstamp_enabled = ctx->time - sub->total_time_enabled; + } + } } /* @@ -991,7 +993,7 @@ static void __perf_event_enable(void *info) } } - unlock: +unlock: raw_spin_unlock(&ctx->lock); } @@ -1032,7 +1034,7 @@ void perf_event_enable(struct perf_event *event) if (event->state == PERF_EVENT_STATE_ERROR) event->state = PERF_EVENT_STATE_OFF; - retry: +retry: raw_spin_unlock_irq(&ctx->lock); task_oncpu_function_call(task, __perf_event_enable, event); @@ -1052,7 +1054,7 @@ void perf_event_enable(struct perf_event *event) if (event->state == PERF_EVENT_STATE_OFF) __perf_event_mark_enabled(event, ctx); - out: +out: raw_spin_unlock_irq(&ctx->lock); } @@ -1092,17 +1094,19 @@ static void ctx_sched_out(struct perf_event_context *ctx, if (!ctx->nr_active) goto out_enable; - if (event_type & EVENT_PINNED) + if (event_type & EVENT_PINNED) { list_for_each_entry(event, &ctx->pinned_groups, group_entry) group_sched_out(event, cpuctx, ctx); + } - if (event_type & EVENT_FLEXIBLE) + if (event_type & EVENT_FLEXIBLE) { list_for_each_entry(event, &ctx->flexible_groups, group_entry) group_sched_out(event, cpuctx, ctx); + } out_enable: perf_enable(); - out: +out: raw_spin_unlock(&ctx->lock); } @@ -1341,9 +1345,10 @@ ctx_flexible_sched_in(struct 
perf_event_context *ctx, if (event->cpu != -1 && event->cpu != smp_processor_id()) continue; - if (group_can_go_on(event, cpuctx, can_add_hw)) + if (group_can_go_on(event, cpuctx, can_add_hw)) { if (group_sched_in(event, cpuctx, ctx)) can_add_hw = 0; + } } } @@ -1373,7 +1378,7 @@ ctx_sched_in(struct perf_event_context *ctx, ctx_flexible_sched_in(ctx, cpuctx); perf_enable(); - out: +out: raw_spin_unlock(&ctx->lock); } @@ -1714,7 +1719,7 @@ static void perf_event_enable_on_exec(struct task_struct *task) raw_spin_unlock(&ctx->lock); perf_event_task_sched_in(task); - out: +out: local_irq_restore(flags); } @@ -2053,7 +2058,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu) if (!ptrace_may_access(task, PTRACE_MODE_READ)) goto errout; - retry: +retry: ctx = perf_lock_task_context(task, &flags); if (ctx) { unclone_ctx(ctx); @@ -2081,7 +2086,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu) put_task_struct(task); return ctx; - errout: +errout: put_task_struct(task); return ERR_PTR(err); } @@ -3264,7 +3269,7 @@ again: if (handle->wakeup != local_read(&buffer->wakeup)) perf_output_wakeup(handle); - out: +out: preempt_enable(); } @@ -4562,7 +4567,7 @@ static int swevent_hlist_get_cpu(struct perf_event *event, int cpu) rcu_assign_pointer(cpuctx->swevent_hlist, hlist); } cpuctx->hlist_refcount++; - exit: +exit: mutex_unlock(&cpuctx->hlist_mutex); return err; @@ -4587,7 +4592,7 @@ static int swevent_hlist_get(struct perf_event *event) put_online_cpus(); return 0; - fail: +fail: for_each_possible_cpu(cpu) { if (cpu == failed_cpu) break; -- cgit v1.2.3-70-g09d2 From 24cd7f54a0d47e1d5b3de29e2456bfbd2d8447b7 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 11 Jun 2010 17:32:03 +0200 Subject: perf: Reduce perf_disable() usage Since the current perf_disable() usage is only an optimization, remove it for now. This eases the removal of the __weak hw_perf_enable() interface. Signed-off-by: Peter Zijlstra Cc: paulus Cc: stephane eranian Cc: Robert Richter Cc: Will Deacon Cc: Paul Mundt Cc: Frederic Weisbecker Cc: Cyrill Gorcunov Cc: Lin Ming Cc: Yanmin Cc: Deng-Cheng Zhu Cc: David Miller Cc: Michael Cree LKML-Reference: Signed-off-by: Ingo Molnar --- arch/arm/kernel/perf_event.c | 3 +++ arch/powerpc/kernel/perf_event.c | 3 +++ arch/powerpc/kernel/perf_event_fsl_emb.c | 8 +++++-- arch/sh/kernel/perf_event.c | 11 +++++++--- arch/sparc/kernel/perf_event.c | 3 +++ arch/x86/kernel/cpu/perf_event.c | 22 ++++++++++++------- include/linux/perf_event.h | 20 ++++++++--------- kernel/perf_event.c | 37 +------------------------------- 8 files changed, 48 insertions(+), 59 deletions(-) (limited to 'kernel') diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index f62f9db35db..afc92c580d1 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -277,6 +277,8 @@ armpmu_enable(struct perf_event *event) int idx; int err = 0; + perf_disable(); + /* If we don't have a space for the counter then finish early. 
*/ idx = armpmu->get_event_idx(cpuc, hwc); if (idx < 0) { @@ -303,6 +305,7 @@ armpmu_enable(struct perf_event *event) perf_event_update_userpage(event); out: + perf_enable(); return err; } diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index 19131b2614b..c1408821dbc 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c @@ -861,6 +861,7 @@ void power_pmu_start_txn(struct pmu *pmu) { struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); + perf_disable(); cpuhw->group_flag |= PERF_EVENT_TXN; cpuhw->n_txn_start = cpuhw->n_events; } @@ -875,6 +876,7 @@ void power_pmu_cancel_txn(struct pmu *pmu) struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); cpuhw->group_flag &= ~PERF_EVENT_TXN; + perf_enable(); } /* @@ -901,6 +903,7 @@ int power_pmu_commit_txn(struct pmu *pmu) cpuhw->event[i]->hw.config = cpuhw->events[i]; cpuhw->group_flag &= ~PERF_EVENT_TXN; + perf_enable(); return 0; } diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c index ea6a804e43f..9bc84a7fd90 100644 --- a/arch/powerpc/kernel/perf_event_fsl_emb.c +++ b/arch/powerpc/kernel/perf_event_fsl_emb.c @@ -262,7 +262,7 @@ static int collect_events(struct perf_event *group, int max_count, return n; } -/* perf must be disabled, context locked on entry */ +/* context locked on entry */ static int fsl_emb_pmu_enable(struct perf_event *event) { struct cpu_hw_events *cpuhw; @@ -271,6 +271,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event) u64 val; int i; + perf_disable(); cpuhw = &get_cpu_var(cpu_hw_events); if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) @@ -310,15 +311,17 @@ static int fsl_emb_pmu_enable(struct perf_event *event) ret = 0; out: put_cpu_var(cpu_hw_events); + perf_enable(); return ret; } -/* perf must be disabled, context locked on entry */ +/* context locked on entry */ static void fsl_emb_pmu_disable(struct perf_event *event) { struct cpu_hw_events *cpuhw; int i = event->hw.idx; + perf_disable(); if (i < 0) goto out; @@ -346,6 +349,7 @@ static void fsl_emb_pmu_disable(struct perf_event *event) cpuhw->n_events--; out: + perf_enable(); put_cpu_var(cpu_hw_events); } diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c index 8cb206597e0..d042989ceb4 100644 --- a/arch/sh/kernel/perf_event.c +++ b/arch/sh/kernel/perf_event.c @@ -230,11 +230,14 @@ static int sh_pmu_enable(struct perf_event *event) struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; + int ret = -EAGAIN; + + perf_disable(); if (test_and_set_bit(idx, cpuc->used_mask)) { idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events); if (idx == sh_pmu->num_events) - return -EAGAIN; + goto out; set_bit(idx, cpuc->used_mask); hwc->idx = idx; @@ -248,8 +251,10 @@ static int sh_pmu_enable(struct perf_event *event) sh_pmu->enable(hwc, idx); perf_event_update_userpage(event); - - return 0; + ret = 0; +out: + perf_enable(); + return ret; } static void sh_pmu_read(struct perf_event *event) diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index bed4327f5a7..d0131deeeaf 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -1113,6 +1113,7 @@ static void sparc_pmu_start_txn(struct pmu *pmu) { struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); + perf_disable(); cpuhw->group_flag |= PERF_EVENT_TXN; } @@ -1126,6 +1127,7 @@ static void sparc_pmu_cancel_txn(struct pmu *pmu) struct cpu_hw_events *cpuhw 
= &__get_cpu_var(cpu_hw_events); cpuhw->group_flag &= ~PERF_EVENT_TXN; + perf_enable(); } /* @@ -1149,6 +1151,7 @@ static int sparc_pmu_commit_txn(struct pmu *pmu) return -EAGAIN; cpuc->group_flag &= ~PERF_EVENT_TXN; + perf_enable(); return 0; } diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 2c89264ee79..846070ce49c 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -969,10 +969,11 @@ static int x86_pmu_enable(struct perf_event *event) hwc = &event->hw; + perf_disable(); n0 = cpuc->n_events; - n = collect_events(cpuc, event, false); - if (n < 0) - return n; + ret = n = collect_events(cpuc, event, false); + if (ret < 0) + goto out; /* * If group events scheduling transaction was started, @@ -980,23 +981,26 @@ static int x86_pmu_enable(struct perf_event *event) * at commit time(->commit_txn) as a whole */ if (cpuc->group_flag & PERF_EVENT_TXN) - goto out; + goto done_collect; ret = x86_pmu.schedule_events(cpuc, n, assign); if (ret) - return ret; + goto out; /* * copy new assignment, now we know it is possible * will be used by hw_perf_enable() */ memcpy(cpuc->assign, assign, n*sizeof(int)); -out: +done_collect: cpuc->n_events = n; cpuc->n_added += n - n0; cpuc->n_txn += n - n0; - return 0; + ret = 0; +out: + perf_enable(); + return ret; } static int x86_pmu_start(struct perf_event *event) @@ -1432,6 +1436,7 @@ static void x86_pmu_start_txn(struct pmu *pmu) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + perf_disable(); cpuc->group_flag |= PERF_EVENT_TXN; cpuc->n_txn = 0; } @@ -1451,6 +1456,7 @@ static void x86_pmu_cancel_txn(struct pmu *pmu) */ cpuc->n_added -= cpuc->n_txn; cpuc->n_events -= cpuc->n_txn; + perf_enable(); } /* @@ -1480,7 +1486,7 @@ static int x86_pmu_commit_txn(struct pmu *pmu) memcpy(cpuc->assign, assign, n*sizeof(int)); cpuc->group_flag &= ~PERF_EVENT_TXN; - + perf_enable(); return 0; } diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index ab72f56eb37..243286a8ded 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -564,26 +564,26 @@ struct pmu { struct list_head entry; /* - * Should return -ENOENT when the @event doesn't match this pmu + * Should return -ENOENT when the @event doesn't match this PMU. */ int (*event_init) (struct perf_event *event); - int (*enable) (struct perf_event *event); + int (*enable) (struct perf_event *event); void (*disable) (struct perf_event *event); - int (*start) (struct perf_event *event); + int (*start) (struct perf_event *event); void (*stop) (struct perf_event *event); void (*read) (struct perf_event *event); void (*unthrottle) (struct perf_event *event); /* - * Group events scheduling is treated as a transaction, add group - * events as a whole and perform one schedulability test. If the test - * fails, roll back the whole group + * Group events scheduling is treated as a transaction, add + * group events as a whole and perform one schedulability test. + * If the test fails, roll back the whole group */ /* - * Start the transaction, after this ->enable() doesn't need - * to do schedulability tests. + * Start the transaction, after this ->enable() doesn't need to + * do schedulability tests. */ void (*start_txn) (struct pmu *pmu); /* @@ -594,8 +594,8 @@ struct pmu { */ int (*commit_txn) (struct pmu *pmu); /* - * Will cancel the transaction, assumes ->disable() is called for - * each successfull ->enable() during the transaction. 
+ * Will cancel the transaction, assumes ->disable() is called + * for each successfull ->enable() during the transaction. */ void (*cancel_txn) (struct pmu *pmu); }; diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 149ca18371b..9a98ce95356 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -478,11 +478,6 @@ static void __perf_event_remove_from_context(void *info) return; raw_spin_lock(&ctx->lock); - /* - * Protect the list operation against NMI by disabling the - * events on a global level. - */ - perf_disable(); event_sched_out(event, cpuctx, ctx); @@ -498,7 +493,6 @@ static void __perf_event_remove_from_context(void *info) perf_max_events - perf_reserved_percpu); } - perf_enable(); raw_spin_unlock(&ctx->lock); } @@ -803,12 +797,6 @@ static void __perf_install_in_context(void *info) ctx->is_active = 1; update_context_time(ctx); - /* - * Protect the list operation against NMI by disabling the - * events on a global level. NOP for non NMI based events. - */ - perf_disable(); - add_event_to_ctx(event, ctx); if (event->cpu != -1 && event->cpu != smp_processor_id()) @@ -850,8 +838,6 @@ static void __perf_install_in_context(void *info) cpuctx->max_pertask--; unlock: - perf_enable(); - raw_spin_unlock(&ctx->lock); } @@ -972,12 +958,10 @@ static void __perf_event_enable(void *info) if (!group_can_go_on(event, cpuctx, 1)) { err = -EEXIST; } else { - perf_disable(); if (event == leader) err = group_sched_in(event, cpuctx, ctx); else err = event_sched_in(event, cpuctx, ctx); - perf_enable(); } if (err) { @@ -1090,9 +1074,8 @@ static void ctx_sched_out(struct perf_event_context *ctx, goto out; update_context_time(ctx); - perf_disable(); if (!ctx->nr_active) - goto out_enable; + goto out; if (event_type & EVENT_PINNED) { list_for_each_entry(event, &ctx->pinned_groups, group_entry) @@ -1103,9 +1086,6 @@ static void ctx_sched_out(struct perf_event_context *ctx, list_for_each_entry(event, &ctx->flexible_groups, group_entry) group_sched_out(event, cpuctx, ctx); } - - out_enable: - perf_enable(); out: raw_spin_unlock(&ctx->lock); } @@ -1364,8 +1344,6 @@ ctx_sched_in(struct perf_event_context *ctx, ctx->timestamp = perf_clock(); - perf_disable(); - /* * First go through the list and put on any pinned groups * in order to give them the best chance of going on. 
@@ -1377,7 +1355,6 @@ ctx_sched_in(struct perf_event_context *ctx, if (event_type & EVENT_FLEXIBLE) ctx_flexible_sched_in(ctx, cpuctx); - perf_enable(); out: raw_spin_unlock(&ctx->lock); } @@ -1425,8 +1402,6 @@ void perf_event_task_sched_in(struct task_struct *task) if (cpuctx->task_ctx == ctx) return; - perf_disable(); - /* * We want to keep the following priority order: * cpu pinned (that don't need to move), task pinned, @@ -1439,8 +1414,6 @@ void perf_event_task_sched_in(struct task_struct *task) ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE); cpuctx->task_ctx = ctx; - - perf_enable(); } #define MAX_INTERRUPTS (~0ULL) @@ -1555,11 +1528,9 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count) hwc->sample_period = sample_period; if (local64_read(&hwc->period_left) > 8*sample_period) { - perf_disable(); perf_event_stop(event); local64_set(&hwc->period_left, 0); perf_event_start(event); - perf_enable(); } } @@ -1588,15 +1559,12 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx) */ if (interrupts == MAX_INTERRUPTS) { perf_log_throttle(event, 1); - perf_disable(); event->pmu->unthrottle(event); - perf_enable(); } if (!event->attr.freq || !event->attr.sample_freq) continue; - perf_disable(); event->pmu->read(event); now = local64_read(&event->count); delta = now - hwc->freq_count_stamp; @@ -1604,7 +1572,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx) if (delta > 0) perf_adjust_period(event, TICK_NSEC, delta); - perf_enable(); } raw_spin_unlock(&ctx->lock); } @@ -1647,7 +1614,6 @@ void perf_event_task_tick(struct task_struct *curr) if (!rotate) return; - perf_disable(); cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); if (ctx) task_ctx_sched_out(ctx, EVENT_FLEXIBLE); @@ -1659,7 +1625,6 @@ void perf_event_task_tick(struct task_struct *curr) cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE); if (ctx) task_ctx_sched_in(curr, EVENT_FLEXIBLE); - perf_enable(); } static int event_enable_on_exec(struct perf_event *event, -- cgit v1.2.3-70-g09d2 From 33696fc0d141bbbcb12f75b69608ea83282e3117 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 14 Jun 2010 08:49:00 +0200 Subject: perf: Per PMU disable Changes perf_disable() into perf_pmu_disable(). Signed-off-by: Peter Zijlstra Cc: paulus Cc: stephane eranian Cc: Robert Richter Cc: Will Deacon Cc: Paul Mundt Cc: Frederic Weisbecker Cc: Cyrill Gorcunov Cc: Lin Ming Cc: Yanmin Cc: Deng-Cheng Zhu Cc: David Miller Cc: Michael Cree LKML-Reference: Signed-off-by: Ingo Molnar --- arch/alpha/kernel/perf_event.c | 30 +++++++++++++------------ arch/arm/kernel/perf_event.c | 28 +++++++++++------------ arch/powerpc/kernel/perf_event.c | 24 +++++++++++--------- arch/powerpc/kernel/perf_event_fsl_emb.c | 18 ++++++++------- arch/sh/kernel/perf_event.c | 38 +++++++++++++++++--------------- arch/sparc/kernel/perf_event.c | 20 +++++++++-------- arch/x86/kernel/cpu/perf_event.c | 16 ++++++++------ include/linux/perf_event.h | 13 ++++++----- kernel/perf_event.c | 31 ++++++++++++++++---------- 9 files changed, 119 insertions(+), 99 deletions(-) (limited to 'kernel') diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c index 19660b5c298..3e260731f8e 100644 --- a/arch/alpha/kernel/perf_event.c +++ b/arch/alpha/kernel/perf_event.c @@ -435,7 +435,7 @@ static int alpha_pmu_enable(struct perf_event *event) * nevertheless we disable the PMCs first to enable a potential * final PMI to occur before we disable interrupts. 
*/ - perf_disable(); + perf_pmu_disable(event->pmu); local_irq_save(flags); /* Default to error to be returned */ @@ -456,7 +456,7 @@ static int alpha_pmu_enable(struct perf_event *event) } local_irq_restore(flags); - perf_enable(); + perf_pmu_enable(event->pmu); return ret; } @@ -474,7 +474,7 @@ static void alpha_pmu_disable(struct perf_event *event) unsigned long flags; int j; - perf_disable(); + perf_pmu_disable(event->pmu); local_irq_save(flags); for (j = 0; j < cpuc->n_events; j++) { @@ -502,7 +502,7 @@ static void alpha_pmu_disable(struct perf_event *event) } local_irq_restore(flags); - perf_enable(); + perf_pmu_enable(event->pmu); } @@ -668,18 +668,10 @@ static int alpha_pmu_event_init(struct perf_event *event) return err; } -static struct pmu pmu = { - .event_init = alpha_pmu_event_init, - .enable = alpha_pmu_enable, - .disable = alpha_pmu_disable, - .read = alpha_pmu_read, - .unthrottle = alpha_pmu_unthrottle, -}; - /* * Main entry point - enable HW performance counters. */ -void hw_perf_enable(void) +static void alpha_pmu_pmu_enable(struct pmu *pmu) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); @@ -705,7 +697,7 @@ void hw_perf_enable(void) * Main entry point - disable HW performance counters. */ -void hw_perf_disable(void) +static void alpha_pmu_pmu_disable(struct pmu *pmu) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); @@ -718,6 +710,16 @@ void hw_perf_disable(void) wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask); } +static struct pmu pmu = { + .pmu_enable = alpha_pmu_pmu_enable, + .pmu_disable = alpha_pmu_pmu_disable, + .event_init = alpha_pmu_event_init, + .enable = alpha_pmu_enable, + .disable = alpha_pmu_disable, + .read = alpha_pmu_read, + .unthrottle = alpha_pmu_unthrottle, +}; + /* * Main entry point - don't know when this is called but it diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index afc92c580d1..3343f3f4b97 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -277,7 +277,7 @@ armpmu_enable(struct perf_event *event) int idx; int err = 0; - perf_disable(); + perf_pmu_disable(event->pmu); /* If we don't have a space for the counter then finish early. */ idx = armpmu->get_event_idx(cpuc, hwc); @@ -305,7 +305,7 @@ armpmu_enable(struct perf_event *event) perf_event_update_userpage(event); out: - perf_enable(); + perf_pmu_enable(event->pmu); return err; } @@ -534,16 +534,7 @@ static int armpmu_event_init(struct perf_event *event) return err; } -static struct pmu pmu = { - .event_init = armpmu_event_init, - .enable = armpmu_enable, - .disable = armpmu_disable, - .unthrottle = armpmu_unthrottle, - .read = armpmu_read, -}; - -void -hw_perf_enable(void) +static void armpmu_pmu_enable(struct pmu *pmu) { /* Enable all of the perf events on hardware. */ int idx; @@ -564,13 +555,22 @@ hw_perf_enable(void) armpmu->start(); } -void -hw_perf_disable(void) +static void armpmu_pmu_disable(struct pmu *pmu) { if (armpmu) armpmu->stop(); } +static struct pmu pmu = { + .pmu_enable = armpmu_pmu_enable, + .pmu_disable= armpmu_pmu_disable, + .event_init = armpmu_event_init, + .enable = armpmu_enable, + .disable = armpmu_disable, + .unthrottle = armpmu_unthrottle, + .read = armpmu_read, +}; + /* * ARMv6 Performance counter handling code. 
* diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index c1408821dbc..deb84bbcb0e 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c @@ -517,7 +517,7 @@ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0) * Disable all events to prevent PMU interrupts and to allow * events to be added or removed. */ -void hw_perf_disable(void) +static void power_pmu_pmu_disable(struct pmu *pmu) { struct cpu_hw_events *cpuhw; unsigned long flags; @@ -565,7 +565,7 @@ void hw_perf_disable(void) * If we were previously disabled and events were added, then * put the new config on the PMU. */ -void hw_perf_enable(void) +static void power_pmu_pmu_enable(struct pmu *pmu) { struct perf_event *event; struct cpu_hw_events *cpuhw; @@ -735,7 +735,7 @@ static int power_pmu_enable(struct perf_event *event) int ret = -EAGAIN; local_irq_save(flags); - perf_disable(); + perf_pmu_disable(event->pmu); /* * Add the event to the list (if there is room) @@ -769,7 +769,7 @@ nocheck: ret = 0; out: - perf_enable(); + perf_pmu_enable(event->pmu); local_irq_restore(flags); return ret; } @@ -784,7 +784,7 @@ static void power_pmu_disable(struct perf_event *event) unsigned long flags; local_irq_save(flags); - perf_disable(); + perf_pmu_disable(event->pmu); power_pmu_read(event); @@ -821,7 +821,7 @@ static void power_pmu_disable(struct perf_event *event) cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE); } - perf_enable(); + perf_pmu_enable(event->pmu); local_irq_restore(flags); } @@ -837,7 +837,7 @@ static void power_pmu_unthrottle(struct perf_event *event) if (!event->hw.idx || !event->hw.sample_period) return; local_irq_save(flags); - perf_disable(); + perf_pmu_disable(event->pmu); power_pmu_read(event); left = event->hw.sample_period; event->hw.last_period = left; @@ -848,7 +848,7 @@ static void power_pmu_unthrottle(struct perf_event *event) local64_set(&event->hw.prev_count, val); local64_set(&event->hw.period_left, left); perf_event_update_userpage(event); - perf_enable(); + perf_pmu_enable(event->pmu); local_irq_restore(flags); } @@ -861,7 +861,7 @@ void power_pmu_start_txn(struct pmu *pmu) { struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); - perf_disable(); + perf_pmu_disable(pmu); cpuhw->group_flag |= PERF_EVENT_TXN; cpuhw->n_txn_start = cpuhw->n_events; } @@ -876,7 +876,7 @@ void power_pmu_cancel_txn(struct pmu *pmu) struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); cpuhw->group_flag &= ~PERF_EVENT_TXN; - perf_enable(); + perf_pmu_enable(pmu); } /* @@ -903,7 +903,7 @@ int power_pmu_commit_txn(struct pmu *pmu) cpuhw->event[i]->hw.config = cpuhw->events[i]; cpuhw->group_flag &= ~PERF_EVENT_TXN; - perf_enable(); + perf_pmu_enable(pmu); return 0; } @@ -1131,6 +1131,8 @@ static int power_pmu_event_init(struct perf_event *event) } struct pmu power_pmu = { + .pmu_enable = power_pmu_pmu_enable, + .pmu_disable = power_pmu_pmu_disable, .event_init = power_pmu_event_init, .enable = power_pmu_enable, .disable = power_pmu_disable, diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c index 9bc84a7fd90..84b1974c628 100644 --- a/arch/powerpc/kernel/perf_event_fsl_emb.c +++ b/arch/powerpc/kernel/perf_event_fsl_emb.c @@ -177,7 +177,7 @@ static void fsl_emb_pmu_read(struct perf_event *event) * Disable all events to prevent PMU interrupts and to allow * events to be added or removed. 
*/ -void hw_perf_disable(void) +static void fsl_emb_pmu_pmu_disable(struct pmu *pmu) { struct cpu_hw_events *cpuhw; unsigned long flags; @@ -216,7 +216,7 @@ void hw_perf_disable(void) * If we were previously disabled and events were added, then * put the new config on the PMU. */ -void hw_perf_enable(void) +static void fsl_emb_pmu_pmu_enable(struct pmu *pmu) { struct cpu_hw_events *cpuhw; unsigned long flags; @@ -271,7 +271,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event) u64 val; int i; - perf_disable(); + perf_pmu_disable(event->pmu); cpuhw = &get_cpu_var(cpu_hw_events); if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) @@ -311,7 +311,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event) ret = 0; out: put_cpu_var(cpu_hw_events); - perf_enable(); + perf_pmu_enable(event->pmu); return ret; } @@ -321,7 +321,7 @@ static void fsl_emb_pmu_disable(struct perf_event *event) struct cpu_hw_events *cpuhw; int i = event->hw.idx; - perf_disable(); + perf_pmu_disable(event->pmu); if (i < 0) goto out; @@ -349,7 +349,7 @@ static void fsl_emb_pmu_disable(struct perf_event *event) cpuhw->n_events--; out: - perf_enable(); + perf_pmu_enable(event->pmu); put_cpu_var(cpu_hw_events); } @@ -367,7 +367,7 @@ static void fsl_emb_pmu_unthrottle(struct perf_event *event) if (event->hw.idx < 0 || !event->hw.sample_period) return; local_irq_save(flags); - perf_disable(); + perf_pmu_disable(event->pmu); fsl_emb_pmu_read(event); left = event->hw.sample_period; event->hw.last_period = left; @@ -378,7 +378,7 @@ static void fsl_emb_pmu_unthrottle(struct perf_event *event) local64_set(&event->hw.prev_count, val); local64_set(&event->hw.period_left, left); perf_event_update_userpage(event); - perf_enable(); + perf_pmu_enable(event->pmu); local_irq_restore(flags); } @@ -524,6 +524,8 @@ static int fsl_emb_pmu_event_init(struct perf_event *event) } static struct pmu fsl_emb_pmu = { + .pmu_enable = fsl_emb_pmu_pmu_enable, + .pmu_disable = fsl_emb_pmu_pmu_disable, .event_init = fsl_emb_pmu_event_init, .enable = fsl_emb_pmu_enable, .disable = fsl_emb_pmu_disable, diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c index d042989ceb4..4bbe19058a5 100644 --- a/arch/sh/kernel/perf_event.c +++ b/arch/sh/kernel/perf_event.c @@ -232,7 +232,7 @@ static int sh_pmu_enable(struct perf_event *event) int idx = hwc->idx; int ret = -EAGAIN; - perf_disable(); + perf_pmu_disable(event->pmu); if (test_and_set_bit(idx, cpuc->used_mask)) { idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events); @@ -253,7 +253,7 @@ static int sh_pmu_enable(struct perf_event *event) perf_event_update_userpage(event); ret = 0; out: - perf_enable(); + perf_pmu_enable(event->pmu); return ret; } @@ -285,7 +285,25 @@ static int sh_pmu_event_init(struct perf_event *event) return err; } +static void sh_pmu_pmu_enable(struct pmu *pmu) +{ + if (!sh_pmu_initialized()) + return; + + sh_pmu->enable_all(); +} + +static void sh_pmu_pmu_disable(struct pmu *pmu) +{ + if (!sh_pmu_initialized()) + return; + + sh_pmu->disable_all(); +} + static struct pmu pmu = { + .pmu_enable = sh_pmu_pmu_enable, + .pmu_disable = sh_pmu_pmu_disable, .event_init = sh_pmu_event_init, .enable = sh_pmu_enable, .disable = sh_pmu_disable, @@ -316,22 +334,6 @@ sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) return NOTIFY_OK; } -void hw_perf_enable(void) -{ - if (!sh_pmu_initialized()) - return; - - sh_pmu->enable_all(); -} - -void hw_perf_disable(void) -{ - if (!sh_pmu_initialized()) - return; - - sh_pmu->disable_all(); -} - int 
__cpuinit register_sh_pmu(struct sh_pmu *pmu) { if (sh_pmu) diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index d0131deeeaf..37cae676536 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -664,7 +664,7 @@ out: return pcr; } -void hw_perf_enable(void) +static void sparc_pmu_pmu_enable(struct pmu *pmu) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); u64 pcr; @@ -691,7 +691,7 @@ void hw_perf_enable(void) pcr_ops->write(cpuc->pcr); } -void hw_perf_disable(void) +static void sparc_pmu_pmu_disable(struct pmu *pmu) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); u64 val; @@ -718,7 +718,7 @@ static void sparc_pmu_disable(struct perf_event *event) int i; local_irq_save(flags); - perf_disable(); + perf_pmu_disable(event->pmu); for (i = 0; i < cpuc->n_events; i++) { if (event == cpuc->event[i]) { @@ -748,7 +748,7 @@ static void sparc_pmu_disable(struct perf_event *event) } } - perf_enable(); + perf_pmu_enable(event->pmu); local_irq_restore(flags); } @@ -991,7 +991,7 @@ static int sparc_pmu_enable(struct perf_event *event) unsigned long flags; local_irq_save(flags); - perf_disable(); + perf_pmu_disable(event->pmu); n0 = cpuc->n_events; if (n0 >= perf_max_events) @@ -1020,7 +1020,7 @@ nocheck: ret = 0; out: - perf_enable(); + perf_pmu_enable(event->pmu); local_irq_restore(flags); return ret; } @@ -1113,7 +1113,7 @@ static void sparc_pmu_start_txn(struct pmu *pmu) { struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); - perf_disable(); + perf_pmu_disable(pmu); cpuhw->group_flag |= PERF_EVENT_TXN; } @@ -1127,7 +1127,7 @@ static void sparc_pmu_cancel_txn(struct pmu *pmu) struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); cpuhw->group_flag &= ~PERF_EVENT_TXN; - perf_enable(); + perf_pmu_enable(pmu); } /* @@ -1151,11 +1151,13 @@ static int sparc_pmu_commit_txn(struct pmu *pmu) return -EAGAIN; cpuc->group_flag &= ~PERF_EVENT_TXN; - perf_enable(); + perf_pmu_enable(pmu); return 0; } static struct pmu pmu = { + .pmu_enable = sparc_pmu_pmu_enable, + .pmu_disable = sparc_pmu_pmu_disable, .event_init = sparc_pmu_event_init, .enable = sparc_pmu_enable, .disable = sparc_pmu_disable, diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 846070ce49c..79705ac4501 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -583,7 +583,7 @@ static void x86_pmu_disable_all(void) } } -void hw_perf_disable(void) +static void x86_pmu_pmu_disable(struct pmu *pmu) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); @@ -803,7 +803,7 @@ static inline int match_prev_assignment(struct hw_perf_event *hwc, static int x86_pmu_start(struct perf_event *event); static void x86_pmu_stop(struct perf_event *event); -void hw_perf_enable(void) +static void x86_pmu_pmu_enable(struct pmu *pmu) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct perf_event *event; @@ -969,7 +969,7 @@ static int x86_pmu_enable(struct perf_event *event) hwc = &event->hw; - perf_disable(); + perf_pmu_disable(event->pmu); n0 = cpuc->n_events; ret = n = collect_events(cpuc, event, false); if (ret < 0) @@ -999,7 +999,7 @@ done_collect: ret = 0; out: - perf_enable(); + perf_pmu_enable(event->pmu); return ret; } @@ -1436,7 +1436,7 @@ static void x86_pmu_start_txn(struct pmu *pmu) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); - perf_disable(); + perf_pmu_disable(pmu); cpuc->group_flag |= PERF_EVENT_TXN; cpuc->n_txn = 0; } @@ -1456,7 +1456,7 @@ static void 
x86_pmu_cancel_txn(struct pmu *pmu) */ cpuc->n_added -= cpuc->n_txn; cpuc->n_events -= cpuc->n_txn; - perf_enable(); + perf_pmu_enable(pmu); } /* @@ -1486,7 +1486,7 @@ static int x86_pmu_commit_txn(struct pmu *pmu) memcpy(cpuc->assign, assign, n*sizeof(int)); cpuc->group_flag &= ~PERF_EVENT_TXN; - perf_enable(); + perf_pmu_enable(pmu); return 0; } @@ -1605,6 +1605,8 @@ int x86_pmu_event_init(struct perf_event *event) } static struct pmu pmu = { + .pmu_enable = x86_pmu_pmu_enable, + .pmu_disable = x86_pmu_pmu_disable, .event_init = x86_pmu_event_init, .enable = x86_pmu_enable, .disable = x86_pmu_disable, diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 243286a8ded..6abf103fb7f 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -563,6 +563,11 @@ struct perf_event; struct pmu { struct list_head entry; + int *pmu_disable_count; + + void (*pmu_enable) (struct pmu *pmu); + void (*pmu_disable) (struct pmu *pmu); + /* * Should return -ENOENT when the @event doesn't match this PMU. */ @@ -868,10 +873,8 @@ extern void perf_event_free_task(struct task_struct *task); extern void set_perf_event_pending(void); extern void perf_event_do_pending(void); extern void perf_event_print_debug(void); -extern void __perf_disable(void); -extern bool __perf_enable(void); -extern void perf_disable(void); -extern void perf_enable(void); +extern void perf_pmu_disable(struct pmu *pmu); +extern void perf_pmu_enable(struct pmu *pmu); extern int perf_event_task_disable(void); extern int perf_event_task_enable(void); extern void perf_event_update_userpage(struct perf_event *event); @@ -1056,8 +1059,6 @@ static inline void perf_event_exit_task(struct task_struct *child) { } static inline void perf_event_free_task(struct task_struct *task) { } static inline void perf_event_do_pending(void) { } static inline void perf_event_print_debug(void) { } -static inline void perf_disable(void) { } -static inline void perf_enable(void) { } static inline int perf_event_task_disable(void) { return -EINVAL; } static inline int perf_event_task_enable(void) { return -EINVAL; } diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 9a98ce95356..5ed0c06765b 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -71,23 +71,20 @@ static atomic64_t perf_event_id; */ static DEFINE_SPINLOCK(perf_resource_lock); -void __weak hw_perf_disable(void) { barrier(); } -void __weak hw_perf_enable(void) { barrier(); } - void __weak perf_event_print_debug(void) { } -static DEFINE_PER_CPU(int, perf_disable_count); - -void perf_disable(void) +void perf_pmu_disable(struct pmu *pmu) { - if (!__get_cpu_var(perf_disable_count)++) - hw_perf_disable(); + int *count = this_cpu_ptr(pmu->pmu_disable_count); + if (!(*count)++) + pmu->pmu_disable(pmu); } -void perf_enable(void) +void perf_pmu_enable(struct pmu *pmu) { - if (!--__get_cpu_var(perf_disable_count)) - hw_perf_enable(); + int *count = this_cpu_ptr(pmu->pmu_disable_count); + if (!--(*count)) + pmu->pmu_enable(pmu); } static void get_ctx(struct perf_event_context *ctx) @@ -4970,11 +4967,19 @@ static struct srcu_struct pmus_srcu; int perf_pmu_register(struct pmu *pmu) { + int ret; + mutex_lock(&pmus_lock); + ret = -ENOMEM; + pmu->pmu_disable_count = alloc_percpu(int); + if (!pmu->pmu_disable_count) + goto unlock; list_add_rcu(&pmu->entry, &pmus); + ret = 0; +unlock: mutex_unlock(&pmus_lock); - return 0; + return ret; } void perf_pmu_unregister(struct pmu *pmu) @@ -4984,6 +4989,8 @@ void perf_pmu_unregister(struct pmu *pmu) mutex_unlock(&pmus_lock); 
synchronize_srcu(&pmus_srcu); + + free_percpu(pmu->pmu_disable_count); } struct pmu *perf_init_event(struct perf_event *event) -- cgit v1.2.3-70-g09d2 From ad5133b7030d04ce7701aa7cbe98f561347c79c2 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 15 Jun 2010 12:22:39 +0200 Subject: perf: Default PMU ops Provide default implementations for the pmu txn methods, this allows us to remove some conditional code. Signed-off-by: Peter Zijlstra Cc: paulus Cc: stephane eranian Cc: Robert Richter Cc: Will Deacon Cc: Paul Mundt Cc: Frederic Weisbecker Cc: Cyrill Gorcunov Cc: Lin Ming Cc: Yanmin Cc: Deng-Cheng Zhu Cc: David Miller Cc: Michael Cree LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 10 ++++---- kernel/perf_event.c | 64 +++++++++++++++++++++++++++++++++++++--------- 2 files changed, 57 insertions(+), 17 deletions(-) (limited to 'kernel') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 6abf103fb7f..bf85733597e 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -565,8 +565,8 @@ struct pmu { int *pmu_disable_count; - void (*pmu_enable) (struct pmu *pmu); - void (*pmu_disable) (struct pmu *pmu); + void (*pmu_enable) (struct pmu *pmu); /* optional */ + void (*pmu_disable) (struct pmu *pmu); /* optional */ /* * Should return -ENOENT when the @event doesn't match this PMU. @@ -590,19 +590,19 @@ struct pmu { * Start the transaction, after this ->enable() doesn't need to * do schedulability tests. */ - void (*start_txn) (struct pmu *pmu); + void (*start_txn) (struct pmu *pmu); /* optional */ /* * If ->start_txn() disabled the ->enable() schedulability test * then ->commit_txn() is required to perform one. On success * the transaction is closed. On error the transaction is kept * open until ->cancel_txn() is called. */ - int (*commit_txn) (struct pmu *pmu); + int (*commit_txn) (struct pmu *pmu); /* optional */ /* * Will cancel the transaction, assumes ->disable() is called * for each successfull ->enable() during the transaction. 
*/ - void (*cancel_txn) (struct pmu *pmu); + void (*cancel_txn) (struct pmu *pmu); /* optional */ }; /** diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 5ed0c06765b..8ef4ba3bcb1 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -674,21 +674,14 @@ group_sched_in(struct perf_event *group_event, { struct perf_event *event, *partial_group = NULL; struct pmu *pmu = group_event->pmu; - bool txn = false; if (group_event->state == PERF_EVENT_STATE_OFF) return 0; - /* Check if group transaction availabe */ - if (pmu->start_txn) - txn = true; - - if (txn) - pmu->start_txn(pmu); + pmu->start_txn(pmu); if (event_sched_in(group_event, cpuctx, ctx)) { - if (txn) - pmu->cancel_txn(pmu); + pmu->cancel_txn(pmu); return -EAGAIN; } @@ -702,7 +695,7 @@ group_sched_in(struct perf_event *group_event, } } - if (!txn || !pmu->commit_txn(pmu)) + if (!pmu->commit_txn(pmu)) return 0; group_error: @@ -717,8 +710,7 @@ group_error: } event_sched_out(group_event, cpuctx, ctx); - if (txn) - pmu->cancel_txn(pmu); + pmu->cancel_txn(pmu); return -EAGAIN; } @@ -4965,6 +4957,31 @@ static LIST_HEAD(pmus); static DEFINE_MUTEX(pmus_lock); static struct srcu_struct pmus_srcu; +static void perf_pmu_nop_void(struct pmu *pmu) +{ +} + +static int perf_pmu_nop_int(struct pmu *pmu) +{ + return 0; +} + +static void perf_pmu_start_txn(struct pmu *pmu) +{ + perf_pmu_disable(pmu); +} + +static int perf_pmu_commit_txn(struct pmu *pmu) +{ + perf_pmu_enable(pmu); + return 0; +} + +static void perf_pmu_cancel_txn(struct pmu *pmu) +{ + perf_pmu_enable(pmu); +} + int perf_pmu_register(struct pmu *pmu) { int ret; @@ -4974,6 +4991,29 @@ int perf_pmu_register(struct pmu *pmu) pmu->pmu_disable_count = alloc_percpu(int); if (!pmu->pmu_disable_count) goto unlock; + + if (!pmu->start_txn) { + if (pmu->pmu_enable) { + /* + * If we have pmu_enable/pmu_disable calls, install + * transaction stubs that use that to try and batch + * hardware accesses. + */ + pmu->start_txn = perf_pmu_start_txn; + pmu->commit_txn = perf_pmu_commit_txn; + pmu->cancel_txn = perf_pmu_cancel_txn; + } else { + pmu->start_txn = perf_pmu_nop_void; + pmu->commit_txn = perf_pmu_nop_int; + pmu->cancel_txn = perf_pmu_nop_void; + } + } + + if (!pmu->pmu_enable) { + pmu->pmu_enable = perf_pmu_nop_void; + pmu->pmu_disable = perf_pmu_nop_void; + } + list_add_rcu(&pmu->entry, &pmus); ret = 0; unlock: -- cgit v1.2.3-70-g09d2 From fa407f35e0298d841e4088f95a7f9cf6e725c6d5 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 24 Jun 2010 12:35:12 +0200 Subject: perf: Shrink hw_perf_event Use hw_perf_event::period_left instead of hw_perf_event::remaining and win back 8 bytes. 
Signed-off-by: Peter Zijlstra Cc: paulus Cc: Frederic Weisbecker LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 1 - kernel/perf_event.c | 13 ++++++------- 2 files changed, 6 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index bf85733597e..8cafa15af60 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -529,7 +529,6 @@ struct hw_perf_event { int last_cpu; }; struct { /* software */ - s64 remaining; struct hrtimer hrtimer; }; #ifdef CONFIG_HAVE_HW_BREAKPOINT diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 8ef4ba3bcb1..1a6cdbf0d09 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -4800,14 +4800,13 @@ static void perf_swevent_start_hrtimer(struct perf_event *event) hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); hwc->hrtimer.function = perf_swevent_hrtimer; if (hwc->sample_period) { - u64 period; + s64 period = local64_read(&hwc->period_left); - if (hwc->remaining) { - if (hwc->remaining < 0) + if (period) { + if (period < 0) period = 10000; - else - period = hwc->remaining; - hwc->remaining = 0; + + local64_set(&hwc->period_left, 0); } else { period = max_t(u64, 10000, hwc->sample_period); } @@ -4823,7 +4822,7 @@ static void perf_swevent_cancel_hrtimer(struct perf_event *event) if (hwc->sample_period) { ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); - hwc->remaining = ktime_to_ns(remaining); + local64_set(&hwc->period_left, ktime_to_ns(remaining)); hrtimer_cancel(&hwc->hrtimer); } -- cgit v1.2.3-70-g09d2 From a4eaf7f14675cb512d69f0c928055e73d0c6d252 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 16 Jun 2010 14:37:10 +0200 Subject: perf: Rework the PMU methods Replace pmu::{enable,disable,start,stop,unthrottle} with pmu::{add,del,start,stop}, all of which take a flags argument. The new interface extends the capability to stop a counter while keeping it scheduled on the PMU. We replace the throttled state with the generic stopped state. This also allows us to efficiently stop/start counters over certain code paths (like IRQ handlers). It also allows scheduling a counter without it starting, allowing for a generic frozen state (useful for rotating stopped counters). 
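Condensed from the kernel/perf_event.c hunks further below, the core is expected to drive the reworked methods roughly as in this sketch (illustrative only; locking and error paths omitted):

	static int pmu_method_usage_sketch(struct perf_event *event)
	{
		/* Schedule the counter on the PMU and start it counting. */
		if (event->pmu->add(event, PERF_EF_START))
			return -EAGAIN;		/* did not fit on the PMU */

		/* Throttle: stop counting, fold the hw count into event->count. */
		event->pmu->stop(event, PERF_EF_UPDATE);

		/* Unthrottle: resume counting without reprogramming the period. */
		event->pmu->start(event, 0);

		/* Change the sample period: stop, then restart with a reload. */
		event->pmu->stop(event, PERF_EF_UPDATE);
		event->pmu->start(event, PERF_EF_RELOAD);

		/* Deschedule the (now stopped) counter from the PMU. */
		event->pmu->del(event, 0);
		return 0;
	}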
The stopped state is implemented in two different ways, depending on
how the architecture implemented the throttled state:

 1) We disable the counter:
    a) the pmu has per-counter enable bits, we flip that
    b) we program a NOP event, preserving the counter state

 2) We store the counter state and ignore all read/overflow events

Signed-off-by: Peter Zijlstra
Cc: paulus
Cc: stephane eranian
Cc: Robert Richter
Cc: Will Deacon
Cc: Paul Mundt
Cc: Frederic Weisbecker
Cc: Cyrill Gorcunov
Cc: Lin Ming
Cc: Yanmin
Cc: Deng-Cheng Zhu
Cc: David Miller
Cc: Michael Cree
LKML-Reference:
Signed-off-by: Ingo Molnar
---
 arch/alpha/kernel/perf_event.c            |  71 +++++++++++----
 arch/arm/kernel/perf_event.c              |  96 ++++++++++++--------
 arch/powerpc/kernel/perf_event.c          | 105 ++++++++++++++--------
 arch/powerpc/kernel/perf_event_fsl_emb.c  | 107 ++++++++++++++---------
 arch/sh/kernel/perf_event.c               |  75 +++++++++++-----
 arch/sparc/kernel/perf_event.c            | 109 ++++++++++++++---------
 arch/x86/kernel/cpu/perf_event.c          | 106 ++++++++++++----------
 arch/x86/kernel/cpu/perf_event_intel.c    |   2 +-
 arch/x86/kernel/cpu/perf_event_intel_ds.c |   2 +-
 include/linux/ftrace_event.h              |   4 +-
 include/linux/perf_event.h                |  54 +++++++++---
 kernel/hw_breakpoint.c                    |  29 ++++++-
 kernel/perf_event.c                       | 140 +++++++++++++++---------------
 kernel/trace/trace_event_perf.c           |   7 +-
 14 files changed, 576 insertions(+), 331 deletions(-)
(limited to 'kernel')

diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index 3e260731f8e..380ef02d557 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -307,7 +307,7 @@ again:
 			     new_raw_count) != prev_raw_count)
 		goto again;
 
-	delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
+	delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
 
 	/* It is possible on very rare occasions that the PMC has overflowed
 	 * but the interrupt is yet to come.  Detect and fix this situation.
@@ -402,14 +402,13 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
 		struct hw_perf_event *hwc = &pe->hw;
 		int idx = hwc->idx;
 
-		if (cpuc->current_idx[j] != PMC_NO_INDEX) {
-			cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
-			continue;
+		if (cpuc->current_idx[j] == PMC_NO_INDEX) {
+			alpha_perf_event_set_period(pe, hwc, idx);
+			cpuc->current_idx[j] = idx;
 		}
 
-		alpha_perf_event_set_period(pe, hwc, idx);
-		cpuc->current_idx[j] = idx;
-		cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
+		if (!(hwc->state & PERF_HES_STOPPED))
+			cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
 	}
 	cpuc->config = cpuc->event[0]->hw.config_base;
 }
@@ -420,7 +419,7 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
  * - this function is called from outside this module via the pmu struct
  *   returned from perf event initialisation.
  */
-static int alpha_pmu_enable(struct perf_event *event)
+static int alpha_pmu_add(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int n0;
@@ -455,6 +454,10 @@ static int alpha_pmu_enable(struct perf_event *event)
 		}
 	}
 
+	hwc->state = PERF_HES_UPTODATE;
+	if (!(flags & PERF_EF_START))
+		hwc->state |= PERF_HES_STOPPED;
+
 	local_irq_restore(flags);
 	perf_pmu_enable(event->pmu);
 
@@ -467,7 +470,7 @@ static int alpha_pmu_enable(struct perf_event *event)
  * - this function is called from outside this module via the pmu struct
  *   returned from perf event initialisation.
  */
-static void alpha_pmu_disable(struct perf_event *event)
+static void alpha_pmu_del(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
@@ -514,13 +517,44 @@ static void alpha_pmu_read(struct perf_event *event)
 }
 
 
-static void alpha_pmu_unthrottle(struct perf_event *event)
+static void alpha_pmu_stop(struct perf_event *event, int flags)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
+	if (!(hwc->state & PERF_HES_STOPPED)) {
+		cpuc->idx_mask &= !(1UL<<hwc->idx);
+		hwc->state |= PERF_HES_STOPPED;
+	}
+
+	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+		alpha_perf_event_update(event, hwc, hwc->idx, 0);
+		hwc->state |= PERF_HES_UPTODATE;
+	}
+
+	if (cpuc->enabled)
+		wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
+}
+
+
+static void alpha_pmu_start(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+		return;
+
+	if (flags & PERF_EF_RELOAD) {
+		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+		alpha_perf_event_set_period(event, hwc, hwc->idx);
+	}
+
+	hwc->state = 0;
+	cpuc->idx_mask |= 1UL<<hwc->idx;
-	wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
+	if (cpuc->enabled)
+		wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
 }
 
 
@@ -671,7 +705,7 @@ static int alpha_pmu_event_init(struct perf_event *event)
 /*
  * Main entry point - enable HW performance counters.
  */
-static void alpha_pmu_pmu_enable(struct pmu *pmu)
+static void alpha_pmu_enable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -697,7 +731,7 @@ static void alpha_pmu_pmu_enable(struct pmu *pmu)
  * Main entry point - disable HW performance counters.
  */
-static void alpha_pmu_pmu_disable(struct pmu *pmu)
+static void alpha_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -711,13 +745,14 @@ static void alpha_pmu_pmu_disable(struct pmu *pmu)
 }
 
 static struct pmu pmu = {
-	.pmu_enable	= alpha_pmu_pmu_enable,
-	.pmu_disable	= alpha_pmu_pmu_disable,
+	.pmu_enable	= alpha_pmu_enable,
+	.pmu_disable	= alpha_pmu_disable,
 	.event_init	= alpha_pmu_event_init,
-	.enable		= alpha_pmu_enable,
-	.disable	= alpha_pmu_disable,
+	.add		= alpha_pmu_add,
+	.del		= alpha_pmu_del,
+	.start		= alpha_pmu_start,
+	.stop		= alpha_pmu_stop,
 	.read		= alpha_pmu_read,
-	.unthrottle	= alpha_pmu_unthrottle,
 };
 
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 3343f3f4b97..448cfa6b3ef 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -221,46 +221,56 @@ again:
 }
 
 static void
-armpmu_disable(struct perf_event *event)
+armpmu_read(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
-	int idx = hwc->idx;
-
-	WARN_ON(idx < 0);
-
-	clear_bit(idx, cpuc->active_mask);
-	armpmu->disable(hwc, idx);
-
-	barrier();
-	armpmu_event_update(event, hwc, idx);
-	cpuc->events[idx] = NULL;
-	clear_bit(idx, cpuc->used_mask);
+	/* Don't read disabled counters! */
+	if (hwc->idx < 0)
+		return;
 
-	perf_event_update_userpage(event);
+	armpmu_event_update(event, hwc, hwc->idx);
 }
 
 static void
-armpmu_read(struct perf_event *event)
+armpmu_stop(struct perf_event *event, int flags)
 {
 	struct hw_perf_event *hwc = &event->hw;
 
-	/* Don't read disabled counters!
*/ - if (hwc->idx < 0) + if (!armpmu) return; - armpmu_event_update(event, hwc, hwc->idx); + /* + * ARM pmu always has to update the counter, so ignore + * PERF_EF_UPDATE, see comments in armpmu_start(). + */ + if (!(hwc->state & PERF_HES_STOPPED)) { + armpmu->disable(hwc, hwc->idx); + barrier(); /* why? */ + armpmu_event_update(event, hwc, hwc->idx); + hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; + } } static void -armpmu_unthrottle(struct perf_event *event) +armpmu_start(struct perf_event *event, int flags) { struct hw_perf_event *hwc = &event->hw; + if (!armpmu) + return; + + /* + * ARM pmu always has to reprogram the period, so ignore + * PERF_EF_RELOAD, see the comment below. + */ + if (flags & PERF_EF_RELOAD) + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + + hwc->state = 0; /* * Set the period again. Some counters can't be stopped, so when we - * were throttled we simply disabled the IRQ source and the counter + * were stopped we simply disabled the IRQ source and the counter * may have been left counting. If we don't do this step then we may * get an interrupt too soon or *way* too late if the overflow has * happened since disabling. @@ -269,8 +279,25 @@ armpmu_unthrottle(struct perf_event *event) armpmu->enable(hwc, hwc->idx); } +static void +armpmu_del(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + WARN_ON(idx < 0); + + clear_bit(idx, cpuc->active_mask); + armpmu_stop(event, PERF_EF_UPDATE); + cpuc->events[idx] = NULL; + clear_bit(idx, cpuc->used_mask); + + perf_event_update_userpage(event); +} + static int -armpmu_enable(struct perf_event *event) +armpmu_add(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; @@ -295,11 +322,9 @@ armpmu_enable(struct perf_event *event) cpuc->events[idx] = event; set_bit(idx, cpuc->active_mask); - /* Set the period for the event. */ - armpmu_event_set_period(event, hwc, idx); - - /* Enable the event. */ - armpmu->enable(hwc, idx); + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + if (flags & PERF_EF_START) + armpmu_start(event, PERF_EF_RELOAD); /* Propagate our changes to the userspace mapping. */ perf_event_update_userpage(event); @@ -534,7 +559,7 @@ static int armpmu_event_init(struct perf_event *event) return err; } -static void armpmu_pmu_enable(struct pmu *pmu) +static void armpmu_enable(struct pmu *pmu) { /* Enable all of the perf events on hardware. 
*/ int idx; @@ -555,20 +580,21 @@ static void armpmu_pmu_enable(struct pmu *pmu) armpmu->start(); } -static void armpmu_pmu_disable(struct pmu *pmu) +static void armpmu_disable(struct pmu *pmu) { if (armpmu) armpmu->stop(); } static struct pmu pmu = { - .pmu_enable = armpmu_pmu_enable, - .pmu_disable= armpmu_pmu_disable, - .event_init = armpmu_event_init, - .enable = armpmu_enable, - .disable = armpmu_disable, - .unthrottle = armpmu_unthrottle, - .read = armpmu_read, + .pmu_enable = armpmu_enable, + .pmu_disable = armpmu_disable, + .event_init = armpmu_event_init, + .add = armpmu_add, + .del = armpmu_del, + .start = armpmu_start, + .stop = armpmu_stop, + .read = armpmu_read, }; /* diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index deb84bbcb0e..9cb4924b6c0 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c @@ -402,6 +402,9 @@ static void power_pmu_read(struct perf_event *event) { s64 val, delta, prev; + if (event->hw.state & PERF_HES_STOPPED) + return; + if (!event->hw.idx) return; /* @@ -517,7 +520,7 @@ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0) * Disable all events to prevent PMU interrupts and to allow * events to be added or removed. */ -static void power_pmu_pmu_disable(struct pmu *pmu) +static void power_pmu_disable(struct pmu *pmu) { struct cpu_hw_events *cpuhw; unsigned long flags; @@ -565,7 +568,7 @@ static void power_pmu_pmu_disable(struct pmu *pmu) * If we were previously disabled and events were added, then * put the new config on the PMU. */ -static void power_pmu_pmu_enable(struct pmu *pmu) +static void power_pmu_enable(struct pmu *pmu) { struct perf_event *event; struct cpu_hw_events *cpuhw; @@ -672,6 +675,8 @@ static void power_pmu_pmu_enable(struct pmu *pmu) } local64_set(&event->hw.prev_count, val); event->hw.idx = idx; + if (event->hw.state & PERF_HES_STOPPED) + val = 0; write_pmc(idx, val); perf_event_update_userpage(event); } @@ -727,7 +732,7 @@ static int collect_events(struct perf_event *group, int max_count, * re-enable the PMU in order to get hw_perf_enable to do the * actual work of reconfiguring the PMU. */ -static int power_pmu_enable(struct perf_event *event) +static int power_pmu_add(struct perf_event *event, int ef_flags) { struct cpu_hw_events *cpuhw; unsigned long flags; @@ -749,6 +754,9 @@ static int power_pmu_enable(struct perf_event *event) cpuhw->events[n0] = event->hw.config; cpuhw->flags[n0] = event->hw.event_base; + if (!(ef_flags & PERF_EF_START)) + event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + /* * If group events scheduling transaction was started, * skip the schedulability test here, it will be peformed @@ -777,7 +785,7 @@ nocheck: /* * Remove a event from the PMU. */ -static void power_pmu_disable(struct perf_event *event) +static void power_pmu_del(struct perf_event *event, int ef_flags) { struct cpu_hw_events *cpuhw; long i; @@ -826,27 +834,53 @@ static void power_pmu_disable(struct perf_event *event) } /* - * Re-enable interrupts on a event after they were throttled - * because they were coming too fast. + * POWER-PMU does not support disabling individual counters, hence + * program their cycle counter to their max value and ignore the interrupts. 
*/ -static void power_pmu_unthrottle(struct perf_event *event) + +static void power_pmu_start(struct perf_event *event, int ef_flags) { - s64 val, left; unsigned long flags; + s64 left; if (!event->hw.idx || !event->hw.sample_period) return; + + if (!(event->hw.state & PERF_HES_STOPPED)) + return; + + if (ef_flags & PERF_EF_RELOAD) + WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); + + local_irq_save(flags); + perf_pmu_disable(event->pmu); + + event->hw.state = 0; + left = local64_read(&event->hw.period_left); + write_pmc(event->hw.idx, left); + + perf_event_update_userpage(event); + perf_pmu_enable(event->pmu); + local_irq_restore(flags); +} + +static void power_pmu_stop(struct perf_event *event, int ef_flags) +{ + unsigned long flags; + + if (!event->hw.idx || !event->hw.sample_period) + return; + + if (event->hw.state & PERF_HES_STOPPED) + return; + local_irq_save(flags); perf_pmu_disable(event->pmu); + power_pmu_read(event); - left = event->hw.sample_period; - event->hw.last_period = left; - val = 0; - if (left < 0x80000000L) - val = 0x80000000L - left; - write_pmc(event->hw.idx, val); - local64_set(&event->hw.prev_count, val); - local64_set(&event->hw.period_left, left); + event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; + write_pmc(event->hw.idx, 0); + perf_event_update_userpage(event); perf_pmu_enable(event->pmu); local_irq_restore(flags); @@ -1131,13 +1165,14 @@ static int power_pmu_event_init(struct perf_event *event) } struct pmu power_pmu = { - .pmu_enable = power_pmu_pmu_enable, - .pmu_disable = power_pmu_pmu_disable, + .pmu_enable = power_pmu_enable, + .pmu_disable = power_pmu_disable, .event_init = power_pmu_event_init, - .enable = power_pmu_enable, - .disable = power_pmu_disable, + .add = power_pmu_add, + .del = power_pmu_del, + .start = power_pmu_start, + .stop = power_pmu_stop, .read = power_pmu_read, - .unthrottle = power_pmu_unthrottle, .start_txn = power_pmu_start_txn, .cancel_txn = power_pmu_cancel_txn, .commit_txn = power_pmu_commit_txn, @@ -1155,6 +1190,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val, s64 prev, delta, left; int record = 0; + if (event->hw.state & PERF_HES_STOPPED) { + write_pmc(event->hw.idx, 0); + return; + } + /* we don't have to worry about interrupts here */ prev = local64_read(&event->hw.prev_count); delta = (val - prev) & 0xfffffffful; @@ -1177,6 +1217,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val, val = 0x80000000LL - left; } + write_pmc(event->hw.idx, val); + local64_set(&event->hw.prev_count, val); + local64_set(&event->hw.period_left, left); + perf_event_update_userpage(event); + /* * Finally record data if requested. */ @@ -1189,23 +1234,9 @@ static void record_and_restart(struct perf_event *event, unsigned long val, if (event->attr.sample_type & PERF_SAMPLE_ADDR) perf_get_data_addr(regs, &data.addr); - if (perf_event_overflow(event, nmi, &data, regs)) { - /* - * Interrupts are coming too fast - throttle them - * by setting the event to 0, so it will be - * at least 2^30 cycles until the next interrupt - * (assuming each event counts at most 2 counts - * per cycle). 
- */ - val = 0; - left = ~0ULL >> 1; - } + if (perf_event_overflow(event, nmi, &data, regs)) + power_pmu_stop(event, 0); } - - write_pmc(event->hw.idx, val); - local64_set(&event->hw.prev_count, val); - local64_set(&event->hw.period_left, left); - perf_event_update_userpage(event); } /* diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c index 84b1974c628..7ecca59ddf7 100644 --- a/arch/powerpc/kernel/perf_event_fsl_emb.c +++ b/arch/powerpc/kernel/perf_event_fsl_emb.c @@ -156,6 +156,9 @@ static void fsl_emb_pmu_read(struct perf_event *event) { s64 val, delta, prev; + if (event->hw.state & PERF_HES_STOPPED) + return; + /* * Performance monitor interrupts come even when interrupts * are soft-disabled, as long as interrupts are hard-enabled. @@ -177,7 +180,7 @@ static void fsl_emb_pmu_read(struct perf_event *event) * Disable all events to prevent PMU interrupts and to allow * events to be added or removed. */ -static void fsl_emb_pmu_pmu_disable(struct pmu *pmu) +static void fsl_emb_pmu_disable(struct pmu *pmu) { struct cpu_hw_events *cpuhw; unsigned long flags; @@ -216,7 +219,7 @@ static void fsl_emb_pmu_pmu_disable(struct pmu *pmu) * If we were previously disabled and events were added, then * put the new config on the PMU. */ -static void fsl_emb_pmu_pmu_enable(struct pmu *pmu) +static void fsl_emb_pmu_enable(struct pmu *pmu) { struct cpu_hw_events *cpuhw; unsigned long flags; @@ -263,7 +266,7 @@ static int collect_events(struct perf_event *group, int max_count, } /* context locked on entry */ -static int fsl_emb_pmu_enable(struct perf_event *event) +static int fsl_emb_pmu_add(struct perf_event *event, int flags) { struct cpu_hw_events *cpuhw; int ret = -EAGAIN; @@ -302,6 +305,12 @@ static int fsl_emb_pmu_enable(struct perf_event *event) val = 0x80000000L - left; } local64_set(&event->hw.prev_count, val); + + if (!(flags & PERF_EF_START)) { + event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + val = 0; + } + write_pmc(i, val); perf_event_update_userpage(event); @@ -316,7 +325,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event) } /* context locked on entry */ -static void fsl_emb_pmu_disable(struct perf_event *event) +static void fsl_emb_pmu_del(struct perf_event *event, int flags) { struct cpu_hw_events *cpuhw; int i = event->hw.idx; @@ -353,30 +362,49 @@ static void fsl_emb_pmu_disable(struct perf_event *event) put_cpu_var(cpu_hw_events); } -/* - * Re-enable interrupts on a event after they were throttled - * because they were coming too fast. - * - * Context is locked on entry, but perf is not disabled. 
- */ -static void fsl_emb_pmu_unthrottle(struct perf_event *event) +static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags) +{ + unsigned long flags; + s64 left; + + if (event->hw.idx < 0 || !event->hw.sample_period) + return; + + if (!(event->hw.state & PERF_HES_STOPPED)) + return; + + if (ef_flags & PERF_EF_RELOAD) + WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); + + local_irq_save(flags); + perf_pmu_disable(event->pmu); + + event->hw.state = 0; + left = local64_read(&event->hw.period_left); + write_pmc(event->hw.idx, left); + + perf_event_update_userpage(event); + perf_pmu_enable(event->pmu); + local_irq_restore(flags); +} + +static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags) { - s64 val, left; unsigned long flags; if (event->hw.idx < 0 || !event->hw.sample_period) return; + + if (event->hw.state & PERF_HES_STOPPED) + return; + local_irq_save(flags); perf_pmu_disable(event->pmu); + fsl_emb_pmu_read(event); - left = event->hw.sample_period; - event->hw.last_period = left; - val = 0; - if (left < 0x80000000L) - val = 0x80000000L - left; - write_pmc(event->hw.idx, val); - local64_set(&event->hw.prev_count, val); - local64_set(&event->hw.period_left, left); + event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; + write_pmc(event->hw.idx, 0); + perf_event_update_userpage(event); perf_pmu_enable(event->pmu); local_irq_restore(flags); @@ -524,13 +552,14 @@ static int fsl_emb_pmu_event_init(struct perf_event *event) } static struct pmu fsl_emb_pmu = { - .pmu_enable = fsl_emb_pmu_pmu_enable, - .pmu_disable = fsl_emb_pmu_pmu_disable, + .pmu_enable = fsl_emb_pmu_enable, + .pmu_disable = fsl_emb_pmu_disable, .event_init = fsl_emb_pmu_event_init, - .enable = fsl_emb_pmu_enable, - .disable = fsl_emb_pmu_disable, + .add = fsl_emb_pmu_add, + .del = fsl_emb_pmu_del, + .start = fsl_emb_pmu_start, + .stop = fsl_emb_pmu_stop, .read = fsl_emb_pmu_read, - .unthrottle = fsl_emb_pmu_unthrottle, }; /* @@ -545,6 +574,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val, s64 prev, delta, left; int record = 0; + if (event->hw.state & PERF_HES_STOPPED) { + write_pmc(event->hw.idx, 0); + return; + } + /* we don't have to worry about interrupts here */ prev = local64_read(&event->hw.prev_count); delta = (val - prev) & 0xfffffffful; @@ -567,6 +601,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val, val = 0x80000000LL - left; } + write_pmc(event->hw.idx, val); + local64_set(&event->hw.prev_count, val); + local64_set(&event->hw.period_left, left); + perf_event_update_userpage(event); + /* * Finally record data if requested. */ @@ -576,23 +615,9 @@ static void record_and_restart(struct perf_event *event, unsigned long val, perf_sample_data_init(&data, 0); data.period = event->hw.last_period; - if (perf_event_overflow(event, nmi, &data, regs)) { - /* - * Interrupts are coming too fast - throttle them - * by setting the event to 0, so it will be - * at least 2^30 cycles until the next interrupt - * (assuming each event counts at most 2 counts - * per cycle). 
- */ - val = 0; - left = ~0ULL >> 1; - } + if (perf_event_overflow(event, nmi, &data, regs)) + fsl_emb_pmu_stop(event, 0); } - - write_pmc(event->hw.idx, val); - local64_set(&event->hw.prev_count, val); - local64_set(&event->hw.period_left, left); - perf_event_update_userpage(event); } static void perf_event_interrupt(struct pt_regs *regs) diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c index 4bbe19058a5..cf39c487346 100644 --- a/arch/sh/kernel/perf_event.c +++ b/arch/sh/kernel/perf_event.c @@ -206,26 +206,52 @@ again: local64_add(delta, &event->count); } -static void sh_pmu_disable(struct perf_event *event) +static void sh_pmu_stop(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; - clear_bit(idx, cpuc->active_mask); - sh_pmu->disable(hwc, idx); + if (!(event->hw.state & PERF_HES_STOPPED)) { + sh_pmu->disable(hwc, idx); + cpuc->events[idx] = NULL; + event->hw.state |= PERF_HES_STOPPED; + } - barrier(); + if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) { + sh_perf_event_update(event, &event->hw, idx); + event->hw.state |= PERF_HES_UPTODATE; + } +} - sh_perf_event_update(event, &event->hw, idx); +static void sh_pmu_start(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; - cpuc->events[idx] = NULL; - clear_bit(idx, cpuc->used_mask); + if (WARN_ON_ONCE(idx == -1)) + return; + + if (flags & PERF_EF_RELOAD) + WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); + + cpuc->events[idx] = event; + event->hw.state = 0; + sh_pmu->enable(hwc, idx); +} + +static void sh_pmu_del(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + + sh_pmu_stop(event, PERF_EF_UPDATE); + __clear_bit(event->hw.idx, cpuc->used_mask); perf_event_update_userpage(event); } -static int sh_pmu_enable(struct perf_event *event) +static int sh_pmu_add(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; @@ -234,21 +260,20 @@ static int sh_pmu_enable(struct perf_event *event) perf_pmu_disable(event->pmu); - if (test_and_set_bit(idx, cpuc->used_mask)) { + if (__test_and_set_bit(idx, cpuc->used_mask)) { idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events); if (idx == sh_pmu->num_events) goto out; - set_bit(idx, cpuc->used_mask); + __set_bit(idx, cpuc->used_mask); hwc->idx = idx; } sh_pmu->disable(hwc, idx); - cpuc->events[idx] = event; - set_bit(idx, cpuc->active_mask); - - sh_pmu->enable(hwc, idx); + event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + if (flags & PERF_EF_START) + sh_pmu_start(event, PERF_EF_RELOAD); perf_event_update_userpage(event); ret = 0; @@ -285,7 +310,7 @@ static int sh_pmu_event_init(struct perf_event *event) return err; } -static void sh_pmu_pmu_enable(struct pmu *pmu) +static void sh_pmu_enable(struct pmu *pmu) { if (!sh_pmu_initialized()) return; @@ -293,7 +318,7 @@ static void sh_pmu_pmu_enable(struct pmu *pmu) sh_pmu->enable_all(); } -static void sh_pmu_pmu_disable(struct pmu *pmu) +static void sh_pmu_disable(struct pmu *pmu) { if (!sh_pmu_initialized()) return; @@ -302,11 +327,13 @@ static void sh_pmu_pmu_disable(struct pmu *pmu) } static struct pmu pmu = { - .pmu_enable = sh_pmu_pmu_enable, - .pmu_disable = sh_pmu_pmu_disable, + .pmu_enable = sh_pmu_enable, + .pmu_disable = 
sh_pmu_disable, .event_init = sh_pmu_event_init, - .enable = sh_pmu_enable, - .disable = sh_pmu_disable, + .add = sh_pmu_add, + .del = sh_pmu_del, + .start = sh_pmu_start, + .stop = sh_pmu_stop, .read = sh_pmu_read, }; @@ -334,15 +361,15 @@ sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) return NOTIFY_OK; } -int __cpuinit register_sh_pmu(struct sh_pmu *pmu) +int __cpuinit register_sh_pmu(struct sh_pmu *_pmu) { if (sh_pmu) return -EBUSY; - sh_pmu = pmu; + sh_pmu = _pmu; - pr_info("Performance Events: %s support registered\n", pmu->name); + pr_info("Performance Events: %s support registered\n", _pmu->name); - WARN_ON(pmu->num_events > MAX_HWEVENTS); + WARN_ON(_pmu->num_events > MAX_HWEVENTS); perf_pmu_register(&pmu); perf_cpu_notifier(sh_pmu_notifier); diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 37cae676536..516be2314b5 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -658,13 +658,16 @@ static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr) enc = perf_event_get_enc(cpuc->events[i]); pcr &= ~mask_for_index(idx); - pcr |= event_encoding(enc, idx); + if (hwc->state & PERF_HES_STOPPED) + pcr |= nop_for_index(idx); + else + pcr |= event_encoding(enc, idx); } out: return pcr; } -static void sparc_pmu_pmu_enable(struct pmu *pmu) +static void sparc_pmu_enable(struct pmu *pmu) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); u64 pcr; @@ -691,7 +694,7 @@ static void sparc_pmu_pmu_enable(struct pmu *pmu) pcr_ops->write(cpuc->pcr); } -static void sparc_pmu_pmu_disable(struct pmu *pmu) +static void sparc_pmu_disable(struct pmu *pmu) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); u64 val; @@ -710,10 +713,53 @@ static void sparc_pmu_pmu_disable(struct pmu *pmu) pcr_ops->write(cpuc->pcr); } -static void sparc_pmu_disable(struct perf_event *event) +static int active_event_index(struct cpu_hw_events *cpuc, + struct perf_event *event) +{ + int i; + + for (i = 0; i < cpuc->n_events; i++) { + if (cpuc->event[i] == event) + break; + } + BUG_ON(i == cpuc->n_events); + return cpuc->current_idx[i]; +} + +static void sparc_pmu_start(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + int idx = active_event_index(cpuc, event); + + if (flags & PERF_EF_RELOAD) { + WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); + sparc_perf_event_set_period(event, &event->hw, idx); + } + + event->hw.state = 0; + + sparc_pmu_enable_event(cpuc, &event->hw, idx); +} + +static void sparc_pmu_stop(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + int idx = active_event_index(cpuc, event); + + if (!(event->hw.state & PERF_HES_STOPPED)) { + sparc_pmu_disable_event(cpuc, &event->hw, idx); + event->hw.state |= PERF_HES_STOPPED; + } + + if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) { + sparc_perf_event_update(event, &event->hw, idx); + event->hw.state |= PERF_HES_UPTODATE; + } +} + +static void sparc_pmu_del(struct perf_event *event, int _flags) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); - struct hw_perf_event *hwc = &event->hw; unsigned long flags; int i; @@ -722,7 +768,10 @@ static void sparc_pmu_disable(struct perf_event *event) for (i = 0; i < cpuc->n_events; i++) { if (event == cpuc->event[i]) { - int idx = cpuc->current_idx[i]; + /* Absorb the final count and turn off the + * event. 
+ */ + sparc_pmu_stop(event, PERF_EF_UPDATE); /* Shift remaining entries down into * the existing slot. @@ -734,13 +783,6 @@ static void sparc_pmu_disable(struct perf_event *event) cpuc->current_idx[i]; } - /* Absorb the final count and turn off the - * event. - */ - sparc_pmu_disable_event(cpuc, hwc, idx); - barrier(); - sparc_perf_event_update(event, hwc, idx); - perf_event_update_userpage(event); cpuc->n_events--; @@ -752,19 +794,6 @@ static void sparc_pmu_disable(struct perf_event *event) local_irq_restore(flags); } -static int active_event_index(struct cpu_hw_events *cpuc, - struct perf_event *event) -{ - int i; - - for (i = 0; i < cpuc->n_events; i++) { - if (cpuc->event[i] == event) - break; - } - BUG_ON(i == cpuc->n_events); - return cpuc->current_idx[i]; -} - static void sparc_pmu_read(struct perf_event *event) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); @@ -774,15 +803,6 @@ static void sparc_pmu_read(struct perf_event *event) sparc_perf_event_update(event, hwc, idx); } -static void sparc_pmu_unthrottle(struct perf_event *event) -{ - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); - int idx = active_event_index(cpuc, event); - struct hw_perf_event *hwc = &event->hw; - - sparc_pmu_enable_event(cpuc, hwc, idx); -} - static atomic_t active_events = ATOMIC_INIT(0); static DEFINE_MUTEX(pmc_grab_mutex); @@ -984,7 +1004,7 @@ static int collect_events(struct perf_event *group, int max_count, return n; } -static int sparc_pmu_enable(struct perf_event *event) +static int sparc_pmu_add(struct perf_event *event, int ef_flags) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); int n0, ret = -EAGAIN; @@ -1001,6 +1021,10 @@ static int sparc_pmu_enable(struct perf_event *event) cpuc->events[n0] = event->hw.event_base; cpuc->current_idx[n0] = PIC_NO_INDEX; + event->hw.state = PERF_HES_UPTODATE; + if (!(ef_flags & PERF_EF_START)) + event->hw.state |= PERF_HES_STOPPED; + /* * If group events scheduling transaction was started, * skip the schedulability test here, it will be peformed @@ -1156,13 +1180,14 @@ static int sparc_pmu_commit_txn(struct pmu *pmu) } static struct pmu pmu = { - .pmu_enable = sparc_pmu_pmu_enable, - .pmu_disable = sparc_pmu_pmu_disable, + .pmu_enable = sparc_pmu_enable, + .pmu_disable = sparc_pmu_disable, .event_init = sparc_pmu_event_init, - .enable = sparc_pmu_enable, - .disable = sparc_pmu_disable, + .add = sparc_pmu_add, + .del = sparc_pmu_del, + .start = sparc_pmu_start, + .stop = sparc_pmu_stop, .read = sparc_pmu_read, - .unthrottle = sparc_pmu_unthrottle, .start_txn = sparc_pmu_start_txn, .cancel_txn = sparc_pmu_cancel_txn, .commit_txn = sparc_pmu_commit_txn, @@ -1243,7 +1268,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self, continue; if (perf_event_overflow(event, 1, &data, regs)) - sparc_pmu_disable_event(cpuc, hwc, idx); + sparc_pmu_stop(event, 0); } return NOTIFY_STOP; diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 79705ac4501..dd6fec71067 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -583,7 +583,7 @@ static void x86_pmu_disable_all(void) } } -static void x86_pmu_pmu_disable(struct pmu *pmu) +static void x86_pmu_disable(struct pmu *pmu) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); @@ -800,10 +800,10 @@ static inline int match_prev_assignment(struct hw_perf_event *hwc, hwc->last_tag == cpuc->tags[i]; } -static int x86_pmu_start(struct perf_event *event); -static void x86_pmu_stop(struct perf_event 
*event); +static void x86_pmu_start(struct perf_event *event, int flags); +static void x86_pmu_stop(struct perf_event *event, int flags); -static void x86_pmu_pmu_enable(struct pmu *pmu) +static void x86_pmu_enable(struct pmu *pmu) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct perf_event *event; @@ -839,7 +839,14 @@ static void x86_pmu_pmu_enable(struct pmu *pmu) match_prev_assignment(hwc, cpuc, i)) continue; - x86_pmu_stop(event); + /* + * Ensure we don't accidentally enable a stopped + * counter simply because we rescheduled. + */ + if (hwc->state & PERF_HES_STOPPED) + hwc->state |= PERF_HES_ARCH; + + x86_pmu_stop(event, PERF_EF_UPDATE); } for (i = 0; i < cpuc->n_events; i++) { @@ -851,7 +858,10 @@ static void x86_pmu_pmu_enable(struct pmu *pmu) else if (i < n_running) continue; - x86_pmu_start(event); + if (hwc->state & PERF_HES_ARCH) + continue; + + x86_pmu_start(event, PERF_EF_RELOAD); } cpuc->n_added = 0; perf_events_lapic_init(); @@ -952,15 +962,12 @@ static void x86_pmu_enable_event(struct perf_event *event) } /* - * activate a single event + * Add a single event to the PMU. * * The event is added to the group of enabled events * but only if it can be scehduled with existing events. - * - * Called with PMU disabled. If successful and return value 1, - * then guaranteed to call perf_enable() and hw_perf_enable() */ -static int x86_pmu_enable(struct perf_event *event) +static int x86_pmu_add(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc; @@ -975,10 +982,14 @@ static int x86_pmu_enable(struct perf_event *event) if (ret < 0) goto out; + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + if (!(flags & PERF_EF_START)) + hwc->state |= PERF_HES_ARCH; + /* * If group events scheduling transaction was started, * skip the schedulability test here, it will be peformed - * at commit time(->commit_txn) as a whole + * at commit time (->commit_txn) as a whole */ if (cpuc->group_flag & PERF_EVENT_TXN) goto done_collect; @@ -1003,27 +1014,28 @@ out: return ret; } -static int x86_pmu_start(struct perf_event *event) +static void x86_pmu_start(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); int idx = event->hw.idx; - if (idx == -1) - return -EAGAIN; + if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) + return; + + if (WARN_ON_ONCE(idx == -1)) + return; + + if (flags & PERF_EF_RELOAD) { + WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); + x86_perf_event_set_period(event); + } + + event->hw.state = 0; - x86_perf_event_set_period(event); cpuc->events[idx] = event; __set_bit(idx, cpuc->active_mask); x86_pmu.enable(event); perf_event_update_userpage(event); - - return 0; -} - -static void x86_pmu_unthrottle(struct perf_event *event) -{ - int ret = x86_pmu_start(event); - WARN_ON_ONCE(ret); } void perf_event_print_debug(void) @@ -1080,27 +1092,29 @@ void perf_event_print_debug(void) local_irq_restore(flags); } -static void x86_pmu_stop(struct perf_event *event) +static void x86_pmu_stop(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; - int idx = hwc->idx; - - if (!__test_and_clear_bit(idx, cpuc->active_mask)) - return; - - x86_pmu.disable(event); - /* - * Drain the remaining delta count out of a event - * that we are disabling: - */ - x86_perf_event_update(event); + if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) { + x86_pmu.disable(event); + 
cpuc->events[hwc->idx] = NULL; + WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); + hwc->state |= PERF_HES_STOPPED; + } - cpuc->events[idx] = NULL; + if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { + /* + * Drain the remaining delta count out of a event + * that we are disabling: + */ + x86_perf_event_update(event); + hwc->state |= PERF_HES_UPTODATE; + } } -static void x86_pmu_disable(struct perf_event *event) +static void x86_pmu_del(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); int i; @@ -1113,7 +1127,7 @@ static void x86_pmu_disable(struct perf_event *event) if (cpuc->group_flag & PERF_EVENT_TXN) return; - x86_pmu_stop(event); + x86_pmu_stop(event, PERF_EF_UPDATE); for (i = 0; i < cpuc->n_events; i++) { if (event == cpuc->event_list[i]) { @@ -1165,7 +1179,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs) continue; if (perf_event_overflow(event, 1, &data, regs)) - x86_pmu_stop(event); + x86_pmu_stop(event, 0); } if (handled) @@ -1605,15 +1619,17 @@ int x86_pmu_event_init(struct perf_event *event) } static struct pmu pmu = { - .pmu_enable = x86_pmu_pmu_enable, - .pmu_disable = x86_pmu_pmu_disable, + .pmu_enable = x86_pmu_enable, + .pmu_disable = x86_pmu_disable, + .event_init = x86_pmu_event_init, - .enable = x86_pmu_enable, - .disable = x86_pmu_disable, + + .add = x86_pmu_add, + .del = x86_pmu_del, .start = x86_pmu_start, .stop = x86_pmu_stop, .read = x86_pmu_read, - .unthrottle = x86_pmu_unthrottle, + .start_txn = x86_pmu_start_txn, .cancel_txn = x86_pmu_cancel_txn, .commit_txn = x86_pmu_commit_txn, diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index ee05c90012d..82395f2378e 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -763,7 +763,7 @@ again: data.period = event->hw.last_period; if (perf_event_overflow(event, 1, &data, regs)) - x86_pmu_stop(event); + x86_pmu_stop(event, 0); } /* diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 18018d1311c..9893a2f77b7 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -491,7 +491,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event, regs.flags &= ~PERF_EFLAGS_EXACT; if (perf_event_overflow(event, 1, &data, ®s)) - x86_pmu_stop(event); + x86_pmu_stop(event, 0); } static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 5f8ad7bec63..8beabb958f6 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -252,8 +252,8 @@ DECLARE_PER_CPU(struct pt_regs, perf_trace_regs); extern int perf_trace_init(struct perf_event *event); extern void perf_trace_destroy(struct perf_event *event); -extern int perf_trace_enable(struct perf_event *event); -extern void perf_trace_disable(struct perf_event *event); +extern int perf_trace_add(struct perf_event *event, int flags); +extern void perf_trace_del(struct perf_event *event, int flags); extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, char *filter_str); extern void ftrace_profile_free_filter(struct perf_event *event); diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 8cafa15af60..402073c6166 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -538,6 +538,7 @@ struct hw_perf_event { }; #endif }; + int state; local64_t prev_count; u64 sample_period; 
u64 last_period; @@ -549,6 +550,13 @@ struct hw_perf_event { #endif }; +/* + * hw_perf_event::state flags + */ +#define PERF_HES_STOPPED 0x01 /* the counter is stopped */ +#define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */ +#define PERF_HES_ARCH 0x04 + struct perf_event; /* @@ -564,42 +572,62 @@ struct pmu { int *pmu_disable_count; + /* + * Fully disable/enable this PMU, can be used to protect from the PMI + * as well as for lazy/batch writing of the MSRs. + */ void (*pmu_enable) (struct pmu *pmu); /* optional */ void (*pmu_disable) (struct pmu *pmu); /* optional */ /* + * Try and initialize the event for this PMU. * Should return -ENOENT when the @event doesn't match this PMU. */ int (*event_init) (struct perf_event *event); - int (*enable) (struct perf_event *event); - void (*disable) (struct perf_event *event); - int (*start) (struct perf_event *event); - void (*stop) (struct perf_event *event); +#define PERF_EF_START 0x01 /* start the counter when adding */ +#define PERF_EF_RELOAD 0x02 /* reload the counter when starting */ +#define PERF_EF_UPDATE 0x04 /* update the counter when stopping */ + + /* + * Adds/Removes a counter to/from the PMU, can be done inside + * a transaction, see the ->*_txn() methods. + */ + int (*add) (struct perf_event *event, int flags); + void (*del) (struct perf_event *event, int flags); + + /* + * Starts/Stops a counter present on the PMU. The PMI handler + * should stop the counter when perf_event_overflow() returns + * !0. ->start() will be used to continue. + */ + void (*start) (struct perf_event *event, int flags); + void (*stop) (struct perf_event *event, int flags); + + /* + * Updates the counter value of the event. + */ void (*read) (struct perf_event *event); - void (*unthrottle) (struct perf_event *event); /* * Group events scheduling is treated as a transaction, add * group events as a whole and perform one schedulability test. * If the test fails, roll back the whole group - */ - - /* - * Start the transaction, after this ->enable() doesn't need to + * + * Start the transaction, after this ->add() doesn't need to * do schedulability tests. */ void (*start_txn) (struct pmu *pmu); /* optional */ /* - * If ->start_txn() disabled the ->enable() schedulability test + * If ->start_txn() disabled the ->add() schedulability test * then ->commit_txn() is required to perform one. On success * the transaction is closed. On error the transaction is kept * open until ->cancel_txn() is called. */ int (*commit_txn) (struct pmu *pmu); /* optional */ /* - * Will cancel the transaction, assumes ->disable() is called - * for each successfull ->enable() during the transaction. + * Will cancel the transaction, assumes ->del() is called + * for each successfull ->add() during the transaction. 
*/ void (*cancel_txn) (struct pmu *pmu); /* optional */ }; @@ -680,7 +708,7 @@ struct perf_event { int nr_siblings; int group_flags; struct perf_event *group_leader; - struct pmu *pmu; + struct pmu *pmu; enum perf_event_active_state state; unsigned int attach_state; diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c index e9c5cfa1fd2..6f150095caf 100644 --- a/kernel/hw_breakpoint.c +++ b/kernel/hw_breakpoint.c @@ -586,10 +586,35 @@ static int hw_breakpoint_event_init(struct perf_event *bp) return 0; } +static int hw_breakpoint_add(struct perf_event *bp, int flags) +{ + if (!(flags & PERF_EF_START)) + bp->hw.state = PERF_HES_STOPPED; + + return arch_install_hw_breakpoint(bp); +} + +static void hw_breakpoint_del(struct perf_event *bp, int flags) +{ + arch_uninstall_hw_breakpoint(bp); +} + +static void hw_breakpoint_start(struct perf_event *bp, int flags) +{ + bp->hw.state = 0; +} + +static void hw_breakpoint_stop(struct perf_event *bp, int flags) +{ + bp->hw.state = PERF_HES_STOPPED; +} + static struct pmu perf_breakpoint = { .event_init = hw_breakpoint_event_init, - .enable = arch_install_hw_breakpoint, - .disable = arch_uninstall_hw_breakpoint, + .add = hw_breakpoint_add, + .del = hw_breakpoint_del, + .start = hw_breakpoint_start, + .stop = hw_breakpoint_stop, .read = hw_breakpoint_pmu_read, }; diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 1a6cdbf0d09..3bace4fd035 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -424,7 +424,7 @@ event_sched_out(struct perf_event *event, event->state = PERF_EVENT_STATE_OFF; } event->tstamp_stopped = ctx->time; - event->pmu->disable(event); + event->pmu->del(event, 0); event->oncpu = -1; if (!is_software_event(event)) @@ -649,7 +649,7 @@ event_sched_in(struct perf_event *event, */ smp_wmb(); - if (event->pmu->enable(event)) { + if (event->pmu->add(event, PERF_EF_START)) { event->state = PERF_EVENT_STATE_INACTIVE; event->oncpu = -1; return -EAGAIN; @@ -1482,22 +1482,6 @@ do { \ return div64_u64(dividend, divisor); } -static void perf_event_stop(struct perf_event *event) -{ - if (!event->pmu->stop) - return event->pmu->disable(event); - - return event->pmu->stop(event); -} - -static int perf_event_start(struct perf_event *event) -{ - if (!event->pmu->start) - return event->pmu->enable(event); - - return event->pmu->start(event); -} - static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count) { struct hw_perf_event *hwc = &event->hw; @@ -1517,9 +1501,9 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count) hwc->sample_period = sample_period; if (local64_read(&hwc->period_left) > 8*sample_period) { - perf_event_stop(event); + event->pmu->stop(event, PERF_EF_UPDATE); local64_set(&hwc->period_left, 0); - perf_event_start(event); + event->pmu->start(event, PERF_EF_RELOAD); } } @@ -1548,7 +1532,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx) */ if (interrupts == MAX_INTERRUPTS) { perf_log_throttle(event, 1); - event->pmu->unthrottle(event); + event->pmu->start(event, 0); } if (!event->attr.freq || !event->attr.sample_freq) @@ -2506,6 +2490,9 @@ int perf_event_task_disable(void) static int perf_event_index(struct perf_event *event) { + if (event->hw.state & PERF_HES_STOPPED) + return 0; + if (event->state != PERF_EVENT_STATE_ACTIVE) return 0; @@ -4120,8 +4107,6 @@ static int __perf_event_overflow(struct perf_event *event, int nmi, struct hw_perf_event *hwc = &event->hw; int ret = 0; - throttle = (throttle && event->pmu->unthrottle != NULL); - if (!throttle) { 
hwc->interrupts++; } else { @@ -4246,7 +4231,7 @@ static void perf_swevent_overflow(struct perf_event *event, u64 overflow, } } -static void perf_swevent_add(struct perf_event *event, u64 nr, +static void perf_swevent_event(struct perf_event *event, u64 nr, int nmi, struct perf_sample_data *data, struct pt_regs *regs) { @@ -4272,6 +4257,9 @@ static void perf_swevent_add(struct perf_event *event, u64 nr, static int perf_exclude_event(struct perf_event *event, struct pt_regs *regs) { + if (event->hw.state & PERF_HES_STOPPED) + return 0; + if (regs) { if (event->attr.exclude_user && user_mode(regs)) return 1; @@ -4371,7 +4359,7 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id, hlist_for_each_entry_rcu(event, node, head, hlist_entry) { if (perf_swevent_match(event, type, event_id, data, regs)) - perf_swevent_add(event, nr, nmi, data, regs); + perf_swevent_event(event, nr, nmi, data, regs); } end: rcu_read_unlock(); @@ -4415,7 +4403,7 @@ static void perf_swevent_read(struct perf_event *event) { } -static int perf_swevent_enable(struct perf_event *event) +static int perf_swevent_add(struct perf_event *event, int flags) { struct hw_perf_event *hwc = &event->hw; struct perf_cpu_context *cpuctx; @@ -4428,6 +4416,8 @@ static int perf_swevent_enable(struct perf_event *event) perf_swevent_set_period(event); } + hwc->state = !(flags & PERF_EF_START); + head = find_swevent_head(cpuctx, event); if (WARN_ON_ONCE(!head)) return -EINVAL; @@ -4437,18 +4427,19 @@ static int perf_swevent_enable(struct perf_event *event) return 0; } -static void perf_swevent_disable(struct perf_event *event) +static void perf_swevent_del(struct perf_event *event, int flags) { hlist_del_rcu(&event->hlist_entry); } -static void perf_swevent_void(struct perf_event *event) +static void perf_swevent_start(struct perf_event *event, int flags) { + event->hw.state = 0; } -static int perf_swevent_int(struct perf_event *event) +static void perf_swevent_stop(struct perf_event *event, int flags) { - return 0; + event->hw.state = PERF_HES_STOPPED; } /* Deref the hlist from the update side */ @@ -4604,12 +4595,11 @@ static int perf_swevent_init(struct perf_event *event) static struct pmu perf_swevent = { .event_init = perf_swevent_init, - .enable = perf_swevent_enable, - .disable = perf_swevent_disable, - .start = perf_swevent_int, - .stop = perf_swevent_void, + .add = perf_swevent_add, + .del = perf_swevent_del, + .start = perf_swevent_start, + .stop = perf_swevent_stop, .read = perf_swevent_read, - .unthrottle = perf_swevent_void, /* hwc->interrupts already reset */ }; #ifdef CONFIG_EVENT_TRACING @@ -4657,7 +4647,7 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, hlist_for_each_entry_rcu(event, node, head, hlist_entry) { if (perf_tp_event_match(event, &data, regs)) - perf_swevent_add(event, count, 1, &data, regs); + perf_swevent_event(event, count, 1, &data, regs); } perf_swevent_put_recursion_context(rctx); @@ -4696,12 +4686,11 @@ static int perf_tp_event_init(struct perf_event *event) static struct pmu perf_tracepoint = { .event_init = perf_tp_event_init, - .enable = perf_trace_enable, - .disable = perf_trace_disable, - .start = perf_swevent_int, - .stop = perf_swevent_void, + .add = perf_trace_add, + .del = perf_trace_del, + .start = perf_swevent_start, + .stop = perf_swevent_stop, .read = perf_swevent_read, - .unthrottle = perf_swevent_void, }; static inline void perf_tp_register(void) @@ -4757,8 +4746,8 @@ void perf_bp_event(struct perf_event *bp, void *data) perf_sample_data_init(&sample, 
bp->attr.bp_addr); - if (!perf_exclude_event(bp, regs)) - perf_swevent_add(bp, 1, 1, &sample, regs); + if (!bp->hw.state && !perf_exclude_event(bp, regs)) + perf_swevent_event(bp, 1, 1, &sample, regs); } #endif @@ -4834,32 +4823,39 @@ static void perf_swevent_cancel_hrtimer(struct perf_event *event) static void cpu_clock_event_update(struct perf_event *event) { - int cpu = raw_smp_processor_id(); s64 prev; u64 now; - now = cpu_clock(cpu); + now = local_clock(); prev = local64_xchg(&event->hw.prev_count, now); local64_add(now - prev, &event->count); } -static int cpu_clock_event_enable(struct perf_event *event) +static void cpu_clock_event_start(struct perf_event *event, int flags) { - struct hw_perf_event *hwc = &event->hw; - int cpu = raw_smp_processor_id(); - - local64_set(&hwc->prev_count, cpu_clock(cpu)); + local64_set(&event->hw.prev_count, local_clock()); perf_swevent_start_hrtimer(event); - - return 0; } -static void cpu_clock_event_disable(struct perf_event *event) +static void cpu_clock_event_stop(struct perf_event *event, int flags) { perf_swevent_cancel_hrtimer(event); cpu_clock_event_update(event); } +static int cpu_clock_event_add(struct perf_event *event, int flags) +{ + if (flags & PERF_EF_START) + cpu_clock_event_start(event, flags); + + return 0; +} + +static void cpu_clock_event_del(struct perf_event *event, int flags) +{ + cpu_clock_event_stop(event, flags); +} + static void cpu_clock_event_read(struct perf_event *event) { cpu_clock_event_update(event); @@ -4878,8 +4874,10 @@ static int cpu_clock_event_init(struct perf_event *event) static struct pmu perf_cpu_clock = { .event_init = cpu_clock_event_init, - .enable = cpu_clock_event_enable, - .disable = cpu_clock_event_disable, + .add = cpu_clock_event_add, + .del = cpu_clock_event_del, + .start = cpu_clock_event_start, + .stop = cpu_clock_event_stop, .read = cpu_clock_event_read, }; @@ -4897,25 +4895,29 @@ static void task_clock_event_update(struct perf_event *event, u64 now) local64_add(delta, &event->count); } -static int task_clock_event_enable(struct perf_event *event) +static void task_clock_event_start(struct perf_event *event, int flags) { - struct hw_perf_event *hwc = &event->hw; - u64 now; - - now = event->ctx->time; - - local64_set(&hwc->prev_count, now); - + local64_set(&event->hw.prev_count, event->ctx->time); perf_swevent_start_hrtimer(event); - - return 0; } -static void task_clock_event_disable(struct perf_event *event) +static void task_clock_event_stop(struct perf_event *event, int flags) { perf_swevent_cancel_hrtimer(event); task_clock_event_update(event, event->ctx->time); +} + +static int task_clock_event_add(struct perf_event *event, int flags) +{ + if (flags & PERF_EF_START) + task_clock_event_start(event, flags); + return 0; +} + +static void task_clock_event_del(struct perf_event *event, int flags) +{ + task_clock_event_stop(event, PERF_EF_UPDATE); } static void task_clock_event_read(struct perf_event *event) @@ -4947,8 +4949,10 @@ static int task_clock_event_init(struct perf_event *event) static struct pmu perf_task_clock = { .event_init = task_clock_event_init, - .enable = task_clock_event_enable, - .disable = task_clock_event_disable, + .add = task_clock_event_add, + .del = task_clock_event_del, + .start = task_clock_event_start, + .stop = task_clock_event_stop, .read = task_clock_event_read, }; diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index f3bbcd1c90c..39c059ca670 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -101,7 
+101,7 @@ int perf_trace_init(struct perf_event *p_event) return ret; } -int perf_trace_enable(struct perf_event *p_event) +int perf_trace_add(struct perf_event *p_event, int flags) { struct ftrace_event_call *tp_event = p_event->tp_event; struct hlist_head __percpu *pcpu_list; @@ -111,13 +111,16 @@ int perf_trace_enable(struct perf_event *p_event) if (WARN_ON_ONCE(!pcpu_list)) return -EINVAL; + if (!(flags & PERF_EF_START)) + p_event->hw.state = PERF_HES_STOPPED; + list = this_cpu_ptr(pcpu_list); hlist_add_head_rcu(&p_event->hlist_entry, list); return 0; } -void perf_trace_disable(struct perf_event *p_event) +void perf_trace_del(struct perf_event *p_event, int flags) { hlist_del_rcu(&p_event->hlist_entry); } -- cgit v1.2.3-70-g09d2 From 15ac9a395a753cb28c674e7ea80386ffdff21785 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 6 Sep 2010 15:51:45 +0200 Subject: perf: Remove the sysfs bits Neither the overcommit nor the reservation sysfs parameter were actually working, remove them as they'll only get in the way. Signed-off-by: Peter Zijlstra Cc: paulus LKML-Reference: Signed-off-by: Ingo Molnar --- arch/alpha/kernel/perf_event.c | 3 +- arch/arm/kernel/perf_event.c | 9 +-- arch/sparc/kernel/perf_event.c | 9 +-- arch/x86/kernel/cpu/perf_event.c | 1 - include/linux/perf_event.h | 6 -- kernel/perf_event.c | 124 --------------------------------------- 6 files changed, 5 insertions(+), 147 deletions(-) (limited to 'kernel') diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c index 380ef02d557..9bb8c024080 100644 --- a/arch/alpha/kernel/perf_event.c +++ b/arch/alpha/kernel/perf_event.c @@ -808,7 +808,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr, wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask); /* la_ptr is the counter that overflowed. */ - if (unlikely(la_ptr >= perf_max_events)) { + if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) { /* This should never occur! 
*/ irq_err_count++; pr_warning("PMI: silly index %ld\n", la_ptr); @@ -879,7 +879,6 @@ void __init init_hw_perf_events(void) /* And set up PMU specification */ alpha_pmu = &ev67_pmu; - perf_max_events = alpha_pmu->num_pmcs; perf_pmu_register(&pmu); } diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 448cfa6b3ef..45d6a35217c 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -534,7 +534,7 @@ static int armpmu_event_init(struct perf_event *event) event->destroy = hw_perf_event_destroy; if (!atomic_inc_not_zero(&active_events)) { - if (atomic_read(&active_events) > perf_max_events) { + if (atomic_read(&active_events) > armpmu.num_events) { atomic_dec(&active_events); return -ENOSPC; } @@ -2974,14 +2974,12 @@ init_hw_perf_events(void) armpmu = &armv6pmu; memcpy(armpmu_perf_cache_map, armv6_perf_cache_map, sizeof(armv6_perf_cache_map)); - perf_max_events = armv6pmu.num_events; break; case 0xB020: /* ARM11mpcore */ armpmu = &armv6mpcore_pmu; memcpy(armpmu_perf_cache_map, armv6mpcore_perf_cache_map, sizeof(armv6mpcore_perf_cache_map)); - perf_max_events = armv6mpcore_pmu.num_events; break; case 0xC080: /* Cortex-A8 */ armv7pmu.id = ARM_PERF_PMU_ID_CA8; @@ -2993,7 +2991,6 @@ init_hw_perf_events(void) /* Reset PMNC and read the nb of CNTx counters supported */ armv7pmu.num_events = armv7_reset_read_pmnc(); - perf_max_events = armv7pmu.num_events; break; case 0xC090: /* Cortex-A9 */ armv7pmu.id = ARM_PERF_PMU_ID_CA9; @@ -3005,7 +3002,6 @@ init_hw_perf_events(void) /* Reset PMNC and read the nb of CNTx counters supported */ armv7pmu.num_events = armv7_reset_read_pmnc(); - perf_max_events = armv7pmu.num_events; break; } /* Intel CPUs [xscale]. */ @@ -3016,13 +3012,11 @@ init_hw_perf_events(void) armpmu = &xscale1pmu; memcpy(armpmu_perf_cache_map, xscale_perf_cache_map, sizeof(xscale_perf_cache_map)); - perf_max_events = xscale1pmu.num_events; break; case 2: armpmu = &xscale2pmu; memcpy(armpmu_perf_cache_map, xscale_perf_cache_map, sizeof(xscale_perf_cache_map)); - perf_max_events = xscale2pmu.num_events; break; } } @@ -3032,7 +3026,6 @@ init_hw_perf_events(void) arm_pmu_names[armpmu->id], armpmu->num_events); } else { pr_info("no hardware support available\n"); - perf_max_events = -1; } perf_pmu_register(&pmu); diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 516be2314b5..f9a70675936 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -897,7 +897,7 @@ static int sparc_check_constraints(struct perf_event **evts, if (!n_ev) return 0; - if (n_ev > perf_max_events) + if (n_ev > MAX_HWEVENTS) return -1; msk0 = perf_event_get_msk(events[0]); @@ -1014,7 +1014,7 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags) perf_pmu_disable(event->pmu); n0 = cpuc->n_events; - if (n0 >= perf_max_events) + if (n0 >= MAX_HWEVENTS) goto out; cpuc->event[n0] = event; @@ -1097,7 +1097,7 @@ static int sparc_pmu_event_init(struct perf_event *event) n = 0; if (event->group_leader != event) { n = collect_events(event->group_leader, - perf_max_events - 1, + MAX_HWEVENTS - 1, evts, events, current_idx_dmy); if (n < 0) return -EINVAL; @@ -1309,9 +1309,6 @@ void __init init_hw_perf_events(void) pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type); - /* All sparc64 PMUs currently have 2 events. 
*/ - perf_max_events = 2; - perf_pmu_register(&pmu); register_die_notifier(&perf_event_nmi_notifier); } diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index dd6fec71067..0fb17050360 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1396,7 +1396,6 @@ void __init init_hw_perf_events(void) x86_pmu.num_counters = X86_PMC_MAX_GENERIC; } x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1; - perf_max_events = x86_pmu.num_counters; if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) { WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!", diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 402073c6166..b22176d3ebd 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -860,7 +860,6 @@ struct perf_cpu_context { struct perf_event_context ctx; struct perf_event_context *task_ctx; int active_oncpu; - int max_pertask; int exclusive; struct swevent_hlist *swevent_hlist; struct mutex hlist_mutex; @@ -883,11 +882,6 @@ struct perf_output_handle { #ifdef CONFIG_PERF_EVENTS -/* - * Set by architecture code: - */ -extern int perf_max_events; - extern int perf_pmu_register(struct pmu *pmu); extern void perf_pmu_unregister(struct pmu *pmu); diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 3bace4fd035..8462e69409a 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -39,10 +39,6 @@ */ static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context); -int perf_max_events __read_mostly = 1; -static int perf_reserved_percpu __read_mostly; -static int perf_overcommit __read_mostly = 1; - static atomic_t nr_events __read_mostly; static atomic_t nr_mmap_events __read_mostly; static atomic_t nr_comm_events __read_mostly; @@ -66,11 +62,6 @@ int sysctl_perf_event_sample_rate __read_mostly = 100000; static atomic64_t perf_event_id; -/* - * Lock for (sysadmin-configurable) event reservations: - */ -static DEFINE_SPINLOCK(perf_resource_lock); - void __weak perf_event_print_debug(void) { } void perf_pmu_disable(struct pmu *pmu) @@ -480,16 +471,6 @@ static void __perf_event_remove_from_context(void *info) list_del_event(event, ctx); - if (!ctx->task) { - /* - * Allow more per task events with respect to the - * reservation: - */ - cpuctx->max_pertask = - min(perf_max_events - ctx->nr_events, - perf_max_events - perf_reserved_percpu); - } - raw_spin_unlock(&ctx->lock); } @@ -823,9 +804,6 @@ static void __perf_install_in_context(void *info) } } - if (!err && !ctx->task && cpuctx->max_pertask) - cpuctx->max_pertask--; - unlock: raw_spin_unlock(&ctx->lock); } @@ -5930,10 +5908,6 @@ static void __cpuinit perf_event_init_cpu(int cpu) cpuctx = &per_cpu(perf_cpu_context, cpu); - spin_lock(&perf_resource_lock); - cpuctx->max_pertask = perf_max_events - perf_reserved_percpu; - spin_unlock(&perf_resource_lock); - mutex_lock(&cpuctx->hlist_mutex); if (cpuctx->hlist_refcount > 0) { struct swevent_hlist *hlist; @@ -6008,101 +5982,3 @@ void __init perf_event_init(void) perf_tp_register(); perf_cpu_notifier(perf_cpu_notify); } - -static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, - struct sysdev_class_attribute *attr, - char *buf) -{ - return sprintf(buf, "%d\n", perf_reserved_percpu); -} - -static ssize_t -perf_set_reserve_percpu(struct sysdev_class *class, - struct sysdev_class_attribute *attr, - const char *buf, - size_t count) -{ - struct perf_cpu_context *cpuctx; - unsigned long val; - int err, cpu, mpt; - - err = strict_strtoul(buf, 10, &val); - if (err) - return err; - 
if (val > perf_max_events) - return -EINVAL; - - spin_lock(&perf_resource_lock); - perf_reserved_percpu = val; - for_each_online_cpu(cpu) { - cpuctx = &per_cpu(perf_cpu_context, cpu); - raw_spin_lock_irq(&cpuctx->ctx.lock); - mpt = min(perf_max_events - cpuctx->ctx.nr_events, - perf_max_events - perf_reserved_percpu); - cpuctx->max_pertask = mpt; - raw_spin_unlock_irq(&cpuctx->ctx.lock); - } - spin_unlock(&perf_resource_lock); - - return count; -} - -static ssize_t perf_show_overcommit(struct sysdev_class *class, - struct sysdev_class_attribute *attr, - char *buf) -{ - return sprintf(buf, "%d\n", perf_overcommit); -} - -static ssize_t -perf_set_overcommit(struct sysdev_class *class, - struct sysdev_class_attribute *attr, - const char *buf, size_t count) -{ - unsigned long val; - int err; - - err = strict_strtoul(buf, 10, &val); - if (err) - return err; - if (val > 1) - return -EINVAL; - - spin_lock(&perf_resource_lock); - perf_overcommit = val; - spin_unlock(&perf_resource_lock); - - return count; -} - -static SYSDEV_CLASS_ATTR( - reserve_percpu, - 0644, - perf_show_reserve_percpu, - perf_set_reserve_percpu - ); - -static SYSDEV_CLASS_ATTR( - overcommit, - 0644, - perf_show_overcommit, - perf_set_overcommit - ); - -static struct attribute *perfclass_attrs[] = { - &attr_reserve_percpu.attr, - &attr_overcommit.attr, - NULL -}; - -static struct attribute_group perfclass_attr_group = { - .attrs = perfclass_attrs, - .name = "perf_events", -}; - -static int __init perf_event_sysfs_init(void) -{ - return sysfs_create_group(&cpu_sysdev_class.kset.kobj, - &perfclass_attr_group); -} -device_initcall(perf_event_sysfs_init); -- cgit v1.2.3-70-g09d2 From c3f00c70276d8ae82578c8b773e2db657f69a478 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 18 Aug 2010 14:37:15 +0200 Subject: perf: Separate find_get_context() from event initialization Separate find_get_context() from the event allocation and initialization so that we may make find_get_context() depend on the event pmu in a later patch. 
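In outline, the open path now allocates the event first and only then resolves its context, so the error unwind runs in the opposite order. A condensed sketch of the hunks below, not the verbatim kernel code (a later patch in this series additionally passes event->pmu to find_get_context()):

	event = perf_event_alloc(&attr, cpu, group_leader, NULL, NULL);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		goto err_fd;		/* only the unused fd to undo so far */
	}

	ctx = find_get_context(pid, cpu);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err_alloc;		/* free_event(), then put the fd */
	}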
Signed-off-by: Peter Zijlstra Cc: paulus Cc: stephane eranian Cc: Robert Richter Cc: Frederic Weisbecker Cc: Lin Ming Cc: Yanmin LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 73 +++++++++++++++++++++++++---------------------------- 1 file changed, 35 insertions(+), 38 deletions(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 8462e69409a..a3c86a8335c 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -827,6 +827,8 @@ perf_install_in_context(struct perf_event_context *ctx, { struct task_struct *task = ctx->task; + event->ctx = ctx; + if (!task) { /* * Per cpu events are installed via an smp call and @@ -5038,20 +5040,17 @@ struct pmu *perf_init_event(struct perf_event *event) * Allocate and initialize a event structure */ static struct perf_event * -perf_event_alloc(struct perf_event_attr *attr, - int cpu, - struct perf_event_context *ctx, +perf_event_alloc(struct perf_event_attr *attr, int cpu, struct perf_event *group_leader, struct perf_event *parent_event, - perf_overflow_handler_t overflow_handler, - gfp_t gfpflags) + perf_overflow_handler_t overflow_handler) { struct pmu *pmu; struct perf_event *event; struct hw_perf_event *hwc; long err; - event = kzalloc(sizeof(*event), gfpflags); + event = kzalloc(sizeof(*event), GFP_KERNEL); if (!event) return ERR_PTR(-ENOMEM); @@ -5076,7 +5075,6 @@ perf_event_alloc(struct perf_event_attr *attr, event->attr = *attr; event->group_leader = group_leader; event->pmu = NULL; - event->ctx = ctx; event->oncpu = -1; event->parent = parent_event; @@ -5321,20 +5319,26 @@ SYSCALL_DEFINE5(perf_event_open, if (event_fd < 0) return event_fd; + event = perf_event_alloc(&attr, cpu, group_leader, NULL, NULL); + if (IS_ERR(event)) { + err = PTR_ERR(event); + goto err_fd; + } + /* * Get the target context (task or percpu): */ ctx = find_get_context(pid, cpu); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); - goto err_fd; + goto err_alloc; } if (group_fd != -1) { group_leader = perf_fget_light(group_fd, &fput_needed); if (IS_ERR(group_leader)) { err = PTR_ERR(group_leader); - goto err_put_context; + goto err_context; } group_file = group_leader->filp; if (flags & PERF_FLAG_FD_OUTPUT) @@ -5354,37 +5358,30 @@ SYSCALL_DEFINE5(perf_event_open, * becoming part of another group-sibling): */ if (group_leader->group_leader != group_leader) - goto err_put_context; + goto err_context; /* * Do not allow to attach to a group in a different * task or CPU context: */ if (group_leader->ctx != ctx) - goto err_put_context; + goto err_context; /* * Only a group leader can be exclusive or pinned */ if (attr.exclusive || attr.pinned) - goto err_put_context; - } - - event = perf_event_alloc(&attr, cpu, ctx, group_leader, - NULL, NULL, GFP_KERNEL); - if (IS_ERR(event)) { - err = PTR_ERR(event); - goto err_put_context; + goto err_context; } if (output_event) { err = perf_event_set_output(event, output_event); if (err) - goto err_free_put_context; + goto err_context; } event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR); if (IS_ERR(event_file)) { err = PTR_ERR(event_file); - goto err_free_put_context; + goto err_context; } event->filp = event_file; @@ -5410,11 +5407,11 @@ SYSCALL_DEFINE5(perf_event_open, fd_install(event_fd, event_file); return event_fd; -err_free_put_context: - free_event(event); -err_put_context: +err_context: fput_light(group_file, fput_needed); put_ctx(ctx); +err_alloc: + free_event(event); err_fd: put_unused_fd(event_fd); return err; @@ -5432,25 +5429,24 @@ 
perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, pid_t pid, perf_overflow_handler_t overflow_handler) { - struct perf_event *event; struct perf_event_context *ctx; + struct perf_event *event; int err; /* * Get the target context (task or percpu): */ + event = perf_event_alloc(attr, cpu, NULL, NULL, overflow_handler); + if (IS_ERR(event)) { + err = PTR_ERR(event); + goto err; + } + ctx = find_get_context(pid, cpu); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); - goto err_exit; - } - - event = perf_event_alloc(attr, cpu, ctx, NULL, - NULL, overflow_handler, GFP_KERNEL); - if (IS_ERR(event)) { - err = PTR_ERR(event); - goto err_put_context; + goto err_free; } event->filp = NULL; @@ -5468,9 +5464,9 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, return event; - err_put_context: - put_ctx(ctx); - err_exit: +err_free: + free_event(event); +err: return ERR_PTR(err); } EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter); @@ -5498,9 +5494,9 @@ inherit_event(struct perf_event *parent_event, parent_event = parent_event->parent; child_event = perf_event_alloc(&parent_event->attr, - parent_event->cpu, child_ctx, + parent_event->cpu, group_leader, parent_event, - NULL, GFP_KERNEL); + NULL); if (IS_ERR(child_event)) return child_event; get_ctx(child_ctx); @@ -5525,6 +5521,7 @@ inherit_event(struct perf_event *parent_event, local64_set(&hwc->period_left, sample_period); } + child_event->ctx = child_ctx; child_event->overflow_handler = parent_event->overflow_handler; /* -- cgit v1.2.3-70-g09d2 From b28ab83c595e767f2028276b7398d17f2253cec0 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 6 Sep 2010 14:48:15 +0200 Subject: perf: Remove the swevent hash-table from the cpu context Separate the swevent hash-table from the cpu_context bits in preparation for per pmu cpu contexts. This keeps the swevent hash a global entity. Signed-off-by: Peter Zijlstra Cc: paulus Cc: stephane eranian Cc: Robert Richter Cc: Frederic Weisbecker Cc: Lin Ming Cc: Yanmin LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 6 --- kernel/perf_event.c | 104 +++++++++++++++++++++++++-------------------- 2 files changed, 58 insertions(+), 52 deletions(-) (limited to 'kernel') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index b22176d3ebd..4ab4f0ca09a 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -861,12 +861,6 @@ struct perf_cpu_context { struct perf_event_context *task_ctx; int active_oncpu; int exclusive; - struct swevent_hlist *swevent_hlist; - struct mutex hlist_mutex; - int hlist_refcount; - - /* Recursion avoidance in each contexts */ - int recursion[PERF_NR_CONTEXTS]; }; struct perf_output_handle { diff --git a/kernel/perf_event.c b/kernel/perf_event.c index a3c86a8335c..2c47ed6c4f2 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -4154,6 +4154,17 @@ int perf_event_overflow(struct perf_event *event, int nmi, * Generic software event infrastructure */ +struct swevent_htable { + struct swevent_hlist *swevent_hlist; + struct mutex hlist_mutex; + int hlist_refcount; + + /* Recursion avoidance in each contexts */ + int recursion[PERF_NR_CONTEXTS]; +}; + +static DEFINE_PER_CPU(struct swevent_htable, swevent_htable); + /* * We directly increment event->count and keep a second value in * event->hw.period_left to count intervals. 
This period event @@ -4286,11 +4297,11 @@ __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id) /* For the read side: events when they trigger */ static inline struct hlist_head * -find_swevent_head_rcu(struct perf_cpu_context *ctx, u64 type, u32 event_id) +find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id) { struct swevent_hlist *hlist; - hlist = rcu_dereference(ctx->swevent_hlist); + hlist = rcu_dereference(swhash->swevent_hlist); if (!hlist) return NULL; @@ -4299,7 +4310,7 @@ find_swevent_head_rcu(struct perf_cpu_context *ctx, u64 type, u32 event_id) /* For the event head insertion and removal in the hlist */ static inline struct hlist_head * -find_swevent_head(struct perf_cpu_context *ctx, struct perf_event *event) +find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) { struct swevent_hlist *hlist; u32 event_id = event->attr.config; @@ -4310,7 +4321,7 @@ find_swevent_head(struct perf_cpu_context *ctx, struct perf_event *event) * and release. Which makes the protected version suitable here. * The context lock guarantees that. */ - hlist = rcu_dereference_protected(ctx->swevent_hlist, + hlist = rcu_dereference_protected(swhash->swevent_hlist, lockdep_is_held(&event->ctx->lock)); if (!hlist) return NULL; @@ -4323,17 +4334,13 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id, struct perf_sample_data *data, struct pt_regs *regs) { - struct perf_cpu_context *cpuctx; + struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); struct perf_event *event; struct hlist_node *node; struct hlist_head *head; - cpuctx = &__get_cpu_var(perf_cpu_context); - rcu_read_lock(); - - head = find_swevent_head_rcu(cpuctx, type, event_id); - + head = find_swevent_head_rcu(swhash, type, event_id); if (!head) goto end; @@ -4347,17 +4354,17 @@ end: int perf_swevent_get_recursion_context(void) { - struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); + struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); - return get_recursion_context(cpuctx->recursion); + return get_recursion_context(swhash->recursion); } EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); void inline perf_swevent_put_recursion_context(int rctx) { - struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); + struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); - put_recursion_context(cpuctx->recursion, rctx); + put_recursion_context(swhash->recursion, rctx); } void __perf_sw_event(u32 event_id, u64 nr, int nmi, @@ -4385,12 +4392,10 @@ static void perf_swevent_read(struct perf_event *event) static int perf_swevent_add(struct perf_event *event, int flags) { + struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); struct hw_perf_event *hwc = &event->hw; - struct perf_cpu_context *cpuctx; struct hlist_head *head; - cpuctx = &__get_cpu_var(perf_cpu_context); - if (hwc->sample_period) { hwc->last_period = hwc->sample_period; perf_swevent_set_period(event); @@ -4398,7 +4403,7 @@ static int perf_swevent_add(struct perf_event *event, int flags) hwc->state = !(flags & PERF_EF_START); - head = find_swevent_head(cpuctx, event); + head = find_swevent_head(swhash, event); if (WARN_ON_ONCE(!head)) return -EINVAL; @@ -4424,10 +4429,10 @@ static void perf_swevent_stop(struct perf_event *event, int flags) /* Deref the hlist from the update side */ static inline struct swevent_hlist * -swevent_hlist_deref(struct perf_cpu_context *cpuctx) +swevent_hlist_deref(struct swevent_htable *swhash) { - return 
rcu_dereference_protected(cpuctx->swevent_hlist, - lockdep_is_held(&cpuctx->hlist_mutex)); + return rcu_dereference_protected(swhash->swevent_hlist, + lockdep_is_held(&swhash->hlist_mutex)); } static void swevent_hlist_release_rcu(struct rcu_head *rcu_head) @@ -4438,27 +4443,27 @@ static void swevent_hlist_release_rcu(struct rcu_head *rcu_head) kfree(hlist); } -static void swevent_hlist_release(struct perf_cpu_context *cpuctx) +static void swevent_hlist_release(struct swevent_htable *swhash) { - struct swevent_hlist *hlist = swevent_hlist_deref(cpuctx); + struct swevent_hlist *hlist = swevent_hlist_deref(swhash); if (!hlist) return; - rcu_assign_pointer(cpuctx->swevent_hlist, NULL); + rcu_assign_pointer(swhash->swevent_hlist, NULL); call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu); } static void swevent_hlist_put_cpu(struct perf_event *event, int cpu) { - struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); + struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); - mutex_lock(&cpuctx->hlist_mutex); + mutex_lock(&swhash->hlist_mutex); - if (!--cpuctx->hlist_refcount) - swevent_hlist_release(cpuctx); + if (!--swhash->hlist_refcount) + swevent_hlist_release(swhash); - mutex_unlock(&cpuctx->hlist_mutex); + mutex_unlock(&swhash->hlist_mutex); } static void swevent_hlist_put(struct perf_event *event) @@ -4476,12 +4481,12 @@ static void swevent_hlist_put(struct perf_event *event) static int swevent_hlist_get_cpu(struct perf_event *event, int cpu) { - struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); + struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); int err = 0; - mutex_lock(&cpuctx->hlist_mutex); + mutex_lock(&swhash->hlist_mutex); - if (!swevent_hlist_deref(cpuctx) && cpu_online(cpu)) { + if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) { struct swevent_hlist *hlist; hlist = kzalloc(sizeof(*hlist), GFP_KERNEL); @@ -4489,11 +4494,11 @@ static int swevent_hlist_get_cpu(struct perf_event *event, int cpu) err = -ENOMEM; goto exit; } - rcu_assign_pointer(cpuctx->swevent_hlist, hlist); + rcu_assign_pointer(swhash->swevent_hlist, hlist); } - cpuctx->hlist_refcount++; + swhash->hlist_refcount++; exit: - mutex_unlock(&cpuctx->hlist_mutex); + mutex_unlock(&swhash->hlist_mutex); return err; } @@ -5889,12 +5894,15 @@ int perf_event_init_task(struct task_struct *child) static void __init perf_event_init_all_cpus(void) { - int cpu; struct perf_cpu_context *cpuctx; + struct swevent_htable *swhash; + int cpu; for_each_possible_cpu(cpu) { + swhash = &per_cpu(swevent_htable, cpu); + mutex_init(&swhash->hlist_mutex); + cpuctx = &per_cpu(perf_cpu_context, cpu); - mutex_init(&cpuctx->hlist_mutex); __perf_event_init_context(&cpuctx->ctx, NULL); } } @@ -5902,18 +5910,21 @@ static void __init perf_event_init_all_cpus(void) static void __cpuinit perf_event_init_cpu(int cpu) { struct perf_cpu_context *cpuctx; + struct swevent_htable *swhash; cpuctx = &per_cpu(perf_cpu_context, cpu); - mutex_lock(&cpuctx->hlist_mutex); - if (cpuctx->hlist_refcount > 0) { + swhash = &per_cpu(swevent_htable, cpu); + + mutex_lock(&swhash->hlist_mutex); + if (swhash->hlist_refcount > 0) { struct swevent_hlist *hlist; - hlist = kzalloc(sizeof(*hlist), GFP_KERNEL); - WARN_ON_ONCE(!hlist); - rcu_assign_pointer(cpuctx->swevent_hlist, hlist); + hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu)); + WARN_ON(!hlist); + rcu_assign_pointer(swhash->swevent_hlist, hlist); } - mutex_unlock(&cpuctx->hlist_mutex); + mutex_unlock(&swhash->hlist_mutex); } #ifdef CONFIG_HOTPLUG_CPU 
@@ -5931,11 +5942,12 @@ static void __perf_event_exit_cpu(void *info) static void perf_event_exit_cpu(int cpu) { struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); + struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); struct perf_event_context *ctx = &cpuctx->ctx; - mutex_lock(&cpuctx->hlist_mutex); - swevent_hlist_release(cpuctx); - mutex_unlock(&cpuctx->hlist_mutex); + mutex_lock(&swhash->hlist_mutex); + swevent_hlist_release(swhash); + mutex_unlock(&swhash->hlist_mutex); mutex_lock(&ctx->mutex); smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1); -- cgit v1.2.3-70-g09d2 From b5ab4cd563e7ab49b27957704112a8ecade54e1f Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 6 Sep 2010 16:32:21 +0200 Subject: perf: Per cpu-context rotation timer Give each cpu-context its own timer so that it is a self-contained entity. This eases the way for per-pmu-per-cpu contexts and provides the basic infrastructure to allow different rotation times per pmu. Things to look at: - folding the tick and these TICK_NSEC timers - separate task context rotation Signed-off-by: Peter Zijlstra Cc: paulus Cc: stephane eranian Cc: Robert Richter Cc: Frederic Weisbecker Cc: Lin Ming Cc: Yanmin LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 5 ++- kernel/perf_event.c | 80 ++++++++++++++++++++++++++++++++++++---------- kernel/sched.c | 2 -- 3 files changed, 65 insertions(+), 22 deletions(-) (limited to 'kernel') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 4ab4f0ca09a..fa04537df55 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -861,6 +861,8 @@ struct perf_cpu_context { struct perf_event_context *task_ctx; int active_oncpu; int exclusive; + u64 timer_interval; + struct hrtimer timer; }; struct perf_output_handle { @@ -881,7 +883,6 @@ extern void perf_pmu_unregister(struct pmu *pmu); extern void perf_event_task_sched_in(struct task_struct *task); extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next); -extern void perf_event_task_tick(struct task_struct *task); extern int perf_event_init_task(struct task_struct *child); extern void perf_event_exit_task(struct task_struct *child); extern void perf_event_free_task(struct task_struct *task); @@ -1067,8 +1068,6 @@ perf_event_task_sched_in(struct task_struct *task) { } static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next) { } -static inline void -perf_event_task_tick(struct task_struct *task) { } static inline int perf_event_init_task(struct task_struct *child) { return 0; } static inline void perf_event_exit_task(struct task_struct *child) { } static inline void perf_event_free_task(struct task_struct *task) { } diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 2c47ed6c4f2..d75e4c8727f 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -78,6 +78,25 @@ void perf_pmu_enable(struct pmu *pmu) pmu->pmu_enable(pmu); } +static void perf_pmu_rotate_start(void) +{ + struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); + + if (hrtimer_active(&cpuctx->timer)) + return; + + __hrtimer_start_range_ns(&cpuctx->timer, + ns_to_ktime(cpuctx->timer_interval), 0, + HRTIMER_MODE_REL_PINNED, 0); +} + +static void perf_pmu_rotate_stop(void) +{ + struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); + + hrtimer_cancel(&cpuctx->timer); +} + static void get_ctx(struct perf_event_context *ctx) { WARN_ON(!atomic_inc_not_zero(&ctx->refcount)); @@
-281,6 +300,8 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx) } list_add_rcu(&event->event_entry, &ctx->event_list); + if (!ctx->nr_events) + perf_pmu_rotate_start(); ctx->nr_events++; if (event->attr.inherit_stat) ctx->nr_stat++; @@ -1383,6 +1404,12 @@ void perf_event_task_sched_in(struct task_struct *task) ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE); cpuctx->task_ctx = ctx; + + /* + * Since these rotations are per-cpu, we need to ensure the + * cpu-context we got scheduled on is actually rotating. + */ + perf_pmu_rotate_start(); } #define MAX_INTERRUPTS (~0ULL) @@ -1487,7 +1514,7 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count) } } -static void perf_ctx_adjust_freq(struct perf_event_context *ctx) +static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period) { struct perf_event *event; struct hw_perf_event *hwc; @@ -1524,7 +1551,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx) hwc->freq_count_stamp = now; if (delta > 0) - perf_adjust_period(event, TICK_NSEC, delta); + perf_adjust_period(event, period, delta); } raw_spin_unlock(&ctx->lock); } @@ -1542,30 +1569,39 @@ static void rotate_ctx(struct perf_event_context *ctx) raw_spin_unlock(&ctx->lock); } -void perf_event_task_tick(struct task_struct *curr) +/* + * Cannot race with ->pmu_rotate_start() because this is ran from hardirq + * context, and ->pmu_rotate_start() is called with irqs disabled (both are + * cpu affine, so there are no SMP races). + */ +static enum hrtimer_restart perf_event_context_tick(struct hrtimer *timer) { + enum hrtimer_restart restart = HRTIMER_NORESTART; struct perf_cpu_context *cpuctx; struct perf_event_context *ctx; int rotate = 0; - if (!atomic_read(&nr_events)) - return; + cpuctx = container_of(timer, struct perf_cpu_context, timer); - cpuctx = &__get_cpu_var(perf_cpu_context); - if (cpuctx->ctx.nr_events && - cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) - rotate = 1; + if (cpuctx->ctx.nr_events) { + restart = HRTIMER_RESTART; + if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) + rotate = 1; + } - ctx = curr->perf_event_ctxp; - if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active) - rotate = 1; + ctx = current->perf_event_ctxp; + if (ctx && ctx->nr_events) { + restart = HRTIMER_RESTART; + if (ctx->nr_events != ctx->nr_active) + rotate = 1; + } - perf_ctx_adjust_freq(&cpuctx->ctx); + perf_ctx_adjust_freq(&cpuctx->ctx, cpuctx->timer_interval); if (ctx) - perf_ctx_adjust_freq(ctx); + perf_ctx_adjust_freq(ctx, cpuctx->timer_interval); if (!rotate) - return; + goto done; cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); if (ctx) @@ -1577,7 +1613,12 @@ void perf_event_task_tick(struct task_struct *curr) cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE); if (ctx) - task_ctx_sched_in(curr, EVENT_FLEXIBLE); + task_ctx_sched_in(current, EVENT_FLEXIBLE); + +done: + hrtimer_forward_now(timer, ns_to_ktime(cpuctx->timer_interval)); + + return restart; } static int event_enable_on_exec(struct perf_event *event, @@ -4786,7 +4827,7 @@ static void perf_swevent_start_hrtimer(struct perf_event *event) } __hrtimer_start_range_ns(&hwc->hrtimer, ns_to_ktime(period), 0, - HRTIMER_MODE_REL, 0); + HRTIMER_MODE_REL_PINNED, 0); } } @@ -5904,6 +5945,9 @@ static void __init perf_event_init_all_cpus(void) cpuctx = &per_cpu(perf_cpu_context, cpu); __perf_event_init_context(&cpuctx->ctx, NULL); + cpuctx->timer_interval = TICK_NSEC; + hrtimer_init(&cpuctx->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + cpuctx->timer.function = perf_event_context_tick; } 
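	/* each cpu-context now drives rotation from its own TICK_NSEC hrtimer;
	 * the perf_event_task_tick() hook is dropped from scheduler_tick() in
	 * the kernel/sched.c hunk below */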
} @@ -5934,6 +5978,8 @@ static void __perf_event_exit_cpu(void *info) struct perf_event_context *ctx = &cpuctx->ctx; struct perf_event *event, *tmp; + perf_pmu_rotate_stop(); + list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry) __perf_event_remove_from_context(event); list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry) diff --git a/kernel/sched.c b/kernel/sched.c index 09b574e7f4d..66a02ba83c0 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -3578,8 +3578,6 @@ void scheduler_tick(void) curr->sched_class->task_tick(rq, curr, 0); raw_spin_unlock(&rq->lock); - perf_event_task_tick(curr); - #ifdef CONFIG_SMP rq->idle_at_tick = idle_cpu(cpu); trigger_load_balance(rq, cpu); -- cgit v1.2.3-70-g09d2 From 108b02cfce04ee90b0a07ee0b104baffd39f5934 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 6 Sep 2010 14:32:03 +0200 Subject: perf: Per-pmu-per-cpu contexts Allocate per-cpu contexts per pmu. Signed-off-by: Peter Zijlstra Cc: paulus Cc: stephane eranian Cc: Robert Richter Cc: Frederic Weisbecker Cc: Lin Ming Cc: Yanmin LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 4 +- kernel/perf_event.c | 178 ++++++++++++++++++++++++++++----------------- 2 files changed, 113 insertions(+), 69 deletions(-) (limited to 'kernel') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index fa04537df55..22155ef3b36 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -570,7 +570,8 @@ struct perf_event; struct pmu { struct list_head entry; - int *pmu_disable_count; + int * __percpu pmu_disable_count; + struct perf_cpu_context * __percpu pmu_cpu_context; /* * Fully disable/enable this PMU, can be used to protect from the PMI @@ -808,6 +809,7 @@ struct perf_event { * Used as a container for task events and CPU events as well: */ struct perf_event_context { + struct pmu *pmu; /* * Protect the states of the events in the list, * nr_active, and the list: diff --git a/kernel/perf_event.c b/kernel/perf_event.c index d75e4c8727f..8ca6e690ffe 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -34,16 +34,15 @@ #include -/* - * Each CPU has a list of per CPU events: - */ -static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context); - static atomic_t nr_events __read_mostly; static atomic_t nr_mmap_events __read_mostly; static atomic_t nr_comm_events __read_mostly; static atomic_t nr_task_events __read_mostly; +static LIST_HEAD(pmus); +static DEFINE_MUTEX(pmus_lock); +static struct srcu_struct pmus_srcu; + /* * perf event paranoia level: * -1 - not paranoid at all @@ -78,9 +77,9 @@ void perf_pmu_enable(struct pmu *pmu) pmu->pmu_enable(pmu); } -static void perf_pmu_rotate_start(void) +static void perf_pmu_rotate_start(struct pmu *pmu) { - struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); + struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); if (hrtimer_active(&cpuctx->timer)) return; @@ -90,9 +89,9 @@ static void perf_pmu_rotate_start(void) HRTIMER_MODE_REL_PINNED, 0); } -static void perf_pmu_rotate_stop(void) +static void perf_pmu_rotate_stop(struct pmu *pmu) { - struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); + struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); hrtimer_cancel(&cpuctx->timer); } @@ -301,7 +300,7 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx) list_add_rcu(&event->event_entry, &ctx->event_list); if (!ctx->nr_events) - perf_pmu_rotate_start(); + perf_pmu_rotate_start(ctx->pmu); 
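	/* rotation is started lazily here, when a context gains its first event;
	 * perf_pmu_rotate_start() bails if this cpu's timer is already running */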
ctx->nr_events++; if (event->attr.inherit_stat) ctx->nr_stat++; @@ -466,6 +465,12 @@ group_sched_out(struct perf_event *group_event, cpuctx->exclusive = 0; } +static inline struct perf_cpu_context * +__get_cpu_context(struct perf_event_context *ctx) +{ + return this_cpu_ptr(ctx->pmu->pmu_cpu_context); +} + /* * Cross CPU call to remove a performance event * @@ -474,9 +479,9 @@ group_sched_out(struct perf_event *group_event, */ static void __perf_event_remove_from_context(void *info) { - struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); struct perf_event *event = info; struct perf_event_context *ctx = event->ctx; + struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); /* * If this is a task context, we need to check whether it is @@ -556,8 +561,8 @@ retry: static void __perf_event_disable(void *info) { struct perf_event *event = info; - struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); struct perf_event_context *ctx = event->ctx; + struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); /* * If this is a per-task event, need to check whether this @@ -765,10 +770,10 @@ static void add_event_to_ctx(struct perf_event *event, */ static void __perf_install_in_context(void *info) { - struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); struct perf_event *event = info; struct perf_event_context *ctx = event->ctx; struct perf_event *leader = event->group_leader; + struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); int err; /* @@ -912,9 +917,9 @@ static void __perf_event_mark_enabled(struct perf_event *event, static void __perf_event_enable(void *info) { struct perf_event *event = info; - struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); struct perf_event_context *ctx = event->ctx; struct perf_event *leader = event->group_leader; + struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); int err; /* @@ -1188,15 +1193,19 @@ static void perf_event_sync_stat(struct perf_event_context *ctx, void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next) { - struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); struct perf_event_context *ctx = task->perf_event_ctxp; struct perf_event_context *next_ctx; struct perf_event_context *parent; + struct perf_cpu_context *cpuctx; int do_switch = 1; perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0); - if (likely(!ctx || !cpuctx->task_ctx)) + if (likely(!ctx)) + return; + + cpuctx = __get_cpu_context(ctx); + if (!cpuctx->task_ctx) return; rcu_read_lock(); @@ -1242,7 +1251,7 @@ void perf_event_task_sched_out(struct task_struct *task, static void task_ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type) { - struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); + struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); if (!cpuctx->task_ctx) return; @@ -1360,8 +1369,8 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, static void task_ctx_sched_in(struct task_struct *task, enum event_type_t event_type) { - struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); struct perf_event_context *ctx = task->perf_event_ctxp; + struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); if (likely(!ctx)) return; @@ -1383,12 +1392,13 @@ static void task_ctx_sched_in(struct task_struct *task, */ void perf_event_task_sched_in(struct task_struct *task) { - struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); struct perf_event_context *ctx = task->perf_event_ctxp; + struct 
perf_cpu_context *cpuctx; if (likely(!ctx)) return; + cpuctx = __get_cpu_context(ctx); if (cpuctx->task_ctx == ctx) return; @@ -1409,7 +1419,7 @@ void perf_event_task_sched_in(struct task_struct *task) * Since these rotations are per-cpu, we need to ensure the * cpu-context we got scheduled on is actually rotating. */ - perf_pmu_rotate_start(); + perf_pmu_rotate_start(ctx->pmu); } #define MAX_INTERRUPTS (~0ULL) @@ -1687,9 +1697,9 @@ out: */ static void __perf_event_read(void *info) { - struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); struct perf_event *event = info; struct perf_event_context *ctx = event->ctx; + struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); /* * If this is a task context, we need to check whether it is @@ -1962,7 +1972,8 @@ __perf_event_init_context(struct perf_event_context *ctx, ctx->task = task; } -static struct perf_event_context *find_get_context(pid_t pid, int cpu) +static struct perf_event_context * +find_get_context(struct pmu *pmu, pid_t pid, int cpu) { struct perf_event_context *ctx; struct perf_cpu_context *cpuctx; @@ -1986,7 +1997,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu) if (!cpu_online(cpu)) return ERR_PTR(-ENODEV); - cpuctx = &per_cpu(perf_cpu_context, cpu); + cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); ctx = &cpuctx->ctx; get_ctx(ctx); @@ -2030,6 +2041,7 @@ retry: if (!ctx) goto errout; __perf_event_init_context(ctx, task); + ctx->pmu = pmu; get_ctx(ctx); if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) { /* @@ -3745,18 +3757,20 @@ static void perf_event_task_ctx(struct perf_event_context *ctx, static void perf_event_task_event(struct perf_task_event *task_event) { - struct perf_cpu_context *cpuctx; struct perf_event_context *ctx = task_event->task_ctx; + struct perf_cpu_context *cpuctx; + struct pmu *pmu; - rcu_read_lock(); - cpuctx = &get_cpu_var(perf_cpu_context); - perf_event_task_ctx(&cpuctx->ctx, task_event); + rcu_read_lock_sched(); + list_for_each_entry_rcu(pmu, &pmus, entry) { + cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); + perf_event_task_ctx(&cpuctx->ctx, task_event); + } if (!ctx) ctx = rcu_dereference(current->perf_event_ctxp); if (ctx) perf_event_task_ctx(ctx, task_event); - put_cpu_var(perf_cpu_context); - rcu_read_unlock(); + rcu_read_unlock_sched(); } static void perf_event_task(struct task_struct *task, @@ -3861,6 +3875,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event) struct perf_cpu_context *cpuctx; struct perf_event_context *ctx; unsigned int size; + struct pmu *pmu; char comm[TASK_COMM_LEN]; memset(comm, 0, sizeof(comm)); @@ -3872,14 +3887,15 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event) comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; - rcu_read_lock(); - cpuctx = &get_cpu_var(perf_cpu_context); - perf_event_comm_ctx(&cpuctx->ctx, comm_event); + rcu_read_lock_sched(); + list_for_each_entry_rcu(pmu, &pmus, entry) { + cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); + perf_event_comm_ctx(&cpuctx->ctx, comm_event); + } ctx = rcu_dereference(current->perf_event_ctxp); if (ctx) perf_event_comm_ctx(ctx, comm_event); - put_cpu_var(perf_cpu_context); - rcu_read_unlock(); + rcu_read_unlock_sched(); } void perf_event_comm(struct task_struct *task) @@ -3989,6 +4005,7 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) char tmp[16]; char *buf = NULL; const char *name; + struct pmu *pmu; memset(tmp, 0, sizeof(tmp)); @@ -4040,14 +4057,16 @@ got_name: mmap_event->event_id.header.size = 
sizeof(mmap_event->event_id) + size; - rcu_read_lock(); - cpuctx = &get_cpu_var(perf_cpu_context); - perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, vma->vm_flags & VM_EXEC); + rcu_read_lock_sched(); + list_for_each_entry_rcu(pmu, &pmus, entry) { + cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); + perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, + vma->vm_flags & VM_EXEC); + } ctx = rcu_dereference(current->perf_event_ctxp); if (ctx) perf_event_mmap_ctx(ctx, mmap_event, vma->vm_flags & VM_EXEC); - put_cpu_var(perf_cpu_context); - rcu_read_unlock(); + rcu_read_unlock_sched(); kfree(buf); } @@ -4982,10 +5001,6 @@ static struct pmu perf_task_clock = { .read = task_clock_event_read, }; -static LIST_HEAD(pmus); -static DEFINE_MUTEX(pmus_lock); -static struct srcu_struct pmus_srcu; - static void perf_pmu_nop_void(struct pmu *pmu) { } @@ -5013,7 +5028,7 @@ static void perf_pmu_cancel_txn(struct pmu *pmu) int perf_pmu_register(struct pmu *pmu) { - int ret; + int cpu, ret; mutex_lock(&pmus_lock); ret = -ENOMEM; @@ -5021,6 +5036,21 @@ int perf_pmu_register(struct pmu *pmu) if (!pmu->pmu_disable_count) goto unlock; + pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context); + if (!pmu->pmu_cpu_context) + goto free_pdc; + + for_each_possible_cpu(cpu) { + struct perf_cpu_context *cpuctx; + + cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); + __perf_event_init_context(&cpuctx->ctx, NULL); + cpuctx->ctx.pmu = pmu; + cpuctx->timer_interval = TICK_NSEC; + hrtimer_init(&cpuctx->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + cpuctx->timer.function = perf_event_context_tick; + } + if (!pmu->start_txn) { if (pmu->pmu_enable) { /* @@ -5049,6 +5079,10 @@ unlock: mutex_unlock(&pmus_lock); return ret; + +free_pdc: + free_percpu(pmu->pmu_disable_count); + goto unlock; } void perf_pmu_unregister(struct pmu *pmu) @@ -5057,9 +5091,14 @@ void perf_pmu_unregister(struct pmu *pmu) list_del_rcu(&pmu->entry); mutex_unlock(&pmus_lock); + /* + * We use the pmu list either under SRCU or preempt_disable, + * synchronize_srcu() implies synchronize_sched() so we're good. 
+ */ synchronize_srcu(&pmus_srcu); free_percpu(pmu->pmu_disable_count); + free_percpu(pmu->pmu_cpu_context); } struct pmu *perf_init_event(struct perf_event *event) @@ -5374,7 +5413,7 @@ SYSCALL_DEFINE5(perf_event_open, /* * Get the target context (task or percpu): */ - ctx = find_get_context(pid, cpu); + ctx = find_get_context(event->pmu, pid, cpu); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto err_alloc; @@ -5489,7 +5528,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, goto err; } - ctx = find_get_context(pid, cpu); + ctx = find_get_context(event->pmu, pid, cpu); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto err_free; @@ -5833,6 +5872,7 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent, return -ENOMEM; __perf_event_init_context(child_ctx, child); + child_ctx->pmu = event->pmu; child->perf_event_ctxp = child_ctx; get_task_struct(child); } @@ -5935,30 +5975,18 @@ int perf_event_init_task(struct task_struct *child) static void __init perf_event_init_all_cpus(void) { - struct perf_cpu_context *cpuctx; struct swevent_htable *swhash; int cpu; for_each_possible_cpu(cpu) { swhash = &per_cpu(swevent_htable, cpu); mutex_init(&swhash->hlist_mutex); - - cpuctx = &per_cpu(perf_cpu_context, cpu); - __perf_event_init_context(&cpuctx->ctx, NULL); - cpuctx->timer_interval = TICK_NSEC; - hrtimer_init(&cpuctx->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - cpuctx->timer.function = perf_event_context_tick; } } static void __cpuinit perf_event_init_cpu(int cpu) { - struct perf_cpu_context *cpuctx; - struct swevent_htable *swhash; - - cpuctx = &per_cpu(perf_cpu_context, cpu); - - swhash = &per_cpu(swevent_htable, cpu); + struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); mutex_lock(&swhash->hlist_mutex); if (swhash->hlist_refcount > 0) { @@ -5972,32 +6000,46 @@ static void __cpuinit perf_event_init_cpu(int cpu) } #ifdef CONFIG_HOTPLUG_CPU -static void __perf_event_exit_cpu(void *info) +static void __perf_event_exit_context(void *__info) { - struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); - struct perf_event_context *ctx = &cpuctx->ctx; + struct perf_event_context *ctx = __info; struct perf_event *event, *tmp; - perf_pmu_rotate_stop(); + perf_pmu_rotate_stop(ctx->pmu); list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry) __perf_event_remove_from_context(event); list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry) __perf_event_remove_from_context(event); } + +static void perf_event_exit_cpu_context(int cpu) +{ + struct perf_event_context *ctx; + struct pmu *pmu; + int idx; + + idx = srcu_read_lock(&pmus_srcu); + list_for_each_entry_rcu(pmu, &pmus, entry) { + ctx = &this_cpu_ptr(pmu->pmu_cpu_context)->ctx; + + mutex_lock(&ctx->mutex); + smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1); + mutex_unlock(&ctx->mutex); + } + srcu_read_unlock(&pmus_srcu, idx); + +} + static void perf_event_exit_cpu(int cpu) { - struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); - struct perf_event_context *ctx = &cpuctx->ctx; mutex_lock(&swhash->hlist_mutex); swevent_hlist_release(swhash); mutex_unlock(&swhash->hlist_mutex); - mutex_lock(&ctx->mutex); - smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1); - mutex_unlock(&ctx->mutex); + perf_event_exit_cpu_context(cpu); } #else static inline void perf_event_exit_cpu(int cpu) { } -- cgit v1.2.3-70-g09d2 From 97dee4f3206622f31396dede2b5ddb8670458f56 Mon Sep 17 00:00:00 
2001 From: Peter Zijlstra Date: Tue, 7 Sep 2010 15:35:33 +0200 Subject: perf: Move some code around Move all inherit code near each other. Signed-off-by: Peter Zijlstra Cc: paulus Cc: stephane eranian Cc: Robert Richter Cc: Frederic Weisbecker Cc: Lin Ming Cc: Yanmin LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 200 ++++++++++++++++++++++++++-------------------------- 1 file changed, 100 insertions(+), 100 deletions(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 8ca6e690ffe..dae0e2f3029 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -5556,106 +5556,6 @@ err: } EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter); -/* - * inherit a event from parent task to child task: - */ -static struct perf_event * -inherit_event(struct perf_event *parent_event, - struct task_struct *parent, - struct perf_event_context *parent_ctx, - struct task_struct *child, - struct perf_event *group_leader, - struct perf_event_context *child_ctx) -{ - struct perf_event *child_event; - - /* - * Instead of creating recursive hierarchies of events, - * we link inherited events back to the original parent, - * which has a filp for sure, which we use as the reference - * count: - */ - if (parent_event->parent) - parent_event = parent_event->parent; - - child_event = perf_event_alloc(&parent_event->attr, - parent_event->cpu, - group_leader, parent_event, - NULL); - if (IS_ERR(child_event)) - return child_event; - get_ctx(child_ctx); - - /* - * Make the child state follow the state of the parent event, - * not its attr.disabled bit. We hold the parent's mutex, - * so we won't race with perf_event_{en, dis}able_family. - */ - if (parent_event->state >= PERF_EVENT_STATE_INACTIVE) - child_event->state = PERF_EVENT_STATE_INACTIVE; - else - child_event->state = PERF_EVENT_STATE_OFF; - - if (parent_event->attr.freq) { - u64 sample_period = parent_event->hw.sample_period; - struct hw_perf_event *hwc = &child_event->hw; - - hwc->sample_period = sample_period; - hwc->last_period = sample_period; - - local64_set(&hwc->period_left, sample_period); - } - - child_event->ctx = child_ctx; - child_event->overflow_handler = parent_event->overflow_handler; - - /* - * Link it up in the child's context: - */ - add_event_to_ctx(child_event, child_ctx); - - /* - * Get a reference to the parent filp - we will fput it - * when the child event exits. 
This is safe to do because - * we are in the parent and we know that the filp still - * exists and has a nonzero count: - */ - atomic_long_inc(&parent_event->filp->f_count); - - /* - * Link this into the parent event's child list - */ - WARN_ON_ONCE(parent_event->ctx->parent_ctx); - mutex_lock(&parent_event->child_mutex); - list_add_tail(&child_event->child_list, &parent_event->child_list); - mutex_unlock(&parent_event->child_mutex); - - return child_event; -} - -static int inherit_group(struct perf_event *parent_event, - struct task_struct *parent, - struct perf_event_context *parent_ctx, - struct task_struct *child, - struct perf_event_context *child_ctx) -{ - struct perf_event *leader; - struct perf_event *sub; - struct perf_event *child_ctr; - - leader = inherit_event(parent_event, parent, parent_ctx, - child, NULL, child_ctx); - if (IS_ERR(leader)) - return PTR_ERR(leader); - list_for_each_entry(sub, &parent_event->sibling_list, group_entry) { - child_ctr = inherit_event(sub, parent, parent_ctx, - child, leader, child_ctx); - if (IS_ERR(child_ctr)) - return PTR_ERR(child_ctr); - } - return 0; -} - static void sync_child_event(struct perf_event *child_event, struct task_struct *child) { @@ -5844,6 +5744,106 @@ again: put_ctx(ctx); } +/* + * inherit a event from parent task to child task: + */ +static struct perf_event * +inherit_event(struct perf_event *parent_event, + struct task_struct *parent, + struct perf_event_context *parent_ctx, + struct task_struct *child, + struct perf_event *group_leader, + struct perf_event_context *child_ctx) +{ + struct perf_event *child_event; + + /* + * Instead of creating recursive hierarchies of events, + * we link inherited events back to the original parent, + * which has a filp for sure, which we use as the reference + * count: + */ + if (parent_event->parent) + parent_event = parent_event->parent; + + child_event = perf_event_alloc(&parent_event->attr, + parent_event->cpu, + group_leader, parent_event, + NULL); + if (IS_ERR(child_event)) + return child_event; + get_ctx(child_ctx); + + /* + * Make the child state follow the state of the parent event, + * not its attr.disabled bit. We hold the parent's mutex, + * so we won't race with perf_event_{en, dis}able_family. + */ + if (parent_event->state >= PERF_EVENT_STATE_INACTIVE) + child_event->state = PERF_EVENT_STATE_INACTIVE; + else + child_event->state = PERF_EVENT_STATE_OFF; + + if (parent_event->attr.freq) { + u64 sample_period = parent_event->hw.sample_period; + struct hw_perf_event *hwc = &child_event->hw; + + hwc->sample_period = sample_period; + hwc->last_period = sample_period; + + local64_set(&hwc->period_left, sample_period); + } + + child_event->ctx = child_ctx; + child_event->overflow_handler = parent_event->overflow_handler; + + /* + * Link it up in the child's context: + */ + add_event_to_ctx(child_event, child_ctx); + + /* + * Get a reference to the parent filp - we will fput it + * when the child event exits. 
This is safe to do because + * we are in the parent and we know that the filp still + * exists and has a nonzero count: + */ + atomic_long_inc(&parent_event->filp->f_count); + + /* + * Link this into the parent event's child list + */ + WARN_ON_ONCE(parent_event->ctx->parent_ctx); + mutex_lock(&parent_event->child_mutex); + list_add_tail(&child_event->child_list, &parent_event->child_list); + mutex_unlock(&parent_event->child_mutex); + + return child_event; +} + +static int inherit_group(struct perf_event *parent_event, + struct task_struct *parent, + struct perf_event_context *parent_ctx, + struct task_struct *child, + struct perf_event_context *child_ctx) +{ + struct perf_event *leader; + struct perf_event *sub; + struct perf_event *child_ctr; + + leader = inherit_event(parent_event, parent, parent_ctx, + child, NULL, child_ctx); + if (IS_ERR(leader)) + return PTR_ERR(leader); + list_for_each_entry(sub, &parent_event->sibling_list, group_entry) { + child_ctr = inherit_event(sub, parent, parent_ctx, + child, leader, child_ctx); + if (IS_ERR(child_ctr)) + return PTR_ERR(child_ctr); + } + return 0; +} + static int inherit_task_group(struct perf_event *event, struct task_struct *parent, struct perf_event_context *parent_ctx, -- cgit v1.2.3-70-g09d2 From eb184479874238393ac186c4e054d24311c34aaa Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 7 Sep 2010 15:55:13 +0200 Subject: perf: Clean up perf_event_context allocation Unify the two perf_event_context allocation sites. Signed-off-by: Peter Zijlstra Cc: paulus Cc: stephane eranian Cc: Robert Richter Cc: Frederic Weisbecker Cc: Lin Ming Cc: Yanmin LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 41 ++++++++++++++++++++++++++--------------- 1 file changed, 26 insertions(+), 15 deletions(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index dae0e2f3029..13d98d75634 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1959,9 +1959,7 @@ exit_put: /* * Initialize the perf_event context in a task_struct: */ -static void -__perf_event_init_context(struct perf_event_context *ctx, - struct task_struct *task) +static void __perf_event_init_context(struct perf_event_context *ctx) { raw_spin_lock_init(&ctx->lock); mutex_init(&ctx->mutex); @@ -1969,7 +1967,25 @@ __perf_event_init_context(struct perf_event_context *ctx, INIT_LIST_HEAD(&ctx->flexible_groups); INIT_LIST_HEAD(&ctx->event_list); atomic_set(&ctx->refcount, 1); - ctx->task = task; +} + +static struct perf_event_context * +alloc_perf_context(struct pmu *pmu, struct task_struct *task) +{ + struct perf_event_context *ctx; + + ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL); + if (!ctx) + return NULL; + + __perf_event_init_context(ctx); + if (task) { + ctx->task = task; + get_task_struct(task); + } + ctx->pmu = pmu; + + return ctx; } static struct perf_event_context * @@ -2036,22 +2052,22 @@ retry: } if (!ctx) { - ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL); + ctx = alloc_perf_context(pmu, task); err = -ENOMEM; if (!ctx) goto errout; - __perf_event_init_context(ctx, task); - ctx->pmu = pmu; + get_ctx(ctx); + if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) { /* * We raced with some other task; use * the context they set. 
*/ + put_task_struct(task); kfree(ctx); goto retry; } - get_task_struct(task); } put_task_struct(task); @@ -5044,7 +5060,7 @@ int perf_pmu_register(struct pmu *pmu) struct perf_cpu_context *cpuctx; cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); - __perf_event_init_context(&cpuctx->ctx, NULL); + __perf_event_init_context(&cpuctx->ctx); cpuctx->ctx.pmu = pmu; cpuctx->timer_interval = TICK_NSEC; hrtimer_init(&cpuctx->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); @@ -5866,15 +5882,11 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent, * child. */ - child_ctx = kzalloc(sizeof(struct perf_event_context), - GFP_KERNEL); + child_ctx = alloc_perf_context(event->pmu, child); if (!child_ctx) return -ENOMEM; - __perf_event_init_context(child_ctx, child); - child_ctx->pmu = event->pmu; child->perf_event_ctxp = child_ctx; - get_task_struct(child); } ret = inherit_group(event, parent, parent_ctx, @@ -5886,7 +5898,6 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent, return ret; } - /* * Initialize the perf_event context in task_struct */ -- cgit v1.2.3-70-g09d2 From 8dc85d547285668e509f86c177bcd4ea055bcaaf Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 2 Sep 2010 16:50:03 +0200 Subject: perf: Multiple task contexts Provide the infrastructure for multiple task contexts. A more flexible approach would have resulted in more pointer chases in the scheduling hot-paths. This approach has the limitation of a static number of task contexts. Since I expect most external PMUs to be system wide, or at least node wide (as per the intel uncore unit) they won't actually need a task context. Signed-off-by: Peter Zijlstra Cc: paulus Cc: stephane eranian Cc: Robert Richter Cc: Frederic Weisbecker Cc: Lin Ming Cc: Yanmin LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 1 + include/linux/sched.h | 8 +- kernel/perf_event.c | 336 +++++++++++++++++++++++++++++++-------------- 3 files changed, 239 insertions(+), 106 deletions(-) (limited to 'kernel') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 22155ef3b36..9ecfd856ce6 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -572,6 +572,7 @@ struct pmu { int * __percpu pmu_disable_count; struct perf_cpu_context * __percpu pmu_cpu_context; + int task_ctx_nr; /* * Fully disable/enable this PMU, can be used to protect from the PMI diff --git a/include/linux/sched.h b/include/linux/sched.h index 1e2a6db2d7d..89d6023c6f8 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1160,6 +1160,12 @@ struct sched_rt_entity { struct rcu_node; +enum perf_event_task_context { + perf_invalid_context = -1, + perf_hw_context = 0, + perf_nr_task_contexts, +}; + struct task_struct { volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ void *stack; @@ -1431,7 +1437,7 @@ struct task_struct { struct futex_pi_state *pi_state_cache; #endif #ifdef CONFIG_PERF_EVENTS - struct perf_event_context *perf_event_ctxp; + struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts]; struct mutex perf_event_mutex; struct list_head perf_event_list; #endif diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 13d98d75634..7223ea87586 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -148,13 +148,13 @@ static u64 primary_event_id(struct perf_event *event) * the context could get moved to another task. 
*/ static struct perf_event_context * -perf_lock_task_context(struct task_struct *task, unsigned long *flags) +perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags) { struct perf_event_context *ctx; rcu_read_lock(); retry: - ctx = rcu_dereference(task->perf_event_ctxp); + ctx = rcu_dereference(task->perf_event_ctxp[ctxn]); if (ctx) { /* * If this context is a clone of another, it might @@ -167,7 +167,7 @@ retry: * can't get swapped on us any more. */ raw_spin_lock_irqsave(&ctx->lock, *flags); - if (ctx != rcu_dereference(task->perf_event_ctxp)) { + if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) { raw_spin_unlock_irqrestore(&ctx->lock, *flags); goto retry; } @@ -186,12 +186,13 @@ retry: * can't get swapped to another task. This also increments its * reference count so that the context can't get freed. */ -static struct perf_event_context *perf_pin_task_context(struct task_struct *task) +static struct perf_event_context * +perf_pin_task_context(struct task_struct *task, int ctxn) { struct perf_event_context *ctx; unsigned long flags; - ctx = perf_lock_task_context(task, &flags); + ctx = perf_lock_task_context(task, ctxn, &flags); if (ctx) { ++ctx->pin_count; raw_spin_unlock_irqrestore(&ctx->lock, flags); @@ -1179,28 +1180,15 @@ static void perf_event_sync_stat(struct perf_event_context *ctx, } } -/* - * Called from scheduler to remove the events of the current task, - * with interrupts disabled. - * - * We stop each event and update the event value in event->count. - * - * This does not protect us against NMI, but disable() - * sets the disabled bit in the control field of event _before_ - * accessing the event control register. If a NMI hits, then it will - * not restart the event. - */ -void perf_event_task_sched_out(struct task_struct *task, - struct task_struct *next) +void perf_event_context_sched_out(struct task_struct *task, int ctxn, + struct task_struct *next) { - struct perf_event_context *ctx = task->perf_event_ctxp; + struct perf_event_context *ctx = task->perf_event_ctxp[ctxn]; struct perf_event_context *next_ctx; struct perf_event_context *parent; struct perf_cpu_context *cpuctx; int do_switch = 1; - perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0); - if (likely(!ctx)) return; @@ -1210,7 +1198,7 @@ void perf_event_task_sched_out(struct task_struct *task, rcu_read_lock(); parent = rcu_dereference(ctx->parent_ctx); - next_ctx = next->perf_event_ctxp; + next_ctx = next->perf_event_ctxp[ctxn]; if (parent && next_ctx && rcu_dereference(next_ctx->parent_ctx) == parent) { /* @@ -1229,8 +1217,8 @@ void perf_event_task_sched_out(struct task_struct *task, * XXX do we need a memory barrier of sorts * wrt to rcu_dereference() of perf_event_ctxp */ - task->perf_event_ctxp = next_ctx; - next->perf_event_ctxp = ctx; + task->perf_event_ctxp[ctxn] = next_ctx; + next->perf_event_ctxp[ctxn] = ctx; ctx->task = next; next_ctx->task = task; do_switch = 0; @@ -1248,6 +1236,31 @@ void perf_event_task_sched_out(struct task_struct *task, } } +#define for_each_task_context_nr(ctxn) \ + for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++) + +/* + * Called from scheduler to remove the events of the current task, + * with interrupts disabled. + * + * We stop each event and update the event value in event->count. + * + * This does not protect us against NMI, but disable() + * sets the disabled bit in the control field of event _before_ + * accessing the event control register. If a NMI hits, then it will + * not restart the event. 
+ */ +void perf_event_task_sched_out(struct task_struct *task, + struct task_struct *next) +{ + int ctxn; + + perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0); + + for_each_task_context_nr(ctxn) + perf_event_context_sched_out(task, ctxn, next); +} + static void task_ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type) { @@ -1366,38 +1379,23 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, ctx_sched_in(ctx, cpuctx, event_type); } -static void task_ctx_sched_in(struct task_struct *task, +static void task_ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type) { - struct perf_event_context *ctx = task->perf_event_ctxp; - struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); + struct perf_cpu_context *cpuctx; - if (likely(!ctx)) - return; + cpuctx = __get_cpu_context(ctx); if (cpuctx->task_ctx == ctx) return; + ctx_sched_in(ctx, cpuctx, event_type); cpuctx->task_ctx = ctx; } -/* - * Called from scheduler to add the events of the current task - * with interrupts disabled. - * - * We restore the event value and then enable it. - * - * This does not protect us against NMI, but enable() - * sets the enabled bit in the control field of event _before_ - * accessing the event control register. If a NMI hits, then it will - * keep the event running. - */ -void perf_event_task_sched_in(struct task_struct *task) + +void perf_event_context_sched_in(struct perf_event_context *ctx) { - struct perf_event_context *ctx = task->perf_event_ctxp; struct perf_cpu_context *cpuctx; - if (likely(!ctx)) - return; - cpuctx = __get_cpu_context(ctx); if (cpuctx->task_ctx == ctx) return; @@ -1422,6 +1420,31 @@ void perf_event_task_sched_in(struct task_struct *task) perf_pmu_rotate_start(ctx->pmu); } +/* + * Called from scheduler to add the events of the current task + * with interrupts disabled. + * + * We restore the event value and then enable it. + * + * This does not protect us against NMI, but enable() + * sets the enabled bit in the control field of event _before_ + * accessing the event control register. If a NMI hits, then it will + * keep the event running. 
+ */ +void perf_event_task_sched_in(struct task_struct *task) +{ + struct perf_event_context *ctx; + int ctxn; + + for_each_task_context_nr(ctxn) { + ctx = task->perf_event_ctxp[ctxn]; + if (likely(!ctx)) + continue; + + perf_event_context_sched_in(ctx); + } +} + #define MAX_INTERRUPTS (~0ULL) static void perf_log_throttle(struct perf_event *event, int enable); @@ -1588,7 +1611,7 @@ static enum hrtimer_restart perf_event_context_tick(struct hrtimer *timer) { enum hrtimer_restart restart = HRTIMER_NORESTART; struct perf_cpu_context *cpuctx; - struct perf_event_context *ctx; + struct perf_event_context *ctx = NULL; int rotate = 0; cpuctx = container_of(timer, struct perf_cpu_context, timer); @@ -1599,7 +1622,7 @@ static enum hrtimer_restart perf_event_context_tick(struct hrtimer *timer) rotate = 1; } - ctx = current->perf_event_ctxp; + ctx = cpuctx->task_ctx; if (ctx && ctx->nr_events) { restart = HRTIMER_RESTART; if (ctx->nr_events != ctx->nr_active) @@ -1623,7 +1646,7 @@ static enum hrtimer_restart perf_event_context_tick(struct hrtimer *timer) cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE); if (ctx) - task_ctx_sched_in(current, EVENT_FLEXIBLE); + task_ctx_sched_in(ctx, EVENT_FLEXIBLE); done: hrtimer_forward_now(timer, ns_to_ktime(cpuctx->timer_interval)); @@ -1650,20 +1673,18 @@ static int event_enable_on_exec(struct perf_event *event, * Enable all of a task's events that have been marked enable-on-exec. * This expects task == current. */ -static void perf_event_enable_on_exec(struct task_struct *task) +static void perf_event_enable_on_exec(struct perf_event_context *ctx) { - struct perf_event_context *ctx; struct perf_event *event; unsigned long flags; int enabled = 0; int ret; local_irq_save(flags); - ctx = task->perf_event_ctxp; if (!ctx || !ctx->nr_events) goto out; - __perf_event_task_sched_out(ctx); + task_ctx_sched_out(ctx, EVENT_ALL); raw_spin_lock(&ctx->lock); @@ -1687,7 +1708,7 @@ static void perf_event_enable_on_exec(struct task_struct *task) raw_spin_unlock(&ctx->lock); - perf_event_task_sched_in(task); + perf_event_context_sched_in(ctx); out: local_irq_restore(flags); } @@ -1995,7 +2016,7 @@ find_get_context(struct pmu *pmu, pid_t pid, int cpu) struct perf_cpu_context *cpuctx; struct task_struct *task; unsigned long flags; - int err; + int ctxn, err; if (pid == -1 && cpu != -1) { /* Must be root to operate on a CPU event: */ @@ -2044,8 +2065,13 @@ find_get_context(struct pmu *pmu, pid_t pid, int cpu) if (!ptrace_may_access(task, PTRACE_MODE_READ)) goto errout; + err = -EINVAL; + ctxn = pmu->task_ctx_nr; + if (ctxn < 0) + goto errout; + retry: - ctx = perf_lock_task_context(task, &flags); + ctx = perf_lock_task_context(task, ctxn, &flags); if (ctx) { unclone_ctx(ctx); raw_spin_unlock_irqrestore(&ctx->lock, flags); @@ -2059,7 +2085,7 @@ retry: get_ctx(ctx); - if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) { + if (cmpxchg(&task->perf_event_ctxp[ctxn], NULL, ctx)) { /* * We raced with some other task; use * the context they set. 
@@ -3773,19 +3799,26 @@ static void perf_event_task_ctx(struct perf_event_context *ctx, static void perf_event_task_event(struct perf_task_event *task_event) { - struct perf_event_context *ctx = task_event->task_ctx; struct perf_cpu_context *cpuctx; + struct perf_event_context *ctx; struct pmu *pmu; + int ctxn; rcu_read_lock_sched(); list_for_each_entry_rcu(pmu, &pmus, entry) { cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); perf_event_task_ctx(&cpuctx->ctx, task_event); + + ctx = task_event->task_ctx; + if (!ctx) { + ctxn = pmu->task_ctx_nr; + if (ctxn < 0) + continue; + ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); + } + if (ctx) + perf_event_task_ctx(ctx, task_event); } - if (!ctx) - ctx = rcu_dereference(current->perf_event_ctxp); - if (ctx) - perf_event_task_ctx(ctx, task_event); rcu_read_unlock_sched(); } @@ -3890,9 +3923,10 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event) { struct perf_cpu_context *cpuctx; struct perf_event_context *ctx; + char comm[TASK_COMM_LEN]; unsigned int size; struct pmu *pmu; - char comm[TASK_COMM_LEN]; + int ctxn; memset(comm, 0, sizeof(comm)); strlcpy(comm, comm_event->task->comm, sizeof(comm)); @@ -3907,19 +3941,31 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event) list_for_each_entry_rcu(pmu, &pmus, entry) { cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); perf_event_comm_ctx(&cpuctx->ctx, comm_event); + + ctxn = pmu->task_ctx_nr; + if (ctxn < 0) + continue; + + ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); + if (ctx) + perf_event_comm_ctx(ctx, comm_event); } - ctx = rcu_dereference(current->perf_event_ctxp); - if (ctx) - perf_event_comm_ctx(ctx, comm_event); rcu_read_unlock_sched(); } void perf_event_comm(struct task_struct *task) { struct perf_comm_event comm_event; + struct perf_event_context *ctx; + int ctxn; - if (task->perf_event_ctxp) - perf_event_enable_on_exec(task); + for_each_task_context_nr(ctxn) { + ctx = task->perf_event_ctxp[ctxn]; + if (!ctx) + continue; + + perf_event_enable_on_exec(ctx); + } if (!atomic_read(&nr_comm_events)) return; @@ -4022,6 +4068,7 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) char *buf = NULL; const char *name; struct pmu *pmu; + int ctxn; memset(tmp, 0, sizeof(tmp)); @@ -4078,10 +4125,17 @@ got_name: cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, vma->vm_flags & VM_EXEC); + + ctxn = pmu->task_ctx_nr; + if (ctxn < 0) + continue; + + ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); + if (ctx) { + perf_event_mmap_ctx(ctx, mmap_event, + vma->vm_flags & VM_EXEC); + } } - ctx = rcu_dereference(current->perf_event_ctxp); - if (ctx) - perf_event_mmap_ctx(ctx, mmap_event, vma->vm_flags & VM_EXEC); rcu_read_unlock_sched(); kfree(buf); @@ -5042,6 +5096,43 @@ static void perf_pmu_cancel_txn(struct pmu *pmu) perf_pmu_enable(pmu); } +/* + * Ensures all contexts with the same task_ctx_nr have the same + * pmu_cpu_context too. + */ +static void *find_pmu_context(int ctxn) +{ + struct pmu *pmu; + + if (ctxn < 0) + return NULL; + + list_for_each_entry(pmu, &pmus, entry) { + if (pmu->task_ctx_nr == ctxn) + return pmu->pmu_cpu_context; + } + + return NULL; +} + +static void free_pmu_context(void * __percpu cpu_context) +{ + struct pmu *pmu; + + mutex_lock(&pmus_lock); + /* + * Like a real lame refcount. 
+ */ + list_for_each_entry(pmu, &pmus, entry) { + if (pmu->pmu_cpu_context == cpu_context) + goto out; + } + + free_percpu(cpu_context); +out: + mutex_unlock(&pmus_lock); +} + int perf_pmu_register(struct pmu *pmu) { int cpu, ret; @@ -5052,6 +5143,10 @@ int perf_pmu_register(struct pmu *pmu) if (!pmu->pmu_disable_count) goto unlock; + pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr); + if (pmu->pmu_cpu_context) + goto got_cpu_context; + pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context); if (!pmu->pmu_cpu_context) goto free_pdc; @@ -5067,6 +5162,7 @@ int perf_pmu_register(struct pmu *pmu) cpuctx->timer.function = perf_event_context_tick; } +got_cpu_context: if (!pmu->start_txn) { if (pmu->pmu_enable) { /* @@ -5114,7 +5210,7 @@ void perf_pmu_unregister(struct pmu *pmu) synchronize_srcu(&pmus_srcu); free_percpu(pmu->pmu_disable_count); - free_percpu(pmu->pmu_cpu_context); + free_pmu_context(pmu->pmu_cpu_context); } struct pmu *perf_init_event(struct perf_event *event) @@ -5628,16 +5724,13 @@ __perf_event_exit_task(struct perf_event *child_event, } } -/* - * When a child task exits, feed back event values to parent events. - */ -void perf_event_exit_task(struct task_struct *child) +static void perf_event_exit_task_context(struct task_struct *child, int ctxn) { struct perf_event *child_event, *tmp; struct perf_event_context *child_ctx; unsigned long flags; - if (likely(!child->perf_event_ctxp)) { + if (likely(!child->perf_event_ctxp[ctxn])) { perf_event_task(child, NULL, 0); return; } @@ -5649,7 +5742,7 @@ void perf_event_exit_task(struct task_struct *child) * scheduled, so we are now safe from rescheduling changing * our context. */ - child_ctx = child->perf_event_ctxp; + child_ctx = child->perf_event_ctxp[ctxn]; __perf_event_task_sched_out(child_ctx); /* @@ -5658,7 +5751,7 @@ void perf_event_exit_task(struct task_struct *child) * incremented the context's refcount before we do put_ctx below. */ raw_spin_lock(&child_ctx->lock); - child->perf_event_ctxp = NULL; + child->perf_event_ctxp[ctxn] = NULL; /* * If this context is a clone; unclone it so it can't get * swapped to another process while we're removing all @@ -5711,6 +5804,17 @@ again: put_ctx(child_ctx); } +/* + * When a child task exits, feed back event values to parent events. + */ +void perf_event_exit_task(struct task_struct *child) +{ + int ctxn; + + for_each_task_context_nr(ctxn) + perf_event_exit_task_context(child, ctxn); +} + static void perf_free_event(struct perf_event *event, struct perf_event_context *ctx) { @@ -5732,32 +5836,37 @@ static void perf_free_event(struct perf_event *event, /* * free an unexposed, unused context as created by inheritance by - * init_task below, used by fork() in case of fail. + * perf_event_init_task below, used by fork() in case of fail. 
*/ void perf_event_free_task(struct task_struct *task) { - struct perf_event_context *ctx = task->perf_event_ctxp; + struct perf_event_context *ctx; struct perf_event *event, *tmp; + int ctxn; - if (!ctx) - return; + for_each_task_context_nr(ctxn) { + ctx = task->perf_event_ctxp[ctxn]; + if (!ctx) + continue; - mutex_lock(&ctx->mutex); + mutex_lock(&ctx->mutex); again: - list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry) - perf_free_event(event, ctx); + list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, + group_entry) + perf_free_event(event, ctx); - list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, - group_entry) - perf_free_event(event, ctx); + list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, + group_entry) + perf_free_event(event, ctx); - if (!list_empty(&ctx->pinned_groups) || - !list_empty(&ctx->flexible_groups)) - goto again; + if (!list_empty(&ctx->pinned_groups) || + !list_empty(&ctx->flexible_groups)) + goto again; - mutex_unlock(&ctx->mutex); + mutex_unlock(&ctx->mutex); - put_ctx(ctx); + put_ctx(ctx); + } } /* @@ -5863,17 +5972,18 @@ static int inherit_group(struct perf_event *parent_event, static int inherit_task_group(struct perf_event *event, struct task_struct *parent, struct perf_event_context *parent_ctx, - struct task_struct *child, + struct task_struct *child, int ctxn, int *inherited_all) { int ret; - struct perf_event_context *child_ctx = child->perf_event_ctxp; + struct perf_event_context *child_ctx; if (!event->attr.inherit) { *inherited_all = 0; return 0; } + child_ctx = child->perf_event_ctxp[ctxn]; if (!child_ctx) { /* * This is executed from the parent task context, so @@ -5886,7 +5996,7 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent, if (!child_ctx) return -ENOMEM; - child->perf_event_ctxp = child_ctx; + child->perf_event_ctxp[ctxn] = child_ctx; } ret = inherit_group(event, parent, parent_ctx, @@ -5901,7 +6011,7 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent, /* * Initialize the perf_event context in task_struct */ -int perf_event_init_task(struct task_struct *child) +int perf_event_init_context(struct task_struct *child, int ctxn) { struct perf_event_context *child_ctx, *parent_ctx; struct perf_event_context *cloned_ctx; @@ -5910,19 +6020,19 @@ int perf_event_init_task(struct task_struct *child) int inherited_all = 1; int ret = 0; - child->perf_event_ctxp = NULL; + child->perf_event_ctxp[ctxn] = NULL; mutex_init(&child->perf_event_mutex); INIT_LIST_HEAD(&child->perf_event_list); - if (likely(!parent->perf_event_ctxp)) + if (likely(!parent->perf_event_ctxp[ctxn])) return 0; /* * If the parent's context is a clone, pin it so it won't get * swapped under us. 
*/ - parent_ctx = perf_pin_task_context(parent); + parent_ctx = perf_pin_task_context(parent, ctxn); /* * No need to check if parent_ctx != NULL here; since we saw @@ -5942,20 +6052,20 @@ int perf_event_init_task(struct task_struct *child) * the list, not manipulating it: */ list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) { - ret = inherit_task_group(event, parent, parent_ctx, child, - &inherited_all); + ret = inherit_task_group(event, parent, parent_ctx, + child, ctxn, &inherited_all); if (ret) break; } list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) { - ret = inherit_task_group(event, parent, parent_ctx, child, - &inherited_all); + ret = inherit_task_group(event, parent, parent_ctx, + child, ctxn, &inherited_all); if (ret) break; } - child_ctx = child->perf_event_ctxp; + child_ctx = child->perf_event_ctxp[ctxn]; if (child_ctx && inherited_all) { /* @@ -5984,6 +6094,22 @@ int perf_event_init_task(struct task_struct *child) return ret; } +/* + * Initialize the perf_event context in task_struct + */ +int perf_event_init_task(struct task_struct *child) +{ + int ctxn, ret; + + for_each_task_context_nr(ctxn) { + ret = perf_event_init_context(child, ctxn); + if (ret) + return ret; + } + + return 0; +} + static void __init perf_event_init_all_cpus(void) { struct swevent_htable *swhash; -- cgit v1.2.3-70-g09d2 From 89a1e18731959e9953fae15ddc1a983eb15a4f19 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 7 Sep 2010 17:34:50 +0200 Subject: perf: Provide a separate task context for swevents Since software events are always schedulable, mixing them up with hardware events (which are not) can lead to funny scheduling oddities. Giving them their own context solves this. Signed-off-by: Peter Zijlstra Cc: paulus Cc: stephane eranian Cc: Robert Richter Cc: Frederic Weisbecker Cc: Lin Ming Cc: Yanmin LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 9 +-------- include/linux/sched.h | 1 + kernel/hw_breakpoint.c | 2 ++ kernel/perf_event.c | 40 +++++++++++++++++++++++++++++----------- 4 files changed, 33 insertions(+), 19 deletions(-) (limited to 'kernel') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 9ecfd856ce6..c1173520f14 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -952,14 +952,7 @@ extern int perf_event_overflow(struct perf_event *event, int nmi, */ static inline int is_software_event(struct perf_event *event) { - switch (event->attr.type) { - case PERF_TYPE_SOFTWARE: - case PERF_TYPE_TRACEPOINT: - /* for now the breakpoint stuff also works as software event */ - case PERF_TYPE_BREAKPOINT: - return 1; - } - return 0; + return event->pmu->task_ctx_nr == perf_sw_context; } extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; diff --git a/include/linux/sched.h b/include/linux/sched.h index 89d6023c6f8..eb3c1ceec06 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1163,6 +1163,7 @@ struct rcu_node; enum perf_event_task_context { perf_invalid_context = -1, perf_hw_context = 0, + perf_sw_context, perf_nr_task_contexts, }; diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c index 6f150095caf..3b2aaffb65f 100644 --- a/kernel/hw_breakpoint.c +++ b/kernel/hw_breakpoint.c @@ -610,6 +610,8 @@ static void hw_breakpoint_stop(struct perf_event *bp, int flags) } static struct pmu perf_breakpoint = { + .task_ctx_nr = perf_sw_context, /* could eventually get its own */ + .event_init = hw_breakpoint_event_init, .add = hw_breakpoint_add, .del =
hw_breakpoint_del, diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 7223ea87586..357ee8d5e8a 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -4709,6 +4709,8 @@ static int perf_swevent_init(struct perf_event *event) } static struct pmu perf_swevent = { + .task_ctx_nr = perf_sw_context, + .event_init = perf_swevent_init, .add = perf_swevent_add, .del = perf_swevent_del, @@ -4800,6 +4802,8 @@ static int perf_tp_event_init(struct perf_event *event) } static struct pmu perf_tracepoint = { + .task_ctx_nr = perf_sw_context, + .event_init = perf_tp_event_init, .add = perf_trace_add, .del = perf_trace_del, @@ -4988,6 +4992,8 @@ static int cpu_clock_event_init(struct perf_event *event) } static struct pmu perf_cpu_clock = { + .task_ctx_nr = perf_sw_context, + .event_init = cpu_clock_event_init, .add = cpu_clock_event_add, .del = cpu_clock_event_del, @@ -5063,6 +5069,8 @@ static int task_clock_event_init(struct perf_event *event) } static struct pmu perf_task_clock = { + .task_ctx_nr = perf_sw_context, + .event_init = task_clock_event_init, .add = task_clock_event_add, .del = task_clock_event_del, @@ -5490,6 +5498,7 @@ SYSCALL_DEFINE5(perf_event_open, struct perf_event_context *ctx; struct file *event_file = NULL; struct file *group_file = NULL; + struct pmu *pmu; int event_fd; int fput_needed = 0; int err; @@ -5522,20 +5531,11 @@ SYSCALL_DEFINE5(perf_event_open, goto err_fd; } - /* - * Get the target context (task or percpu): - */ - ctx = find_get_context(event->pmu, pid, cpu); - if (IS_ERR(ctx)) { - err = PTR_ERR(ctx); - goto err_alloc; - } - if (group_fd != -1) { group_leader = perf_fget_light(group_fd, &fput_needed); if (IS_ERR(group_leader)) { err = PTR_ERR(group_leader); - goto err_context; + goto err_alloc; } group_file = group_leader->filp; if (flags & PERF_FLAG_FD_OUTPUT) @@ -5544,6 +5544,23 @@ SYSCALL_DEFINE5(perf_event_open, group_leader = NULL; } + /* + * Special case software events and allow them to be part of + * any hardware group. + */ + pmu = event->pmu; + if ((pmu->task_ctx_nr == perf_sw_context) && group_leader) + pmu = group_leader->pmu; + + /* + * Get the target context (task or percpu): + */ + ctx = find_get_context(pmu, pid, cpu); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto err_group_fd; + } + /* * Look up the group leader (we will attach this event to it): */ @@ -5605,8 +5622,9 @@ SYSCALL_DEFINE5(perf_event_open, return event_fd; err_context: - fput_light(group_file, fput_needed); put_ctx(ctx); +err_group_fd: + fput_light(group_file, fput_needed); err_alloc: free_event(event); err_fd: -- cgit v1.2.3-70-g09d2 From 1b9a644fece117cfa5474a2388d6b89d1baf8ddf Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 7 Sep 2010 18:32:22 +0200 Subject: perf: Optimize context ops Assuming we don't mix events of different pmus onto a single context (with the exception of software events inside a hardware group) we can now assume that all events on a particular context belong to the same pmu, hence we can disable the pmu across entire context operations. This reduces the number of hardware writes. The exception for swevents comes from the fact that the sw pmu disable is a nop.
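To make the bracketing concrete, a minimal sketch (the helper name ctx_sched_out_example is hypothetical; perf_pmu_disable()/perf_pmu_enable() and group_sched_out() are the real primitives this series uses):

	/* Sketch: one disable/enable pair around a whole batch of event
	 * operations, so the hardware is written once per batch instead
	 * of once per event. For the software pmu the disable callback
	 * is a nop, which is why sw events inside a hardware group do
	 * not break the batching. */
	static void ctx_sched_out_example(struct perf_event_context *ctx,
					  struct perf_cpu_context *cpuctx)
	{
		struct perf_event *event;

		perf_pmu_disable(ctx->pmu);	/* one hardware write */
		list_for_each_entry(event, &ctx->event_list, event_entry)
			group_sched_out(event, cpuctx, ctx);
		perf_pmu_enable(ctx->pmu);	/* one hardware write */
	}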
Signed-off-by: Peter Zijlstra Cc: paulus Cc: stephane eranian Cc: Robert Richter Cc: Frederic Weisbecker Cc: Lin Ming Cc: Yanmin LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 357ee8d5e8a..9819a69a61a 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1065,6 +1065,7 @@ static void ctx_sched_out(struct perf_event_context *ctx, struct perf_event *event; raw_spin_lock(&ctx->lock); + perf_pmu_disable(ctx->pmu); ctx->is_active = 0; if (likely(!ctx->nr_events)) goto out; @@ -1083,6 +1084,7 @@ static void ctx_sched_out(struct perf_event_context *ctx, group_sched_out(event, cpuctx, ctx); } out: + perf_pmu_enable(ctx->pmu); raw_spin_unlock(&ctx->lock); } @@ -1400,6 +1402,7 @@ void perf_event_context_sched_in(struct perf_event_context *ctx) if (cpuctx->task_ctx == ctx) return; + perf_pmu_disable(ctx->pmu); /* * We want to keep the following priority order: * cpu pinned (that don't need to move), task pinned, @@ -1418,6 +1421,7 @@ void perf_event_context_sched_in(struct perf_event_context *ctx) * cpu-context we got scheduled on is actually rotating. */ perf_pmu_rotate_start(ctx->pmu); + perf_pmu_enable(ctx->pmu); } /* @@ -1629,6 +1633,7 @@ static enum hrtimer_restart perf_event_context_tick(struct hrtimer *timer) rotate = 1; } + perf_pmu_disable(cpuctx->ctx.pmu); perf_ctx_adjust_freq(&cpuctx->ctx, cpuctx->timer_interval); if (ctx) perf_ctx_adjust_freq(ctx, cpuctx->timer_interval); @@ -1649,6 +1654,7 @@ static enum hrtimer_restart perf_event_context_tick(struct hrtimer *timer) task_ctx_sched_in(ctx, EVENT_FLEXIBLE); done: + perf_pmu_enable(cpuctx->ctx.pmu); hrtimer_forward_now(timer, ns_to_ktime(cpuctx->timer_interval)); return restart; -- cgit v1.2.3-70-g09d2 From 8af3c153baf95374eff20a37f00c59a295b52756 Mon Sep 17 00:00:00 2001 From: Miroslav Lichvar Date: Tue, 7 Sep 2010 16:43:46 +0200 Subject: ntp: Clamp PLL update interval Clamp update interval to reduce PLL gain with low sampling rate (e.g. intermittent network connection) to avoid instability. The clamp roughly corresponds to the loop time constant, it's 8 * poll interval for SHIFT_PLL 2 and 32 * poll interval for SHIFT_PLL 4. This gives good results without affecting the gain in normal conditions where ntpd skips only up to seven consecutive samples. Signed-off-by: Miroslav Lichvar Acked-by: john stultz LKML-Reference: <1283870626-9472-1-git-send-email-mlichvar@redhat.com> Signed-off-by: Thomas Gleixner --- kernel/time/ntp.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index c63116863a8..d2321891538 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -149,10 +149,18 @@ static void ntp_update_offset(long offset) time_reftime = get_seconds(); offset64 = offset; - freq_adj = (offset64 * secs) << - (NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + time_constant)); + freq_adj = ntp_update_offset_fll(offset64, secs); - freq_adj += ntp_update_offset_fll(offset64, secs); + /* + * Clamp update interval to reduce PLL gain with low + * sampling rate (e.g. intermittent network connection) + * to avoid instability. 
+ */ + if (unlikely(secs > 1 << (SHIFT_PLL + 1 + time_constant))) + secs = 1 << (SHIFT_PLL + 1 + time_constant); + + freq_adj += (offset64 * secs) << + (NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + time_constant)); freq_adj = min(freq_adj + time_freq, MAXFREQ_SCALED); -- cgit v1.2.3-70-g09d2 From 4e231c7962ce711c7d8c2a4dc23ecd1e8fc28363 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 9 Sep 2010 21:01:59 +0200 Subject: perf: Fix up delayed_put_task_struct() I missed a perf_event_ctxp user when converting it to an array. Pull this last user into perf_event.c as well and fix it up. Signed-off-by: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 2 ++ kernel/exit.c | 4 +--- kernel/perf_event.c | 8 ++++++++ 3 files changed, 11 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index c1173520f14..93bf53aa50e 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -889,6 +889,7 @@ extern void perf_event_task_sched_out(struct task_struct *task, struct task_stru extern int perf_event_init_task(struct task_struct *child); extern void perf_event_exit_task(struct task_struct *child); extern void perf_event_free_task(struct task_struct *task); +extern void perf_event_delayed_put(struct task_struct *task); extern void set_perf_event_pending(void); extern void perf_event_do_pending(void); extern void perf_event_print_debug(void); @@ -1067,6 +1068,7 @@ perf_event_task_sched_out(struct task_struct *task, static inline int perf_event_init_task(struct task_struct *child) { return 0; } static inline void perf_event_exit_task(struct task_struct *child) { } static inline void perf_event_free_task(struct task_struct *task) { } +static inline void perf_event_delayed_put(struct task_struct *task) { } static inline void perf_event_do_pending(void) { } static inline void perf_event_print_debug(void) { } static inline int perf_event_task_disable(void) { return -EINVAL; } diff --git a/kernel/exit.c b/kernel/exit.c index 03120229db2..e2bdf37f9fd 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -149,9 +149,7 @@ static void delayed_put_task_struct(struct rcu_head *rhp) { struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); -#ifdef CONFIG_PERF_EVENTS - WARN_ON_ONCE(tsk->perf_event_ctxp); -#endif + perf_event_delayed_put(tsk); trace_sched_process_free(tsk); put_task_struct(tsk); } diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 9819a69a61a..eaf1c5de6dc 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -5893,6 +5893,14 @@ again: } } +void perf_event_delayed_put(struct task_struct *task) +{ + int ctxn; + + for_each_task_context_nr(ctxn) + WARN_ON_ONCE(task->perf_event_ctxp[ctxn]); +} + /* * inherit a event from parent task to child task: */ -- cgit v1.2.3-70-g09d2 From cee010ec5211b96f33c5c2208f5c14ebb04b634a Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 10 Sep 2010 12:51:54 +0200 Subject: perf: Ensure we call add_event_to_ctx() with the right locks held Even though we call it from the inherit path, where the child is not yet accessible, we need to hold ctx->lock, since add_event_to_ctx() assumes IRQs are disabled.
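A sketch of the fixed call site, with an extra sanity check added purely for illustration (the WARN_ON_ONCE() line is not part of the patch):

	unsigned long flags;

	/* nobody else can see the child yet, but take ctx->lock with
	 * IRQs disabled anyway, so add_event_to_ctx() runs in the
	 * environment it assumes */
	raw_spin_lock_irqsave(&child_ctx->lock, flags);
	WARN_ON_ONCE(!irqs_disabled());		/* illustrative assertion */
	add_event_to_ctx(child_event, child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);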
Signed-off-by: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index eaf1c5de6dc..f395fb4d9b7 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -5913,6 +5913,7 @@ inherit_event(struct perf_event *parent_event, struct perf_event_context *child_ctx) { struct perf_event *child_event; + unsigned long flags; /* * Instead of creating recursive hierarchies of events, @@ -5957,7 +5958,9 @@ inherit_event(struct perf_event *parent_event, /* * Link it up in the child's context: */ + raw_spin_lock_irqsave(&child_ctx->lock, flags); add_event_to_ctx(child_event, child_ctx); + raw_spin_unlock_irqrestore(&child_ctx->lock, flags); /* * Get a reference to the parent filp - we will fput it -- cgit v1.2.3-70-g09d2 From e5f4d3394a52ac351f1a479fe136d92fa5228eff Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 10 Sep 2010 17:38:06 +0200 Subject: perf: Fix perf_init_event() We ought to return -ENOENT when none of the registered PMUs recognise the requested event. This fixes a boot crash that occurs if no PMU is available but the NMI watchdog tries to register an event. Reported-by: Ingo Molnar Signed-off-by: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index f395fb4d9b7..f29b52576ec 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -5236,12 +5236,15 @@ struct pmu *perf_init_event(struct perf_event *event) list_for_each_entry_rcu(pmu, &pmus, entry) { int ret = pmu->event_init(event); if (!ret) - break; + goto unlock; + if (ret != -ENOENT) { pmu = ERR_PTR(ret); - break; + goto unlock; } } + pmu = ERR_PTR(-ENOENT); +unlock: srcu_read_unlock(&pmus_srcu, idx); return pmu; -- cgit v1.2.3-70-g09d2 From cde8e88498c8de69271fcb6d4dd974979368fa67 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 13 Sep 2010 11:06:55 +0200 Subject: perf: Sanitize the RCU logic Simplify things and simply synchronize against two RCU variants for PMU unregister -- we don't care about performance, it's module unload if anything. Reported-by: Frederic Weisbecker Signed-off-by: Peter Zijlstra Cc: Paul E.
McKenney LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index f29b52576ec..bc46bff6962 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -3810,7 +3810,7 @@ static void perf_event_task_event(struct perf_task_event *task_event) struct pmu *pmu; int ctxn; - rcu_read_lock_sched(); + rcu_read_lock(); list_for_each_entry_rcu(pmu, &pmus, entry) { cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); perf_event_task_ctx(&cpuctx->ctx, task_event); @@ -3825,7 +3825,7 @@ static void perf_event_task_event(struct perf_task_event *task_event) if (ctx) perf_event_task_ctx(ctx, task_event); } - rcu_read_unlock_sched(); + rcu_read_unlock(); } static void perf_event_task(struct task_struct *task, @@ -3943,7 +3943,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event) comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; - rcu_read_lock_sched(); + rcu_read_lock(); list_for_each_entry_rcu(pmu, &pmus, entry) { cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); perf_event_comm_ctx(&cpuctx->ctx, comm_event); @@ -3956,7 +3956,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event) if (ctx) perf_event_comm_ctx(ctx, comm_event); } - rcu_read_unlock_sched(); + rcu_read_unlock(); } void perf_event_comm(struct task_struct *task) @@ -4126,7 +4126,7 @@ got_name: mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; - rcu_read_lock_sched(); + rcu_read_lock(); list_for_each_entry_rcu(pmu, &pmus, entry) { cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, @@ -4142,7 +4142,7 @@ got_name: vma->vm_flags & VM_EXEC); } } - rcu_read_unlock_sched(); + rcu_read_unlock(); kfree(buf); } @@ -5218,10 +5218,11 @@ void perf_pmu_unregister(struct pmu *pmu) mutex_unlock(&pmus_lock); /* - * We use the pmu list either under SRCU or preempt_disable, - * synchronize_srcu() implies synchronize_sched() so we're good. + * We dereference the pmu list under both SRCU and regular RCU, so + * synchronize against both of those. */ synchronize_srcu(&pmus_srcu); + synchronize_rcu(); free_percpu(pmu->pmu_disable_count); free_pmu_context(pmu->pmu_cpu_context); -- cgit v1.2.3-70-g09d2 From 0c67b40872326a5340cab51d79a192a5fbaeb484 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 13 Sep 2010 11:15:58 +0200 Subject: perf: Fix free_event() With the context rework stuff we can actually end up freeing an event before it gets attached to a context. Reported-by: Cyrill Gorcunov Signed-off-by: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index bc46bff6962..440f9ca067b 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -2150,7 +2150,9 @@ static void free_event(struct perf_event *event) if (event->destroy) event->destroy(event); - put_ctx(event->ctx); + if (event->ctx) + put_ctx(event->ctx); + call_rcu(&event->rcu_head, free_event_rcu); } -- cgit v1.2.3-70-g09d2 From 7740191cd909b75d75685fb08a5d1f54b8a9d28b Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Mon, 13 Sep 2010 17:47:00 -0400 Subject: sched: Fix string comparison in /proc/sched_features Fix incorrect handling of the following case: INTERACTIVE INTERACTIVE_SOMETHING_ELSE The comparison only checks up to each element's length. 
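A standalone userspace demonstration of the prefix-match bug (illustrative demo, not kernel code):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char *feat = "INTERACTIVE";
		const char *input = "INTERACTIVE_SOMETHING_ELSE";

		/* old check: compares only strlen(feat) chars -> false match */
		printf("strncmp match: %d\n",
		       strncmp(input, feat, strlen(feat)) == 0);	/* prints 1 */

		/* fixed check: full comparison rejects the longer name */
		printf("strcmp match:  %d\n",
		       strcmp(input, feat) == 0);			/* prints 0 */
		return 0;
	}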
Changelog since v1: - Embellish using some Rostedtisms. [ mingo: ^^ == smaller and cleaner ] Signed-off-by: Mathieu Desnoyers Reviewed-by: Steven Rostedt Cc: Cc: Peter Zijlstra Cc: Tony Lindgren LKML-Reference: <20100913214700.GB16118@Krystal> Signed-off-by: Ingo Molnar --- kernel/sched.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 26f83e2f153..b40b82e3359 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -721,7 +721,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { char buf[64]; - char *cmp = buf; + char *cmp; int neg = 0; int i; @@ -732,6 +732,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf, return -EFAULT; buf[cnt] = 0; + cmp = strstrip(buf); if (strncmp(buf, "NO_", 3) == 0) { neg = 1; @@ -739,9 +740,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf, } for (i = 0; sched_feat_names[i]; i++) { - int len = strlen(sched_feat_names[i]); - - if (strncmp(cmp, sched_feat_names[i], len) == 0) { + if (strcmp(cmp, sched_feat_names[i]) == 0) { if (neg) sysctl_sched_features &= ~(1UL << i); else -- cgit v1.2.3-70-g09d2 From 2bccfffd1538f3523847583213567e2f7ce00926 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 9 Sep 2010 08:43:22 -0400 Subject: tracing: Do not reset *pos in set_ftrace_filter After the filtered functions are read, the probed functions are read from the hash in set_ftrace_filter. When the hashed probed functions are read, the *pos passed in is reset. Instead of modifying the pos given to the read function, just record the pos where the filtered functions ended and subtract from that. Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index fa7ece649fe..585ea27025b 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1368,6 +1368,7 @@ enum { #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ struct ftrace_iterator { + loff_t func_pos; struct ftrace_page *pg; int hidx; int idx; @@ -1418,12 +1419,15 @@ static void *t_hash_start(struct seq_file *m, loff_t *pos) loff_t l; if (!(iter->flags & FTRACE_ITER_HASH)) - *pos = 0; + iter->func_pos = *pos; + + if (iter->func_pos > *pos) + return NULL; iter->flags |= FTRACE_ITER_HASH; iter->hidx = 0; - for (l = 0; l <= *pos; ) { + for (l = 0; l <= (*pos - iter->func_pos); ) { p = t_hash_next(m, p, &l); if (!p) break; -- cgit v1.2.3-70-g09d2 From 4aeb69672d011fac5c8df671f3ca89f7987c104e Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 9 Sep 2010 10:00:28 -0400 Subject: tracing: Replace typecasted void pointer in set_ftrace_filter code The set_ftrace_filter uses seq_file and reads from two lists. The pointer returned by t_next() can either be of type struct dyn_ftrace or struct ftrace_func_probe. If there is a bug (there was one) the wrong pointer may be used and the reference can cause an oops. This patch makes t_next() and friends only return the iterator structure which now has a pointer of type struct dyn_ftrace and struct ftrace_func_probe. The t_show() can now test if the pointer is NULL or not and if the pointer exists, it is guaranteed to be of the correct type. Now if there's a bug, only wrong data will be shown but not an oops. 
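In outline, the pattern the patch below adopts (a condensed sketch with details elided; the full version is in the diff): t_next() returns the iterator itself, and t_show() only ever dereferences the typed member that the iterator flags declare valid.

	struct ftrace_iterator {
		struct dyn_ftrace	 *func;		/* valid unless FTRACE_ITER_HASH */
		struct ftrace_func_probe *probe;	/* valid when FTRACE_ITER_HASH */
		unsigned		  flags;
	};

	static int t_show(struct seq_file *m, void *v)
	{
		struct ftrace_iterator *iter = m->private;

		if (iter->flags & FTRACE_ITER_HASH)
			return t_hash_show(m, iter);	/* reads iter->probe */

		if (!iter->func)	/* a bug now prints nothing, not an oops */
			return 0;

		seq_printf(m, "%ps\n", (void *)iter->func->ip);
		return 0;
	}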
Cc: Chris Wright Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 67 +++++++++++++++++++++++++++++++++++---------------- 1 file changed, 46 insertions(+), 21 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 585ea27025b..c8db0dbb984 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1368,25 +1368,29 @@ enum { #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ struct ftrace_iterator { - loff_t func_pos; - struct ftrace_page *pg; - int hidx; - int idx; - unsigned flags; - struct trace_parser parser; + loff_t func_pos; + struct ftrace_page *pg; + struct dyn_ftrace *func; + struct ftrace_func_probe *probe; + struct trace_parser parser; + int hidx; + int idx; + unsigned flags; }; static void * -t_hash_next(struct seq_file *m, void *v, loff_t *pos) +t_hash_next(struct seq_file *m, loff_t *pos) { struct ftrace_iterator *iter = m->private; - struct hlist_node *hnd = v; + struct hlist_node *hnd = NULL; struct hlist_head *hhd; WARN_ON(!(iter->flags & FTRACE_ITER_HASH)); (*pos)++; + if (iter->probe) + hnd = &iter->probe->node; retry: if (iter->hidx >= FTRACE_FUNC_HASHSIZE) return NULL; @@ -1409,7 +1413,12 @@ t_hash_next(struct seq_file *m, void *v, loff_t *pos) } } - return hnd; + if (WARN_ON_ONCE(!hnd)) + return NULL; + + iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node); + + return iter; } static void *t_hash_start(struct seq_file *m, loff_t *pos) @@ -1428,19 +1437,24 @@ static void *t_hash_start(struct seq_file *m, loff_t *pos) iter->hidx = 0; for (l = 0; l <= (*pos - iter->func_pos); ) { - p = t_hash_next(m, p, &l); + p = t_hash_next(m, &l); if (!p) break; } - return p; + if (!p) + return NULL; + + return iter; } -static int t_hash_show(struct seq_file *m, void *v) +static int +t_hash_show(struct seq_file *m, struct ftrace_iterator *iter) { struct ftrace_func_probe *rec; - struct hlist_node *hnd = v; - rec = hlist_entry(hnd, struct ftrace_func_probe, node); + rec = iter->probe; + if (WARN_ON_ONCE(!rec)) + return -EIO; if (rec->ops->print) return rec->ops->print(m, rec->ip, rec->ops, rec->data); @@ -1461,7 +1475,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos) struct dyn_ftrace *rec = NULL; if (iter->flags & FTRACE_ITER_HASH) - return t_hash_next(m, v, pos); + return t_hash_next(m, pos); (*pos)++; @@ -1495,7 +1509,12 @@ t_next(struct seq_file *m, void *v, loff_t *pos) } } - return rec; + if (!rec) + return NULL; + + iter->func = rec; + + return iter; } static void *t_start(struct seq_file *m, loff_t *pos) @@ -1530,10 +1549,14 @@ static void *t_start(struct seq_file *m, loff_t *pos) break; } - if (!p && iter->flags & FTRACE_ITER_FILTER) - return t_hash_start(m, pos); + if (!p) { + if (iter->flags & FTRACE_ITER_FILTER) + return t_hash_start(m, pos); - return p; + return NULL; + } + + return iter; } static void t_stop(struct seq_file *m, void *p) @@ -1544,16 +1567,18 @@ static void t_stop(struct seq_file *m, void *p) static int t_show(struct seq_file *m, void *v) { struct ftrace_iterator *iter = m->private; - struct dyn_ftrace *rec = v; + struct dyn_ftrace *rec; if (iter->flags & FTRACE_ITER_HASH) - return t_hash_show(m, v); + return t_hash_show(m, iter); if (iter->flags & FTRACE_ITER_PRINTALL) { seq_printf(m, "#### all functions enabled ####\n"); return 0; } + rec = iter->func; + if (!rec) return 0; -- cgit v1.2.3-70-g09d2 From 98c4fd046f07156ca6055677e8f03d4280be16c1 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 10 Sep 2010 11:47:43 -0400 Subject: tracing: Keep track of 
set_ftrace_filter position and allow lseek again This patch keeps track of the index within the elements of set_ftrace_filter and if the position goes backwards, it nicely resets and starts from the beginning again. This allows for lseek and pread to work properly now. Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 34 ++++++++++++++++++++++++++-------- 1 file changed, 26 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index c8db0dbb984..2d51166b93f 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1368,6 +1368,7 @@ enum { #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ struct ftrace_iterator { + loff_t pos; loff_t func_pos; struct ftrace_page *pg; struct dyn_ftrace *func; @@ -1385,9 +1386,8 @@ t_hash_next(struct seq_file *m, loff_t *pos) struct hlist_node *hnd = NULL; struct hlist_head *hhd; - WARN_ON(!(iter->flags & FTRACE_ITER_HASH)); - (*pos)++; + iter->pos = *pos; if (iter->probe) hnd = &iter->probe->node; @@ -1427,14 +1427,9 @@ static void *t_hash_start(struct seq_file *m, loff_t *pos) void *p = NULL; loff_t l; - if (!(iter->flags & FTRACE_ITER_HASH)) - iter->func_pos = *pos; - if (iter->func_pos > *pos) return NULL; - iter->flags |= FTRACE_ITER_HASH; - iter->hidx = 0; for (l = 0; l <= (*pos - iter->func_pos); ) { p = t_hash_next(m, &l); @@ -1444,6 +1439,9 @@ static void *t_hash_start(struct seq_file *m, loff_t *pos) if (!p) return NULL; + /* Only set this if we have an item */ + iter->flags |= FTRACE_ITER_HASH; + return iter; } @@ -1478,6 +1476,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos) return t_hash_next(m, pos); (*pos)++; + iter->pos = *pos; + iter->func_pos = *pos; if (iter->flags & FTRACE_ITER_PRINTALL) return NULL; @@ -1517,6 +1517,13 @@ t_next(struct seq_file *m, void *v, loff_t *pos) return iter; } +static void reset_iter_read(struct ftrace_iterator *iter) +{ + iter->pos = 0; + iter->func_pos = 0; + iter->flags &= ~(FTRACE_ITER_PRINTALL & FTRACE_ITER_HASH); +} + static void *t_start(struct seq_file *m, loff_t *pos) { struct ftrace_iterator *iter = m->private; @@ -1524,6 +1531,12 @@ static void *t_start(struct seq_file *m, loff_t *pos) loff_t l; mutex_lock(&ftrace_lock); + /* + * If an lseek was done, then reset and start from beginning. + */ + if (*pos < iter->pos) + reset_iter_read(iter); + /* * For set_ftrace_filter reading, if we have the filter * off, we can short cut and just print out that all @@ -1541,6 +1554,11 @@ static void *t_start(struct seq_file *m, loff_t *pos) if (iter->flags & FTRACE_ITER_HASH) return t_hash_start(m, pos); + /* + * Unfortunately, we need to restart at ftrace_pages_start + * every time we let go of the ftrace_mutex. This is because + * those pointers can change without the lock. 
+ */ iter->pg = ftrace_pages_start; iter->idx = 0; for (l = 0; l <= *pos; ) { p = t_next(m, p, &l); if (!p) break; } -- cgit v1.2.3-70-g09d2 From 57c072c7113f54f9512624d6c665db6184448782 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 14 Sep 2010 11:21:11 -0400 Subject: tracing: Fix reading of set_ftrace_filter across lists If we do: # cd /sys/kernel/debug # echo 'do_IRQ:traceon schedule:traceon sys_write:traceon' > \ set_ftrace_filter # cat set_ftrace_filter We get the following output: #### all functions enabled #### sys_write:traceon:unlimited schedule:traceon:unlimited do_IRQ:traceon:unlimited This outputs two lists. One is the fact that all functions are currently enabled for function tracing; the other has three probed functions, which happen to have 'traceon' as their commands. Currently, when reading the first list (functions enabled), the seq_file code will receive a "NULL" from the t_next() function causing it to exit early. This makes "read()" from userspace stop reading the code at this border. Although read is allowed to do this, some (broken) applications might consider this an end of file and stop early. This patch adds the start of the second list to t_next() when it finishes the first list. It is a simple change and makes the set_ftrace_filter file nicer to read. Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 2d51166b93f..1884cf5bc11 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1477,10 +1477,9 @@ t_next(struct seq_file *m, void *v, loff_t *pos) (*pos)++; iter->pos = *pos; - iter->func_pos = *pos; if (iter->flags & FTRACE_ITER_PRINTALL) - return NULL; + return t_hash_start(m, pos); retry: if (iter->idx >= iter->pg->index) { @@ -1510,8 +1509,9 @@ t_next(struct seq_file *m, void *v, loff_t *pos) } if (!rec) - return NULL; + return t_hash_start(m, pos); + iter->func_pos = *pos; iter->func = rec; return iter; -- cgit v1.2.3-70-g09d2 From 2bd16212b8eb86f9574e78d6605a5ba9e9aa8c4e Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Tue, 7 Sep 2010 16:53:44 +0200 Subject: tracing: Add funcgraph-irq option for function graph tracer. It's handy to be able to disable the irq related output and not have to jump over the irq related code when you have no interest in it. The option is by default enabled, so there's no change to current behaviour. It affects only the final output, so all the irq related data stay in the ring buffer.
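For orientation, the suppression logic condensed into one hypothetical helper (the diff below splits it into separate entry and return checks and keeps the state per cpu):

	/* sketch: remember the depth at which irq code was entered and
	 * suppress output until the matching return at that depth */
	static int fgraph_suppress_irq(int *depth_irq, u32 flags,
				       unsigned long addr, int depth)
	{
		if (flags & TRACE_GRAPH_PRINT_IRQS)	/* option set: show irqs */
			return 0;

		if (*depth_irq >= 0)			/* already inside irq code */
			return 1;

		if (addr >= (unsigned long)__irqentry_text_start &&
		    addr <  (unsigned long)__irqentry_text_end) {
			*depth_irq = depth;		/* entering irq code */
			return 1;
		}
		return 0;
	}

At runtime the option is toggled like any other tracer flag, e.g. echo nofuncgraph-irqs > /sys/kernel/debug/tracing/trace_options hides interrupt functions and echo funcgraph-irqs > trace_options restores the default.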
Signed-off-by: Jiri Olsa LKML-Reference: <20100907145344.GC1912@jolsa.brq.redhat.com> Signed-off-by: Steven Rostedt --- kernel/trace/trace_functions_graph.c | 101 ++++++++++++++++++++++++++++++++++- 1 file changed, 100 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index c93bcb24863..8674750a5ec 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -18,6 +18,7 @@ struct fgraph_cpu_data { pid_t last_pid; int depth; + int depth_irq; int ignore; unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH]; }; @@ -41,6 +42,7 @@ struct fgraph_data { #define TRACE_GRAPH_PRINT_PROC 0x8 #define TRACE_GRAPH_PRINT_DURATION 0x10 #define TRACE_GRAPH_PRINT_ABS_TIME 0x20 +#define TRACE_GRAPH_PRINT_IRQS 0x40 static struct tracer_opt trace_opts[] = { /* Display overruns? (for self-debug purpose) */ @@ -55,13 +57,15 @@ static struct tracer_opt trace_opts[] = { { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) }, /* Display absolute time of an entry */ { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) }, + /* Display interrupts */ + { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) }, { } /* Empty entry */ }; static struct tracer_flags tracer_flags = { /* Don't display overruns and proc by default */ .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD | - TRACE_GRAPH_PRINT_DURATION, + TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS, .opts = trace_opts }; @@ -855,6 +859,92 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s, return 0; } +/* + * Entry check for irq code + * + * returns 1 if + * - we are inside irq code + * - we just entered irq code + * + * returns 0 if + * - funcgraph-irqs option is set + * - we are not inside irq code + */ +static int +check_irq_entry(struct trace_iterator *iter, u32 flags, + unsigned long addr, int depth) +{ + int cpu = iter->cpu; + struct fgraph_data *data = iter->private; + int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); + + if (flags & TRACE_GRAPH_PRINT_IRQS) + return 0; + + /* + * We are inside the irq code + */ + if (*depth_irq >= 0) + return 1; + + if ((addr < (unsigned long)__irqentry_text_start) || + (addr >= (unsigned long)__irqentry_text_end)) + return 0; + + /* + * We are entering irq code. + */ + *depth_irq = depth; + return 1; +} + +/* + * Return check for irq code + * + * returns 1 if + * - we are inside irq code + * - we just left irq code + * + * returns 0 if + * - funcgraph-irqs option is set + * - we are not inside irq code + */ +static int +check_irq_return(struct trace_iterator *iter, u32 flags, int depth) +{ + int cpu = iter->cpu; + struct fgraph_data *data = iter->private; + int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); + + if (flags & TRACE_GRAPH_PRINT_IRQS) + return 0; + + /* + * We are not inside the irq code. + */ + if (*depth_irq == -1) + return 0; + + /* + * We are inside the irq code, and this is returning entry. + * Let's not trace it and clear the entry depth, since + * we are out of irq code. + * + * This condition ensures that we 'leave the irq code' once + * we are out of the entry depth. Thus protecting us from + * the RETURN entry loss. + */ + if (*depth_irq >= depth) { + *depth_irq = -1; + return 1; + } + + /* + * We are inside the irq code, and this is not the entry.
+ */ + return 1; +} + static enum print_line_t print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, struct trace_iterator *iter, u32 flags) @@ -865,6 +955,9 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, static enum print_line_t ret; int cpu = iter->cpu; + if (check_irq_entry(iter, flags, call->func, call->depth)) + return TRACE_TYPE_HANDLED; + if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags)) return TRACE_TYPE_PARTIAL_LINE; @@ -902,6 +995,9 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, int ret; int i; + if (check_irq_return(iter, flags, trace->depth)) + return TRACE_TYPE_HANDLED; + if (data) { struct fgraph_cpu_data *cpu_data; int cpu = iter->cpu; @@ -1210,9 +1306,12 @@ void graph_trace_open(struct trace_iterator *iter) pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid); int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth); int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore); + int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); + *pid = -1; *depth = 0; *ignore = 0; + *depth_irq = -1; } iter->private = data; -- cgit v1.2.3-70-g09d2 From b304d0441a4118fadd4c3f16e4dc600c271030b5 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 14 Sep 2010 18:58:33 -0400 Subject: tracing: Do not trace in irq when funcgraph-irq option is zero When the function graph tracer funcgraph-irq option is zero, disable tracing in IRQs. This makes the option have two effects. 1) When reading the trace file, do not display the functions that happen in interrupt context (when detected) 2) [*new*] When recording a trace, skip those that are detected to be in interrupt by the 'in_irq()' function Note, in_irq() is updated at irq_enter() and irq_exit(). There are still functions that are recorded by the function graph tracer that are in interrupt context but outside the irq_enter/exit() routines. Signed-off-by: Steven Rostedt --- kernel/trace/trace_functions_graph.c | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 8674750a5ec..02c708ae0d4 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -15,6 +15,9 @@ #include "trace.h" #include "trace_output.h" +/* When set, irq functions will be ignored */ +static int ftrace_graph_skip_irqs; + struct fgraph_cpu_data { pid_t last_pid; int depth; @@ -208,6 +211,14 @@ int __trace_graph_entry(struct trace_array *tr, return 1; } +static inline int ftrace_graph_ignore_irqs(void) +{ + if (!ftrace_graph_skip_irqs) + return 0; + + return in_irq(); +} + int trace_graph_entry(struct ftrace_graph_ent *trace) { struct trace_array *tr = graph_array; @@ -222,7 +233,8 @@ int trace_graph_entry(struct ftrace_graph_ent *trace) return 0; /* trace it when it is-nested-in or is a function enabled.
*/ - if (!(trace->depth || ftrace_graph_addr(trace->func))) + if (!(trace->depth || ftrace_graph_addr(trace->func)) || + ftrace_graph_ignore_irqs()) return 0; local_irq_save(flags); @@ -1334,6 +1346,14 @@ void graph_trace_close(struct trace_iterator *iter) } } +static int func_graph_set_flag(u32 old_flags, u32 bit, int set) +{ + if (bit == TRACE_GRAPH_PRINT_IRQS) + ftrace_graph_skip_irqs = !set; + + return 0; +} + static struct trace_event_functions graph_functions = { .trace = print_graph_function_event, }; @@ -1360,6 +1380,7 @@ static struct tracer graph_trace __read_mostly = { .print_line = print_graph_function, .print_header = print_graph_headers, .flags = &tracer_flags, + .set_flag = func_graph_set_flag, #ifdef CONFIG_FTRACE_SELFTEST .selftest = trace_selftest_startup_function_graph, #endif -- cgit v1.2.3-70-g09d2 From 79e406d7b00ab2b261ae32a59f266fd3b7af6f29 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 14 Sep 2010 22:19:46 -0400 Subject: tracing: Remove leftover FTRACE_ENABLE/DISABLE_MCOUNT enums The enums for FTRACE_ENABLE_MCOUNT and FTRACE_DISABLE_MCOUNT were used as commands to ftrace_run_update_code(). But these commands were used by the old nasty ftrace daemon that has long been slain. This is a clean up patch to remove the references to these enums and simplify the code a little. Reported-by: Wu Zhangjin Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 83a16e9ee51..20aff3f1c71 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -884,10 +884,8 @@ enum { FTRACE_ENABLE_CALLS = (1 << 0), FTRACE_DISABLE_CALLS = (1 << 1), FTRACE_UPDATE_TRACE_FUNC = (1 << 2), - FTRACE_ENABLE_MCOUNT = (1 << 3), - FTRACE_DISABLE_MCOUNT = (1 << 4), - FTRACE_START_FUNC_RET = (1 << 5), - FTRACE_STOP_FUNC_RET = (1 << 6), + FTRACE_START_FUNC_RET = (1 << 3), + FTRACE_STOP_FUNC_RET = (1 << 4), }; static int ftrace_filtered; @@ -1226,8 +1224,6 @@ static void ftrace_shutdown(int command) static void ftrace_startup_sysctl(void) { - int command = FTRACE_ENABLE_MCOUNT; - if (unlikely(ftrace_disabled)) return; @@ -1235,23 +1231,17 @@ static void ftrace_startup_sysctl(void) saved_ftrace_func = NULL; /* ftrace_start_up is true if we want ftrace running */ if (ftrace_start_up) - command |= FTRACE_ENABLE_CALLS; - - ftrace_run_update_code(command); + ftrace_run_update_code(FTRACE_ENABLE_CALLS); } static void ftrace_shutdown_sysctl(void) { - int command = FTRACE_DISABLE_MCOUNT; - if (unlikely(ftrace_disabled)) return; /* ftrace_start_up is true if ftrace is running */ if (ftrace_start_up) - command |= FTRACE_DISABLE_CALLS; - - ftrace_run_update_code(command); + ftrace_run_update_code(FTRACE_DISABLE_CALLS); } static cycle_t ftrace_update_time; -- cgit v1.2.3-70-g09d2 From d9ca07a05ce1c42ac9717e54eaea4546a3a80978 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Tue, 14 Sep 2010 15:34:01 +0200 Subject: watchdog: Avoid kernel crash when disabling watchdog In case you boot with the watchdog disabled, i.e., nowatchdog, then, if you try to disable it via /proc/sys/kernel/watchdog, you get a kernel crash. The reason is that you are trying to cancel a hrtimer which has never been initialized. This patch fixes this by skipping execution of watchdog_disable_all_cpus() when the watchdog is marked disabled from boot. 
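The crash path, reduced to a hedged sketch (illustrative only; the per-cpu timer name follows kernel/watchdog.c):

	/* boot with "nowatchdog": watchdog_enable() never runs, so the
	 * per-cpu hrtimer is never passed through hrtimer_init() ... */
	struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);

	/* ... and "echo 0 > /proc/sys/kernel/watchdog" then cancels a
	 * timer whose base was never set up, crashing the kernel */
	hrtimer_cancel(hrtimer);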
Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra LKML-Reference: <4c8f7a23.cae9d80a.2c11.0bb4@mx.google.com> Signed-off-by: Ingo Molnar --- kernel/watchdog.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'kernel') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index fa71aebda4f..89eadbb9cef 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -473,6 +473,9 @@ static void watchdog_disable_all_cpus(void) { int cpu; + if (no_watchdog) + return; + for_each_online_cpu(cpu) watchdog_disable(cpu); -- cgit v1.2.3-70-g09d2 From d958077d007d98125766d11e82da2fd6497b91d6 Mon Sep 17 00:00:00 2001 From: Matt Helsley Date: Mon, 13 Sep 2010 13:01:18 -0700 Subject: hw breakpoints: Fix pid namespace bug Hardware breakpoints can't be registered within pid namespaces because tsk->pid is passed rather than the pid in the current namespace. (See https://bugzilla.kernel.org/show_bug.cgi?id=17281 ) This is a quick fix demonstrating the problem, but it is not the best solution, since passing pids internally is not a good way to avoid pid namespace bugs. Subsequent patches will show a better solution. Much thanks to Frederic Weisbecker for doing the bulk of the work finding this bug. Signed-off-by: Matt Helsley Signed-off-by: Peter Zijlstra Cc: Robin Green Cc: Prasad Cc: Arnaldo Carvalho de Melo Cc: Steven Rostedt Cc: Will Deacon Cc: Mahesh Salgaonkar LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/hw_breakpoint.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c index 3b2aaffb65f..6122f02cfed 100644 --- a/kernel/hw_breakpoint.c +++ b/kernel/hw_breakpoint.c @@ -433,7 +433,8 @@ register_user_hw_breakpoint(struct perf_event_attr *attr, perf_overflow_handler_t triggered, struct task_struct *tsk) { - return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered); + return perf_event_create_kernel_counter(attr, -1, task_pid_vnr(tsk), + triggered); } EXPORT_SYMBOL_GPL(register_user_hw_breakpoint); -- cgit v1.2.3-70-g09d2 From 2ebd4ffb6d0cb877787b1e42be8485820158857e Mon Sep 17 00:00:00 2001 From: Matt Helsley Date: Mon, 13 Sep 2010 13:01:19 -0700 Subject: perf events: Split out task search into helper Split out the code which searches for non-exiting tasks into its own helper. Creating this helper not only makes the code slightly more readable, it also prepares for moving the search out of find_get_context() in a subsequent commit. Signed-off-by: Matt Helsley Signed-off-by: Peter Zijlstra Cc: Robin Green Cc: Prasad Cc: Arnaldo Carvalho de Melo Cc: Steven Rostedt Cc: Will Deacon Cc: Mahesh Salgaonkar LKML-Reference: <561205417b450b8a4bf7488374541d64b4690431.1284407762.git.matthltc@us.ibm.com> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 63 ++++++++++++++++++++++++++++++++++------------------- 1 file changed, 40 insertions(+), 23 deletions(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 440f9ca067b..3f5309db72f 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -2015,6 +2015,43 @@ alloc_perf_context(struct pmu *pmu, struct task_struct *task) return ctx; } +static struct task_struct * +find_lively_task_by_vpid(pid_t vpid) +{ + struct task_struct *task; + int err; + + rcu_read_lock(); + if (!vpid) + task = current; + else + task = find_task_by_vpid(vpid); + if (task) + get_task_struct(task); + rcu_read_unlock(); + + if (!task) + return ERR_PTR(-ESRCH); + + /* + * Can't attach events to a dying task.
+ */ + err = -ESRCH; + if (task->flags & PF_EXITING) + goto errout; + + /* Reuse ptrace permission checks for now. */ + err = -EACCES; + if (!ptrace_may_access(task, PTRACE_MODE_READ)) + goto errout; + + return task; +errout: + put_task_struct(task); + return ERR_PTR(err); + +} + static struct perf_event_context * find_get_context(struct pmu *pmu, pid_t pid, int cpu) { @@ -2047,29 +2084,9 @@ find_get_context(struct pmu *pmu, pid_t pid, int cpu) return ctx; } - rcu_read_lock(); - if (!pid) - task = current; - else - task = find_task_by_vpid(pid); - if (task) - get_task_struct(task); - rcu_read_unlock(); - - if (!task) - return ERR_PTR(-ESRCH); - - /* - * Can't attach events to a dying task. - */ - err = -ESRCH; - if (task->flags & PF_EXITING) - goto errout; - - /* Reuse ptrace permission checks for now. */ - err = -EACCES; - if (!ptrace_may_access(task, PTRACE_MODE_READ)) - goto errout; + task = find_lively_task_by_vpid(pid); + if (IS_ERR(task)) + return (void*)task; err = -EINVAL; ctxn = pmu->task_ctx_nr; -- cgit v1.2.3-70-g09d2 From 38a81da2205f94e8a2a834b51a6b99c91fc7c2e8 Mon Sep 17 00:00:00 2001 From: Matt Helsley Date: Mon, 13 Sep 2010 13:01:20 -0700 Subject: perf events: Clean up pid passing The kernel perf event creation path shouldn't use find_task_by_vpid() because a vpid exists in a specific namespace. find_task_by_vpid() uses current's pid namespace, which isn't always the correct namespace to use for the vpid in all the places perf_event_create_kernel_counter() (and thus find_get_context()) is called. The goal is to clean up pid namespace handling and prevent bugs like: https://bugzilla.kernel.org/show_bug.cgi?id=17281 Instead of using pids, switch find_get_context() to use task struct pointers directly. The syscall is responsible for resolving the pid to a task struct. This moves the pid namespace resolution into the syscall, much like every other syscall that takes pid parameters.
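The same structure is worth copying in any layered API: resolve an ambient, namespace-relative identifier to an object reference once, at the entry point, and let internal helpers take only the reference. A minimal userspace sketch of the shape (hypothetical names, not the kernel API):

	#include <stdio.h>

	struct task { int id; };

	/* Internal helper: takes the resolved object, never a raw id. */
	static void attach_counter(struct task *t)
	{
		printf("attaching counter to task %d\n", t->id);
	}

	/* Boundary: the only place the id-to-object lookup happens. */
	static struct task *lookup(int id, struct task *table, int n)
	{
		int i;

		for (i = 0; i < n; i++)
			if (table[i].id == id)
				return &table[i];
		return NULL;
	}

	int main(void)
	{
		struct task tasks[] = { { 1 }, { 2 } };
		struct task *t = lookup(2, tasks, 2);

		if (t)
			attach_counter(t);
		return 0;
	}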
Signed-off-by: Matt Helsley Signed-off-by: Peter Zijlstra Cc: Robin Green Cc: Prasad Cc: Arnaldo Carvalho de Melo Cc: Steven Rostedt Cc: Will Deacon Cc: Mahesh Salgaonkar LKML-Reference: Signed-off-by: Ingo Molnar --- arch/arm/oprofile/common.c | 2 +- include/linux/perf_event.h | 2 +- kernel/hw_breakpoint.c | 5 ++--- kernel/perf_event.c | 21 ++++++++++----------- kernel/watchdog.c | 2 +- 5 files changed, 15 insertions(+), 17 deletions(-) (limited to 'kernel') diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c index 0691176899f..aad63e611b3 100644 --- a/arch/arm/oprofile/common.c +++ b/arch/arm/oprofile/common.c @@ -96,7 +96,7 @@ static int op_create_counter(int cpu, int event) return ret; pevent = perf_event_create_kernel_counter(&counter_config[event].attr, - cpu, -1, + cpu, NULL, op_overflow_handler); if (IS_ERR(pevent)) { diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 93bf53aa50e..39d8860b268 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -902,7 +902,7 @@ extern int perf_event_release_kernel(struct perf_event *event); extern struct perf_event * perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, - pid_t pid, + struct task_struct *task, perf_overflow_handler_t callback); extern u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running); diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c index 6122f02cfed..3b714e839c1 100644 --- a/kernel/hw_breakpoint.c +++ b/kernel/hw_breakpoint.c @@ -433,8 +433,7 @@ register_user_hw_breakpoint(struct perf_event_attr *attr, perf_overflow_handler_t triggered, struct task_struct *tsk) { - return perf_event_create_kernel_counter(attr, -1, task_pid_vnr(tsk), - triggered); + return perf_event_create_kernel_counter(attr, -1, tsk, triggered); } EXPORT_SYMBOL_GPL(register_user_hw_breakpoint); @@ -516,7 +515,7 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr, get_online_cpus(); for_each_online_cpu(cpu) { pevent = per_cpu_ptr(cpu_events, cpu); - bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered); + bp = perf_event_create_kernel_counter(attr, cpu, NULL, triggered); *pevent = bp; diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 3f5309db72f..86f394e15d5 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -2053,15 +2053,14 @@ errout: } static struct perf_event_context * -find_get_context(struct pmu *pmu, pid_t pid, int cpu) +find_get_context(struct pmu *pmu, struct task_struct *task, int cpu) { struct perf_event_context *ctx; struct perf_cpu_context *cpuctx; - struct task_struct *task; unsigned long flags; int ctxn, err; - if (pid == -1 && cpu != -1) { + if (!task && cpu != -1) { /* Must be root to operate on a CPU event: */ if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) return ERR_PTR(-EACCES); @@ -2084,10 +2083,6 @@ find_get_context(struct pmu *pmu, pid_t pid, int cpu) return ctx; } - task = find_lively_task_by_vpid(pid); - if (IS_ERR(task)) - return (void*)task; - err = -EINVAL; ctxn = pmu->task_ctx_nr; if (ctxn < 0) @@ -5527,6 +5522,7 @@ SYSCALL_DEFINE5(perf_event_open, struct perf_event_context *ctx; struct file *event_file = NULL; struct file *group_file = NULL; + struct task_struct *task = NULL; struct pmu *pmu; int event_fd; int fput_needed = 0; @@ -5581,10 +5577,13 @@ SYSCALL_DEFINE5(perf_event_open, if ((pmu->task_ctx_nr == perf_sw_context) && group_leader) pmu = group_leader->pmu; + if (pid != -1) + task = find_lively_task_by_vpid(pid); + /* * Get the target context (task or percpu): 
*/ - ctx = find_get_context(pmu, pid, cpu); + ctx = find_get_context(pmu, task, cpu); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto err_group_fd; @@ -5666,11 +5665,11 @@ err_fd: * * @attr: attributes of the counter to create * @cpu: cpu in which the counter is bound - * @pid: task to profile + * @task: task to profile (NULL for percpu) */ struct perf_event * perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, - pid_t pid, + struct task_struct *task, perf_overflow_handler_t overflow_handler) { struct perf_event_context *ctx; @@ -5687,7 +5686,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, goto err; } - ctx = find_get_context(event->pmu, pid, cpu); + ctx = find_get_context(event->pmu, task, cpu); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto err_free; diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 89eadbb9cef..dc8e16824b5 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -358,7 +358,7 @@ static int watchdog_nmi_enable(int cpu) /* Try to register using hardware perf events */ wd_attr = &wd_hw_attr; wd_attr->sample_period = hw_nmi_get_sample_period(); - event = perf_event_create_kernel_counter(wd_attr, cpu, -1, watchdog_overflow_callback); + event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback); if (!IS_ERR(event)) { printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n"); goto out_save; -- cgit v1.2.3-70-g09d2 From edbaadbe42b0b790618ec49d29626223529d8195 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 15 Sep 2010 10:04:26 +0900 Subject: kprobes: Remove redundant address check Remove call to kernel_text_address() in register_jprobes() because it is called right after in register_kprobe(). Signed-off-by: Namhyung Kim Acked-by: Masami Hiramatsu LKML-Reference: <1284512670-2369-2-git-send-email-namhyung@gmail.com> Signed-off-by: Ingo Molnar --- kernel/kprobes.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 282035f3ae9..8f967016cef 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1343,14 +1343,11 @@ int __kprobes register_jprobes(struct jprobe **jps, int num) jp = jps[i]; addr = arch_deref_entry_point(jp->entry); - if (!kernel_text_address(addr)) - ret = -EINVAL; - else { - /* Todo: Verify probepoint is a function entry point */ - jp->kp.pre_handler = setjmp_pre_handler; - jp->kp.break_handler = longjmp_break_handler; - ret = register_kprobe(&jp->kp); - } + /* Todo: Verify probepoint is a function entry point */ + jp->kp.pre_handler = setjmp_pre_handler; + jp->kp.break_handler = longjmp_break_handler; + ret = register_kprobe(&jp->kp); + if (ret < 0) { if (i > 0) unregister_jprobes(jps, i); -- cgit v1.2.3-70-g09d2 From 05662bdb64c746079de7ac4dc4fb4caa5e8e119f Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 15 Sep 2010 10:04:27 +0900 Subject: kprobes: Verify jprobe entry point Verify jprobe's entry point is a function entry point using kallsyms' offset value. 
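The check rests on a simple invariant: kallsyms_lookup_size_offset() maps an address to the containing symbol's size and the address's offset within it, and an address is a function entry point exactly when that offset is zero. A toy model of the test, with a hypothetical symbol table standing in for kallsyms:

	#include <stdio.h>

	struct sym { const char *name; unsigned long start, size; };

	/* Return 1 and fill *offset if addr falls inside a known symbol. */
	static int lookup_size_offset(unsigned long addr, const struct sym *tab,
				      int n, unsigned long *offset)
	{
		int i;

		for (i = 0; i < n; i++) {
			if (addr >= tab[i].start &&
			    addr < tab[i].start + tab[i].size) {
				*offset = addr - tab[i].start;
				return 1;
			}
		}
		return 0;
	}

	int main(void)
	{
		const struct sym tab[] = { { "do_work", 0x1000, 0x80 } };
		unsigned long off;

		/* 0x1000 is the entry point (offset 0); 0x1010 is mid-function. */
		if (lookup_size_offset(0x1010, tab, 1, &off) && off == 0)
			printf("entry point: register the jprobe\n");
		else
			printf("not an entry point: reject with -EINVAL\n");
		return 0;
	}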
Signed-off-by: Namhyung Kim Acked-by: Masami Hiramatsu LKML-Reference: <1284512670-2369-3-git-send-email-namhyung@gmail.com> Signed-off-by: Ingo Molnar --- kernel/kprobes.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 8f967016cef..1b0dbe06707 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1339,14 +1339,18 @@ int __kprobes register_jprobes(struct jprobe **jps, int num) if (num <= 0) return -EINVAL; for (i = 0; i < num; i++) { - unsigned long addr; + unsigned long addr, offset; jp = jps[i]; addr = arch_deref_entry_point(jp->entry); - /* Todo: Verify probepoint is a function entry point */ - jp->kp.pre_handler = setjmp_pre_handler; - jp->kp.break_handler = longjmp_break_handler; - ret = register_kprobe(&jp->kp); + /* Verify probepoint is a function entry point */ + if (kallsyms_lookup_size_offset(addr, NULL, &offset) && + offset == 0) { + jp->kp.pre_handler = setjmp_pre_handler; + jp->kp.break_handler = longjmp_break_handler; + ret = register_kprobe(&jp->kp); + } else + ret = -EINVAL; if (ret < 0) { if (i > 0) -- cgit v1.2.3-70-g09d2 From 6376b2297502e72255b7eb2893c6044ad5a7b5f4 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 15 Sep 2010 10:04:28 +0900 Subject: kprobes: Make functions static Make the following (internal) functions static to make sparse happier :-) * get_optimized_kprobe: only called from static functions * kretprobe_table_unlock: _lock function is static * kprobes_optinsn_template_holder: never called but holding asm code Signed-off-by: Namhyung Kim Acked-by: Masami Hiramatsu LKML-Reference: <1284512670-2369-4-git-send-email-namhyung@gmail.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/kprobes.c | 2 +- kernel/kprobes.c | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 770ebfb349e..05c20a48841 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -1129,7 +1129,7 @@ static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, *(unsigned long *)addr = val; } -void __kprobes kprobes_optinsn_template_holder(void) +static void __used __kprobes kprobes_optinsn_template_holder(void) { asm volatile ( ".global optprobe_template_entry\n" diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 1b0dbe06707..c53aad5d7e5 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -399,7 +399,7 @@ static inline int kprobe_optready(struct kprobe *p) * Return an optimized kprobe whose optimizing code replaces * instructions including addr (exclude breakpoint).
*/ -struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr) +static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr) { int i; struct kprobe *p = NULL; @@ -857,7 +857,8 @@ void __kprobes kretprobe_hash_unlock(struct task_struct *tsk, spin_unlock_irqrestore(hlist_lock, *flags); } -void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags) +static void __kprobes kretprobe_table_unlock(unsigned long hash, + unsigned long *flags) { spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); spin_unlock_irqrestore(hlist_lock, *flags); -- cgit v1.2.3-70-g09d2 From 635c17c2b2b4e5cd34f5dcba19d751b4e58533c2 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 15 Sep 2010 10:04:30 +0900 Subject: kprobes: Add sparse context annotations This removes the following warnings when built with C=1: warning: context imbalance in 'kretprobe_hash_lock' - wrong count at exit warning: context imbalance in 'kretprobe_table_lock' - wrong count at exit warning: context imbalance in 'kretprobe_hash_unlock' - unexpected unlock warning: context imbalance in 'kretprobe_table_unlock' - unexpected unlock Signed-off-by: Namhyung Kim Acked-by: Masami Hiramatsu LKML-Reference: <1284512670-2369-6-git-send-email-namhyung@gmail.com> Signed-off-by: Ingo Molnar --- kernel/kprobes.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'kernel') diff --git a/kernel/kprobes.c b/kernel/kprobes.c index c53aad5d7e5..6dd5359e1f0 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -831,6 +831,7 @@ void __kprobes recycle_rp_inst(struct kretprobe_instance *ri, void __kprobes kretprobe_hash_lock(struct task_struct *tsk, struct hlist_head **head, unsigned long *flags) +__acquires(hlist_lock) { unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); spinlock_t *hlist_lock; @@ -842,6 +843,7 @@ void __kprobes kretprobe_hash_lock(struct task_struct *tsk, static void __kprobes kretprobe_table_lock(unsigned long hash, unsigned long *flags) +__acquires(hlist_lock) { spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); spin_lock_irqsave(hlist_lock, *flags); @@ -849,6 +851,7 @@ static void __kprobes kretprobe_table_lock(unsigned long hash, void __kprobes kretprobe_hash_unlock(struct task_struct *tsk, unsigned long *flags) +__releases(hlist_lock) { unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); spinlock_t *hlist_lock; @@ -859,6 +862,7 @@ void __kprobes kretprobe_hash_unlock(struct task_struct *tsk, static void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags) +__releases(hlist_lock) { spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); spin_unlock_irqrestore(hlist_lock, *flags); -- cgit v1.2.3-70-g09d2 From 31915ab4cbf507aadab40847cf9989da5e88b090 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 16 Sep 2010 14:42:25 +0200 Subject: sched: Remove branch hints within context_switch() With 710390d9 "sched: Optimize branch hint in context_switch()" the branch hint logic within context_switch() got inverted. In fact the hints "if (likely(!mm))" and "if (likely(!prev->mm))" mean that it is likely that the previous and next task are kernel threads. That assumption is certainly counterintuitive, but Tim has shown that at least with his workload this is true. Nevertheless the truth is: it depends on the current workload. So just remove the annotations, which also improves readability.
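For reference, likely()/unlikely() expand to GCC's __builtin_expect(), which only biases code layout and static branch prediction; when the truth of the condition is workload-dependent, the hint is as likely to hurt as to help. A standalone illustration of what the removed annotations meant (userspace, not the scheduler code):

	#include <stdio.h>

	#define likely(x)	__builtin_expect(!!(x), 1)
	#define unlikely(x)	__builtin_expect(!!(x), 0)

	int main(void)
	{
		void *mm = NULL;	/* stand-in for next->mm */

		/*
		 * The hint told GCC to lay out the NULL (kernel thread)
		 * path as the fall-through; on a workload dominated by
		 * user tasks, every context switch takes the "cold" path.
		 */
		if (likely(!mm))
			printf("kernel thread: borrow the previous mm\n");
		else
			printf("user task: switch_mm()\n");
		return 0;
	}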
Reported-by: Tim Blechmann Signed-off-by: Heiko Carstens Cc: Peter Zijlstra Cc: Mike Galbraith LKML-Reference: <20100916124225.GA2209@osiris.boeblingen.de.ibm.com> Signed-off-by: Ingo Molnar --- kernel/sched.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index b40b82e3359..16a1129f51e 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -2845,14 +2845,14 @@ context_switch(struct rq *rq, struct task_struct *prev, */ arch_start_context_switch(prev); - if (likely(!mm)) { + if (!mm) { next->active_mm = oldmm; atomic_inc(&oldmm->mm_count); enter_lazy_tlb(oldmm, next); } else switch_mm(oldmm, mm, next); - if (likely(!prev->mm)) { + if (!prev->mm) { prev->active_mm = NULL; rq->prev_mm = oldmm; } -- cgit v1.2.3-70-g09d2 From d14b12d7adbf214f33eb59f800b5c3d5ed9268e8 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Fri, 17 Sep 2010 11:28:47 +0200 Subject: perf_events: Fix broken event grouping Events were not grouped anymore. The reason was that in perf_event_open(), the field event->group_leader was initialized before the function looked up the group_fd to find the event leader. This patch fixes this by reordering the code correctly. Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra Cc: Robert Richter LKML-Reference: <20100917093009.360420946@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 86f394e15d5..ce95617f5d2 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -5550,17 +5550,11 @@ SYSCALL_DEFINE5(perf_event_open, if (event_fd < 0) return event_fd; - event = perf_event_alloc(&attr, cpu, group_leader, NULL, NULL); - if (IS_ERR(event)) { - err = PTR_ERR(event); - goto err_fd; - } - if (group_fd != -1) { group_leader = perf_fget_light(group_fd, &fput_needed); if (IS_ERR(group_leader)) { err = PTR_ERR(group_leader); - goto err_alloc; + goto err_fd; } group_file = group_leader->filp; if (flags & PERF_FLAG_FD_OUTPUT) @@ -5569,6 +5563,12 @@ SYSCALL_DEFINE5(perf_event_open, group_leader = NULL; } + event = perf_event_alloc(&attr, cpu, group_leader, NULL, NULL); + if (IS_ERR(event)) { + err = PTR_ERR(event); + goto err_fd; + } + /* * Special case software events and allow them to be part of * any hardware group. @@ -5653,7 +5653,6 @@ err_context: put_ctx(ctx); err_group_fd: fput_light(group_file, fput_needed); -err_alloc: free_event(event); err_fd: put_unused_fd(event_fd); -- cgit v1.2.3-70-g09d2 From b04243ef7006cda301819f54ee7ce0a3632489e3 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 17 Sep 2010 11:28:48 +0200 Subject: perf: Complete software pmu grouping Aside from allowing software events into a !software group, allow adding !software events to pure software groups. Once we've moved the software group and attached the first !software event, the group will no longer be a pure software group and hence no longer be eligible for movement, at which point the straight ctx comparison is correct again. 
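Seen from userspace, the effect is that a perf_event_open() group whose leader is a software event can now accept a hardware sibling. A minimal sketch of such a call sequence (error handling elided; the group counts on the calling task):

	#include <linux/perf_event.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int perf_open(struct perf_event_attr *attr, int group_fd)
	{
		/* pid 0 = calling task, cpu -1 = any cpu */
		return syscall(__NR_perf_event_open, attr, 0, -1, group_fd, 0);
	}

	int main(void)
	{
		struct perf_event_attr sw, hw;
		int leader, sibling;

		memset(&sw, 0, sizeof(sw));
		sw.size = sizeof(sw);
		sw.type = PERF_TYPE_SOFTWARE;		/* pure software leader */
		sw.config = PERF_COUNT_SW_TASK_CLOCK;

		memset(&hw, 0, sizeof(hw));
		hw.size = sizeof(hw);
		hw.type = PERF_TYPE_HARDWARE;		/* !software sibling */
		hw.config = PERF_COUNT_HW_CPU_CYCLES;

		leader = perf_open(&sw, -1);
		/* This addition moves the whole group to the hardware context. */
		sibling = perf_open(&hw, leader);

		return (leader < 0 || sibling < 0);
	}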
Signed-off-by: Peter Zijlstra Cc: Stephane Eranian Cc: Robert Richter Cc: Paul Mackerras LKML-Reference: <20100917093009.410784731@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 6 +++++ kernel/perf_event.c | 65 ++++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 66 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 39d8860b268..165287fd2cc 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -804,12 +804,18 @@ struct perf_event { #endif /* CONFIG_PERF_EVENTS */ }; +enum perf_event_context_type { + task_context, + cpu_context, +}; + /** * struct perf_event_context - event context structure * * Used as a container for task events and CPU events as well: */ struct perf_event_context { + enum perf_event_context_type type; struct pmu *pmu; /* * Protect the states of the events in the list, diff --git a/kernel/perf_event.c b/kernel/perf_event.c index ce95617f5d2..6d7eef5f3c4 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -5184,6 +5184,7 @@ int perf_pmu_register(struct pmu *pmu) cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); __perf_event_init_context(&cpuctx->ctx); + cpuctx->ctx.type = cpu_context; cpuctx->ctx.pmu = pmu; cpuctx->timer_interval = TICK_NSEC; hrtimer_init(&cpuctx->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); @@ -5517,7 +5518,8 @@ SYSCALL_DEFINE5(perf_event_open, struct perf_event_attr __user *, attr_uptr, pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) { - struct perf_event *event, *group_leader = NULL, *output_event = NULL; + struct perf_event *group_leader = NULL, *output_event = NULL; + struct perf_event *event, *sibling; struct perf_event_attr attr; struct perf_event_context *ctx; struct file *event_file = NULL; @@ -5525,6 +5527,7 @@ SYSCALL_DEFINE5(perf_event_open, struct task_struct *task = NULL; struct pmu *pmu; int event_fd; + int move_group = 0; int fput_needed = 0; int err; @@ -5574,8 +5577,29 @@ SYSCALL_DEFINE5(perf_event_open, * any hardware group. */ pmu = event->pmu; - if ((pmu->task_ctx_nr == perf_sw_context) && group_leader) - pmu = group_leader->pmu; + + if (group_leader && + (is_software_event(event) != is_software_event(group_leader))) { + if (is_software_event(event)) { + /* + * If event and group_leader are not both a software + * event, and event is, then group leader is not. + * + * Allow the addition of software events to !software + * groups, this is safe because software events never + * fail to schedule. + */ + pmu = group_leader->pmu; + } else if (is_software_event(group_leader) && + (group_leader->group_flags & PERF_GROUP_SOFTWARE)) { + /* + * In case the group is a pure software group, and we + * try to add a hardware event, move the whole group to + * the hardware context. 
+ */ + move_group = 1; + } + } if (pid != -1) task = find_lively_task_by_vpid(pid); @@ -5605,8 +5629,14 @@ * Do not allow to attach to a group in a different * task or CPU context: */ - if (group_leader->ctx != ctx) - goto err_context; + if (move_group) { + if (group_leader->ctx->type != ctx->type) + goto err_context; + } else { + if (group_leader->ctx != ctx) + goto err_context; + } + /* * Only a group leader can be exclusive or pinned */ @@ -5626,9 +5656,34 @@ goto err_context; } + if (move_group) { + struct perf_event_context *gctx = group_leader->ctx; + + mutex_lock(&gctx->mutex); + perf_event_remove_from_context(group_leader); + list_for_each_entry(sibling, &group_leader->sibling_list, + group_entry) { + perf_event_remove_from_context(sibling); + put_ctx(gctx); + } + mutex_unlock(&gctx->mutex); + put_ctx(gctx); + } + event->filp = event_file; WARN_ON_ONCE(ctx->parent_ctx); mutex_lock(&ctx->mutex); + + if (move_group) { + perf_install_in_context(ctx, group_leader, cpu); + get_ctx(ctx); + list_for_each_entry(sibling, &group_leader->sibling_list, + group_entry) { + perf_install_in_context(ctx, sibling, cpu); + get_ctx(ctx); + } + } + perf_install_in_context(ctx, event, cpu); ++ctx->generation; mutex_unlock(&ctx->mutex); -- cgit v1.2.3-70-g09d2 From 917bdd1c9b7b0f4c22f2504c2f0c1074c8ab9df7 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 17 Sep 2010 11:28:49 +0200 Subject: perf: Fix perf_event_exit_cpu_context() Use the right cpu-context, as spotted by a preempt warning on hot-unplug. Signed-off-by: Peter Zijlstra Cc: Stephane Eranian Cc: Robert Richter LKML-Reference: <20100917093009.461794357@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 6d7eef5f3c4..27332e5f51a 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -6269,14 +6269,13 @@ static void perf_event_exit_cpu_context(int cpu) idx = srcu_read_lock(&pmus_srcu); list_for_each_entry_rcu(pmu, &pmus, entry) { - ctx = &this_cpu_ptr(pmu->pmu_cpu_context)->ctx; + ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx; mutex_lock(&ctx->mutex); smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1); mutex_unlock(&ctx->mutex); } srcu_read_unlock(&pmus_srcu, idx); - } static void perf_event_exit_cpu(int cpu) -- cgit v1.2.3-70-g09d2 From e9d2b064149ff7ef4acbc65a1b9374ac8b218d3e Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 17 Sep 2010 11:28:50 +0200 Subject: perf: Undo the per cpu-context timer stuff Revert the per cpu-context timers because of an unfortunate nohz interaction. Fixing that would have been somewhat ugly, so go back to driving things from the regular tick. Provide a jiffies interval feature for people who want slower rotations.
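The replacement scheme is just a modulo test against the global tick counter, as the perf_event_task_tick() hunk below shows; a tiny standalone model of that dispatch:

	#include <stdio.h>

	/* Rotate only on every Nth tick; an interval of 1 means every tick. */
	static int should_rotate(unsigned long jiffies, int interval)
	{
		return interval == 1 || !(jiffies % interval);
	}

	int main(void)
	{
		unsigned long j;

		for (j = 0; j < 8; j++)
			printf("tick %lu: %s\n", j,
			       should_rotate(j, 4) ? "rotate" : "skip");
		return 0;
	}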
Signed-off-by: Peter Zijlstra Cc: Stephane Eranian Cc: Robert Richter Cc: Yinghai Lu LKML-Reference: <20100917093009.519845633@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 6 ++-- kernel/perf_event.c | 79 ++++++++++++++++++++++++++++------------------ kernel/sched.c | 2 ++ 3 files changed, 55 insertions(+), 32 deletions(-) (limited to 'kernel') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 165287fd2cc..61b1e2d760f 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -870,8 +870,8 @@ struct perf_cpu_context { struct perf_event_context *task_ctx; int active_oncpu; int exclusive; - u64 timer_interval; - struct hrtimer timer; + struct list_head rotation_list; + int jiffies_interval; }; struct perf_output_handle { @@ -1065,6 +1065,7 @@ extern int perf_swevent_get_recursion_context(void); extern void perf_swevent_put_recursion_context(int rctx); extern void perf_event_enable(struct perf_event *event); extern void perf_event_disable(struct perf_event *event); +extern void perf_event_task_tick(void); #else static inline void perf_event_task_sched_in(struct task_struct *task) { } @@ -1099,6 +1100,7 @@ static inline int perf_swevent_get_recursion_context(void) { return -1; } static inline void perf_swevent_put_recursion_context(int rctx) { } static inline void perf_event_enable(struct perf_event *event) { } static inline void perf_event_disable(struct perf_event *event) { } +static inline void perf_event_task_tick(void) { } #endif #define perf_output_put(handle, x) \ diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 27332e5f51a..baae1367e94 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -77,23 +77,22 @@ void perf_pmu_enable(struct pmu *pmu) pmu->pmu_enable(pmu); } +static DEFINE_PER_CPU(struct list_head, rotation_list); + +/* + * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized + * because they're strictly cpu affine and rotate_start is called with IRQs + * disabled, while rotate_context is called from IRQ context. + */ static void perf_pmu_rotate_start(struct pmu *pmu) { struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); + struct list_head *head = &__get_cpu_var(rotation_list); - if (hrtimer_active(&cpuctx->timer)) - return; + WARN_ON(!irqs_disabled()); - __hrtimer_start_range_ns(&cpuctx->timer, - ns_to_ktime(cpuctx->timer_interval), 0, - HRTIMER_MODE_REL_PINNED, 0); -} - -static void perf_pmu_rotate_stop(struct pmu *pmu) -{ - struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); - - hrtimer_cancel(&cpuctx->timer); + if (list_empty(&cpuctx->rotation_list)) + list_add(&cpuctx->rotation_list, head); } static void get_ctx(struct perf_event_context *ctx) @@ -1607,36 +1606,33 @@ static void rotate_ctx(struct perf_event_context *ctx) } /* - * Cannot race with ->pmu_rotate_start() because this is ran from hardirq - * context, and ->pmu_rotate_start() is called with irqs disabled (both are - * cpu affine, so there are no SMP races). + * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized + * because they're strictly cpu affine and rotate_start is called with IRQs + * disabled, while rotate_context is called from IRQ context. 
*/ -static enum hrtimer_restart perf_event_context_tick(struct hrtimer *timer) +static void perf_rotate_context(struct perf_cpu_context *cpuctx) { - enum hrtimer_restart restart = HRTIMER_NORESTART; - struct perf_cpu_context *cpuctx; + u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC; struct perf_event_context *ctx = NULL; - int rotate = 0; - - cpuctx = container_of(timer, struct perf_cpu_context, timer); + int rotate = 0, remove = 1; if (cpuctx->ctx.nr_events) { - restart = HRTIMER_RESTART; + remove = 0; if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) rotate = 1; } ctx = cpuctx->task_ctx; if (ctx && ctx->nr_events) { - restart = HRTIMER_RESTART; + remove = 0; if (ctx->nr_events != ctx->nr_active) rotate = 1; } perf_pmu_disable(cpuctx->ctx.pmu); - perf_ctx_adjust_freq(&cpuctx->ctx, cpuctx->timer_interval); + perf_ctx_adjust_freq(&cpuctx->ctx, interval); if (ctx) - perf_ctx_adjust_freq(ctx, cpuctx->timer_interval); + perf_ctx_adjust_freq(ctx, interval); if (!rotate) goto done; @@ -1654,10 +1650,24 @@ static enum hrtimer_restart perf_event_context_tick(struct hrtimer *timer) task_ctx_sched_in(ctx, EVENT_FLEXIBLE); done: + if (remove) + list_del_init(&cpuctx->rotation_list); + perf_pmu_enable(cpuctx->ctx.pmu); - hrtimer_forward_now(timer, ns_to_ktime(cpuctx->timer_interval)); +} + +void perf_event_task_tick(void) +{ + struct list_head *head = &__get_cpu_var(rotation_list); + struct perf_cpu_context *cpuctx, *tmp; - return restart; + WARN_ON(!irqs_disabled()); + + list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) { + if (cpuctx->jiffies_interval == 1 || + !(jiffies % cpuctx->jiffies_interval)) + perf_rotate_context(cpuctx); + } } static int event_enable_on_exec(struct perf_event *event, @@ -5186,9 +5196,8 @@ int perf_pmu_register(struct pmu *pmu) __perf_event_init_context(&cpuctx->ctx); cpuctx->ctx.type = cpu_context; cpuctx->ctx.pmu = pmu; - cpuctx->timer_interval = TICK_NSEC; - hrtimer_init(&cpuctx->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - cpuctx->timer.function = perf_event_context_tick; + cpuctx->jiffies_interval = 1; + INIT_LIST_HEAD(&cpuctx->rotation_list); } got_cpu_context: @@ -6229,6 +6238,7 @@ static void __init perf_event_init_all_cpus(void) for_each_possible_cpu(cpu) { swhash = &per_cpu(swevent_htable, cpu); mutex_init(&swhash->hlist_mutex); + INIT_LIST_HEAD(&per_cpu(rotation_list, cpu)); } } @@ -6248,6 +6258,15 @@ static void __cpuinit perf_event_init_cpu(int cpu) } #ifdef CONFIG_HOTPLUG_CPU +static void perf_pmu_rotate_stop(struct pmu *pmu) +{ + struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); + + WARN_ON(!irqs_disabled()); + + list_del_init(&cpuctx->rotation_list); +} + static void __perf_event_exit_context(void *__info) { struct perf_event_context *ctx = __info; diff --git a/kernel/sched.c b/kernel/sched.c index 1c3ea7a55b7..794819eab9c 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -3584,6 +3584,8 @@ void scheduler_tick(void) curr->sched_class->task_tick(rq, curr, 0); raw_spin_unlock(&rq->lock); + perf_event_task_tick(); + #ifdef CONFIG_SMP rq->idle_at_tick = idle_cpu(cpu); trigger_load_balance(rq, cpu); -- cgit v1.2.3-70-g09d2 From 1dcc41bb037533839753df983d31778b30b67d93 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 14 Sep 2010 21:43:46 +0900 Subject: futex: Change 3rd arg of fetch_robust_entry() to unsigned int* Sparse complains: kernel/futex.c:2495:59: warning: incorrect type in argument 3 (different signedness) Make 3rd argument of fetch_robust_entry() 'unsigned int'. 
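This class of warning is easy to reproduce outside the kernel: int * and unsigned int * differ only in the signedness of the pointed-to type, which sparse (and gcc with -Wpointer-sign) flags at the call site. A two-function reproducer:

	/* Build with: cc -Wpointer-sign signedness.c */
	static void store(unsigned int *p)
	{
		*p = 1;
	}

	int main(void)
	{
		int pi = 0;

		store(&pi);	/* warning: pointer targets differ in signedness */
		return pi;
	}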
Signed-off-by: Namhyung Kim Cc: Peter Zijlstra Cc: Darren Hart LKML-Reference: <1284468228-8723-1-git-send-email-namhyung@gmail.com> Signed-off-by: Thomas Gleixner --- kernel/futex.c | 2 +- kernel/futex_compat.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/futex.c b/kernel/futex.c index 6a3a5fa1526..464de2751ff 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -2458,7 +2458,7 @@ retry: */ static inline int fetch_robust_entry(struct robust_list __user **entry, struct robust_list __user * __user *head, - int *pi) + unsigned int *pi) { unsigned long uentry; diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c index d49afb2395e..06da4dfc339 100644 --- a/kernel/futex_compat.c +++ b/kernel/futex_compat.c @@ -19,7 +19,7 @@ */ static inline int fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry, - compat_uptr_t __user *head, int *pi) + compat_uptr_t __user *head, unsigned int *pi) { if (get_user(*uentry, head)) return -EFAULT; -- cgit v1.2.3-70-g09d2 From a3c74c52570c0c4ac90c9a0216de800c39089ba7 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 14 Sep 2010 21:43:47 +0900 Subject: futex: Mark restart_block.futex.uaddr[2] __user @uaddr and @uaddr2 fields in restart_block.futex are user pointers. Add __user and remove unnecessary casts. Signed-off-by: Namhyung Kim Cc: Peter Zijlstra Cc: Darren Hart LKML-Reference: <1284468228-8723-2-git-send-email-namhyung@gmail.com> Signed-off-by: Thomas Gleixner --- include/linux/thread_info.h | 4 ++-- kernel/futex.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index a8cc4e13434..c9069654417 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -23,12 +23,12 @@ struct restart_block { }; /* For futex_wait and futex_wait_requeue_pi */ struct { - u32 *uaddr; + u32 __user *uaddr; u32 val; u32 flags; u32 bitset; u64 time; - u32 *uaddr2; + u32 __user *uaddr2; } futex; /* For nanosleep */ struct { diff --git a/kernel/futex.c b/kernel/futex.c index 464de2751ff..45e448a5e44 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -1843,7 +1843,7 @@ retry: restart = ¤t_thread_info()->restart_block; restart->fn = futex_wait_restart; - restart->futex.uaddr = (u32 *)uaddr; + restart->futex.uaddr = uaddr; restart->futex.val = val; restart->futex.time = abs_time->tv64; restart->futex.bitset = bitset; @@ -1869,7 +1869,7 @@ out: static long futex_wait_restart(struct restart_block *restart) { - u32 __user *uaddr = (u32 __user *)restart->futex.uaddr; + u32 __user *uaddr = restart->futex.uaddr; int fshared = 0; ktime_t t, *tp = NULL; -- cgit v1.2.3-70-g09d2 From 15e408cd6ccc3f4f453d87ccd5bc7a84d59feb96 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 14 Sep 2010 21:43:48 +0900 Subject: futex: Add lock context annotations queue_lock/unlock/me() and unqueue_me_pi() grab/release spinlocks but are missing proper annotations. Add them. Signed-off-by: Namhyung Kim Cc: Peter Zijlstra Cc: Darren Hart LKML-Reference: <1284468228-8723-3-git-send-email-namhyung@gmail.com> Signed-off-by: Thomas Gleixner --- kernel/futex.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'kernel') diff --git a/kernel/futex.c b/kernel/futex.c index 45e448a5e44..92a31d4cd56 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -1360,6 +1360,7 @@ out: /* The key must be already stored in q->key. 
*/ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q) + __acquires(&hb->lock) { struct futex_hash_bucket *hb; @@ -1373,6 +1374,7 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q) static inline void queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb) + __releases(&hb->lock) { spin_unlock(&hb->lock); drop_futex_key_refs(&q->key); @@ -1391,6 +1393,7 @@ queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb) * an example). */ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) + __releases(&hb->lock) { int prio; @@ -1471,6 +1474,7 @@ retry: * and dropped here. */ static void unqueue_me_pi(struct futex_q *q) + __releases(q->lock_ptr) { WARN_ON(plist_node_empty(&q->list)); plist_del(&q->list, &q->list.plist); -- cgit v1.2.3-70-g09d2 From 401a8d048eadfbe1b1c1bf53d3b614fcc894c61a Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 16 Sep 2010 10:36:00 +0200 Subject: workqueue: cleanup flush/cancel functions Make the following cleanup changes. * Relocate flush/cancel function prototypes and definitions. * Relocate wait_on_cpu_work() and wait_on_work() before try_to_grab_pending(). These will be used to implement flush_work_sync(). * Make all flush/cancel functions return bool instead of int. * Update wait_on_cpu_work() and wait_on_work() to return %true if they actually waited. * Add / update comments. This patch doesn't cause any functional changes. Signed-off-by: Tejun Heo --- include/linux/workqueue.h | 18 ++--- kernel/workqueue.c | 175 +++++++++++++++++++++++++--------------------- 2 files changed, 103 insertions(+), 90 deletions(-) (limited to 'kernel') diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 07c48925a8f..bb9b683ea6f 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -343,7 +343,6 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, extern void flush_workqueue(struct workqueue_struct *wq); extern void flush_scheduled_work(void); -extern void flush_delayed_work(struct delayed_work *work); extern int schedule_work(struct work_struct *work); extern int schedule_work_on(int cpu, struct work_struct *work); @@ -355,8 +354,11 @@ extern int keventd_up(void); int execute_in_process_context(work_func_t fn, struct execute_work *); -extern int flush_work(struct work_struct *work); -extern int cancel_work_sync(struct work_struct *work); +extern bool flush_work(struct work_struct *work); +extern bool cancel_work_sync(struct work_struct *work); + +extern bool flush_delayed_work(struct delayed_work *dwork); +extern bool cancel_delayed_work_sync(struct delayed_work *dwork); extern void workqueue_set_max_active(struct workqueue_struct *wq, int max_active); @@ -370,9 +372,9 @@ extern unsigned int work_busy(struct work_struct *work); * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or * cancel_work_sync() to wait on it. */ -static inline int cancel_delayed_work(struct delayed_work *work) +static inline bool cancel_delayed_work(struct delayed_work *work) { - int ret; + bool ret; ret = del_timer_sync(&work->timer); if (ret) @@ -385,9 +387,9 @@ static inline int cancel_delayed_work(struct delayed_work *work) * if it returns 0 the timer function may be running and the queueing is in * progress. 
*/ -static inline int __cancel_delayed_work(struct delayed_work *work) +static inline bool __cancel_delayed_work(struct delayed_work *work) { - int ret; + bool ret; ret = del_timer(&work->timer); if (ret) @@ -395,8 +397,6 @@ static inline int __cancel_delayed_work(struct delayed_work *work) return ret; } -extern int cancel_delayed_work_sync(struct delayed_work *work); - /* Obsolete. use cancel_delayed_work_sync() */ static inline void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq, diff --git a/kernel/workqueue.c b/kernel/workqueue.c index f77afd93922..1240b9d94b0 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2327,16 +2327,24 @@ out_unlock: EXPORT_SYMBOL_GPL(flush_workqueue); /** - * flush_work - block until a work_struct's callback has terminated - * @work: the work which is to be flushed + * flush_work - wait for a work to finish executing the last queueing instance + * @work: the work to flush * - * Returns false if @work has already terminated. + * Wait until @work has finished execution. This function considers + * only the last queueing instance of @work. If @work has been + * enqueued across different CPUs on a non-reentrant workqueue or on + * multiple workqueues, @work might still be executing on return on + * some of the CPUs from earlier queueing. * - * It is expected that, prior to calling flush_work(), the caller has - * arranged for the work to not be requeued, otherwise it doesn't make - * sense to use this function. + * If @work was queued only on a non-reentrant, ordered or unbound + * workqueue, @work is guaranteed to be idle on return if it hasn't + * been requeued since flush started. + * + * RETURNS: + * %true if flush_work() waited for the work to finish execution, + * %false if it was already idle. */ -int flush_work(struct work_struct *work) +bool flush_work(struct work_struct *work) { struct worker *worker = NULL; struct global_cwq *gcwq; @@ -2374,13 +2382,49 @@ int flush_work(struct work_struct *work) wait_for_completion(&barr.done); destroy_work_on_stack(&barr.work); - return 1; + return true; already_gone: spin_unlock_irq(&gcwq->lock); - return 0; + return false; } EXPORT_SYMBOL_GPL(flush_work); +static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work) +{ + struct wq_barrier barr; + struct worker *worker; + + spin_lock_irq(&gcwq->lock); + + worker = find_worker_executing_work(gcwq, work); + if (unlikely(worker)) + insert_wq_barrier(worker->current_cwq, &barr, work, worker); + + spin_unlock_irq(&gcwq->lock); + + if (unlikely(worker)) { + wait_for_completion(&barr.done); + destroy_work_on_stack(&barr.work); + return true; + } else + return false; +} + +static bool wait_on_work(struct work_struct *work) +{ + bool ret = false; + int cpu; + + might_sleep(); + + lock_map_acquire(&work->lockdep_map); + lock_map_release(&work->lockdep_map); + + for_each_gcwq_cpu(cpu) + ret |= wait_on_cpu_work(get_gcwq(cpu), work); + return ret; +} + /* * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit, * so this work can't be re-armed in any way. 
@@ -2423,39 +2467,7 @@ static int try_to_grab_pending(struct work_struct *work) return ret; } -static void wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work) -{ - struct wq_barrier barr; - struct worker *worker; - - spin_lock_irq(&gcwq->lock); - - worker = find_worker_executing_work(gcwq, work); - if (unlikely(worker)) - insert_wq_barrier(worker->current_cwq, &barr, work, worker); - - spin_unlock_irq(&gcwq->lock); - - if (unlikely(worker)) { - wait_for_completion(&barr.done); - destroy_work_on_stack(&barr.work); - } -} - -static void wait_on_work(struct work_struct *work) -{ - int cpu; - - might_sleep(); - - lock_map_acquire(&work->lockdep_map); - lock_map_release(&work->lockdep_map); - - for_each_gcwq_cpu(cpu) - wait_on_cpu_work(get_gcwq(cpu), work); -} - -static int __cancel_work_timer(struct work_struct *work, +static bool __cancel_work_timer(struct work_struct *work, struct timer_list* timer) { int ret; @@ -2472,42 +2484,60 @@ static int __cancel_work_timer(struct work_struct *work, } /** - * cancel_work_sync - block until a work_struct's callback has terminated - * @work: the work which is to be flushed - * - * Returns true if @work was pending. + * cancel_work_sync - cancel a work and wait for it to finish + * @work: the work to cancel * - * cancel_work_sync() will cancel the work if it is queued. If the work's - * callback appears to be running, cancel_work_sync() will block until it - * has completed. + * Cancel @work and wait for its execution to finish. This function + * can be used even if the work re-queues itself or migrates to + * another workqueue. On return from this function, @work is + * guaranteed to be not pending or executing on any CPU. * - * It is possible to use this function if the work re-queues itself. It can - * cancel the work even if it migrates to another workqueue, however in that - * case it only guarantees that work->func() has completed on the last queued - * workqueue. - * - * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not - * pending, otherwise it goes into a busy-wait loop until the timer expires. + * cancel_work_sync(&delayed_work->work) must not be used for + * delayed_work's. Use cancel_delayed_work_sync() instead. * - * The caller must ensure that workqueue_struct on which this work was last + * The caller must ensure that the workqueue on which @work was last * queued can't be destroyed before this function returns. + * + * RETURNS: + * %true if @work was pending, %false otherwise. */ -int cancel_work_sync(struct work_struct *work) +bool cancel_work_sync(struct work_struct *work) { return __cancel_work_timer(work, NULL); } EXPORT_SYMBOL_GPL(cancel_work_sync); /** - * cancel_delayed_work_sync - reliably kill off a delayed work. - * @dwork: the delayed work struct + * flush_delayed_work - wait for a dwork to finish executing the last queueing + * @dwork: the delayed work to flush + * + * Delayed timer is cancelled and the pending work is queued for + * immediate execution. Like flush_work(), this function only + * considers the last queueing instance of @dwork. + * + * RETURNS: + * %true if flush_work() waited for the work to finish execution, + * %false if it was already idle. 
+ */ +bool flush_delayed_work(struct delayed_work *dwork) +{ + if (del_timer_sync(&dwork->timer)) + __queue_work(raw_smp_processor_id(), + get_work_cwq(&dwork->work)->wq, &dwork->work); + return flush_work(&dwork->work); +} +EXPORT_SYMBOL(flush_delayed_work); + +/** + * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish + * @dwork: the delayed work cancel * - * Returns true if @dwork was pending. + * This is cancel_work_sync() for delayed works. * - * It is possible to use this function if @dwork rearms itself via queue_work() - * or queue_delayed_work(). See also the comment for cancel_work_sync(). + * RETURNS: + * %true if @dwork was pending, %false otherwise. */ -int cancel_delayed_work_sync(struct delayed_work *dwork) +bool cancel_delayed_work_sync(struct delayed_work *dwork) { return __cancel_work_timer(&dwork->work, &dwork->timer); } @@ -2558,23 +2588,6 @@ int schedule_delayed_work(struct delayed_work *dwork, } EXPORT_SYMBOL(schedule_delayed_work); -/** - * flush_delayed_work - block until a dwork_struct's callback has terminated - * @dwork: the delayed work which is to be flushed - * - * Any timeout is cancelled, and any pending work is run immediately. - */ -void flush_delayed_work(struct delayed_work *dwork) -{ - if (del_timer_sync(&dwork->timer)) { - __queue_work(get_cpu(), get_work_cwq(&dwork->work)->wq, - &dwork->work); - put_cpu(); - } - flush_work(&dwork->work); -} -EXPORT_SYMBOL(flush_delayed_work); - /** * schedule_delayed_work_on - queue work in global workqueue on CPU after delay * @cpu: cpu to use -- cgit v1.2.3-70-g09d2 From baf59022c37d43f202e62d5130e4bac5e825b426 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 16 Sep 2010 10:42:16 +0200 Subject: workqueue: factor out start_flush_work() Factor out start_flush_work() from flush_work(). start_flush_work() has @wait_executing argument which controls whether the barrier is queued only if the work is pending or also if executing. As flush_work() needs to wait for execution too, it uses %true. This commit doesn't cause any behavior difference. start_flush_work() will be used to implement flush_work_sync(). Signed-off-by: Tejun Heo --- kernel/workqueue.c | 64 +++++++++++++++++++++++++++++++----------------------- 1 file changed, 37 insertions(+), 27 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 1240b9d94b0..33d31d76870 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2326,35 +2326,17 @@ out_unlock: } EXPORT_SYMBOL_GPL(flush_workqueue); -/** - * flush_work - wait for a work to finish executing the last queueing instance - * @work: the work to flush - * - * Wait until @work has finished execution. This function considers - * only the last queueing instance of @work. If @work has been - * enqueued across different CPUs on a non-reentrant workqueue or on - * multiple workqueues, @work might still be executing on return on - * some of the CPUs from earlier queueing. - * - * If @work was queued only on a non-reentrant, ordered or unbound - * workqueue, @work is guaranteed to be idle on return if it hasn't - * been requeued since flush started. - * - * RETURNS: - * %true if flush_work() waited for the work to finish execution, - * %false if it was already idle. 
- */ -bool flush_work(struct work_struct *work) +static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, + bool wait_executing) { struct worker *worker = NULL; struct global_cwq *gcwq; struct cpu_workqueue_struct *cwq; - struct wq_barrier barr; might_sleep(); gcwq = get_work_gcwq(work); if (!gcwq) - return 0; + return false; spin_lock_irq(&gcwq->lock); if (!list_empty(&work->entry)) { @@ -2367,26 +2349,54 @@ bool flush_work(struct work_struct *work) cwq = get_work_cwq(work); if (unlikely(!cwq || gcwq != cwq->gcwq)) goto already_gone; - } else { + } else if (wait_executing) { worker = find_worker_executing_work(gcwq, work); if (!worker) goto already_gone; cwq = worker->current_cwq; - } + } else + goto already_gone; - insert_wq_barrier(cwq, &barr, work, worker); + insert_wq_barrier(cwq, barr, work, worker); spin_unlock_irq(&gcwq->lock); lock_map_acquire(&cwq->wq->lockdep_map); lock_map_release(&cwq->wq->lockdep_map); - - wait_for_completion(&barr.done); - destroy_work_on_stack(&barr.work); return true; already_gone: spin_unlock_irq(&gcwq->lock); return false; } + +/** + * flush_work - wait for a work to finish executing the last queueing instance + * @work: the work to flush + * + * Wait until @work has finished execution. This function considers + * only the last queueing instance of @work. If @work has been + * enqueued across different CPUs on a non-reentrant workqueue or on + * multiple workqueues, @work might still be executing on return on + * some of the CPUs from earlier queueing. + * + * If @work was queued only on a non-reentrant, ordered or unbound + * workqueue, @work is guaranteed to be idle on return if it hasn't + * been requeued since flush started. + * + * RETURNS: + * %true if flush_work() waited for the work to finish execution, + * %false if it was already idle. + */ +bool flush_work(struct work_struct *work) +{ + struct wq_barrier barr; + + if (start_flush_work(work, &barr, true)) { + wait_for_completion(&barr.done); + destroy_work_on_stack(&barr.work); + return true; + } else + return false; +} EXPORT_SYMBOL_GPL(flush_work); static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work) -- cgit v1.2.3-70-g09d2 From 09383498c5d35262e643bfdbae84826177a3c624 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 16 Sep 2010 10:48:29 +0200 Subject: workqueue: implement flush[_delayed]_work_sync() Implement flush[_delayed]_work_sync(). These are flush functions which also make sure no CPU is still executing the target work from earlier queueing instances. These are similar to cancel[_delayed]_work_sync() except that the target work item is flushed instead of cancelled. 
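A typical caller is driver teardown that wants any pending work to run to completion instead of being discarded. A sketch of such a user, as a hypothetical module written against the prototypes added below (not code from the patch):

	#include <linux/module.h>
	#include <linux/workqueue.h>

	static void poll_fn(struct work_struct *work)
	{
		/* periodic device polling would go here */
	}
	static DECLARE_DELAYED_WORK(poll_work, poll_fn);

	static int __init demo_init(void)
	{
		schedule_delayed_work(&poll_work, HZ);
		return 0;
	}

	static void __exit demo_exit(void)
	{
		/*
		 * Cancel the timer, queue the pending work immediately, and
		 * wait for every earlier queueing instance to finish - the
		 * flushing counterpart of cancel_delayed_work_sync().
		 */
		flush_delayed_work_sync(&poll_work);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");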
Signed-off-by: Tejun Heo --- include/linux/workqueue.h | 2 ++ kernel/workqueue.c | 56 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+) (limited to 'kernel') diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index bb9b683ea6f..e33ff4a9170 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -355,9 +355,11 @@ extern int keventd_up(void); int execute_in_process_context(work_func_t fn, struct execute_work *); extern bool flush_work(struct work_struct *work); +extern bool flush_work_sync(struct work_struct *work); extern bool cancel_work_sync(struct work_struct *work); extern bool flush_delayed_work(struct delayed_work *dwork); +extern bool flush_delayed_work_sync(struct delayed_work *work); extern bool cancel_delayed_work_sync(struct delayed_work *dwork); extern void workqueue_set_max_active(struct workqueue_struct *wq, diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 33d31d76870..19e4bc15ee9 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2435,6 +2435,41 @@ static bool wait_on_work(struct work_struct *work) return ret; } +/** + * flush_work_sync - wait until a work has finished execution + * @work: the work to flush + * + * Wait until @work has finished execution. On return, it's + * guaranteed that all queueing instances of @work which happened + * before this function is called are finished. In other words, if + * @work hasn't been requeued since this function was called, @work is + * guaranteed to be idle on return. + * + * RETURNS: + * %true if flush_work_sync() waited for the work to finish execution, + * %false if it was already idle. + */ +bool flush_work_sync(struct work_struct *work) +{ + struct wq_barrier barr; + bool pending, waited; + + /* we'll wait for executions separately, queue barr only if pending */ + pending = start_flush_work(work, &barr, false); + + /* wait for executions to finish */ + waited = wait_on_work(work); + + /* wait for the pending one */ + if (pending) { + wait_for_completion(&barr.done); + destroy_work_on_stack(&barr.work); + } + + return pending || waited; +} +EXPORT_SYMBOL_GPL(flush_work_sync); + /* * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit, * so this work can't be re-armed in any way. @@ -2538,6 +2573,27 @@ bool flush_delayed_work(struct delayed_work *dwork) } EXPORT_SYMBOL(flush_delayed_work); +/** + * flush_delayed_work_sync - wait for a dwork to finish + * @dwork: the delayed work to flush + * + * Delayed timer is cancelled and the pending work is queued for + * execution immediately. Other than timer handling, its behavior + * is identical to flush_work_sync(). + * + * RETURNS: + * %true if flush_work_sync() waited for the work to finish execution, + * %false if it was already idle. 
+ */ +bool flush_delayed_work_sync(struct delayed_work *dwork) +{ + if (del_timer_sync(&dwork->timer)) + __queue_work(raw_smp_processor_id(), + get_work_cwq(&dwork->work)->wq, &dwork->work); + return flush_work_sync(&dwork->work); +} +EXPORT_SYMBOL(flush_delayed_work_sync); + /** * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish * @dwork: the delayed work cancel -- cgit v1.2.3-70-g09d2 From 41945f6ccf1e86f87fddf6b32db9cf431c05fb54 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 16 Sep 2010 19:17:24 +0200 Subject: perf: Avoid RCU vs preemption assumptions The per-pmu per-cpu context patch converted things from get_cpu_var() to this_cpu_ptr(), but that only works if rcu_read_lock() actually disables preemption, and since there is no such guarantee, we need to fix that. Use the newly introduced {get,put}_cpu_ptr(). Signed-off-by: Peter Zijlstra Cc: Tejun Heo LKML-Reference: <20100917093009.308453028@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index baae1367e94..c16158c77df 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -3836,18 +3836,20 @@ static void perf_event_task_event(struct perf_task_event *task_event) rcu_read_lock(); list_for_each_entry_rcu(pmu, &pmus, entry) { - cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); + cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); perf_event_task_ctx(&cpuctx->ctx, task_event); ctx = task_event->task_ctx; if (!ctx) { ctxn = pmu->task_ctx_nr; if (ctxn < 0) - continue; + goto next; ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); } if (ctx) perf_event_task_ctx(ctx, task_event); +next: + put_cpu_ptr(pmu->pmu_cpu_context); } rcu_read_unlock(); } @@ -3969,16 +3971,18 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event) rcu_read_lock(); list_for_each_entry_rcu(pmu, &pmus, entry) { - cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); + cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); perf_event_comm_ctx(&cpuctx->ctx, comm_event); ctxn = pmu->task_ctx_nr; if (ctxn < 0) - continue; + goto next; ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); if (ctx) perf_event_comm_ctx(ctx, comm_event); +next: + put_cpu_ptr(pmu->pmu_cpu_context); } rcu_read_unlock(); } @@ -4152,19 +4156,21 @@ got_name: rcu_read_lock(); list_for_each_entry_rcu(pmu, &pmus, entry) { - cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); + cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, vma->vm_flags & VM_EXEC); ctxn = pmu->task_ctx_nr; if (ctxn < 0) - continue; + goto next; ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); if (ctx) { perf_event_mmap_ctx(ctx, mmap_event, vma->vm_flags & VM_EXEC); } +next: + put_cpu_ptr(pmu->pmu_cpu_context); } rcu_read_unlock(); -- cgit v1.2.3-70-g09d2 From 58b26c4c025778c09c7a1438ff185080e11b7d0a Mon Sep 17 00:00:00 2001 From: Venkatesh Pallipadi Date: Fri, 10 Sep 2010 18:19:17 -0700 Subject: sched: Increment cache_nice_tries only on periodic lb scheduler uses cache_nice_tries as an indicator to do cache_hot and active load balance, when normal load balance fails. Currently, this value is changed on any failed load balance attempt. That ends up being not so nice to workloads that enter/exit idle often, as they do more frequent new_idle balance and that pretty soon results in cache hot tasks being pulled in. Making the cache_nice_tries ignore failed new_idle balance seems to make better sense. 
With that only the failed load balance in periodic load balance gets accounted and the rate of accumulation of cache_nice_tries will not depend on idle entry/exit (short running sleep-wakeup kind of tasks). This reduces movement of cache_hot tasks. schedstat diff (after-before) excerpt from a workload that has frequent and short wakeup-idle pattern (:2 in cpu col below refers to NEWIDLE idx) This snapshot was across ~400 seconds. Without this change: domainstats: domain0 cpu cnt bln fld imb gain hgain nobusyq nobusyg 0:2 306487 219575 73167 110069413 44583 19070 1172 218403 1:2 292139 194853 81421 120893383 50745 21902 1259 193594 2:2 283166 174607 91359 129699642 54931 23688 1287 173320 3:2 273998 161788 93991 132757146 57122 24351 1366 160422 4:2 289851 215692 62190 83398383 36377 13680 851 214841 5:2 316312 222146 77605 117582154 49948 20281 988 221158 6:2 297172 195596 83623 122133390 52801 21301 929 194667 7:2 283391 178078 86378 126622761 55122 22239 928 177150 8:2 297655 210359 72995 110246694 45798 19777 1125 209234 9:2 297357 202011 79363 119753474 50953 22088 1089 200922 10:2 278797 178703 83180 122514385 52969 22726 1128 177575 11:2 272661 167669 86978 127342327 55857 24342 1195 166474 12:2 293039 204031 73211 110282059 47285 19651 948 203083 13:2 289502 196762 76803 114712942 49339 20547 1016 195746 14:2 264446 169609 78292 115715605 50459 21017 982 168627 15:2 260968 163660 80142 116811793 51483 21281 1064 162596 With this change: domainstats: domain0 cpu cnt bln fld imb gain hgain nobusyq nobusyg 0:2 272347 187380 77455 105420270 24975 1 953 186427 1:2 267276 172360 86234 116242264 28087 6 1028 171332 2:2 259769 156777 93281 123243134 30555 1 1043 155734 3:2 250870 143129 97627 127370868 32026 6 1188 141941 4:2 248422 177116 64096 78261112 22202 2 757 176359 5:2 275595 180683 84950 116075022 29400 6 778 179905 6:2 262418 162609 88944 119256898 31056 4 817 161792 7:2 252204 147946 92646 122388300 32879 4 824 147122 8:2 262335 172239 81631 110477214 26599 4 864 171375 9:2 261563 164775 88016 117203621 28331 3 849 163926 10:2 243389 140949 93379 121353071 29585 2 909 140040 11:2 242795 134651 98310 124768957 30895 2 1016 133635 12:2 255234 166622 79843 104696912 26483 4 746 165876 13:2 244944 151595 83855 109808099 27787 3 801 150794 14:2 241301 140982 89935 116954383 30403 6 845 140137 15:2 232271 128564 92821 119185207 31207 4 1416 127148 Signed-off-by: Venkatesh Pallipadi Signed-off-by: Peter Zijlstra LKML-Reference: <1284167957-3675-1-git-send-email-venki@google.com> Signed-off-by: Ingo Molnar --- kernel/sched_fair.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index a171138a940..aa16cf1eb8f 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -3031,7 +3031,14 @@ redo: if (!ld_moved) { schedstat_inc(sd, lb_failed[idle]); - sd->nr_balance_failed++; + /* + * Increment the failure counter only on periodic balance. + * We do not want newidle balance, which can be very + * frequent, pollute the failure counter causing + * excessive cache_hot migrations and active balances. 
+ */ + if (idle != CPU_NEWLY_IDLE) + sd->nr_balance_failed++; if (need_active_balance(sd, sd_idle, idle, cpu_of(busiest), this_cpu)) { -- cgit v1.2.3-70-g09d2 From 43fa5460fe60dea5c610490a1d263415419c60f6 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 20 Sep 2010 22:40:03 -0400 Subject: sched: Try not to migrate higher priority RT tasks When first working on the RT scheduler design, we concentrated on keeping all CPUs running RT tasks instead of having multiple RT tasks on a single CPU waiting for the migration thread to move them. Instead we take a more proactive stance and push or pull RT tasks from one CPU to another on wakeup or scheduling. When an RT task wakes up on a CPU that is running another RT task, instead of preempting it and killing the cache of the running RT task, we look to see if we can migrate the RT task that is waking up, even if the RT task waking up is of higher priority. This may sound a bit odd, but RT tasks should be limited in migration by the user anyway. But in practice, people do not do this, which causes high prio RT tasks to bounce around the CPUs. This becomes even worse when we have priority inheritance, because a high prio task can block on a lower prio task and boost its priority. When the lower prio task wakes up the high prio task, if it happens to be on the same CPU it will migrate off of it. But in reality, the above does not happen much either, because when the lower prio task (which has already been boosted) was itself woken on the same CPU as the higher prio task, it would already have migrated off of it. But anyway, we do not want to migrate them either. To examine the scheduling, I created a test program and examined it under kernelshark. The test program created CPU * 2 threads, where each thread had a different priority. The program takes different options. The options used in this change log were to have priority inheritance mutexes or not. All threads did the following loop: static void grab_lock(long id, int iter, int l) { ftrace_write("thread %ld iter %d, taking lock %d\n", id, iter, l); pthread_mutex_lock(&locks[l]); ftrace_write("thread %ld iter %d, took lock %d\n", id, iter, l); busy_loop(nr_tasks - id); ftrace_write("thread %ld iter %d, unlock lock %d\n", id, iter, l); pthread_mutex_unlock(&locks[l]); } void *start_task(void *id) { [...] while (!done) { for (l = 0; l < nr_locks; l++) { grab_lock(id, i, l); ftrace_write("thread %ld iter %d sleeping\n", id, i); ms_sleep(id); } i++; } [...] } The busy_loop(ms) keeps the CPU spinning for ms milliseconds. The ms_sleep(ms) sleeps for ms milliseconds. The ftrace_write() writes to the ftrace buffer to help analyze via ftrace. The higher the id, the higher the prio, the shorter its busy loop, but the longer it sleeps. This is usually the case with RT tasks: the lower priority tasks usually run longer than higher priority tasks. At the end of the test, it records the number of loops each thread took, as well as the number of voluntary preemptions, non-voluntary preemptions, and number of migrations each thread took, taking the information from /proc/$$/sched and /proc/$$/status. Running this on a 4 CPU processor, the results without changes to the kernel looked like this: Task vol nonvol migrated iterations ---- --- ------ -------- ---------- 0: 53 3220 1470 98 1: 562 773 724 98 2: 752 933 1375 98 3: 749 39 697 98 4: 758 5 515 98 5: 764 2 679 99 6: 761 2 535 99 7: 757 3 346 99 total: 5156 4977 6341 787 Each thread regardless of priority migrated a few hundred times.
The higher priority tasks were a little better, but still took quite an impact. By letting higher priority tasks bump the lower prio task from the CPU, things changed a bit: Task vol nonvol migrated iterations ---- --- ------ -------- ---------- 0: 37 2835 1937 98 1: 666 1821 1865 98 2: 654 1003 1385 98 3: 664 635 973 99 4: 698 197 352 99 5: 703 101 159 99 6: 708 1 75 99 7: 713 1 2 99 total: 4843 6594 6748 789 The total # of migrations did not change (several runs showed the difference all within the noise). But we now see a dramatic improvement to the higher priority tasks. (kernelshark showed that the watchdog timer bumped the highest priority task to give it the 2 count. This was actually consistent with every run.) Notice that the # of iterations did not change either. The above was with priority inheritance mutexes. That is, when the higher priority task blocked on a lower priority task, the lower priority task would inherit the higher priority task (which shows why task 6 was bumped so many times). When not using priority inheritance mutexes, the current kernel shows this: Task vol nonvol migrated iterations ---- --- ------ -------- ---------- 0: 56 3101 1892 95 1: 594 713 937 95 2: 625 188 618 95 3: 628 4 491 96 4: 640 7 468 96 5: 631 2 501 96 6: 641 1 466 96 7: 643 2 497 96 total: 4458 4018 5870 765 Not much changed with or without priority inheritance mutexes. But if we let the high priority task bump lower priority tasks on wakeup we see: Task vol nonvol migrated iterations ---- --- ------ -------- ---------- 0: 115 3439 2782 98 1: 633 1354 1583 99 2: 652 919 1218 99 3: 645 713 934 99 4: 690 3 3 99 5: 694 1 4 99 6: 720 3 4 99 7: 747 0 1 100 Which shows an even bigger change. The big difference between task 3 and task 4 is because we have only 4 CPUs on the machine, causing the 4 highest prio tasks to always have preference. Although I did not measure cache misses, and I'm sure there would be little to measure since the test was not data intensive, I could imagine large improvements for higher priority tasks when dealing with lower priority tasks. Thus, I'm satisfied with making the change and agreeing with what Gregory Haskins argued a few years ago when we first had this discussion. One final note. All tasks in the above tests were RT tasks. Any RT task will always preempt a non RT task that is running on the CPU the RT task wants to run on. Signed-off-by: Steven Rostedt Signed-off-by: Peter Zijlstra Cc: Gregory Haskins LKML-Reference: <20100921024138.605460343@goodmis.org> Signed-off-by: Ingo Molnar --- kernel/sched_rt.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index d10c80ebb67..6a02b38ab65 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -960,18 +960,18 @@ select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags) * runqueue. Otherwise simply start this RT task * on its current runqueue. * - * We want to avoid overloading runqueues. Even if - * the RT task is of higher priority than the current RT task. - * RT tasks behave differently than other tasks. If - * one gets preempted, we try to push it off to another queue. - * So trying to keep a preempting RT task on the same - * cache hot CPU will force the running RT task to - * a cold CPU. So we waste all the cache for the lower - * RT task in hopes of saving some of a RT task - * that is just being woken and probably will have - * cold cache anyway.
+ * We want to avoid overloading runqueues. If the woken + * task is a higher priority, then it will stay on this CPU + * and the lower prio task should be moved to another CPU. + * Even though this will probably make the lower prio task + * lose its cache, we do not want to bounce a higher task + * around just because it gave up its CPU, perhaps for a + * lock? + * + * For equal prio tasks, we just let the scheduler sort it out. */ if (unlikely(rt_task(rq->curr)) && + rq->curr->prio < p->prio && (p->rt.nr_cpus_allowed > 1)) { int cpu = find_lowest_rq(p); @@ -1491,6 +1491,8 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p) if (!task_running(rq, p) && !test_tsk_need_resched(rq->curr) && has_pushable_tasks(rq) && + rt_task(rq->curr) && + rq->curr->prio < p->prio && p->rt.nr_cpus_allowed > 1) push_rt_tasks(rq); } -- cgit v1.2.3-70-g09d2 From b3bc211cfe7d5fe94b310480d78e00bea96fbf2a Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 20 Sep 2010 22:40:04 -0400 Subject: sched: Give CPU bound RT tasks preference If a high priority task is waking up on a CPU that is running a lower priority task that is bound to a CPU, see if we can move the high RT task to another CPU first. Note, if all other CPUs are running higher priority tasks than the CPU bounded current task, then it will be preempted regardless. Signed-off-by: Steven Rostedt Signed-off-by: Peter Zijlstra Cc: Gregory Haskins LKML-Reference: <20100921024138.888922071@goodmis.org> Signed-off-by: Ingo Molnar --- kernel/sched_rt.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 6a02b38ab65..baef30f0840 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -971,7 +971,8 @@ select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags) * For equal prio tasks, we just let the scheduler sort it out. */ if (unlikely(rt_task(rq->curr)) && - rq->curr->prio < p->prio && + (rq->curr->rt.nr_cpus_allowed < 2 || + rq->curr->prio < p->prio) && (p->rt.nr_cpus_allowed > 1)) { int cpu = find_lowest_rq(p); @@ -1491,9 +1492,10 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p) if (!task_running(rq, p) && !test_tsk_need_resched(rq->curr) && has_pushable_tasks(rq) && + p->rt.nr_cpus_allowed > 1 && rt_task(rq->curr) && - rq->curr->prio < p->prio && - p->rt.nr_cpus_allowed > 1) + (rq->curr->rt.nr_cpus_allowed < 2 || + rq->curr->prio < p->prio)) push_rt_tasks(rq); } -- cgit v1.2.3-70-g09d2 From a8027073eb127cd207070891374b5c54c2ce3d23 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 20 Sep 2010 15:13:34 -0400 Subject: tracing/sched: Add sched_pi_setprio tracepoint Add a tracepoint that shows the priority of a task being boosted via priority inheritance. Cc: Gregory Haskins Acked-by: Peter Zijlstra Signed-off-by: Steven Rostedt --- include/trace/events/sched.h | 29 +++++++++++++++++++++++++++++ kernel/sched.c | 1 + 2 files changed, 30 insertions(+) (limited to 'kernel') diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 9208c92aeab..f6334782a59 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -362,6 +362,35 @@ TRACE_EVENT(sched_stat_runtime, (unsigned long long)__entry->vruntime) ); +/* + * Tracepoint for showing priority inheritance modifying a tasks + * priority. 
+ */ +TRACE_EVENT(sched_pi_setprio, + + TP_PROTO(struct task_struct *tsk, int newprio), + + TP_ARGS(tsk, newprio), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field( int, oldprio ) + __field( int, newprio ) + ), + + TP_fast_assign( + memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); + __entry->pid = tsk->pid; + __entry->oldprio = tsk->prio; + __entry->newprio = newprio; + ), + + TP_printk("comm=%s pid=%d oldprio=%d newprio=%d", + __entry->comm, __entry->pid, + __entry->oldprio, __entry->newprio) +); + #endif /* _TRACE_SCHED_H */ /* This part must be outside protection */ diff --git a/kernel/sched.c b/kernel/sched.c index 9ca8ad05950..4ad47381435 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4355,6 +4355,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio) rq = task_rq_lock(p, &flags); + trace_sched_pi_setprio(p, prio); oldprio = p->prio; prev_class = p->sched_class; on_rq = p->se.on_rq; -- cgit v1.2.3-70-g09d2 From 676cb02dc32adef13d9efb5ea52079e4ede1e3ec Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 20 Jul 2009 23:33:49 +0200 Subject: softirqs: Make wakeup_softirqd static No users outside of kernel/softirq.c Signed-off-by: Thomas Gleixner --- include/linux/interrupt.h | 1 - kernel/softirq.c | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'kernel') diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index a0384a4d1e6..0a9141e6924 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -410,7 +410,6 @@ extern void softirq_init(void); #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0) extern void raise_softirq_irqoff(unsigned int nr); extern void raise_softirq(unsigned int nr); -extern void wakeup_softirqd(void); /* This is the worklist that queues up per-cpu softirq work. * diff --git a/kernel/softirq.c b/kernel/softirq.c index 07b4f1b1a73..80f6e3bd1d2 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -67,7 +67,7 @@ char *softirq_to_name[NR_SOFTIRQS] = { * to the pending events, so lets the scheduler to balance * the softirq load for us. */ -void wakeup_softirqd(void) +static void wakeup_softirqd(void) { /* Interrupts are disabled: no need to stop preemption */ struct task_struct *tsk = __get_cpu_var(ksoftirqd); -- cgit v1.2.3-70-g09d2 From bf5438fca2950b03c21ad868090cc1a8fcd49536 Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Fri, 17 Sep 2010 11:09:00 -0400 Subject: jump label: Base patch for jump label Base patch to implement 'jump labeling'. Based on a new 'asm goto' inline assembly gcc mechanism, we can now branch to labels from an 'asm goto' statement. This allows us to create a 'no-op' fastpath, which can subsequently be patched with a jump to the slowpath code. This is useful for code which might be rarely used, but which we'd like to be able to call, if needed. Tracepoints are the current use case that these are being implemented for. Acked-by: David S.
Miller Signed-off-by: Jason Baron LKML-Reference: [ cleaned up some formatting ] Signed-off-by: Steven Rostedt --- Makefile | 5 + arch/Kconfig | 3 + arch/x86/include/asm/alternative.h | 3 +- arch/x86/kernel/alternative.c | 2 +- include/asm-generic/vmlinux.lds.h | 10 ++ include/linux/jump_label.h | 58 +++++++ include/linux/module.h | 5 +- kernel/Makefile | 2 +- kernel/jump_label.c | 346 +++++++++++++++++++++++++++++++++++++ kernel/kprobes.c | 1 + kernel/module.c | 6 + scripts/gcc-goto.sh | 5 + 12 files changed, 442 insertions(+), 4 deletions(-) create mode 100644 include/linux/jump_label.h create mode 100644 kernel/jump_label.c create mode 100644 scripts/gcc-goto.sh (limited to 'kernel') diff --git a/Makefile b/Makefile index 92ab33f16cf..a906378d505 100644 --- a/Makefile +++ b/Makefile @@ -591,6 +591,11 @@ KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow) # conserve stack if available KBUILD_CFLAGS += $(call cc-option,-fconserve-stack) +# check for 'asm goto' +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y) + KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO +endif + # Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments # But warn user when we do so warn-assign = \ diff --git a/arch/Kconfig b/arch/Kconfig index 4877a8c8ee1..1462d8492d8 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -158,4 +158,7 @@ config HAVE_PERF_EVENTS_NMI subsystem. Also has support for calculating CPU cycle events to determine how many clock cycles in a given period. +config HAVE_ARCH_JUMP_LABEL + bool + source "kernel/gcov/Kconfig" diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 634bf782dca..76561d20ea2 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -4,6 +4,7 @@ #include #include #include +#include #include /* @@ -182,7 +183,7 @@ extern void *text_poke_early(void *addr, const void *opcode, size_t len); extern void *text_poke(void *addr, const void *opcode, size_t len); extern void *text_poke_smp(void *addr, const void *opcode, size_t len); -#if defined(CONFIG_DYNAMIC_FTRACE) +#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) #define IDEAL_NOP_SIZE_5 5 extern unsigned char ideal_nop5[IDEAL_NOP_SIZE_5]; extern void arch_init_ideal_nop5(void); diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 083bd010d92..cb0e6d385f6 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -641,7 +641,7 @@ void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len) return addr; } -#if defined(CONFIG_DYNAMIC_FTRACE) +#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) unsigned char ideal_nop5[IDEAL_NOP_SIZE_5]; diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 8a92a170fb7..ef2af9948ea 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -220,6 +220,8 @@ \ BUG_TABLE \ \ + JUMP_TABLE \ + \ /* PCI quirks */ \ .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \ @@ -563,6 +565,14 @@ #define BUG_TABLE #endif +#define JUMP_TABLE \ + . = ALIGN(8); \ + __jump_table : AT(ADDR(__jump_table) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start___jump_table) = .; \ + *(__jump_table) \ + VMLINUX_SYMBOL(__stop___jump_table) = .; \ + } + #ifdef CONFIG_PM_TRACE #define TRACEDATA \ .
= ALIGN(4); \ diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h new file mode 100644 index 00000000000..de58656d28e --- /dev/null +++ b/include/linux/jump_label.h @@ -0,0 +1,58 @@ +#ifndef _LINUX_JUMP_LABEL_H +#define _LINUX_JUMP_LABEL_H + +#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_HAVE_ARCH_JUMP_LABEL) +# include +# define HAVE_JUMP_LABEL +#endif + +enum jump_label_type { + JUMP_LABEL_ENABLE, + JUMP_LABEL_DISABLE +}; + +struct module; + +#ifdef HAVE_JUMP_LABEL + +extern struct jump_entry __start___jump_table[]; +extern struct jump_entry __stop___jump_table[]; + +extern void arch_jump_label_transform(struct jump_entry *entry, + enum jump_label_type type); +extern void jump_label_update(unsigned long key, enum jump_label_type type); +extern void jump_label_apply_nops(struct module *mod); +extern void arch_jump_label_text_poke_early(jump_label_t addr); + +#define enable_jump_label(key) \ + jump_label_update((unsigned long)key, JUMP_LABEL_ENABLE); + +#define disable_jump_label(key) \ + jump_label_update((unsigned long)key, JUMP_LABEL_DISABLE); + +#else + +#define JUMP_LABEL(key, label) \ +do { \ + if (unlikely(*key)) \ + goto label; \ +} while (0) + +#define enable_jump_label(cond_var) \ +do { \ + *(cond_var) = 1; \ +} while (0) + +#define disable_jump_label(cond_var) \ +do { \ + *(cond_var) = 0; \ +} while (0) + +static inline int jump_label_apply_nops(struct module *mod) +{ + return 0; +} + +#endif + +#endif diff --git a/include/linux/module.h b/include/linux/module.h index 8a6b9fdc7ff..403ac26023c 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -350,7 +350,10 @@ struct module struct tracepoint *tracepoints; unsigned int num_tracepoints; #endif - +#ifdef HAVE_JUMP_LABEL + struct jump_entry *jump_entries; + unsigned int num_jump_entries; +#endif #ifdef CONFIG_TRACING const char **trace_bprintk_fmt_start; unsigned int num_trace_bprintk_fmt; diff --git a/kernel/Makefile b/kernel/Makefile index 0b72d1a74be..d52b473c99a 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -10,7 +10,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \ kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \ - async.o range.o + async.o range.o jump_label.o obj-$(CONFIG_HAVE_EARLY_RES) += early_res.o obj-y += groups.o diff --git a/kernel/jump_label.c b/kernel/jump_label.c new file mode 100644 index 00000000000..460fd40112b --- /dev/null +++ b/kernel/jump_label.c @@ -0,0 +1,346 @@ +/* + * jump label support + * + * Copyright (C) 2009 Jason Baron + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef HAVE_JUMP_LABEL + +#define JUMP_LABEL_HASH_BITS 6 +#define JUMP_LABEL_TABLE_SIZE (1 << JUMP_LABEL_HASH_BITS) +static struct hlist_head jump_label_table[JUMP_LABEL_TABLE_SIZE]; + +/* mutex to protect coming/going of the the jump_label table */ +static DEFINE_MUTEX(jump_label_mutex); + +struct jump_label_entry { + struct hlist_node hlist; + struct jump_entry *table; + int nr_entries; + /* hang modules off here */ + struct hlist_head modules; + unsigned long key; +}; + +struct jump_label_module_entry { + struct hlist_node hlist; + struct jump_entry *table; + int nr_entries; + struct module *mod; +}; + +static int jump_label_cmp(const void *a, const void *b) +{ + const struct jump_entry *jea = a; + const struct jump_entry *jeb = b; + + if (jea->key < jeb->key) + return -1; + + if (jea->key > jeb->key) + 
return 1; + + return 0; +} + +static void +sort_jump_label_entries(struct jump_entry *start, struct jump_entry *stop) +{ + unsigned long size; + + size = (((unsigned long)stop - (unsigned long)start) + / sizeof(struct jump_entry)); + sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL); +} + +static struct jump_label_entry *get_jump_label_entry(jump_label_t key) +{ + struct hlist_head *head; + struct hlist_node *node; + struct jump_label_entry *e; + u32 hash = jhash((void *)&key, sizeof(jump_label_t), 0); + + head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)]; + hlist_for_each_entry(e, node, head, hlist) { + if (key == e->key) + return e; + } + return NULL; +} + +static struct jump_label_entry * +add_jump_label_entry(jump_label_t key, int nr_entries, struct jump_entry *table) +{ + struct hlist_head *head; + struct jump_label_entry *e; + u32 hash; + + e = get_jump_label_entry(key); + if (e) + return ERR_PTR(-EEXIST); + + e = kmalloc(sizeof(struct jump_label_entry), GFP_KERNEL); + if (!e) + return ERR_PTR(-ENOMEM); + + hash = jhash((void *)&key, sizeof(jump_label_t), 0); + head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)]; + e->key = key; + e->table = table; + e->nr_entries = nr_entries; + INIT_HLIST_HEAD(&(e->modules)); + hlist_add_head(&e->hlist, head); + return e; +} + +static int +build_jump_label_hashtable(struct jump_entry *start, struct jump_entry *stop) +{ + struct jump_entry *iter, *iter_begin; + struct jump_label_entry *entry; + int count; + + sort_jump_label_entries(start, stop); + iter = start; + while (iter < stop) { + entry = get_jump_label_entry(iter->key); + if (!entry) { + iter_begin = iter; + count = 0; + while ((iter < stop) && + (iter->key == iter_begin->key)) { + iter++; + count++; + } + entry = add_jump_label_entry(iter_begin->key, + count, iter_begin); + if (IS_ERR(entry)) + return PTR_ERR(entry); + } else { + WARN_ONCE(1, KERN_ERR "build_jump_hashtable: unexpected entry!\n"); + return -1; + } + } + return 0; +} + +/*** + * jump_label_update - update jump label text + * @key - key value associated with a a jump label + * @type - enum set to JUMP_LABEL_ENABLE or JUMP_LABEL_DISABLE + * + * Will enable/disable the jump for jump label @key, depending on the + * value of @type. 
+ * + */ + +void jump_label_update(unsigned long key, enum jump_label_type type) +{ + struct jump_entry *iter; + struct jump_label_entry *entry; + struct hlist_node *module_node; + struct jump_label_module_entry *e_module; + int count; + + mutex_lock(&jump_label_mutex); + entry = get_jump_label_entry((jump_label_t)key); + if (entry) { + count = entry->nr_entries; + iter = entry->table; + while (count--) { + if (kernel_text_address(iter->code)) + arch_jump_label_transform(iter, type); + iter++; + } + /* eanble/disable jump labels in modules */ + hlist_for_each_entry(e_module, module_node, &(entry->modules), + hlist) { + count = e_module->nr_entries; + iter = e_module->table; + while (count--) { + if (kernel_text_address(iter->code)) + arch_jump_label_transform(iter, type); + iter++; + } + } + } + mutex_unlock(&jump_label_mutex); +} + +static __init int init_jump_label(void) +{ + int ret; + struct jump_entry *iter_start = __start___jump_table; + struct jump_entry *iter_stop = __stop___jump_table; + struct jump_entry *iter; + + mutex_lock(&jump_label_mutex); + ret = build_jump_label_hashtable(__start___jump_table, + __stop___jump_table); + iter = iter_start; + while (iter < iter_stop) { + arch_jump_label_text_poke_early(iter->code); + iter++; + } + mutex_unlock(&jump_label_mutex); + return ret; +} +early_initcall(init_jump_label); + +#ifdef CONFIG_MODULES + +static struct jump_label_module_entry * +add_jump_label_module_entry(struct jump_label_entry *entry, + struct jump_entry *iter_begin, + int count, struct module *mod) +{ + struct jump_label_module_entry *e; + + e = kmalloc(sizeof(struct jump_label_module_entry), GFP_KERNEL); + if (!e) + return ERR_PTR(-ENOMEM); + e->mod = mod; + e->nr_entries = count; + e->table = iter_begin; + hlist_add_head(&e->hlist, &entry->modules); + return e; +} + +static int add_jump_label_module(struct module *mod) +{ + struct jump_entry *iter, *iter_begin; + struct jump_label_entry *entry; + struct jump_label_module_entry *module_entry; + int count; + + /* if the module doesn't have jump label entries, just return */ + if (!mod->num_jump_entries) + return 0; + + sort_jump_label_entries(mod->jump_entries, + mod->jump_entries + mod->num_jump_entries); + iter = mod->jump_entries; + while (iter < mod->jump_entries + mod->num_jump_entries) { + entry = get_jump_label_entry(iter->key); + iter_begin = iter; + count = 0; + while ((iter < mod->jump_entries + mod->num_jump_entries) && + (iter->key == iter_begin->key)) { + iter++; + count++; + } + if (!entry) { + entry = add_jump_label_entry(iter_begin->key, 0, NULL); + if (IS_ERR(entry)) + return PTR_ERR(entry); + } + module_entry = add_jump_label_module_entry(entry, iter_begin, + count, mod); + if (IS_ERR(module_entry)) + return PTR_ERR(module_entry); + } + return 0; +} + +static void remove_jump_label_module(struct module *mod) +{ + struct hlist_head *head; + struct hlist_node *node, *node_next, *module_node, *module_node_next; + struct jump_label_entry *e; + struct jump_label_module_entry *e_module; + int i; + + /* if the module doesn't have jump label entries, just return */ + if (!mod->num_jump_entries) + return; + + for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) { + head = &jump_label_table[i]; + hlist_for_each_entry_safe(e, node, node_next, head, hlist) { + hlist_for_each_entry_safe(e_module, module_node, + module_node_next, + &(e->modules), hlist) { + if (e_module->mod == mod) { + hlist_del(&e_module->hlist); + kfree(e_module); + } + } + if (hlist_empty(&e->modules) && (e->nr_entries == 0)) { + hlist_del(&e->hlist); + 
kfree(e); + } + } + } +} + +static int +jump_label_module_notify(struct notifier_block *self, unsigned long val, + void *data) +{ + struct module *mod = data; + int ret = 0; + + switch (val) { + case MODULE_STATE_COMING: + mutex_lock(&jump_label_mutex); + ret = add_jump_label_module(mod); + if (ret) + remove_jump_label_module(mod); + mutex_unlock(&jump_label_mutex); + break; + case MODULE_STATE_GOING: + mutex_lock(&jump_label_mutex); + remove_jump_label_module(mod); + mutex_unlock(&jump_label_mutex); + break; + } + return ret; +} + +/*** + * apply_jump_label_nops - patch module jump labels with arch_get_jump_label_nop() + * @mod: module to patch + * + * Allow for run-time selection of the optimal nops. Before the module + * loads patch these with arch_get_jump_label_nop(), which is specified by + * the arch specific jump label code. + */ +void jump_label_apply_nops(struct module *mod) +{ + struct jump_entry *iter; + + /* if the module doesn't have jump label entries, just return */ + if (!mod->num_jump_entries) + return; + + iter = mod->jump_entries; + while (iter < mod->jump_entries + mod->num_jump_entries) { + arch_jump_label_text_poke_early(iter->code); + iter++; + } +} + +struct notifier_block jump_label_module_nb = { + .notifier_call = jump_label_module_notify, + .priority = 0, +}; + +static __init int init_jump_label_module(void) +{ + return register_module_notifier(&jump_label_module_nb); +} +early_initcall(init_jump_label_module); + +#endif /* CONFIG_MODULES */ + +#endif diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 6dd5359e1f0..18904e42a91 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -47,6 +47,7 @@ #include #include #include +#include #include #include diff --git a/kernel/module.c b/kernel/module.c index d0b5f8db11b..eba134157ef 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -55,6 +55,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include @@ -2308,6 +2309,11 @@ static void find_module_sections(struct module *mod, struct load_info *info) sizeof(*mod->tracepoints), &mod->num_tracepoints); #endif +#ifdef HAVE_JUMP_LABEL + mod->jump_entries = section_objs(info, "__jump_table", + sizeof(*mod->jump_entries), + &mod->num_jump_entries); +#endif #ifdef CONFIG_EVENT_TRACING mod->trace_events = section_objs(info, "_ftrace_events", sizeof(*mod->trace_events), diff --git a/scripts/gcc-goto.sh b/scripts/gcc-goto.sh new file mode 100644 index 00000000000..8e82424be7a --- /dev/null +++ b/scripts/gcc-goto.sh @@ -0,0 +1,5 @@ +#!/bin/sh +# Test for gcc 'asm goto' suport +# Copyright (C) 2010, Jason Baron + +echo "int main(void) { entry: asm goto (\"\"::::entry); return 0; }" | $1 -x c - -c -o /dev/null >/dev/null 2>&1 && echo "y" -- cgit v1.2.3-70-g09d2 From e0cf0cd49632552f063fb3ae58691946da45fb2e Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Fri, 17 Sep 2010 11:09:04 -0400 Subject: jump label: Initialize workqueue tracepoints *before* they are registered Initialize the workqueue data structures *before* they are registered so that they are ready for callbacks. 
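The hazard being fixed is easy to see in reduced form. The sketch below is illustrative only, not part of the patch; it reuses the names from kernel/trace/trace_workqueue.c that appear in the diff that follows. The key point: once a register_trace_*() call returns, the probe may fire immediately on any CPU, so every lock and list the probe touches must already be initialized.

	/* Illustrative sketch of the required ordering, assuming the
	 * trace_workqueue helpers shown in the diff below. */
	static int __init example_early_init(void)
	{
		int cpu, ret;

		/* 1) Make the per-cpu state usable first... */
		for_each_possible_cpu(cpu) {
			spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
			INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
		}

		/* 2) ...only then expose it: the probe can be invoked as
		 * soon as registration succeeds. */
		ret = register_trace_workqueue_insertion(probe_workqueue_insertion, NULL);
		return ret;
	}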
Signed-off-by: Jason Baron LKML-Reference: Signed-off-by: Steven Rostedt --- kernel/trace/trace_workqueue.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c index a7cc3793baf..209b379a472 100644 --- a/kernel/trace/trace_workqueue.c +++ b/kernel/trace/trace_workqueue.c @@ -263,6 +263,11 @@ int __init trace_workqueue_early_init(void) { int ret, cpu; + for_each_possible_cpu(cpu) { + spin_lock_init(&workqueue_cpu_stat(cpu)->lock); + INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list); + } + ret = register_trace_workqueue_insertion(probe_workqueue_insertion, NULL); if (ret) goto out; @@ -279,11 +284,6 @@ int __init trace_workqueue_early_init(void) if (ret) goto no_creation; - for_each_possible_cpu(cpu) { - spin_lock_init(&workqueue_cpu_stat(cpu)->lock); - INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list); - } - return 0; no_creation: -- cgit v1.2.3-70-g09d2 From 4c3ef6d79328c0e23ade60cbfc8d496123a6855c Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Fri, 17 Sep 2010 11:09:08 -0400 Subject: jump label: Add jump_label_text_reserved() to reserve jump points Add a jump_label_text_reserved(void *start, void *end), so that other pieces of code that want to modify kernel text, can first verify that jump label has not reserved the instruction. Acked-by: Masami Hiramatsu Signed-off-by: Jason Baron LKML-Reference: <06236663a3a7b1c1f13576bb9eccb6d9c17b7bfe.1284733808.git.jbaron@redhat.com> Signed-off-by: Steven Rostedt --- arch/x86/kernel/kprobes.c | 3 +- include/linux/jump_label.h | 8 ++++- kernel/jump_label.c | 83 ++++++++++++++++++++++++++++++++++++++++++++++ kernel/kprobes.c | 3 +- 4 files changed, 94 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index e05952af5d2..1cbd54c0df9 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -1218,7 +1218,8 @@ static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src) } /* Check whether the address range is reserved */ if (ftrace_text_reserved(src, src + len - 1) || - alternatives_text_reserved(src, src + len - 1)) + alternatives_text_reserved(src, src + len - 1) || + jump_label_text_reserved(src, src + len - 1)) return -EBUSY; return len; diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index de58656d28e..b72cd9f92c2 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -20,9 +20,10 @@ extern struct jump_entry __stop___jump_table[]; extern void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type); +extern void arch_jump_label_text_poke_early(jump_label_t addr); extern void jump_label_update(unsigned long key, enum jump_label_type type); extern void jump_label_apply_nops(struct module *mod); -extern void arch_jump_label_text_poke_early(jump_label_t addr); +extern int jump_label_text_reserved(void *start, void *end); #define enable_jump_label(key) \ jump_label_update((unsigned long)key, JUMP_LABEL_ENABLE); @@ -53,6 +54,11 @@ static inline int jump_label_apply_nops(struct module *mod) return 0; } +static inline int jump_label_text_reserved(void *start, void *end) +{ + return 0; +} + #endif #endif diff --git a/kernel/jump_label.c b/kernel/jump_label.c index 460fd40112b..7be868bf25c 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -177,6 +177,89 @@ void jump_label_update(unsigned long key, enum jump_label_type type) mutex_unlock(&jump_label_mutex); } +static int addr_conflict(struct 
jump_entry *entry, void *start, void *end) +{ + if (entry->code <= (unsigned long)end && + entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start) + return 1; + + return 0; +} + +#ifdef CONFIG_MODULES + +static int module_conflict(void *start, void *end) +{ + struct hlist_head *head; + struct hlist_node *node, *node_next, *module_node, *module_node_next; + struct jump_label_entry *e; + struct jump_label_module_entry *e_module; + struct jump_entry *iter; + int i, count; + int conflict = 0; + + for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) { + head = &jump_label_table[i]; + hlist_for_each_entry_safe(e, node, node_next, head, hlist) { + hlist_for_each_entry_safe(e_module, module_node, + module_node_next, + &(e->modules), hlist) { + count = e_module->nr_entries; + iter = e_module->table; + while (count--) { + if (addr_conflict(iter, start, end)) { + conflict = 1; + goto out; + } + iter++; + } + } + } + } +out: + return conflict; +} + +#endif + +/*** + * jump_label_text_reserved - check if addr range is reserved + * @start: start text addr + * @end: end text addr + * + * checks if the text addr located between @start and @end + * overlaps with any of the jump label patch addresses. Code + * that wants to modify kernel text should first verify that + * it does not overlap with any of the jump label addresses. + * + * returns 1 if there is an overlap, 0 otherwise + */ +int jump_label_text_reserved(void *start, void *end) +{ + struct jump_entry *iter; + struct jump_entry *iter_start = __start___jump_table; + struct jump_entry *iter_stop = __start___jump_table; + int conflict = 0; + + mutex_lock(&jump_label_mutex); + iter = iter_start; + while (iter < iter_stop) { + if (addr_conflict(iter, start, end)) { + conflict = 1; + goto out; + } + iter++; + } + + /* now check modules */ +#ifdef CONFIG_MODULES + conflict = module_conflict(start, end); +#endif +out: + mutex_unlock(&jump_label_mutex); + return conflict; +} + static __init int init_jump_label(void) { int ret; diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 18904e42a91..ec4210c6501 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1147,7 +1147,8 @@ int __kprobes register_kprobe(struct kprobe *p) preempt_disable(); if (!kernel_text_address((unsigned long) p->addr) || in_kprobes_functions((unsigned long) p->addr) || - ftrace_text_reserved(p->addr, p->addr)) { + ftrace_text_reserved(p->addr, p->addr) || + jump_label_text_reserved(p->addr, p->addr)) { preempt_enable(); return -EINVAL; } -- cgit v1.2.3-70-g09d2 From 8f7b50c514206211cc282a4247f7b12f18dee674 Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Fri, 17 Sep 2010 11:09:13 -0400 Subject: jump label: Tracepoint support for jump labels Make use of the jump label infrastructure for tracepoints. 
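In reduced form, the pattern this enables looks like the sketch below. It is illustrative only; it uses the JUMP_LABEL() and enable_jump_label()/disable_jump_label() interfaces introduced by the base patch earlier in this series, together with a hypothetical tracepoint key.

	/* Sketch: a tracepoint fast path built on a jump label. With asm goto
	 * support, JUMP_LABEL() compiles to a patchable no-op; without it, it
	 * degrades to "if (unlikely(*key)) goto label;". */
	static int example_tracepoint_state;	/* hypothetical key */

	static inline void trace_example(void)
	{
		JUMP_LABEL(&example_tracepoint_state, do_trace);
		return;				/* disabled: straight-line, no branch */
	do_trace:
		/* enabled: call the registered probe functions */
		return;
	}

Registering the first probe flips the key with enable_jump_label(&example_tracepoint_state), which patches the no-op into a jump to the slowpath; removing the last probe calls disable_jump_label() to restore the no-op.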
Signed-off-by: Jason Baron LKML-Reference: Signed-off-by: Steven Rostedt --- include/linux/tracepoint.h | 5 ++++- kernel/tracepoint.c | 14 ++++++++++++-- 2 files changed, 16 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 103d1b61aac..a4a90b6726c 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -17,6 +17,7 @@ #include #include #include +#include struct module; struct tracepoint; @@ -145,7 +146,9 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin, extern struct tracepoint __tracepoint_##name; \ static inline void trace_##name(proto) \ { \ - if (unlikely(__tracepoint_##name.state)) \ + JUMP_LABEL(&__tracepoint_##name.state, do_trace); \ + return; \ +do_trace: \ __DO_TRACE(&__tracepoint_##name, \ TP_PROTO(data_proto), \ TP_ARGS(data_args)); \ diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index c77f3eceea2..d6073a50a6c 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c @@ -25,6 +25,7 @@ #include #include #include +#include extern struct tracepoint __start___tracepoints[]; extern struct tracepoint __stop___tracepoints[]; @@ -263,7 +264,13 @@ static void set_tracepoint(struct tracepoint_entry **entry, * is used. */ rcu_assign_pointer(elem->funcs, (*entry)->funcs); - elem->state = active; + if (!elem->state && active) { + enable_jump_label(&elem->state); + elem->state = active; + } else if (elem->state && !active) { + disable_jump_label(&elem->state); + elem->state = active; + } } /* @@ -277,7 +284,10 @@ static void disable_tracepoint(struct tracepoint *elem) if (elem->unregfunc && elem->state) elem->unregfunc(); - elem->state = 0; + if (elem->state) { + disable_jump_label(&elem->state); + elem->state = 0; + } rcu_assign_pointer(elem->funcs, NULL); } -- cgit v1.2.3-70-g09d2 From 1ef21199a5d98702755cfa0df39b8b5112c423d6 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Fri, 3 Sep 2010 13:19:04 +1000 Subject: powerpc: define a compat_sys_recv cond_syscall Signed-off-by: Stephen Rothwell Signed-off-by: Benjamin Herrenschmidt --- kernel/sys_ni.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index bad369ec540..c782fe9924c 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -50,6 +50,7 @@ cond_syscall(compat_sys_sendmsg); cond_syscall(sys_recvmsg); cond_syscall(sys_recvmmsg); cond_syscall(compat_sys_recvmsg); +cond_syscall(compat_sys_recv); cond_syscall(compat_sys_recvfrom); cond_syscall(compat_sys_recvmmsg); cond_syscall(sys_socketcall); -- cgit v1.2.3-70-g09d2 From 99a51792dbb7e6e39b2a4ebcfe202f1dcc7354c4 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sat, 4 Sep 2010 18:52:50 -0700 Subject: kernel/pm_qos_params.c: Remove unnecessary casts of private_data Signed-off-by: Joe Perches Signed-off-by: Jiri Kosina --- kernel/pm_qos_params.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c index 996a4dec5f9..49829c5b4b4 100644 --- a/kernel/pm_qos_params.c +++ b/kernel/pm_qos_params.c @@ -394,7 +394,7 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf, } else return -EINVAL; - pm_qos_req = (struct pm_qos_request_list *)filp->private_data; + pm_qos_req = filp->private_data; pm_qos_update_request(pm_qos_req, value); return count; -- cgit v1.2.3-70-g09d2 From 829f8ed2c963df7c23d1c644db6c4387eb1601fa Mon Sep 17 00:00:00 2001 From: Christian Dietrich Date: Mon, 6 Sep 2010 16:37:12 +0200 
Subject: kernel: Remove undead ifdef CONFIG_DEBUG_LOCK_ALLOC The CONFIG_DEBUG_LOCK_ALLOC ifdef isn't necessary at this point, because it is checked in an outer ifdef level already and has no effect here. Signed-off-by: Christian Dietrich Signed-off-by: Paul E. McKenney --- kernel/srcu.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'kernel') diff --git a/kernel/srcu.c b/kernel/srcu.c index 2980da3fd50..c71e0750053 100644 --- a/kernel/srcu.c +++ b/kernel/srcu.c @@ -46,11 +46,9 @@ static int init_srcu_struct_fields(struct srcu_struct *sp) int __init_srcu_struct(struct srcu_struct *sp, const char *name, struct lock_class_key *key) { -#ifdef CONFIG_DEBUG_LOCK_ALLOC /* Don't re-initialize a lock while it is held. */ debug_check_no_locks_freed((void *)sp, sizeof(*sp)); lockdep_init_map(&sp->dep_map, name, key, 0); -#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ return init_srcu_struct_fields(sp); } EXPORT_SYMBOL_GPL(__init_srcu_struct); -- cgit v1.2.3-70-g09d2 From 0ddea0ead2ced99eaaaddff2beb755381e5c89f8 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sun, 19 Sep 2010 21:06:14 -0700 Subject: rcu: fix sparse errors in rcutorture.c Add the sparse __rcu address-space identifier and make a couple of variables static. Signed-off-by: Paul E. McKenney --- kernel/rcutorture.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c index 729710273dc..9d8e8fb2515 100644 --- a/kernel/rcutorture.c +++ b/kernel/rcutorture.c @@ -120,7 +120,7 @@ struct rcu_torture { }; static LIST_HEAD(rcu_torture_freelist); -static struct rcu_torture *rcu_torture_current; +static struct rcu_torture __rcu *rcu_torture_current; static long rcu_torture_current_version; static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN]; static DEFINE_SPINLOCK(rcu_torture_lock); @@ -153,8 +153,10 @@ int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT; #define FULLSTOP_SHUTDOWN 1 /* System shutdown with rcutorture running. */ #define FULLSTOP_RMMOD 2 /* Normal rmmod of rcutorture. */ static int fullstop = FULLSTOP_RMMOD; -DEFINE_MUTEX(fullstop_mutex); /* Protect fullstop transitions and spawning */ - /* of kthreads. */ +/* + * Protect fullstop transitions and spawning of kthreads. + */ +static DEFINE_MUTEX(fullstop_mutex); /* * Detect and respond to a system shutdown. @@ -737,7 +739,8 @@ rcu_torture_writer(void *arg) continue; rp->rtort_pipe_count = 0; udelay(rcu_random(&rand) & 0x3ff); - old_rp = rcu_torture_current; + old_rp = rcu_dereference_check(rcu_torture_current, + current == writer_task); rp->rtort_mbtest = 1; rcu_assign_pointer(rcu_torture_current, rp); smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */ -- cgit v1.2.3-70-g09d2 From 269dcc1c2ec25864308ee03a3fa26ea819d9f5d0 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 7 Sep 2010 14:23:09 -0700 Subject: rcu: Add tracing data to support queueing models The current tracing data is not sufficient to deduce the average time that a callback spends waiting for a grace period to end. Add three per-CPU counters recording the number of callbacks invoked (ci), the number of callbacks orphaned (co), and the number of callbacks adopted (ca). Given the existing callback queue length (ql), the average wait time in absence of CPU hotplug operations is ql/ci. The units of wait time will be in terms of the duration over which ci was measured. In the presence of CPU hotplug operations, there is room for argument, but ql/(ci-co+ca) won't steer you too far wrong. 
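A hypothetical worked example of that arithmetic (numbers invented purely for illustration): suppose the counters are sampled over a 10-second window and show ql=1000 callbacks queued with ci=50000 callbacks invoked during the window. The average wait is then ql/ci = 1000/50000 = 0.02 sample periods, i.e. roughly 0.2 seconds. If the same window also recorded co=400 callbacks orphaned and ca=100 adopted, the hotplug-aware estimate becomes ql/(ci-co+ca) = 1000/49700, about 0.0201 sample periods, or roughly 0.201 seconds.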
Also fixes a typo called out by Lucas De Marchi . Signed-off-by: Paul E. McKenney --- Documentation/RCU/trace.txt | 13 ++++++++++++- kernel/rcutree.c | 3 +++ kernel/rcutree.h | 3 +++ kernel/rcutree_trace.c | 10 +++++++--- 4 files changed, 25 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt index efd8cc95c06..a851118775d 100644 --- a/Documentation/RCU/trace.txt +++ b/Documentation/RCU/trace.txt @@ -125,6 +125,17 @@ o "b" is the batch limit for this CPU. If more than this number of RCU callbacks is ready to invoke, then the remainder will be deferred. +o "ci" is the number of RCU callbacks that have been invoked for + this CPU. Note that ci+ql is the number of callbacks that have + been registered in absence of CPU-hotplug activity. + +o "co" is the number of RCU callbacks that have been orphaned due to + this CPU going offline. + +o "ca" is the number of RCU callbacks that have been adopted due to + other CPUs going offline. Note that ci+co-ca+ql is the number of + RCU callbacks registered on this CPU. + There is also an rcu/rcudata.csv file with the same information in comma-separated-variable spreadsheet format. @@ -180,7 +191,7 @@ o "s" is the "signaled" state that drives force_quiescent_state()'s o "jfq" is the number of jiffies remaining for this grace period before force_quiescent_state() is invoked to help push things - along. Note that CPUs in dyntick-idle mode thoughout the grace + along. Note that CPUs in dyntick-idle mode throughout the grace period will not report on their own, but rather must be check by some other CPU via force_quiescent_state(). diff --git a/kernel/rcutree.c b/kernel/rcutree.c index 42140a860bb..e75073504a3 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -1004,6 +1004,7 @@ static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp) for (i = 0; i < RCU_NEXT_SIZE; i++) rdp->nxttail[i] = &rdp->nxtlist; rsp->orphan_qlen += rdp->qlen; + rdp->n_cbs_orphaned += rdp->qlen; rdp->qlen = 0; raw_spin_unlock(&rsp->onofflock); /* irqs remain disabled. */ } @@ -1025,6 +1026,7 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp) *rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_list; rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_tail; rdp->qlen += rsp->orphan_qlen; + rdp->n_cbs_adopted += rsp->orphan_qlen; rsp->orphan_cbs_list = NULL; rsp->orphan_cbs_tail = &rsp->orphan_cbs_list; rsp->orphan_qlen = 0; @@ -1156,6 +1158,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) /* Update count, and requeue any remaining callbacks. */ rdp->qlen -= count; + rdp->n_cbs_invoked += count; if (list != NULL) { *tail = rdp->nxtlist; rdp->nxtlist = list; diff --git a/kernel/rcutree.h b/kernel/rcutree.h index 7918ba61873..91d4170c5c1 100644 --- a/kernel/rcutree.h +++ b/kernel/rcutree.h @@ -202,6 +202,9 @@ struct rcu_data { long qlen; /* # of queued callbacks */ long qlen_last_fqs_check; /* qlen at last check for QS forcing */ + unsigned long n_cbs_invoked; /* count of RCU cbs invoked. */ + unsigned long n_cbs_orphaned; /* RCU cbs sent to orphanage. */ + unsigned long n_cbs_adopted; /* RCU cbs adopted from orphanage. */ unsigned long n_force_qs_snap; /* did other CPU force QS recently? 
*/ long blimit; /* Upper limit on a processed batch */ diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c index 458e032a3a3..d15430b9d12 100644 --- a/kernel/rcutree_trace.c +++ b/kernel/rcutree_trace.c @@ -64,7 +64,9 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) rdp->dynticks_fqs); #endif /* #ifdef CONFIG_NO_HZ */ seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi); - seq_printf(m, " ql=%ld b=%ld\n", rdp->qlen, rdp->blimit); + seq_printf(m, " ql=%ld b=%ld", rdp->qlen, rdp->blimit); + seq_printf(m, " ci=%lu co=%lu ca=%lu\n", + rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted); } #define PRINT_RCU_DATA(name, func, m) \ @@ -119,7 +121,9 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp) rdp->dynticks_fqs); #endif /* #ifdef CONFIG_NO_HZ */ seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi); - seq_printf(m, ",%ld,%ld\n", rdp->qlen, rdp->blimit); + seq_printf(m, ",%ld,%ld", rdp->qlen, rdp->blimit); + seq_printf(m, ",%lu,%lu,%lu\n", + rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted); } static int show_rcudata_csv(struct seq_file *m, void *unused) @@ -128,7 +132,7 @@ static int show_rcudata_csv(struct seq_file *m, void *unused) #ifdef CONFIG_NO_HZ seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\","); #endif /* #ifdef CONFIG_NO_HZ */ - seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\"\n"); + seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\",\"ci\",\"co\",\"ca\"\n"); #ifdef CONFIG_TREE_PREEMPT_RCU seq_puts(m, "\"rcu_preempt:\"\n"); PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m); -- cgit v1.2.3-70-g09d2 From d1ea13c6e2cce0106531852daaa93dd97aec9580 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 23 Sep 2010 18:40:07 +0200 Subject: genirq: Cleanup irq_chip->typename leftovers 3 years transition phase is enough. Cleanup the last users and remove the cruft. Signed-off-by: Thomas Gleixner Cc: Leo Chen Cc: Hirokazu Takata Cc: Chris Metcalf Cc: Jeff Dike Cc: Chris Zankel --- arch/arm/mach-bcmring/irq.c | 6 +++--- arch/m32r/kernel/irq.c | 2 +- arch/m32r/platforms/m32104ut/setup.c | 2 +- arch/m32r/platforms/m32700ut/setup.c | 8 ++++---- arch/m32r/platforms/mappi/setup.c | 2 +- arch/m32r/platforms/mappi2/setup.c | 2 +- arch/m32r/platforms/mappi3/setup.c | 2 +- arch/m32r/platforms/oaks32r/setup.c | 2 +- arch/m32r/platforms/opsput/setup.c | 6 +++--- arch/m32r/platforms/usrv/setup.c | 4 ++-- arch/tile/kernel/irq.c | 4 ++-- arch/um/kernel/irq.c | 6 +++--- arch/xtensa/kernel/irq.c | 2 +- include/linux/irq.h | 6 ------ kernel/irq/chip.c | 2 -- 15 files changed, 24 insertions(+), 32 deletions(-) (limited to 'kernel') diff --git a/arch/arm/mach-bcmring/irq.c b/arch/arm/mach-bcmring/irq.c index dc1c4939b0c..e3152631eb3 100644 --- a/arch/arm/mach-bcmring/irq.c +++ b/arch/arm/mach-bcmring/irq.c @@ -67,21 +67,21 @@ static void bcmring_unmask_irq2(unsigned int irq) } static struct irq_chip bcmring_irq0_chip = { - .typename = "ARM-INTC0", + .name = "ARM-INTC0", .ack = bcmring_mask_irq0, .mask = bcmring_mask_irq0, /* mask a specific interrupt, blocking its delivery. 
*/ .unmask = bcmring_unmask_irq0, /* unmaks an interrupt */ }; static struct irq_chip bcmring_irq1_chip = { - .typename = "ARM-INTC1", + .name = "ARM-INTC1", .ack = bcmring_mask_irq1, .mask = bcmring_mask_irq1, .unmask = bcmring_unmask_irq1, }; static struct irq_chip bcmring_irq2_chip = { - .typename = "ARM-SINTC", + .name = "ARM-SINTC", .ack = bcmring_mask_irq2, .mask = bcmring_mask_irq2, .unmask = bcmring_unmask_irq2, diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c index 3c71f776872..7db26f1f082 100644 --- a/arch/m32r/kernel/irq.c +++ b/arch/m32r/kernel/irq.c @@ -51,7 +51,7 @@ int show_interrupts(struct seq_file *p, void *v) for_each_online_cpu(j) seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); #endif - seq_printf(p, " %14s", irq_desc[i].chip->typename); + seq_printf(p, " %14s", irq_desc[i].chip->name); seq_printf(p, " %s", action->name); for (action=action->next; action; action = action->next) diff --git a/arch/m32r/platforms/m32104ut/setup.c b/arch/m32r/platforms/m32104ut/setup.c index 922fdfdadea..402a59d7219 100644 --- a/arch/m32r/platforms/m32104ut/setup.c +++ b/arch/m32r/platforms/m32104ut/setup.c @@ -65,7 +65,7 @@ static void shutdown_m32104ut_irq(unsigned int irq) static struct irq_chip m32104ut_irq_type = { - .typename = "M32104UT-IRQ", + .name = "M32104UT-IRQ", .startup = startup_m32104ut_irq, .shutdown = shutdown_m32104ut_irq, .enable = enable_m32104ut_irq, diff --git a/arch/m32r/platforms/m32700ut/setup.c b/arch/m32r/platforms/m32700ut/setup.c index 9c1bc7487c1..80b1a026795 100644 --- a/arch/m32r/platforms/m32700ut/setup.c +++ b/arch/m32r/platforms/m32700ut/setup.c @@ -71,7 +71,7 @@ static void shutdown_m32700ut_irq(unsigned int irq) static struct irq_chip m32700ut_irq_type = { - .typename = "M32700UT-IRQ", + .name = "M32700UT-IRQ", .startup = startup_m32700ut_irq, .shutdown = shutdown_m32700ut_irq, .enable = enable_m32700ut_irq, @@ -148,7 +148,7 @@ static void shutdown_m32700ut_pld_irq(unsigned int irq) static struct irq_chip m32700ut_pld_irq_type = { - .typename = "M32700UT-PLD-IRQ", + .name = "M32700UT-PLD-IRQ", .startup = startup_m32700ut_pld_irq, .shutdown = shutdown_m32700ut_pld_irq, .enable = enable_m32700ut_pld_irq, @@ -217,7 +217,7 @@ static void shutdown_m32700ut_lanpld_irq(unsigned int irq) static struct irq_chip m32700ut_lanpld_irq_type = { - .typename = "M32700UT-PLD-LAN-IRQ", + .name = "M32700UT-PLD-LAN-IRQ", .startup = startup_m32700ut_lanpld_irq, .shutdown = shutdown_m32700ut_lanpld_irq, .enable = enable_m32700ut_lanpld_irq, @@ -286,7 +286,7 @@ static void shutdown_m32700ut_lcdpld_irq(unsigned int irq) static struct irq_chip m32700ut_lcdpld_irq_type = { - .typename = "M32700UT-PLD-LCD-IRQ", + .name = "M32700UT-PLD-LCD-IRQ", .startup = startup_m32700ut_lcdpld_irq, .shutdown = shutdown_m32700ut_lcdpld_irq, .enable = enable_m32700ut_lcdpld_irq, diff --git a/arch/m32r/platforms/mappi/setup.c b/arch/m32r/platforms/mappi/setup.c index fb4b17799b6..ea00c84d6b1 100644 --- a/arch/m32r/platforms/mappi/setup.c +++ b/arch/m32r/platforms/mappi/setup.c @@ -65,7 +65,7 @@ static void shutdown_mappi_irq(unsigned int irq) static struct irq_chip mappi_irq_type = { - .typename = "MAPPI-IRQ", + .name = "MAPPI-IRQ", .startup = startup_mappi_irq, .shutdown = shutdown_mappi_irq, .enable = enable_mappi_irq, diff --git a/arch/m32r/platforms/mappi2/setup.c b/arch/m32r/platforms/mappi2/setup.c index 6a65eda0a05..c049376d027 100644 --- a/arch/m32r/platforms/mappi2/setup.c +++ b/arch/m32r/platforms/mappi2/setup.c @@ -72,7 +72,7 @@ static void shutdown_mappi2_irq(unsigned int 
irq) static struct irq_chip mappi2_irq_type = { - .typename = "MAPPI2-IRQ", + .name = "MAPPI2-IRQ", .startup = startup_mappi2_irq, .shutdown = shutdown_mappi2_irq, .enable = enable_mappi2_irq, diff --git a/arch/m32r/platforms/mappi3/setup.c b/arch/m32r/platforms/mappi3/setup.c index 9c337aeac94..882de25c6e8 100644 --- a/arch/m32r/platforms/mappi3/setup.c +++ b/arch/m32r/platforms/mappi3/setup.c @@ -72,7 +72,7 @@ static void shutdown_mappi3_irq(unsigned int irq) static struct irq_chip mappi3_irq_type = { - .typename = "MAPPI3-IRQ", + .name = "MAPPI3-IRQ", .startup = startup_mappi3_irq, .shutdown = shutdown_mappi3_irq, .enable = enable_mappi3_irq, diff --git a/arch/m32r/platforms/oaks32r/setup.c b/arch/m32r/platforms/oaks32r/setup.c index ed865741c38..d11d93bf74f 100644 --- a/arch/m32r/platforms/oaks32r/setup.c +++ b/arch/m32r/platforms/oaks32r/setup.c @@ -63,7 +63,7 @@ static void shutdown_oaks32r_irq(unsigned int irq) static struct irq_chip oaks32r_irq_type = { - .typename = "OAKS32R-IRQ", + .name = "OAKS32R-IRQ", .startup = startup_oaks32r_irq, .shutdown = shutdown_oaks32r_irq, .enable = enable_oaks32r_irq, diff --git a/arch/m32r/platforms/opsput/setup.c b/arch/m32r/platforms/opsput/setup.c index 80d68065701..5f3402a2fba 100644 --- a/arch/m32r/platforms/opsput/setup.c +++ b/arch/m32r/platforms/opsput/setup.c @@ -72,7 +72,7 @@ static void shutdown_opsput_irq(unsigned int irq) static struct irq_chip opsput_irq_type = { - .typename = "OPSPUT-IRQ", + .name = "OPSPUT-IRQ", .startup = startup_opsput_irq, .shutdown = shutdown_opsput_irq, .enable = enable_opsput_irq, @@ -149,7 +149,7 @@ static void shutdown_opsput_pld_irq(unsigned int irq) static struct irq_chip opsput_pld_irq_type = { - .typename = "OPSPUT-PLD-IRQ", + .name = "OPSPUT-PLD-IRQ", .startup = startup_opsput_pld_irq, .shutdown = shutdown_opsput_pld_irq, .enable = enable_opsput_pld_irq, @@ -218,7 +218,7 @@ static void shutdown_opsput_lanpld_irq(unsigned int irq) static struct irq_chip opsput_lanpld_irq_type = { - .typename = "OPSPUT-PLD-LAN-IRQ", + .name = "OPSPUT-PLD-LAN-IRQ", .startup = startup_opsput_lanpld_irq, .shutdown = shutdown_opsput_lanpld_irq, .enable = enable_opsput_lanpld_irq, diff --git a/arch/m32r/platforms/usrv/setup.c b/arch/m32r/platforms/usrv/setup.c index 757302660af..1beac7a51ed 100644 --- a/arch/m32r/platforms/usrv/setup.c +++ b/arch/m32r/platforms/usrv/setup.c @@ -63,7 +63,7 @@ static void shutdown_mappi_irq(unsigned int irq) static struct irq_chip mappi_irq_type = { - .typename = "M32700-IRQ", + .name = "M32700-IRQ", .startup = startup_mappi_irq, .shutdown = shutdown_mappi_irq, .enable = enable_mappi_irq, @@ -136,7 +136,7 @@ static void shutdown_m32700ut_pld_irq(unsigned int irq) static struct irq_chip m32700ut_pld_irq_type = { - .typename = "USRV-PLD-IRQ", + .name = "USRV-PLD-IRQ", .startup = startup_m32700ut_pld_irq, .shutdown = shutdown_m32700ut_pld_irq, .enable = enable_m32700ut_pld_irq, diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c index 596c6008693..9a27d563fc3 100644 --- a/arch/tile/kernel/irq.c +++ b/arch/tile/kernel/irq.c @@ -208,7 +208,7 @@ static void tile_irq_chip_eoi(unsigned int irq) } static struct irq_chip tile_irq_chip = { - .typename = "tile_irq_chip", + .name = "tile_irq_chip", .ack = tile_irq_chip_ack, .eoi = tile_irq_chip_eoi, .mask = tile_irq_chip_mask, @@ -288,7 +288,7 @@ int show_interrupts(struct seq_file *p, void *v) for_each_online_cpu(j) seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); #endif - seq_printf(p, " %14s", irq_desc[i].chip->typename); + seq_printf(p, " %14s", 
irq_desc[i].chip->name); seq_printf(p, " %s", action->name); for (action = action->next; action; action = action->next) diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c index a3f0b04d710..a746e3037a5 100644 --- a/arch/um/kernel/irq.c +++ b/arch/um/kernel/irq.c @@ -46,7 +46,7 @@ int show_interrupts(struct seq_file *p, void *v) for_each_online_cpu(j) seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); #endif - seq_printf(p, " %14s", irq_desc[i].chip->typename); + seq_printf(p, " %14s", irq_desc[i].chip->name); seq_printf(p, " %s", action->name); for (action=action->next; action; action = action->next) @@ -369,7 +369,7 @@ static void dummy(unsigned int irq) /* This is used for everything else than the timer. */ static struct irq_chip normal_irq_type = { - .typename = "SIGIO", + .name = "SIGIO", .release = free_irq_by_irq_and_dev, .disable = dummy, .enable = dummy, @@ -378,7 +378,7 @@ static struct irq_chip normal_irq_type = { }; static struct irq_chip SIGVTALRM_irq_type = { - .typename = "SIGVTALRM", + .name = "SIGVTALRM", .release = free_irq_by_irq_and_dev, .shutdown = dummy, /* never called */ .disable = dummy, diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c index c64a5d387de..87508886cbb 100644 --- a/arch/xtensa/kernel/irq.c +++ b/arch/xtensa/kernel/irq.c @@ -92,7 +92,7 @@ int show_interrupts(struct seq_file *p, void *v) for_each_online_cpu(j) seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); #endif - seq_printf(p, " %14s", irq_desc[i].chip->typename); + seq_printf(p, " %14s", irq_desc[i].chip->name); seq_printf(p, " %s", action->name); for (action=action->next; action; action = action->next) diff --git a/include/linux/irq.h b/include/linux/irq.h index c03243ad84b..06273a2a17e 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -106,7 +106,6 @@ struct msi_desc; * @bus_sync_unlock: function to sync and unlock slow bus (i2c) chips * * @release: release function solely used by UML - * @typename: obsoleted by name, kept as migration helper */ struct irq_chip { const char *name; @@ -135,11 +134,6 @@ struct irq_chip { #ifdef CONFIG_IRQ_RELEASE_METHOD void (*release)(unsigned int irq, void *dev_id); #endif - /* - * For compatibility, ->typename is copied into ->name. - * Will disappear. - */ - const char *typename; }; struct timer_rand_state; diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index b7091d5ca2f..4ea775cc60f 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -344,8 +344,6 @@ void irq_chip_set_defaults(struct irq_chip *chip) if (!chip->shutdown) chip->shutdown = chip->disable != default_disable ? chip->disable : default_shutdown; - if (!chip->name) - chip->name = chip->typename; if (!chip->end) chip->end = dummy_irq_chip.end; } -- cgit v1.2.3-70-g09d2 From d9817ebeeef16e01487549312c68540ca8f1561b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 27 Sep 2010 12:45:59 +0000 Subject: genirq: Provide Kconfig The generic irq Kconfig options are copied around all archs. Provide a generic Kconfig file which can be included. Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra LKML-Reference: <20100927121843.217333624@linutronix.de> Reviewed-by: H. 
Peter Anvin Reviewed-by: Ingo Molnar --- init/Kconfig | 2 ++ kernel/irq/Kconfig | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+) create mode 100644 kernel/irq/Kconfig (limited to 'kernel') diff --git a/init/Kconfig b/init/Kconfig index 2de5b1cbadd..1df1a87cc59 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -332,6 +332,8 @@ config AUDIT_TREE depends on AUDITSYSCALL select FSNOTIFY +source "kernel/irq/Kconfig" + menu "RCU Subsystem" choice diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig new file mode 100644 index 00000000000..e0fc6cd78aa --- /dev/null +++ b/kernel/irq/Kconfig @@ -0,0 +1,54 @@ +config HAVE_GENERIC_HARDIRQS + def_bool n + +if HAVE_GENERIC_HARDIRQS +menu "IRQ subsystem" +# +# Interrupt subsystem related configuration options +# +config GENERIC_HARDIRQS + def_bool y + +config GENERIC_HARDIRQS_NO__DO_IRQ + def_bool y + +# Options selectable by the architecture code +config HAVE_SPARSE_IRQ + def_bool n + +config GENERIC_IRQ_PROBE + def_bool n + +config GENERIC_PENDING_IRQ + def_bool n + +if SPARSE_IRQ && NUMA +config NUMA_IRQ_DESC + def_bool n +endif + +config AUTO_IRQ_AFFINITY + def_bool n + +config IRQ_PER_CPU + def_bool n + +config HARDIRQS_SW_RESEND + def_bool n + +config SPARSE_IRQ + bool "Support sparse irq numbering" + depends on HAVE_SPARSE_IRQ + ---help--- + + Sparse irq numbering is useful for distro kernels that want + to define a high CONFIG_NR_CPUS value but still want to have + low kernel memory footprint on smaller machines. + + ( Sparse irqs can also be beneficial on NUMA boxes, as they spread + out the interrupt descriptors in a more NUMA-friendly way. ) + + If you don't know what to do here, say N. + +endmenu +endif -- cgit v1.2.3-70-g09d2 From ff7dcd44dd446db2c3e13bdedf2d52b8e0127f16 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 27 Sep 2010 12:44:25 +0000 Subject: genirq: Create irq_data Low level chip functions need access to irq_desc->handler_data, irq_desc->chip_data and irq_desc->msi_desc. We hand down the irq number to the low level functions, so they need to lookup irq_desc. With sparse irq this means a radix tree lookup. We could hand down irq_desc itself, but low level chip functions have no need to fiddle with it directly and we want to restrict access to irq_desc further. Preparatory patch for new chip functions. Note, that the ugly anon union/struct is there to avoid a full tree wide clean up for now. This is not going to last 3 years like __do_IRQ() Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra LKML-Reference: <20100927121841.645542300@linutronix.de> Reviewed-by: H. 
Peter Anvin Reviewed-by: Ingo Molnar --- include/linux/irq.h | 90 +++++++++++++++++++++++++++++++++++++---------------- kernel/irq/handle.c | 39 +++++++++++------------ 2 files changed, 82 insertions(+), 47 deletions(-) (limited to 'kernel') diff --git a/include/linux/irq.h b/include/linux/irq.h index 06273a2a17e..363c76ff82c 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -83,6 +83,37 @@ typedef void (*irq_flow_handler_t)(unsigned int irq, struct proc_dir_entry; struct msi_desc; +/** + * struct irq_data - per irq and irq chip data passed down to chip functions + * @irq: interrupt number + * @node: node index useful for balancing + * @chip: low level interrupt hardware access + * @handler_data: per-IRQ data for the irq_chip methods + * @chip_data: platform-specific per-chip private data for the chip + * methods, to allow shared chip implementations + * @msi_desc: MSI descriptor + * @affinity: IRQ affinity on SMP + * @irq_2_iommu: iommu with this irq + * + * The fields here need to overlay the ones in irq_desc until we + * cleaned up the direct references and switched everything over to + * irq_data. + */ +struct irq_data { + unsigned int irq; + unsigned int node; + struct irq_chip *chip; + void *handler_data; + void *chip_data; + struct msi_desc *msi_desc; +#ifdef CONFIG_SMP + cpumask_var_t affinity; +#endif +#ifdef CONFIG_INTR_REMAP + struct irq_2_iommu *irq_2_iommu; +#endif +}; + /** * struct irq_chip - hardware interrupt chip descriptor * @@ -140,16 +171,10 @@ struct timer_rand_state; struct irq_2_iommu; /** * struct irq_desc - interrupt descriptor - * @irq: interrupt number for this descriptor + * @irq_data: per irq and chip data passed down to chip functions * @timer_rand_state: pointer to timer rand state struct * @kstat_irqs: irq stats per cpu - * @irq_2_iommu: iommu with this irq * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()] - * @chip: low level interrupt hardware access - * @msi_desc: MSI descriptor - * @handler_data: per-IRQ data for the irq_chip methods - * @chip_data: platform-specific per-chip private data for the chip - * methods, to allow shared chip implementations * @action: the irq action chain * @status: status information * @depth: disable-depth, for nested irq_disable() calls @@ -158,8 +183,6 @@ struct irq_2_iommu; * @last_unhandled: aging timer for unhandled count * @irqs_unhandled: stats field for spurious unhandled interrupts * @lock: locking for SMP - * @affinity: IRQ affinity on SMP - * @node: node index useful for balancing * @pending_mask: pending rebalanced interrupts * @threads_active: number of irqaction threads currently running * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers @@ -167,17 +190,32 @@ struct irq_2_iommu; * @name: flow handler name for /proc/interrupts output */ struct irq_desc { - unsigned int irq; - struct timer_rand_state *timer_rand_state; - unsigned int *kstat_irqs; + + /* + * This union will go away, once we fixed the direct access to + * irq_desc all over the place. The direct fields are a 1:1 + * overlay of irq_data. 
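To make the intended calling convention concrete: with struct irq_data handed to the chip method, a driver reaches its per-chip state directly instead of doing an irq-number-to-descriptor lookup (a radix tree walk under sparse irq). A minimal sketch of a new-style callback follows; my_chip, its register layout and my_chip_mask are hypothetical illustrations, not part of this patch:

#include <linux/irq.h>
#include <linux/io.h>

struct my_chip {
	void __iomem *base;	/* hypothetical mask register bank */
};

static void my_chip_mask(struct irq_data *data)
{
	/* per-chip state stored at setup time via set_irq_chip_data() */
	struct my_chip *mc = data->chip_data;

	/* no irq_to_desc() lookup needed; everything is in irq_data */
	writel(1 << (data->irq & 31), mc->base + 4 * (data->irq >> 5));
}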
+ */ + union { + struct irq_data irq_data; + struct { + unsigned int irq; + unsigned int node; + struct irq_chip *chip; + void *handler_data; + void *chip_data; + struct msi_desc *msi_desc; +#ifdef CONFIG_SMP + cpumask_var_t affinity; +#endif #ifdef CONFIG_INTR_REMAP - struct irq_2_iommu *irq_2_iommu; + struct irq_2_iommu *irq_2_iommu; #endif + }; + }; + struct timer_rand_state *timer_rand_state; + unsigned int *kstat_irqs; irq_flow_handler_t handle_irq; - struct irq_chip *chip; - struct msi_desc *msi_desc; - void *handler_data; - void *chip_data; struct irqaction *action; /* IRQ action list */ unsigned int status; /* IRQ status */ @@ -188,9 +226,7 @@ struct irq_desc { unsigned int irqs_unhandled; raw_spinlock_t lock; #ifdef CONFIG_SMP - cpumask_var_t affinity; const struct cpumask *affinity_hint; - unsigned int node; #ifdef CONFIG_GENERIC_PENDING_IRQ cpumask_var_t pending_mask; #endif @@ -406,15 +442,15 @@ extern int set_irq_chip_data(unsigned int irq, void *data); extern int set_irq_type(unsigned int irq, unsigned int type); extern int set_irq_msi(unsigned int irq, struct msi_desc *entry); -#define get_irq_chip(irq) (irq_to_desc(irq)->chip) -#define get_irq_chip_data(irq) (irq_to_desc(irq)->chip_data) -#define get_irq_data(irq) (irq_to_desc(irq)->handler_data) -#define get_irq_msi(irq) (irq_to_desc(irq)->msi_desc) +#define get_irq_chip(irq) (irq_to_desc(irq)->irq_data.chip) +#define get_irq_chip_data(irq) (irq_to_desc(irq)->irq_data.chip_data) +#define get_irq_data(irq) (irq_to_desc(irq)->irq_data.handler_data) +#define get_irq_msi(irq) (irq_to_desc(irq)->irq_data.msi_desc) -#define get_irq_desc_chip(desc) ((desc)->chip) -#define get_irq_desc_chip_data(desc) ((desc)->chip_data) -#define get_irq_desc_data(desc) ((desc)->handler_data) -#define get_irq_desc_msi(desc) ((desc)->msi_desc) +#define get_irq_desc_chip(desc) ((desc)->irq_data.chip) +#define get_irq_desc_chip_data(desc) ((desc)->irq_data.chip_data) +#define get_irq_desc_data(desc) ((desc)->irq_data.handler_data) +#define get_irq_desc_msi(desc) ((desc)->irq_data.msi_desc) #endif /* CONFIG_GENERIC_HARDIRQS */ diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 27e5c691122..099d4fc368c 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -75,12 +75,10 @@ EXPORT_SYMBOL_GPL(nr_irqs); #ifdef CONFIG_SPARSE_IRQ static struct irq_desc irq_desc_init = { - .irq = -1, - .status = IRQ_DISABLED, - .chip = &no_irq_chip, - .handle_irq = handle_bad_irq, - .depth = 1, - .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock), + .status = IRQ_DISABLED, + .handle_irq = handle_bad_irq, + .depth = 1, + .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock), }; void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr) @@ -105,7 +103,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node) memcpy(desc, &irq_desc_init, sizeof(struct irq_desc)); raw_spin_lock_init(&desc->lock); - desc->irq = irq; + desc->irq_data.irq = irq; #ifdef CONFIG_SMP desc->node = node; #endif @@ -151,12 +149,10 @@ void replace_irq_desc(unsigned int irq, struct irq_desc *desc) static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = { [0 ... 
NR_IRQS_LEGACY-1] = { - .irq = -1, - .status = IRQ_DISABLED, - .chip = &no_irq_chip, - .handle_irq = handle_bad_irq, - .depth = 1, - .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock), + .status = IRQ_DISABLED, + .handle_irq = handle_bad_irq, + .depth = 1, + .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock), } }; @@ -183,8 +179,11 @@ int __init early_irq_init(void) kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids * sizeof(int), GFP_NOWAIT, node); + irq_desc_init.irq_data.chip = &no_irq_chip; + for (i = 0; i < legacy_count; i++) { - desc[i].irq = i; + desc[i].irq_data.irq = i; + desc[i].irq_data.chip = &no_irq_chip; #ifdef CONFIG_SMP desc[i].node = node; #endif @@ -241,11 +240,10 @@ out_unlock: struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { [0 ... NR_IRQS-1] = { - .status = IRQ_DISABLED, - .chip = &no_irq_chip, - .handle_irq = handle_bad_irq, - .depth = 1, - .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), + .status = IRQ_DISABLED, + .handle_irq = handle_bad_irq, + .depth = 1, + .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), } }; @@ -264,7 +262,8 @@ int __init early_irq_init(void) count = ARRAY_SIZE(irq_desc); for (i = 0; i < count; i++) { - desc[i].irq = i; + desc[i].irq_data.irq = i; + desc[i].irq_data.chip = &no_irq_chip; alloc_desc_masks(&desc[i], 0, true); init_desc_masks(&desc[i]); desc[i].kstat_irqs = kstat_irqs_all[i]; -- cgit v1.2.3-70-g09d2 From 6b8ff3120c758340505dddf08ad685ebb841d5d5 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 1 Oct 2010 12:58:38 +0200 Subject: genirq: Convert core code to irq_data Convert all references in the core code from irq, chip, handler_data, chip_data, msi_desc and affinity to irq_data.* Signed-off-by: Thomas Gleixner Reviewed-by: Ingo Molnar --- include/linux/irq.h | 10 +++--- kernel/irq/autoprobe.c | 14 ++++----- kernel/irq/chip.c | 78 +++++++++++++++++++++++------------------------ kernel/irq/handle.c | 16 +++++----- kernel/irq/internals.h | 12 ++++---- kernel/irq/manage.c | 54 ++++++++++++++++---------------- kernel/irq/migration.c | 10 +++--- kernel/irq/numa_migrate.c | 8 ++--- kernel/irq/proc.c | 8 ++--- kernel/irq/resend.c | 5 +-- kernel/irq/spurious.c | 6 ++-- 11 files changed, 111 insertions(+), 110 deletions(-) (limited to 'kernel') diff --git a/include/linux/irq.h b/include/linux/irq.h index 363c76ff82c..002351d83c3 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -475,12 +475,12 @@ static inline bool alloc_desc_masks(struct irq_desc *desc, int node, gfp = GFP_NOWAIT; #ifdef CONFIG_CPUMASK_OFFSTACK - if (!alloc_cpumask_var_node(&desc->affinity, gfp, node)) + if (!alloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node)) return false; #ifdef CONFIG_GENERIC_PENDING_IRQ if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { - free_cpumask_var(desc->affinity); + free_cpumask_var(desc->irq_data.affinity); return false; } #endif @@ -490,7 +490,7 @@ static inline bool alloc_desc_masks(struct irq_desc *desc, int node, static inline void init_desc_masks(struct irq_desc *desc) { - cpumask_setall(desc->affinity); + cpumask_setall(desc->irq_data.affinity); #ifdef CONFIG_GENERIC_PENDING_IRQ cpumask_clear(desc->pending_mask); #endif @@ -510,7 +510,7 @@ static inline void init_copy_desc_masks(struct irq_desc *old_desc, struct irq_desc *new_desc) { #ifdef CONFIG_CPUMASK_OFFSTACK - cpumask_copy(new_desc->affinity, old_desc->affinity); + cpumask_copy(new_desc->irq_data.affinity, old_desc->irq_data.affinity); #ifdef CONFIG_GENERIC_PENDING_IRQ cpumask_copy(new_desc->pending_mask,
old_desc->pending_mask); @@ -521,7 +521,7 @@ static inline void init_copy_desc_masks(struct irq_desc *old_desc, static inline void free_desc_masks(struct irq_desc *old_desc, struct irq_desc *new_desc) { - free_cpumask_var(old_desc->affinity); + free_cpumask_var(old_desc->irq_data.affinity); #ifdef CONFIG_GENERIC_PENDING_IRQ free_cpumask_var(old_desc->pending_mask); diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c index 2295a31ef11..f9bf9b22803 100644 --- a/kernel/irq/autoprobe.c +++ b/kernel/irq/autoprobe.c @@ -57,9 +57,9 @@ unsigned long probe_irq_on(void) * Some chips need to know about probing in * progress: */ - if (desc->chip->set_type) - desc->chip->set_type(i, IRQ_TYPE_PROBE); - desc->chip->startup(i); + if (desc->irq_data.chip->set_type) + desc->irq_data.chip->set_type(i, IRQ_TYPE_PROBE); + desc->irq_data.chip->startup(i); } raw_spin_unlock_irq(&desc->lock); } @@ -76,7 +76,7 @@ unsigned long probe_irq_on(void) raw_spin_lock_irq(&desc->lock); if (!desc->action && !(desc->status & IRQ_NOPROBE)) { desc->status |= IRQ_AUTODETECT | IRQ_WAITING; - if (desc->chip->startup(i)) + if (desc->irq_data.chip->startup(i)) desc->status |= IRQ_PENDING; } raw_spin_unlock_irq(&desc->lock); @@ -98,7 +98,7 @@ unsigned long probe_irq_on(void) /* It triggered already - consider it spurious. */ if (!(status & IRQ_WAITING)) { desc->status = status & ~IRQ_AUTODETECT; - desc->chip->shutdown(i); + desc->irq_data.chip->shutdown(i); } else if (i < 32) mask |= 1 << i; @@ -137,7 +137,7 @@ unsigned int probe_irq_mask(unsigned long val) mask |= 1 << i; desc->status = status & ~IRQ_AUTODETECT; - desc->chip->shutdown(i); + desc->irq_data.chip->shutdown(i); } raw_spin_unlock_irq(&desc->lock); } @@ -181,7 +181,7 @@ int probe_irq_off(unsigned long val) nr_of_irqs++; } desc->status = status & ~IRQ_AUTODETECT; - desc->chip->shutdown(i); + desc->irq_data.chip->shutdown(i); } raw_spin_unlock_irq(&desc->lock); } diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 4ea775cc60f..e0e93ff10af 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -32,18 +32,18 @@ static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data) /* Ensure we don't have left over values from a previous use of this irq */ raw_spin_lock_irqsave(&desc->lock, flags); desc->status = IRQ_DISABLED; - desc->chip = &no_irq_chip; + desc->irq_data.chip = &no_irq_chip; desc->handle_irq = handle_bad_irq; desc->depth = 1; - desc->msi_desc = NULL; - desc->handler_data = NULL; + desc->irq_data.msi_desc = NULL; + desc->irq_data.handler_data = NULL; if (!keep_chip_data) - desc->chip_data = NULL; + desc->irq_data.chip_data = NULL; desc->action = NULL; desc->irq_count = 0; desc->irqs_unhandled = 0; #ifdef CONFIG_SMP - cpumask_setall(desc->affinity); + cpumask_setall(desc->irq_data.affinity); #ifdef CONFIG_GENERIC_PENDING_IRQ cpumask_clear(desc->pending_mask); #endif @@ -64,7 +64,7 @@ void dynamic_irq_init(unsigned int irq) * dynamic_irq_init_keep_chip_data - initialize a dynamically allocated irq * @irq: irq number to initialize * - * does not set irq_to_desc(irq)->chip_data to NULL + * does not set irq_to_desc(irq)->irq_data.chip_data to NULL */ void dynamic_irq_init_keep_chip_data(unsigned int irq) { @@ -88,12 +88,12 @@ static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data) irq); return; } - desc->msi_desc = NULL; - desc->handler_data = NULL; + desc->irq_data.msi_desc = NULL; + desc->irq_data.handler_data = NULL; if (!keep_chip_data) - desc->chip_data = NULL; + desc->irq_data.chip_data = NULL; desc->handle_irq = 
handle_bad_irq; - desc->chip = &no_irq_chip; + desc->irq_data.chip = &no_irq_chip; desc->name = NULL; clear_kstat_irqs(desc); raw_spin_unlock_irqrestore(&desc->lock, flags); @@ -112,7 +112,7 @@ void dynamic_irq_cleanup(unsigned int irq) * dynamic_irq_cleanup_keep_chip_data - cleanup a dynamically allocated irq * @irq: irq number to initialize * - * does not set irq_to_desc(irq)->chip_data to NULL + * does not set irq_to_desc(irq)->irq_data.chip_data to NULL */ void dynamic_irq_cleanup_keep_chip_data(unsigned int irq) { @@ -140,7 +140,7 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip) raw_spin_lock_irqsave(&desc->lock, flags); irq_chip_set_defaults(chip); - desc->chip = chip; + desc->irq_data.chip = chip; raw_spin_unlock_irqrestore(&desc->lock, flags); return 0; @@ -193,7 +193,7 @@ int set_irq_data(unsigned int irq, void *data) } raw_spin_lock_irqsave(&desc->lock, flags); - desc->handler_data = data; + desc->irq_data.handler_data = data; raw_spin_unlock_irqrestore(&desc->lock, flags); return 0; } @@ -218,7 +218,7 @@ int set_irq_msi(unsigned int irq, struct msi_desc *entry) } raw_spin_lock_irqsave(&desc->lock, flags); - desc->msi_desc = entry; + desc->irq_data.msi_desc = entry; if (entry) entry->irq = irq; raw_spin_unlock_irqrestore(&desc->lock, flags); @@ -243,13 +243,13 @@ int set_irq_chip_data(unsigned int irq, void *data) return -EINVAL; } - if (!desc->chip) { + if (!desc->irq_data.chip) { printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq); return -EINVAL; } raw_spin_lock_irqsave(&desc->lock, flags); - desc->chip_data = data; + desc->irq_data.chip_data = data; raw_spin_unlock_irqrestore(&desc->lock, flags); return 0; @@ -291,7 +291,7 @@ static void default_enable(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); - desc->chip->unmask(irq); + desc->irq_data.chip->unmask(irq); desc->status &= ~IRQ_MASKED; } @@ -309,7 +309,7 @@ static unsigned int default_startup(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); - desc->chip->enable(irq); + desc->irq_data.chip->enable(irq); return 0; } @@ -320,7 +320,7 @@ static void default_shutdown(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); - desc->chip->mask(irq); + desc->irq_data.chip->mask(irq); desc->status |= IRQ_MASKED; } @@ -350,28 +350,28 @@ void irq_chip_set_defaults(struct irq_chip *chip) static inline void mask_ack_irq(struct irq_desc *desc, int irq) { - if (desc->chip->mask_ack) - desc->chip->mask_ack(irq); + if (desc->irq_data.chip->mask_ack) + desc->irq_data.chip->mask_ack(irq); else { - desc->chip->mask(irq); - if (desc->chip->ack) - desc->chip->ack(irq); + desc->irq_data.chip->mask(irq); + if (desc->irq_data.chip->ack) + desc->irq_data.chip->ack(irq); } desc->status |= IRQ_MASKED; } static inline void mask_irq(struct irq_desc *desc, int irq) { - if (desc->chip->mask) { - desc->chip->mask(irq); + if (desc->irq_data.chip->mask) { + desc->irq_data.chip->mask(irq); desc->status |= IRQ_MASKED; } } static inline void unmask_irq(struct irq_desc *desc, int irq) { - if (desc->chip->unmask) { - desc->chip->unmask(irq); + if (desc->irq_data.chip->unmask) { + desc->irq_data.chip->unmask(irq); desc->status &= ~IRQ_MASKED; } } @@ -552,7 +552,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) raw_spin_lock(&desc->lock); desc->status &= ~IRQ_INPROGRESS; out: - desc->chip->eoi(irq); + desc->irq_data.chip->eoi(irq); raw_spin_unlock(&desc->lock); } @@ -594,8 +594,8 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc) kstat_incr_irqs_this_cpu(irq, desc); /* Start handling the 
irq */ - if (desc->chip->ack) - desc->chip->ack(irq); + if (desc->irq_data.chip->ack) + desc->irq_data.chip->ack(irq); /* Mark the IRQ currently in progress.*/ desc->status |= IRQ_INPROGRESS; @@ -648,15 +648,15 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc) kstat_incr_irqs_this_cpu(irq, desc); - if (desc->chip->ack) - desc->chip->ack(irq); + if (desc->irq_data.chip->ack) + desc->irq_data.chip->ack(irq); action_ret = handle_IRQ_event(irq, desc->action); if (!noirqdebug) note_interrupt(irq, desc, action_ret); - if (desc->chip->eoi) - desc->chip->eoi(irq); + if (desc->irq_data.chip->eoi) + desc->irq_data.chip->eoi(irq); } void @@ -674,7 +674,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, if (!handle) handle = handle_bad_irq; - else if (desc->chip == &no_irq_chip) { + else if (desc->irq_data.chip == &no_irq_chip) { printk(KERN_WARNING "Trying to install %sinterrupt handler " "for IRQ%d\n", is_chained ? "chained " : "", irq); /* @@ -684,7 +684,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, * prevent us to setup the interrupt at all. Switch it to * dummy_irq_chip for easy transition. */ - desc->chip = &dummy_irq_chip; + desc->irq_data.chip = &dummy_irq_chip; } chip_bus_lock(irq, desc); @@ -692,7 +692,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, /* Uninstall? */ if (handle == handle_bad_irq) { - if (desc->chip != &no_irq_chip) + if (desc->irq_data.chip != &no_irq_chip) mask_ack_irq(desc, irq); desc->status |= IRQ_DISABLED; desc->depth = 1; @@ -704,7 +704,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, desc->status &= ~IRQ_DISABLED; desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE; desc->depth = 0; - desc->chip->startup(irq); + desc->irq_data.chip->startup(irq); } raw_spin_unlock_irqrestore(&desc->lock, flags); chip_bus_sync_unlock(irq, desc); diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 099d4fc368c..fc27d76e83e 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -105,7 +105,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node) raw_spin_lock_init(&desc->lock); desc->irq_data.irq = irq; #ifdef CONFIG_SMP - desc->node = node; + desc->irq_data.node = node; #endif lockdep_set_class(&desc->lock, &irq_desc_lock_class); init_kstat_irqs(desc, node, nr_cpu_ids); @@ -185,7 +185,7 @@ int __init early_irq_init(void) desc[i].irq_data.irq = i; desc[i].irq_data.chip = &no_irq_chip; #ifdef CONFIG_SMP - desc[i].node = node; + desc[i].irq_data.node = node; #endif desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids; lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); @@ -456,20 +456,20 @@ unsigned int __do_IRQ(unsigned int irq) /* * No locking required for CPU-local interrupts: */ - if (desc->chip->ack) - desc->chip->ack(irq); + if (desc->irq_data.chip->ack) + desc->irq_data.chip->ack(irq); if (likely(!(desc->status & IRQ_DISABLED))) { action_ret = handle_IRQ_event(irq, desc->action); if (!noirqdebug) note_interrupt(irq, desc, action_ret); } - desc->chip->end(irq); + desc->irq_data.chip->end(irq); return 1; } raw_spin_lock(&desc->lock); - if (desc->chip->ack) - desc->chip->ack(irq); + if (desc->irq_data.chip->ack) + desc->irq_data.chip->ack(irq); /* * REPLAY is when Linux resends an IRQ that was dropped earlier * WAITING is used by probe to mark irqs that are being tested @@ -529,7 +529,7 @@ out: * The ->end() handler has to deal with interrupts which got * disabled while the handler was running. 
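The conversion itself is mechanical: every direct dereference of the old irq_desc fields gains one hop through the embedded irq_data. The shape of the change, reduced to a sketch (illustrative only, not a hunk from this patch):

#include <linux/irq.h>

static void example_end(struct irq_desc *desc, unsigned int irq)
{
	/* before: desc->chip->end(irq); */
	desc->irq_data.chip->end(irq);	/* after: same hook, via irq_data */
}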
*/ - desc->chip->end(irq); + desc->irq_data.chip->end(irq); raw_spin_unlock(&desc->lock); return 1; diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index c63f3bc88f0..a805a00cfd2 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -43,14 +43,14 @@ extern void irq_set_thread_affinity(struct irq_desc *desc); /* Inline functions for support of irq chips on slow busses */ static inline void chip_bus_lock(unsigned int irq, struct irq_desc *desc) { - if (unlikely(desc->chip->bus_lock)) - desc->chip->bus_lock(irq); + if (unlikely(desc->irq_data.chip->bus_lock)) + desc->irq_data.chip->bus_lock(irq); } static inline void chip_bus_sync_unlock(unsigned int irq, struct irq_desc *desc) { - if (unlikely(desc->chip->bus_sync_unlock)) - desc->chip->bus_sync_unlock(irq); + if (unlikely(desc->irq_data.chip->bus_sync_unlock)) + desc->irq_data.chip->bus_sync_unlock(irq); } /* @@ -67,8 +67,8 @@ static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled); printk("->handle_irq(): %p, ", desc->handle_irq); print_symbol("%s\n", (unsigned long)desc->handle_irq); - printk("->chip(): %p, ", desc->chip); - print_symbol("%s\n", (unsigned long)desc->chip); + printk("->irq_data.chip(): %p, ", desc->irq_data.chip); + print_symbol("%s\n", (unsigned long)desc->irq_data.chip); printk("->action(): %p\n", desc->action); if (desc->action) { printk("->action->handler(): %p, ", desc->action->handler); diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index c3003e9d91a..4dfb19521d9 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -73,8 +73,8 @@ int irq_can_set_affinity(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); - if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip || - !desc->chip->set_affinity) + if (CHECK_IRQ_PER_CPU(desc->status) || !desc->irq_data.chip || + !desc->irq_data.chip->set_affinity) return 0; return 1; @@ -111,15 +111,15 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) struct irq_desc *desc = irq_to_desc(irq); unsigned long flags; - if (!desc->chip->set_affinity) + if (!desc->irq_data.chip->set_affinity) return -EINVAL; raw_spin_lock_irqsave(&desc->lock, flags); #ifdef CONFIG_GENERIC_PENDING_IRQ if (desc->status & IRQ_MOVE_PCNTXT) { - if (!desc->chip->set_affinity(irq, cpumask)) { - cpumask_copy(desc->affinity, cpumask); + if (!desc->irq_data.chip->set_affinity(irq, cpumask)) { + cpumask_copy(desc->irq_data.affinity, cpumask); irq_set_thread_affinity(desc); } } @@ -128,8 +128,8 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) cpumask_copy(desc->pending_mask, cpumask); } #else - if (!desc->chip->set_affinity(irq, cpumask)) { - cpumask_copy(desc->affinity, cpumask); + if (!desc->irq_data.chip->set_affinity(irq, cpumask)) { + cpumask_copy(desc->irq_data.affinity, cpumask); irq_set_thread_affinity(desc); } #endif @@ -168,16 +168,16 @@ static int setup_affinity(unsigned int irq, struct irq_desc *desc) * one of the targets is online. 
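The affinity mask moves the same way: anything that read desc->affinity now reads desc->irq_data.affinity. A sketch mirroring the setup_affinity() test above (hypothetical helper, not from the patch):

#include <linux/irq.h>
#include <linux/cpumask.h>

static bool irq_has_online_target(struct irq_desc *desc)
{
	/* was: cpumask_any_and(desc->affinity, cpu_online_mask) */
	return cpumask_any_and(desc->irq_data.affinity, cpu_online_mask)
		< nr_cpu_ids;
}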
*/ if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { - if (cpumask_any_and(desc->affinity, cpu_online_mask) + if (cpumask_any_and(desc->irq_data.affinity, cpu_online_mask) < nr_cpu_ids) goto set_affinity; else desc->status &= ~IRQ_AFFINITY_SET; } - cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity); + cpumask_and(desc->irq_data.affinity, cpu_online_mask, irq_default_affinity); set_affinity: - desc->chip->set_affinity(irq, desc->affinity); + desc->irq_data.chip->set_affinity(irq, desc->irq_data.affinity); return 0; } @@ -223,7 +223,7 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) if (!desc->depth++) { desc->status |= IRQ_DISABLED; - desc->chip->disable(irq); + desc->irq_data.chip->disable(irq); } } @@ -313,7 +313,7 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) * IRQ line is re-enabled. * * This function may be called from IRQ context only when - * desc->chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! + * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! */ void enable_irq(unsigned int irq) { @@ -336,8 +336,8 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on) struct irq_desc *desc = irq_to_desc(irq); int ret = -ENXIO; - if (desc->chip->set_wake) - ret = desc->chip->set_wake(irq, on); + if (desc->irq_data.chip->set_wake) + ret = desc->irq_data.chip->set_wake(irq, on); return ret; } @@ -432,7 +432,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, unsigned long flags) { int ret; - struct irq_chip *chip = desc->chip; + struct irq_chip *chip = desc->irq_data.chip; if (!chip || !chip->set_type) { /* @@ -457,8 +457,8 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK); desc->status |= flags; - if (chip != desc->chip) - irq_chip_set_defaults(desc->chip); + if (chip != desc->irq_data.chip) + irq_chip_set_defaults(desc->irq_data.chip); } return ret; @@ -528,7 +528,7 @@ again: if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) { desc->status &= ~IRQ_MASKED; - desc->chip->unmask(irq); + desc->irq_data.chip->unmask(irq); } raw_spin_unlock_irq(&desc->lock); chip_bus_sync_unlock(irq, desc); @@ -556,7 +556,7 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) } raw_spin_lock_irq(&desc->lock); - cpumask_copy(mask, desc->affinity); + cpumask_copy(mask, desc->irq_data.affinity); raw_spin_unlock_irq(&desc->lock); set_cpus_allowed_ptr(current, mask); @@ -657,7 +657,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) if (!desc) return -EINVAL; - if (desc->chip == &no_irq_chip) + if (desc->irq_data.chip == &no_irq_chip) return -ENOSYS; /* * Some drivers like serial.c use request_irq() heavily, @@ -752,7 +752,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) } if (!shared) { - irq_chip_set_defaults(desc->chip); + irq_chip_set_defaults(desc->irq_data.chip); init_waitqueue_head(&desc->wait_for_threads); @@ -779,7 +779,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) if (!(desc->status & IRQ_NOAUTOEN)) { desc->depth = 0; desc->status &= ~IRQ_DISABLED; - desc->chip->startup(irq); + desc->irq_data.chip->startup(irq); } else /* Undo nested disables: */ desc->depth = 1; @@ -912,17 +912,17 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) /* Currently used only by UML, might disappear one day: */ #ifdef CONFIG_IRQ_RELEASE_METHOD - if (desc->chip->release) - 
desc->chip->release(irq, dev_id); + if (desc->irq_data.chip->release) + desc->irq_data.chip->release(irq, dev_id); #endif /* If this was the last handler, shut down the IRQ line: */ if (!desc->action) { desc->status |= IRQ_DISABLED; - if (desc->chip->shutdown) - desc->chip->shutdown(irq); + if (desc->irq_data.chip->shutdown) + desc->irq_data.chip->shutdown(irq); else - desc->chip->disable(irq); + desc->irq_data.chip->disable(irq); } #ifdef CONFIG_SMP diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c index 24196228083..f923c37e651 100644 --- a/kernel/irq/migration.c +++ b/kernel/irq/migration.c @@ -24,7 +24,7 @@ void move_masked_irq(int irq) if (unlikely(cpumask_empty(desc->pending_mask))) return; - if (!desc->chip->set_affinity) + if (!desc->irq_data.chip->set_affinity) return; assert_raw_spin_locked(&desc->lock); @@ -43,8 +43,8 @@ void move_masked_irq(int irq) */ if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)) - if (!desc->chip->set_affinity(irq, desc->pending_mask)) { - cpumask_copy(desc->affinity, desc->pending_mask); + if (!desc->irq_data.chip->set_affinity(irq, desc->pending_mask)) { + cpumask_copy(desc->irq_data.affinity, desc->pending_mask); irq_set_thread_affinity(desc); } @@ -61,8 +61,8 @@ void move_native_irq(int irq) if (unlikely(desc->status & IRQ_DISABLED)) return; - desc->chip->mask(irq); + desc->irq_data.chip->mask(irq); move_masked_irq(irq); - desc->chip->unmask(irq); + desc->irq_data.chip->unmask(irq); } diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c index 65d3845665a..e7f1f16402c 100644 --- a/kernel/irq/numa_migrate.c +++ b/kernel/irq/numa_migrate.c @@ -44,7 +44,7 @@ static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc, return false; } raw_spin_lock_init(&desc->lock); - desc->node = node; + desc->irq_data.node = node; lockdep_set_class(&desc->lock, &irq_desc_lock_class); init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids); init_copy_desc_masks(old_desc, desc); @@ -66,7 +66,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, unsigned int irq; unsigned long flags; - irq = old_desc->irq; + irq = old_desc->irq_data.irq; raw_spin_lock_irqsave(&sparse_irq_lock, flags); @@ -109,10 +109,10 @@ out_unlock: struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) { /* those static or target node is -1, do not move them */ - if (desc->irq < NR_IRQS_LEGACY || node == -1) + if (desc->irq_data.irq < NR_IRQS_LEGACY || node == -1) return desc; - if (desc->node != node) + if (desc->irq_data.node != node) desc = __real_move_irq_desc(desc, node); return desc; diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index 09a2ee540bd..9b0da94b5b2 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c @@ -21,7 +21,7 @@ static struct proc_dir_entry *root_irq_dir; static int irq_affinity_proc_show(struct seq_file *m, void *v) { struct irq_desc *desc = irq_to_desc((long)m->private); - const struct cpumask *mask = desc->affinity; + const struct cpumask *mask = desc->irq_data.affinity; #ifdef CONFIG_GENERIC_PENDING_IRQ if (desc->status & IRQ_MOVE_PENDING) @@ -65,7 +65,7 @@ static ssize_t irq_affinity_proc_write(struct file *file, cpumask_var_t new_value; int err; - if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity || + if (!irq_to_desc(irq)->irq_data.chip->set_affinity || no_irq_affinity || irq_balancing_disabled(irq)) return -EIO; @@ -185,7 +185,7 @@ static int irq_node_proc_show(struct seq_file *m, void *v) { struct irq_desc *desc = irq_to_desc((long) m->private); - seq_printf(m, 
"%d\n", desc->node); + seq_printf(m, "%d\n", desc->irq_data.node); return 0; } @@ -269,7 +269,7 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc) { char name [MAX_NAMELEN]; - if (!root_irq_dir || (desc->chip == &no_irq_chip) || desc->dir) + if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir) return; memset(name, 0, MAX_NAMELEN); diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c index 090c3763f3a..47c56a09792 100644 --- a/kernel/irq/resend.c +++ b/kernel/irq/resend.c @@ -60,7 +60,7 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq) /* * Make sure the interrupt is enabled, before resending it: */ - desc->chip->enable(irq); + desc->irq_data.chip->enable(irq); /* * We do not resend level type interrupts. Level type @@ -70,7 +70,8 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq) if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) { desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY; - if (!desc->chip->retrigger || !desc->chip->retrigger(irq)) { + if (!desc->irq_data.chip->retrigger || + !desc->irq_data.chip->retrigger(irq)) { #ifdef CONFIG_HARDIRQS_SW_RESEND /* Set it pending and activate the softirq: */ set_bit(irq, irqs_resend); diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index 89fb90ae534..36c2c9289e2 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c @@ -78,8 +78,8 @@ static int try_one_irq(int irq, struct irq_desc *desc) * If we did actual work for the real IRQ line we must let the * IRQ controller clean up too */ - if (work && desc->chip && desc->chip->end) - desc->chip->end(irq); + if (work && desc->irq_data.chip && desc->irq_data.chip->end) + desc->irq_data.chip->end(irq); raw_spin_unlock(&desc->lock); return ok; @@ -254,7 +254,7 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc, printk(KERN_EMERG "Disabling IRQ #%d\n", irq); desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED; desc->depth++; - desc->chip->disable(irq); + desc->irq_data.chip->disable(irq); mod_timer(&poll_spurious_irq_timer, jiffies + POLL_SPURIOUS_IRQ_INTERVAL); -- cgit v1.2.3-70-g09d2 From a77c4635915021c646cc017f22239e66d1aab4d5 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 1 Oct 2010 14:44:58 +0200 Subject: genirq: Add new functions to dummy chips The compat functions go away when the core code is converted. Signed-off-by: Thomas Gleixner Reviewed-by: Ingo Molnar --- kernel/irq/handle.c | 59 +++++++++++++++++++++++++++++++++++--------------- kernel/irq/internals.h | 2 ++ 2 files changed, 44 insertions(+), 17 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index fc27d76e83e..adca5b4b40d 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -291,7 +291,15 @@ void clear_kstat_irqs(struct irq_desc *desc) * What should we do if we get a hw irq event on an illegal vector? * Each architecture has to answer this themself. 
*/ -static void ack_bad(unsigned int irq) +static void ack_bad(struct irq_data *data) +{ + struct irq_desc *desc = irq_data_to_desc(data); + + print_irq_desc(data->irq, desc); + ack_bad_irq(data->irq); +} + +static void compat_ack_bad(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); @@ -302,11 +310,16 @@ static void ack_bad(unsigned int irq) /* * NOP functions */ -static void noop(unsigned int irq) +static void noop(struct irq_data *data) { } + +static unsigned int noop_ret(struct irq_data *data) { + return 0; } -static unsigned int noop_ret(unsigned int irq) +static void compat_noop(unsigned int irq) { } + +static unsigned int compat_noop_ret(unsigned int irq) { return 0; } @@ -316,12 +329,17 @@ static unsigned int noop_ret(unsigned int irq) */ struct irq_chip no_irq_chip = { .name = "none", - .startup = noop_ret, - .shutdown = noop, - .enable = noop, - .disable = noop, - .ack = ack_bad, - .end = noop, + .irq_startup = noop_ret, + .irq_shutdown = noop, + .irq_enable = noop, + .irq_disable = noop, + .irq_ack = ack_bad, + .startup = compat_noop_ret, + .shutdown = compat_noop, + .enable = compat_noop, + .disable = compat_noop, + .ack = compat_ack_bad, + .end = compat_noop, }; /* @@ -330,14 +348,21 @@ struct irq_chip no_irq_chip = { */ struct irq_chip dummy_irq_chip = { .name = "dummy", - .startup = noop_ret, - .shutdown = noop, - .enable = noop, - .disable = noop, - .ack = noop, - .mask = noop, - .unmask = noop, - .end = noop, + .irq_startup = noop_ret, + .irq_shutdown = noop, + .irq_enable = noop, + .irq_disable = noop, + .irq_ack = noop, + .irq_mask = noop, + .irq_unmask = noop, + .startup = compat_noop_ret, + .shutdown = compat_noop, + .enable = compat_noop, + .disable = compat_noop, + .ack = compat_noop, + .mask = compat_noop, + .unmask = compat_noop, + .end = compat_noop, }; /* diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index a805a00cfd2..562fc7eeabe 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -4,6 +4,8 @@ extern int noirqdebug; +#define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data) + /* Set default functions for irq_chip structures: */ extern void irq_chip_set_defaults(struct irq_chip *chip); -- cgit v1.2.3-70-g09d2 From 3876ec9ef3775d062345b3760d3271ecb8cd3fea Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 27 Sep 2010 12:44:35 +0000 Subject: genirq: Provide compat handling for bus_lock/bus_sync_unlock Wrap the old chip functions for bus_lock/bus_sync_unlock until the migration is complete and the old chip functions are removed. Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra LKML-Reference: <20100927121841.842536121@linutronix.de> Reviewed-by: H. 
Peter Anvin Reviewed-by: Ingo Molnar --- kernel/irq/chip.c | 20 ++++++++++++++++++-- kernel/irq/internals.h | 12 ++++++------ kernel/irq/manage.c | 22 +++++++++++----------- 3 files changed, 35 insertions(+), 19 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index e0e93ff10af..77e551d9223 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -324,6 +324,17 @@ static void default_shutdown(unsigned int irq) desc->status |= IRQ_MASKED; } +/* Temporary migration helpers */ +static void compat_bus_lock(struct irq_data *data) +{ + data->chip->bus_lock(data->irq); +} + +static void compat_bus_sync_unlock(struct irq_data *data) +{ + data->chip->bus_sync_unlock(data->irq); +} + /* * Fixup enable/disable function pointers */ @@ -346,6 +357,11 @@ void irq_chip_set_defaults(struct irq_chip *chip) chip->disable : default_shutdown; if (!chip->end) chip->end = dummy_irq_chip.end; + + if (chip->bus_lock) + chip->irq_bus_lock = compat_bus_lock; + if (chip->bus_sync_unlock) + chip->irq_bus_sync_unlock = compat_bus_sync_unlock; } static inline void mask_ack_irq(struct irq_desc *desc, int irq) @@ -687,7 +703,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, desc->irq_data.chip = &dummy_irq_chip; } - chip_bus_lock(irq, desc); + chip_bus_lock(desc); raw_spin_lock_irqsave(&desc->lock, flags); /* Uninstall? */ @@ -707,7 +723,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, desc->irq_data.chip->startup(irq); } raw_spin_unlock_irqrestore(&desc->lock, flags); - chip_bus_sync_unlock(irq, desc); + chip_bus_sync_unlock(desc); } EXPORT_SYMBOL_GPL(__set_irq_handler); diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 562fc7eeabe..ecafbfee5b1 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -43,16 +43,16 @@ extern int irq_select_affinity_usr(unsigned int irq); extern void irq_set_thread_affinity(struct irq_desc *desc); /* Inline functions for support of irq chips on slow busses */ -static inline void chip_bus_lock(unsigned int irq, struct irq_desc *desc) +static inline void chip_bus_lock(struct irq_desc *desc) { - if (unlikely(desc->irq_data.chip->bus_lock)) - desc->irq_data.chip->bus_lock(irq); + if (unlikely(desc->irq_data.chip->irq_bus_lock)) + desc->irq_data.chip->irq_bus_lock(&desc->irq_data); } -static inline void chip_bus_sync_unlock(unsigned int irq, struct irq_desc *desc) +static inline void chip_bus_sync_unlock(struct irq_desc *desc) { - if (unlikely(desc->irq_data.chip->bus_sync_unlock)) - desc->irq_data.chip->bus_sync_unlock(irq); + if (unlikely(desc->irq_data.chip->irq_bus_sync_unlock)) + desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data); } /* diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 4dfb19521d9..dfb02ff7d2e 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -246,11 +246,11 @@ void disable_irq_nosync(unsigned int irq) if (!desc) return; - chip_bus_lock(irq, desc); + chip_bus_lock(desc); raw_spin_lock_irqsave(&desc->lock, flags); __disable_irq(desc, irq, false); raw_spin_unlock_irqrestore(&desc->lock, flags); - chip_bus_sync_unlock(irq, desc); + chip_bus_sync_unlock(desc); } EXPORT_SYMBOL(disable_irq_nosync); @@ -323,11 +323,11 @@ void enable_irq(unsigned int irq) if (!desc) return; - chip_bus_lock(irq, desc); + chip_bus_lock(desc); raw_spin_lock_irqsave(&desc->lock, flags); __enable_irq(desc, irq, false); raw_spin_unlock_irqrestore(&desc->lock, flags); - chip_bus_sync_unlock(irq, desc); + chip_bus_sync_unlock(desc); } 
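This patch and the ones that follow all repeat one migration idiom: irq_chip_set_defaults() spots a chip that still provides only the old unsigned-int callback and installs a trampoline with the new irq_data signature. Reduced to its essence for the mask case (a sketch; the real wrappers are in the chip.c hunks of the following patches):

#include <linux/irq.h>

/* an unconverted chip still implements the old hook */
static void legacy_mask(unsigned int irq)
{
	/* old-style hardware access, keyed by the bare irq number */
}

/* the compat trampoline the core installs next to it */
static void compat_irq_mask(struct irq_data *data)
{
	data->chip->mask(data->irq);	/* forward to the old hook */
}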
EXPORT_SYMBOL(enable_irq); @@ -507,7 +507,7 @@ static int irq_wait_for_interrupt(struct irqaction *action) static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc) { again: - chip_bus_lock(irq, desc); + chip_bus_lock(desc); raw_spin_lock_irq(&desc->lock); /* @@ -521,7 +521,7 @@ again: */ if (unlikely(desc->status & IRQ_INPROGRESS)) { raw_spin_unlock_irq(&desc->lock); - chip_bus_sync_unlock(irq, desc); + chip_bus_sync_unlock(desc); cpu_relax(); goto again; } @@ -531,7 +531,7 @@ again: desc->irq_data.chip->unmask(irq); } raw_spin_unlock_irq(&desc->lock); - chip_bus_sync_unlock(irq, desc); + chip_bus_sync_unlock(desc); } #ifdef CONFIG_SMP @@ -997,9 +997,9 @@ void free_irq(unsigned int irq, void *dev_id) if (!desc) return; - chip_bus_lock(irq, desc); + chip_bus_lock(desc); kfree(__free_irq(irq, dev_id)); - chip_bus_sync_unlock(irq, desc); + chip_bus_sync_unlock(desc); } EXPORT_SYMBOL(free_irq); @@ -1086,9 +1086,9 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler, action->name = devname; action->dev_id = dev_id; - chip_bus_lock(irq, desc); + chip_bus_lock(desc); retval = __setup_irq(irq, desc, action); - chip_bus_sync_unlock(irq, desc); + chip_bus_sync_unlock(desc); if (retval) kfree(action); -- cgit v1.2.3-70-g09d2 From e2c0f8ff0fc26959952fbfa89f732fef928df77f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 27 Sep 2010 12:44:42 +0000 Subject: genirq: Provide compat handling for chip->mask() Wrap the old chip function mask() until the migration is complete and the old chip functions are removed. Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra LKML-Reference: <20100927121841.940355859@linutronix.de> Reviewed-by: H. Peter Anvin Reviewed-by: Ingo Molnar --- kernel/irq/chip.c | 22 +++++++++++++++------- kernel/irq/handle.c | 1 - kernel/irq/migration.c | 2 +- 3 files changed, 16 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 77e551d9223..c041270bfe5 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -320,11 +320,16 @@ static void default_shutdown(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); - desc->irq_data.chip->mask(irq); + desc->irq_data.chip->irq_mask(&desc->irq_data); desc->status |= IRQ_MASKED; } /* Temporary migration helpers */ +static void compat_irq_mask(struct irq_data *data) +{ + data->chip->mask(data->irq); +} + static void compat_bus_lock(struct irq_data *data) { data->chip->bus_lock(data->irq); @@ -362,6 +367,9 @@ void irq_chip_set_defaults(struct irq_chip *chip) chip->irq_bus_lock = compat_bus_lock; if (chip->bus_sync_unlock) chip->irq_bus_sync_unlock = compat_bus_sync_unlock; + + if (chip->mask) + chip->irq_mask = compat_irq_mask; } static inline void mask_ack_irq(struct irq_desc *desc, int irq) @@ -369,17 +377,17 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq) if (desc->irq_data.chip->mask_ack) desc->irq_data.chip->mask_ack(irq); else { - desc->irq_data.chip->mask(irq); + desc->irq_data.chip->irq_mask(&desc->irq_data); if (desc->irq_data.chip->ack) desc->irq_data.chip->ack(irq); } desc->status |= IRQ_MASKED; } -static inline void mask_irq(struct irq_desc *desc, int irq) +static inline void mask_irq(struct irq_desc *desc) { - if (desc->irq_data.chip->mask) { - desc->irq_data.chip->mask(irq); + if (desc->irq_data.chip->irq_mask) { + desc->irq_data.chip->irq_mask(&desc->irq_data); desc->status |= IRQ_MASKED; } } @@ -553,7 +561,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) action = desc->action; if (unlikely(!action 
|| (desc->status & IRQ_DISABLED))) { desc->status |= IRQ_PENDING; - mask_irq(desc, irq); + mask_irq(desc); goto out; } @@ -621,7 +629,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc) irqreturn_t action_ret; if (unlikely(!action)) { - mask_irq(desc, irq); + mask_irq(desc); goto out_unlock; } diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index adca5b4b40d..3b160ac236b 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -360,7 +360,6 @@ struct irq_chip dummy_irq_chip = { .enable = compat_noop, .disable = compat_noop, .ack = compat_noop, - .mask = compat_noop, .unmask = compat_noop, .end = compat_noop, }; diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c index f923c37e651..b165ec26b75 100644 --- a/kernel/irq/migration.c +++ b/kernel/irq/migration.c @@ -61,7 +61,7 @@ void move_native_irq(int irq) if (unlikely(desc->status & IRQ_DISABLED)) return; - desc->irq_data.chip->mask(irq); + desc->irq_data.chip->irq_mask(&desc->irq_data); move_masked_irq(irq); desc->irq_data.chip->unmask(irq); } -- cgit v1.2.3-70-g09d2 From 0eda58b7f3a30c9a13d83db1cfaab00e1c452055 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 27 Sep 2010 12:44:44 +0000 Subject: genirq: Provide compat handling for chip->unmask() Wrap the old chip function unmask() until the migration is complete and the old chip functions are removed. Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra LKML-Reference: <20100927121842.043608928@linutronix.de> Reviewed-by: H. Peter Anvin Reviewed-by: Ingo Molnar --- kernel/irq/chip.c | 20 ++++++++++++++------ kernel/irq/handle.c | 1 - kernel/irq/manage.c | 2 +- kernel/irq/migration.c | 2 +- 4 files changed, 16 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index c041270bfe5..dbdb59a4209 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -291,7 +291,7 @@ static void default_enable(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); - desc->irq_data.chip->unmask(irq); + desc->irq_data.chip->irq_unmask(&desc->irq_data); desc->status &= ~IRQ_MASKED; } @@ -330,6 +330,11 @@ static void compat_irq_mask(struct irq_data *data) data->chip->mask(data->irq); } +static void compat_irq_unmask(struct irq_data *data) +{ + data->chip->unmask(data->irq); +} + static void compat_bus_lock(struct irq_data *data) { data->chip->bus_lock(data->irq); @@ -370,6 +375,9 @@ void irq_chip_set_defaults(struct irq_chip *chip) if (chip->mask) chip->irq_mask = compat_irq_mask; + + if (chip->unmask) + chip->irq_unmask = compat_irq_unmask; } static inline void mask_ack_irq(struct irq_desc *desc, int irq) @@ -392,10 +400,10 @@ static inline void mask_irq(struct irq_desc *desc) } } -static inline void unmask_irq(struct irq_desc *desc, int irq) +static inline void unmask_irq(struct irq_desc *desc) { - if (desc->irq_data.chip->unmask) { - desc->irq_data.chip->unmask(irq); + if (desc->irq_data.chip->irq_unmask) { + desc->irq_data.chip->irq_unmask(&desc->irq_data); desc->status &= ~IRQ_MASKED; } } @@ -524,7 +532,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc) desc->status &= ~IRQ_INPROGRESS; if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT))) - unmask_irq(desc, irq); + unmask_irq(desc); out_unlock: raw_spin_unlock(&desc->lock); } @@ -641,7 +649,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc) if (unlikely((desc->status & (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) == (IRQ_PENDING | IRQ_MASKED))) { - unmask_irq(desc, irq); + unmask_irq(desc); } desc->status &= ~IRQ_PENDING; diff --git 
a/kernel/irq/handle.c b/kernel/irq/handle.c index 3b160ac236b..f334c8c59dd 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -360,7 +360,6 @@ struct irq_chip dummy_irq_chip = { .enable = compat_noop, .disable = compat_noop, .ack = compat_noop, - .unmask = compat_noop, .end = compat_noop, }; diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index dfb02ff7d2e..b3986bce64f 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -528,7 +528,7 @@ again: if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) { desc->status &= ~IRQ_MASKED; - desc->irq_data.chip->unmask(irq); + desc->irq_data.chip->irq_unmask(&desc->irq_data); } raw_spin_unlock_irq(&desc->lock); chip_bus_sync_unlock(desc); diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c index b165ec26b75..7888e5d5575 100644 --- a/kernel/irq/migration.c +++ b/kernel/irq/migration.c @@ -63,6 +63,6 @@ void move_native_irq(int irq) desc->irq_data.chip->irq_mask(&desc->irq_data); move_masked_irq(irq); - desc->irq_data.chip->unmask(irq); + desc->irq_data.chip->irq_unmask(&desc->irq_data); } -- cgit v1.2.3-70-g09d2 From 22a49163e90dd7088f801dd54e25b04e1f337e9b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 27 Sep 2010 12:44:47 +0000 Subject: genirq: Provide compat handling for chip->ack() Wrap the old chip function ack() until the migration is complete and the old chip functions are removed. Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra LKML-Reference: <20100927121842.142624725@linutronix.de> Reviewed-by: H. Peter Anvin Reviewed-by: Ingo Molnar --- kernel/irq/chip.c | 20 ++++++++++++-------- kernel/irq/handle.c | 10 ---------- 2 files changed, 12 insertions(+), 18 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index dbdb59a4209..864c3abdf8f 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -335,6 +335,11 @@ static void compat_irq_unmask(struct irq_data *data) data->chip->unmask(data->irq); } +static void compat_irq_ack(struct irq_data *data) +{ + data->chip->ack(data->irq); +} + static void compat_bus_lock(struct irq_data *data) { data->chip->bus_lock(data->irq); @@ -372,12 +377,12 @@ void irq_chip_set_defaults(struct irq_chip *chip) chip->irq_bus_lock = compat_bus_lock; if (chip->bus_sync_unlock) chip->irq_bus_sync_unlock = compat_bus_sync_unlock; - if (chip->mask) chip->irq_mask = compat_irq_mask; - if (chip->unmask) chip->irq_unmask = compat_irq_unmask; + if (chip->ack) + chip->irq_ack = compat_irq_ack; } static inline void mask_ack_irq(struct irq_desc *desc, int irq) @@ -386,8 +391,8 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq) desc->irq_data.chip->mask_ack(irq); else { desc->irq_data.chip->irq_mask(&desc->irq_data); - if (desc->irq_data.chip->ack) - desc->irq_data.chip->ack(irq); + if (desc->irq_data.chip->irq_ack) + desc->irq_data.chip->irq_ack(&desc->irq_data); } desc->status |= IRQ_MASKED; } @@ -626,8 +631,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc) kstat_incr_irqs_this_cpu(irq, desc); /* Start handling the irq */ - if (desc->irq_data.chip->ack) - desc->irq_data.chip->ack(irq); + desc->irq_data.chip->irq_ack(&desc->irq_data); /* Mark the IRQ currently in progress.*/ desc->status |= IRQ_INPROGRESS; @@ -680,8 +684,8 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc) kstat_incr_irqs_this_cpu(irq, desc); - if (desc->irq_data.chip->ack) - desc->irq_data.chip->ack(irq); + if (desc->irq_data.chip->irq_ack) + desc->irq_data.chip->irq_ack(&desc->irq_data); action_ret = handle_IRQ_event(irq, 
desc->action); if (!noirqdebug) diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index f334c8c59dd..9ba7aece0e4 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -299,14 +299,6 @@ static void ack_bad(struct irq_data *data) ack_bad_irq(data->irq); } -static void compat_ack_bad(unsigned int irq) -{ - struct irq_desc *desc = irq_to_desc(irq); - - print_irq_desc(irq, desc); - ack_bad_irq(irq); -} - /* * NOP functions */ @@ -338,7 +330,6 @@ struct irq_chip no_irq_chip = { .shutdown = compat_noop, .enable = compat_noop, .disable = compat_noop, - .ack = compat_ack_bad, .end = compat_noop, }; @@ -359,7 +350,6 @@ struct irq_chip dummy_irq_chip = { .shutdown = compat_noop, .enable = compat_noop, .disable = compat_noop, - .ack = compat_noop, .end = compat_noop, }; -- cgit v1.2.3-70-g09d2 From 9205e31d1af0f725e71bbae10d199c6b9e8d6dd8 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 27 Sep 2010 12:44:50 +0000 Subject: genirq: Provide compat handling for chip->mask_ack() Wrap the old chip function mask_ack() until the migration is complete and the old chip functions are removed. Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra LKML-Reference: <20100927121842.240806983@linutronix.de> Reviewed-by: H. Peter Anvin Reviewed-by: Ingo Molnar --- kernel/irq/chip.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 864c3abdf8f..09c1a449344 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -340,6 +340,11 @@ static void compat_irq_ack(struct irq_data *data) data->chip->ack(data->irq); } +static void compat_irq_mask_ack(struct irq_data *data) +{ + data->chip->mask_ack(data->irq); +} + static void compat_bus_lock(struct irq_data *data) { data->chip->bus_lock(data->irq); @@ -383,12 +388,14 @@ void irq_chip_set_defaults(struct irq_chip *chip) chip->irq_unmask = compat_irq_unmask; if (chip->ack) chip->irq_ack = compat_irq_ack; + if (chip->mask_ack) + chip->irq_mask_ack = compat_irq_mask_ack; } -static inline void mask_ack_irq(struct irq_desc *desc, int irq) +static inline void mask_ack_irq(struct irq_desc *desc) { - if (desc->irq_data.chip->mask_ack) - desc->irq_data.chip->mask_ack(irq); + if (desc->irq_data.chip->irq_mask_ack) + desc->irq_data.chip->irq_mask_ack(&desc->irq_data); else { desc->irq_data.chip->irq_mask(&desc->irq_data); if (desc->irq_data.chip->irq_ack) @@ -511,7 +518,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc) irqreturn_t action_ret; raw_spin_lock(&desc->lock); - mask_ack_irq(desc, irq); + mask_ack_irq(desc); if (unlikely(desc->status & IRQ_INPROGRESS)) goto out_unlock; @@ -625,7 +632,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc) if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) || !desc->action)) { desc->status |= (IRQ_PENDING | IRQ_MASKED); - mask_ack_irq(desc, irq); + mask_ack_irq(desc); goto out_unlock; } kstat_incr_irqs_this_cpu(irq, desc); @@ -729,7 +736,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, /* Uninstall? 
*/ if (handle == handle_bad_irq) { if (desc->irq_data.chip != &no_irq_chip) - mask_ack_irq(desc, irq); + mask_ack_irq(desc); desc->status |= IRQ_DISABLED; desc->depth = 1; } -- cgit v1.2.3-70-g09d2 From 0c5c15572ac096001f52d37b416f2a4be9aebb80 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 27 Sep 2010 12:44:53 +0000 Subject: genirq: Provide compat handling for chip->eoi() Wrap the old chip function eoi() until the migration is complete and the old chip functions are removed. Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra LKML-Reference: <20100927121842.339657617@linutronix.de> Reviewed-by: H. Peter Anvin Reviewed-by: Ingo Molnar --- kernel/irq/chip.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 09c1a449344..c8648a83b80 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -345,6 +345,11 @@ static void compat_irq_mask_ack(struct irq_data *data) data->chip->mask_ack(data->irq); } +static void compat_irq_eoi(struct irq_data *data) +{ + data->chip->eoi(data->irq); +} + static void compat_bus_lock(struct irq_data *data) { data->chip->bus_lock(data->irq); @@ -390,6 +395,8 @@ void irq_chip_set_defaults(struct irq_chip *chip) chip->irq_ack = compat_irq_ack; if (chip->mask_ack) chip->irq_mask_ack = compat_irq_mask_ack; + if (chip->eoi) + chip->irq_eoi = compat_irq_eoi; } static inline void mask_ack_irq(struct irq_desc *desc) @@ -596,7 +603,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) raw_spin_lock(&desc->lock); desc->status &= ~IRQ_INPROGRESS; out: - desc->irq_data.chip->eoi(irq); + desc->irq_data.chip->irq_eoi(&desc->irq_data); raw_spin_unlock(&desc->lock); } @@ -698,8 +705,8 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc) if (!noirqdebug) note_interrupt(irq, desc, action_ret); - if (desc->irq_data.chip->eoi) - desc->irq_data.chip->eoi(irq); + if (desc->irq_data.chip->irq_eoi) + desc->irq_data.chip->irq_eoi(&desc->irq_data); } void -- cgit v1.2.3-70-g09d2 From c5f756344c390f629243b4a28c2bd198fdfd7ee9 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 27 Sep 2010 12:44:56 +0000 Subject: genirq: Provide compat handling for chip->enable() Wrap the old chip function enable() until the migration is complete and the old chip functions are removed. Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra LKML-Reference: <20100927121842.437159182@linutronix.de> Reviewed-by: H. 
Peter Anvin Reviewed-by: Ingo Molnar --- kernel/irq/chip.c | 25 ++++++++++++++++++++----- kernel/irq/handle.c | 2 -- kernel/irq/resend.c | 2 +- 3 files changed, 21 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index c8648a83b80..a95b4783126 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -287,9 +287,9 @@ EXPORT_SYMBOL_GPL(set_irq_nested_thread); /* * default enable function */ -static void default_enable(unsigned int irq) +static void default_enable(struct irq_data *data) { - struct irq_desc *desc = irq_to_desc(irq); + struct irq_desc *desc = irq_data_to_desc(data); desc->irq_data.chip->irq_unmask(&desc->irq_data); desc->status &= ~IRQ_MASKED; @@ -309,7 +309,7 @@ static unsigned int default_startup(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); - desc->irq_data.chip->enable(irq); + desc->irq_data.chip->irq_enable(&desc->irq_data); return 0; } @@ -350,6 +350,11 @@ static void compat_irq_eoi(struct irq_data *data) data->chip->eoi(data->irq); } +static void compat_irq_enable(struct irq_data *data) +{ + data->chip->enable(data->irq); +} + static void compat_bus_lock(struct irq_data *data) { data->chip->bus_lock(data->irq); @@ -365,8 +370,18 @@ static void compat_bus_sync_unlock(struct irq_data *data) */ void irq_chip_set_defaults(struct irq_chip *chip) { - if (!chip->enable) - chip->enable = default_enable; + /* + * Compat fixup functions need to be before we set the + * defaults for enable/disable/startup/shutdown + */ + if (chip->enable) + chip->irq_enable = compat_irq_enable; + + /* + * The real defaults + */ + if (!chip->irq_enable) + chip->irq_enable = default_enable; if (!chip->disable) chip->disable = default_disable; if (!chip->startup) diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 9ba7aece0e4..ac8c749dfee 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -328,7 +328,6 @@ struct irq_chip no_irq_chip = { .irq_ack = ack_bad, .startup = compat_noop_ret, .shutdown = compat_noop, - .enable = compat_noop, .disable = compat_noop, .end = compat_noop, }; @@ -348,7 +347,6 @@ struct irq_chip dummy_irq_chip = { .irq_unmask = noop, .startup = compat_noop_ret, .shutdown = compat_noop, - .enable = compat_noop, .disable = compat_noop, .end = compat_noop, }; diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c index 47c56a09792..a798a2328f8 100644 --- a/kernel/irq/resend.c +++ b/kernel/irq/resend.c @@ -60,7 +60,7 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq) /* * Make sure the interrupt is enabled, before resending it: */ - desc->irq_data.chip->enable(irq); + desc->irq_data.chip->irq_enable(&desc->irq_data); /* * We do not resend level type interrupts. Level type -- cgit v1.2.3-70-g09d2 From bc310dda41be6439364c8f3b9fe7c9d743d22b1c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 27 Sep 2010 12:45:02 +0000 Subject: genirq: Provide compat handling for chip->disable()/shutdown() Wrap the old chip functions disable() and shutdown() until the migration is complete and the old chip functions are removed. Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra LKML-Reference: <20100927121842.532070631@linutronix.de> Reviewed-by: H. 
Peter Anvin Reviewed-by: Ingo Molnar --- kernel/irq/autoprobe.c | 6 +++--- kernel/irq/chip.c | 37 +++++++++++++++++++++++++++---------- kernel/irq/handle.c | 4 ---- kernel/irq/manage.c | 8 ++++---- kernel/irq/spurious.c | 2 +- 5 files changed, 35 insertions(+), 22 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c index f9bf9b22803..95806a45be7 100644 --- a/kernel/irq/autoprobe.c +++ b/kernel/irq/autoprobe.c @@ -98,7 +98,7 @@ unsigned long probe_irq_on(void) /* It triggered already - consider it spurious. */ if (!(status & IRQ_WAITING)) { desc->status = status & ~IRQ_AUTODETECT; - desc->irq_data.chip->shutdown(i); + desc->irq_data.chip->irq_shutdown(&desc->irq_data); } else if (i < 32) mask |= 1 << i; @@ -137,7 +137,7 @@ unsigned int probe_irq_mask(unsigned long val) mask |= 1 << i; desc->status = status & ~IRQ_AUTODETECT; - desc->irq_data.chip->shutdown(i); + desc->irq_data.chip->irq_shutdown(&desc->irq_data); } raw_spin_unlock_irq(&desc->lock); } @@ -181,7 +181,7 @@ int probe_irq_off(unsigned long val) nr_of_irqs++; } desc->status = status & ~IRQ_AUTODETECT; - desc->irq_data.chip->shutdown(i); + desc->irq_data.chip->irq_shutdown(&desc->irq_data); } raw_spin_unlock_irq(&desc->lock); } diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index a95b4783126..b8a47f0a26c 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -298,7 +298,7 @@ static void default_enable(struct irq_data *data) /* * default disable function */ -static void default_disable(unsigned int irq) +static void default_disable(struct irq_data *data) { } @@ -316,9 +316,9 @@ static unsigned int default_startup(unsigned int irq) /* * default shutdown function */ -static void default_shutdown(unsigned int irq) +static void default_shutdown(struct irq_data *data) { - struct irq_desc *desc = irq_to_desc(irq); + struct irq_desc *desc = irq_data_to_desc(data); desc->irq_data.chip->irq_mask(&desc->irq_data); desc->status |= IRQ_MASKED; @@ -355,6 +355,16 @@ static void compat_irq_enable(struct irq_data *data) data->chip->enable(data->irq); } +static void compat_irq_disable(struct irq_data *data) +{ + data->chip->disable(data->irq); +} + +static void compat_irq_shutdown(struct irq_data *data) +{ + data->chip->shutdown(data->irq); +} + static void compat_bus_lock(struct irq_data *data) { data->chip->bus_lock(data->irq); @@ -376,28 +386,35 @@ void irq_chip_set_defaults(struct irq_chip *chip) */ if (chip->enable) chip->irq_enable = compat_irq_enable; + if (chip->disable) + chip->irq_disable = compat_irq_disable; + if (chip->shutdown) + chip->irq_shutdown = compat_irq_shutdown; /* * The real defaults */ if (!chip->irq_enable) chip->irq_enable = default_enable; - if (!chip->disable) - chip->disable = default_disable; + if (!chip->irq_disable) + chip->irq_disable = default_disable; if (!chip->startup) chip->startup = default_startup; /* - * We use chip->disable, when the user provided its own. When - * we have default_disable set for chip->disable, then we need + * We use chip->irq_disable, when the user provided its own. When + * we have default_disable set for chip->irq_disable, then we need * to use default_shutdown, otherwise the irq line is not * disabled on free_irq(): */ - if (!chip->shutdown) - chip->shutdown = chip->disable != default_disable ? - chip->disable : default_shutdown; + if (!chip->irq_shutdown) + chip->irq_shutdown = chip->irq_disable != default_disable ? 
+ chip->irq_disable : default_shutdown; if (!chip->end) chip->end = dummy_irq_chip.end; + /* + * Now fix up the remaining compat handlers + */ if (chip->bus_lock) chip->irq_bus_lock = compat_bus_lock; if (chip->bus_sync_unlock) diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index ac8c749dfee..60e25c46eb5 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -327,8 +327,6 @@ struct irq_chip no_irq_chip = { .irq_disable = noop, .irq_ack = ack_bad, .startup = compat_noop_ret, - .shutdown = compat_noop, - .disable = compat_noop, .end = compat_noop, }; @@ -346,8 +344,6 @@ struct irq_chip dummy_irq_chip = { .irq_mask = noop, .irq_unmask = noop, .startup = compat_noop_ret, - .shutdown = compat_noop, - .disable = compat_noop, .end = compat_noop, }; diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index b3986bce64f..f3f36f6af9a 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -223,7 +223,7 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) if (!desc->depth++) { desc->status |= IRQ_DISABLED; - desc->irq_data.chip->disable(irq); + desc->irq_data.chip->irq_disable(&desc->irq_data); } } @@ -919,10 +919,10 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) /* If this was the last handler, shut down the IRQ line: */ if (!desc->action) { desc->status |= IRQ_DISABLED; - if (desc->irq_data.chip->shutdown) - desc->irq_data.chip->shutdown(irq); + if (desc->irq_data.chip->irq_shutdown) + desc->irq_data.chip->irq_shutdown(&desc->irq_data); else - desc->irq_data.chip->disable(irq); + desc->irq_data.chip->irq_disable(&desc->irq_data); } #ifdef CONFIG_SMP diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index 36c2c9289e2..9ee704d3a23 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c @@ -254,7 +254,7 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc, printk(KERN_EMERG "Disabling IRQ #%d\n", irq); desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED; desc->depth++; - desc->irq_data.chip->disable(irq); + desc->irq_data.chip->irq_disable(&desc->irq_data); mod_timer(&poll_spurious_irq_timer, jiffies + POLL_SPURIOUS_IRQ_INTERVAL); -- cgit v1.2.3-70-g09d2 From 37e12df709f09eac17314d79a52190ac46746e33 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 27 Sep 2010 12:45:38 +0000 Subject: genirq: Provide compat handling for chip->startup() Wrap the old chip function startup() until the migration is complete and the old chip functions are removed. Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra LKML-Reference: <20100927121842.635152961@linutronix.de> Reviewed-by: H. 
Peter Anvin Reviewed-by: Ingo Molnar --- kernel/irq/autoprobe.c | 4 ++-- kernel/irq/chip.c | 19 +++++++++++++------ kernel/irq/handle.c | 7 ------- kernel/irq/manage.c | 2 +- 4 files changed, 16 insertions(+), 16 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c index 95806a45be7..7a468254e53 100644 --- a/kernel/irq/autoprobe.c +++ b/kernel/irq/autoprobe.c @@ -59,7 +59,7 @@ unsigned long probe_irq_on(void) */ if (desc->irq_data.chip->set_type) desc->irq_data.chip->set_type(i, IRQ_TYPE_PROBE); - desc->irq_data.chip->startup(i); + desc->irq_data.chip->irq_startup(&desc->irq_data); } raw_spin_unlock_irq(&desc->lock); } @@ -76,7 +76,7 @@ unsigned long probe_irq_on(void) raw_spin_lock_irq(&desc->lock); if (!desc->action && !(desc->status & IRQ_NOPROBE)) { desc->status |= IRQ_AUTODETECT | IRQ_WAITING; - if (desc->irq_data.chip->startup(i)) + if (desc->irq_data.chip->irq_startup(&desc->irq_data)) desc->status |= IRQ_PENDING; } raw_spin_unlock_irq(&desc->lock); diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index b8a47f0a26c..cce85f0734b 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -305,11 +305,11 @@ static void default_disable(struct irq_data *data) /* * default startup function */ -static unsigned int default_startup(unsigned int irq) +static unsigned int default_startup(struct irq_data *data) { - struct irq_desc *desc = irq_to_desc(irq); + struct irq_desc *desc = irq_data_to_desc(data); - desc->irq_data.chip->irq_enable(&desc->irq_data); + desc->irq_data.chip->irq_enable(data); return 0; } @@ -365,6 +365,11 @@ static void compat_irq_shutdown(struct irq_data *data) data->chip->shutdown(data->irq); } +static unsigned int compat_irq_startup(struct irq_data *data) +{ + return data->chip->startup(data->irq); +} + static void compat_bus_lock(struct irq_data *data) { data->chip->bus_lock(data->irq); @@ -390,6 +395,8 @@ void irq_chip_set_defaults(struct irq_chip *chip) chip->irq_disable = compat_irq_disable; if (chip->shutdown) chip->irq_shutdown = compat_irq_shutdown; + if (chip->startup) + chip->irq_startup = compat_irq_startup; /* * The real defaults @@ -398,8 +405,8 @@ void irq_chip_set_defaults(struct irq_chip *chip) chip->irq_enable = default_enable; if (!chip->irq_disable) chip->irq_disable = default_disable; - if (!chip->startup) - chip->startup = default_startup; + if (!chip->irq_startup) + chip->irq_startup = default_startup; /* * We use chip->irq_disable, when the user provided its own. 
When * we have default_disable set for chip->irq_disable, then we need @@ -786,7 +793,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, desc->status &= ~IRQ_DISABLED; desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE; desc->depth = 0; - desc->irq_data.chip->startup(irq); + desc->irq_data.chip->irq_startup(&desc->irq_data); } raw_spin_unlock_irqrestore(&desc->lock, flags); chip_bus_sync_unlock(desc); diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 60e25c46eb5..8d0697f892a 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -311,11 +311,6 @@ static unsigned int noop_ret(struct irq_data *data) static void compat_noop(unsigned int irq) { } -static unsigned int compat_noop_ret(unsigned int irq) -{ - return 0; -} - /* * Generic no controller implementation */ @@ -326,7 +321,6 @@ struct irq_chip no_irq_chip = { .irq_enable = noop, .irq_disable = noop, .irq_ack = ack_bad, - .startup = compat_noop_ret, .end = compat_noop, }; @@ -343,7 +337,6 @@ struct irq_chip dummy_irq_chip = { .irq_ack = noop, .irq_mask = noop, .irq_unmask = noop, - .startup = compat_noop_ret, .end = compat_noop, }; diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index f3f36f6af9a..31d7678e026 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -779,7 +779,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) if (!(desc->status & IRQ_NOAUTOEN)) { desc->depth = 0; desc->status &= ~IRQ_DISABLED; - desc->irq_data.chip->startup(irq); + desc->irq_data.chip->irq_startup(&desc->irq_data); } else /* Undo nested disables: */ desc->depth = 1; -- cgit v1.2.3-70-g09d2 From c96b3b3c448592a0b87ef20306deb8b1fb4878c7 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 27 Sep 2010 12:45:41 +0000 Subject: genirq: Provide compat handling for chip->set_affinity() Wrap the old chip function set_affinity() until the migration is complete and the old chip functions are removed. Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra LKML-Reference: <20100927121842.732894108@linutronix.de> Reviewed-by: H. 
Peter Anvin Reviewed-by: Ingo Molnar --- kernel/irq/chip.c | 8 ++++++++ kernel/irq/manage.c | 11 ++++++----- kernel/irq/migration.c | 6 ++++-- kernel/irq/proc.c | 2 +- 4 files changed, 19 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index cce85f0734b..df51792d9fd 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -370,6 +370,12 @@ static unsigned int compat_irq_startup(struct irq_data *data) return data->chip->startup(data->irq); } +static int compat_irq_set_affinity(struct irq_data *data, + const struct cpumask *dest, bool force) +{ + return data->chip->set_affinity(data->irq, dest); +} + static void compat_bus_lock(struct irq_data *data) { data->chip->bus_lock(data->irq); @@ -436,6 +442,8 @@ void irq_chip_set_defaults(struct irq_chip *chip) chip->irq_mask_ack = compat_irq_mask_ack; if (chip->eoi) chip->irq_eoi = compat_irq_eoi; + if (chip->set_affinity) + chip->irq_set_affinity = compat_irq_set_affinity; } static inline void mask_ack_irq(struct irq_desc *desc) diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 31d7678e026..305a60ff756 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -74,7 +74,7 @@ int irq_can_set_affinity(unsigned int irq) struct irq_desc *desc = irq_to_desc(irq); if (CHECK_IRQ_PER_CPU(desc->status) || !desc->irq_data.chip || - !desc->irq_data.chip->set_affinity) + !desc->irq_data.chip->irq_set_affinity) return 0; return 1; @@ -109,16 +109,17 @@ void irq_set_thread_affinity(struct irq_desc *desc) int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) { struct irq_desc *desc = irq_to_desc(irq); + struct irq_chip *chip = desc->irq_data.chip; unsigned long flags; - if (!desc->irq_data.chip->set_affinity) + if (!chip->irq_set_affinity) return -EINVAL; raw_spin_lock_irqsave(&desc->lock, flags); #ifdef CONFIG_GENERIC_PENDING_IRQ if (desc->status & IRQ_MOVE_PCNTXT) { - if (!desc->irq_data.chip->set_affinity(irq, cpumask)) { + if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) { cpumask_copy(desc->irq_data.affinity, cpumask); irq_set_thread_affinity(desc); } @@ -128,7 +129,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) cpumask_copy(desc->pending_mask, cpumask); } #else - if (!desc->irq_data.chip->set_affinity(irq, cpumask)) { + if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) { cpumask_copy(desc->irq_data.affinity, cpumask); irq_set_thread_affinity(desc); } @@ -177,7 +178,7 @@ static int setup_affinity(unsigned int irq, struct irq_desc *desc) cpumask_and(desc->irq_data.affinity, cpu_online_mask, irq_default_affinity); set_affinity: - desc->irq_data.chip->set_affinity(irq, desc->irq_data.affinity); + desc->irq_data.chip->irq_set_affinity(&desc->irq_data, desc->irq_data.affinity, false); return 0; } diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c index 7888e5d5575..1d254194048 100644 --- a/kernel/irq/migration.c +++ b/kernel/irq/migration.c @@ -7,6 +7,7 @@ void move_masked_irq(int irq) { struct irq_desc *desc = irq_to_desc(irq); + struct irq_chip *chip = desc->irq_data.chip; if (likely(!(desc->status & IRQ_MOVE_PENDING))) return; @@ -24,7 +25,7 @@ void move_masked_irq(int irq) if (unlikely(cpumask_empty(desc->pending_mask))) return; - if (!desc->irq_data.chip->set_affinity) + if (!chip->irq_set_affinity) return; assert_raw_spin_locked(&desc->lock); @@ -43,7 +44,8 @@ void move_masked_irq(int irq) */ if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)) - if 
(!desc->irq_data.chip->set_affinity(irq, desc->pending_mask)) { + if (!chip->irq_set_affinity(&desc->irq_data, + desc->pending_mask, false)) { cpumask_copy(desc->irq_data.affinity, desc->pending_mask); irq_set_thread_affinity(desc); } diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index 9b0da94b5b2..d9fddf918b4 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c @@ -65,7 +65,7 @@ static ssize_t irq_affinity_proc_write(struct file *file, cpumask_var_t new_value; int err; - if (!irq_to_desc(irq)->irq_data.chip->set_affinity || no_irq_affinity || + if (!irq_to_desc(irq)->irq_data.chip->irq_set_affinity || no_irq_affinity || irq_balancing_disabled(irq)) return -EIO; -- cgit v1.2.3-70-g09d2 From b2ba2c30033c10cca2454f8b44bf98f5249e61c6 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 27 Sep 2010 12:45:47 +0000 Subject: genirq: Provide compat handling for chip->set_type() Wrap the old chip function set_type() until the migration is complete and the old chip functions are removed. Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra LKML-Reference: <20100927121842.832261548@linutronix.de> Reviewed-by: H. Peter Anvin Reviewed-by: Ingo Molnar --- kernel/irq/autoprobe.c | 5 +++-- kernel/irq/chip.c | 7 +++++++ kernel/irq/manage.c | 10 +++++----- 3 files changed, 15 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c index 7a468254e53..505798f86c3 100644 --- a/kernel/irq/autoprobe.c +++ b/kernel/irq/autoprobe.c @@ -57,8 +57,9 @@ unsigned long probe_irq_on(void) * Some chips need to know about probing in * progress: */ - if (desc->irq_data.chip->set_type) - desc->irq_data.chip->set_type(i, IRQ_TYPE_PROBE); + if (desc->irq_data.chip->irq_set_type) + desc->irq_data.chip->irq_set_type(&desc->irq_data, + IRQ_TYPE_PROBE); desc->irq_data.chip->irq_startup(&desc->irq_data); } raw_spin_unlock_irq(&desc->lock); diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index df51792d9fd..b7dd02a99c8 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -376,6 +376,11 @@ static int compat_irq_set_affinity(struct irq_data *data, return data->chip->set_affinity(data->irq, dest); } +static int compat_irq_set_type(struct irq_data *data, unsigned int type) +{ + return data->chip->set_type(data->irq, type); +} + static void compat_bus_lock(struct irq_data *data) { data->chip->bus_lock(data->irq); @@ -444,6 +449,8 @@ void irq_chip_set_defaults(struct irq_chip *chip) chip->irq_eoi = compat_irq_eoi; if (chip->set_affinity) chip->irq_set_affinity = compat_irq_set_affinity; + if (chip->set_type) + chip->irq_set_type = compat_irq_set_type; } static inline void mask_ack_irq(struct irq_desc *desc) diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 305a60ff756..3618362b3d8 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -430,12 +430,12 @@ void compat_irq_chip_set_default_handler(struct irq_desc *desc) } int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, - unsigned long flags) + unsigned long flags) { int ret; struct irq_chip *chip = desc->irq_data.chip; - if (!chip || !chip->set_type) { + if (!chip || !chip->irq_set_type) { /* * IRQF_TRIGGER_* but the PIC does not support multiple * flow-types? 
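For reference, the driver-side counterpart of this conversion has roughly the following shape under the new API. This is a sketch built around an invented "foo" controller; the struct, register offsets and names are all hypothetical and not code from this series:

#include <linux/io.h>
#include <linux/irq.h>

#define FOO_EDGE_RISE	0x10	/* hypothetical register offsets */
#define FOO_LEVEL_HI	0x14

struct foo_pic {
	void __iomem *base;
};

static int foo_irq_set_type(struct irq_data *data, unsigned int flow_type)
{
	struct foo_pic *pic = data->chip_data;
	u32 bit = 1 << (data->irq & 31);

	switch (flow_type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_RISING:
		writel(bit, pic->base + FOO_EDGE_RISE);
		return 0;
	case IRQ_TYPE_LEVEL_HIGH:
		writel(bit, pic->base + FOO_LEVEL_HI);
		return 0;
	default:
		/* reject trigger modes the hardware cannot do */
		return -EINVAL;
	}
}

Returning nonzero from here makes __irq_set_trigger() emit the pr_err() shown in the hunk below.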
@@ -446,11 +446,11 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, } /* caller masked out all except trigger mode flags */ - ret = chip->set_type(irq, flags); + ret = chip->irq_set_type(&desc->irq_data, flags); if (ret) - pr_err("setting trigger mode %d for irq %u failed (%pF)\n", - (int)flags, irq, chip->set_type); + pr_err("setting trigger mode %lu for irq %u failed (%pF)\n", + flags, irq, chip->irq_set_type); else { if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) flags |= IRQ_LEVEL; -- cgit v1.2.3-70-g09d2 From 2f7e99bb9be6a2d8d7b808dc86037710cc8b7bf1 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 27 Sep 2010 12:45:50 +0000 Subject: genirq: Provide compat handling for chip->set_wake() Wrap the old chip function set_wake() until the migration is complete and the old chip functions are removed. Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra LKML-Reference: <20100927121842.927527393@linutronix.de> Reviewed-by: H. Peter Anvin Reviewed-by: Ingo Molnar --- kernel/irq/chip.c | 7 +++++++ kernel/irq/manage.c | 4 ++-- 2 files changed, 9 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index b7dd02a99c8..8775dd39ab3 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -381,6 +381,11 @@ static int compat_irq_set_type(struct irq_data *data, unsigned int type) return data->chip->set_type(data->irq, type); } +static int compat_irq_set_wake(struct irq_data *data, unsigned int on) +{ + return data->chip->set_wake(data->irq, on); +} + static void compat_bus_lock(struct irq_data *data) { data->chip->bus_lock(data->irq); @@ -451,6 +456,8 @@ void irq_chip_set_defaults(struct irq_chip *chip) chip->irq_set_affinity = compat_irq_set_affinity; if (chip->set_type) chip->irq_set_type = compat_irq_set_type; + if (chip->set_wake) + chip->irq_set_wake = compat_irq_set_wake; } static inline void mask_ack_irq(struct irq_desc *desc) diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 3618362b3d8..644e8d5fa36 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -337,8 +337,8 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on) struct irq_desc *desc = irq_to_desc(irq); int ret = -ENXIO; - if (desc->irq_data.chip->set_wake) - ret = desc->irq_data.chip->set_wake(irq, on); + if (desc->irq_data.chip->irq_set_wake) + ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on); return ret; } -- cgit v1.2.3-70-g09d2 From 21e2b8c62cca8f7dbec0c8c131ca1637e4a5670f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 27 Sep 2010 12:45:53 +0000 Subject: genirq: Provide compat handling for chip->retrigger() Wrap the old chip function retrigger() until the migration is complete and the old chip functions are removed. Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra LKML-Reference: <20100927121843.025801092@linutronix.de> Reviewed-by: H. 
Peter Anvin Reviewed-by: Ingo Molnar --- kernel/irq/chip.c | 7 +++++++ kernel/irq/resend.c | 4 ++-- 2 files changed, 9 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 8775dd39ab3..f2c4d28c508 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -386,6 +386,11 @@ static int compat_irq_set_wake(struct irq_data *data, unsigned int on) return data->chip->set_wake(data->irq, on); } +static int compat_irq_retrigger(struct irq_data *data) +{ + return data->chip->retrigger(data->irq); +} + static void compat_bus_lock(struct irq_data *data) { data->chip->bus_lock(data->irq); @@ -458,6 +463,8 @@ void irq_chip_set_defaults(struct irq_chip *chip) chip->irq_set_type = compat_irq_set_type; if (chip->set_wake) chip->irq_set_wake = compat_irq_set_wake; + if (chip->retrigger) + chip->irq_retrigger = compat_irq_retrigger; } static inline void mask_ack_irq(struct irq_desc *desc) diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c index a798a2328f8..891115a929a 100644 --- a/kernel/irq/resend.c +++ b/kernel/irq/resend.c @@ -70,8 +70,8 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq) if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) { desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY; - if (!desc->irq_data.chip->retrigger || - !desc->irq_data.chip->retrigger(irq)) { + if (!desc->irq_data.chip->irq_retrigger || + !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) { #ifdef CONFIG_HARDIRQS_SW_RESEND /* Set it pending and activate the softirq: */ set_bit(irq, irqs_resend); -- cgit v1.2.3-70-g09d2 From 540804b5c52065a87d826f7714b18a3ec0b269f9 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Mon, 4 Oct 2010 12:00:02 +0200 Subject: perf_events: Fix invalid pointer when pid is invalid This patch fixes an error in perf_event_open() when the pid provided by the user is invalid. find_lively_task_by_vpid() does not return NULL on error but an error code. Without the fix the error code was silently passed to find_get_context() which would eventually cause an invalid pointer dereference. Signed-off-by: Stephane Eranian Cc: peterz@infradead.org Cc: paulus@samba.org Cc: davem@davemloft.net Cc: fweisbec@gmail.com Cc: perfmon2-devel@lists.sf.net Cc: eranian@gmail.com Cc: robert.richter@amd.com LKML-Reference: <4ca9a5d1.e8e9d80a.3dbb.ffff8f2e@mx.google.com> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index c16158c77df..64507eaa2d9 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -5616,8 +5616,13 @@ SYSCALL_DEFINE5(perf_event_open, } } - if (pid != -1) + if (pid != -1) { task = find_lively_task_by_vpid(pid); + if (IS_ERR(task)) { + err = PTR_ERR(task); + goto err_group_fd; + } + } /* * Get the target context (task or percpu): -- cgit v1.2.3-70-g09d2 From bd151412263a67b5321e9dd1d5b4bf6d96fdebf3 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 1 Oct 2010 15:17:14 +0200 Subject: genirq: Provide config option to disable deprecated code This option now covers the old chip functions and the irq_desc data fields which are moving to struct irq_data. More stuff will follow. Pretty handy for testing a conversion, whether something broke or not.
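To make the end state concrete: a fully converted driver carries only irq_* methods in its irq_chip, so irq_chip_set_defaults() installs no compat_irq_*() trampolines for it, and the chip keeps building once the deprecated fields are configured out. A rough sketch with an invented "foo" chip (all names and register offsets are hypothetical, not code from this series):

#include <linux/io.h>
#include <linux/irq.h>

#define FOO_MASK_SET	0x04	/* hypothetical controller registers */
#define FOO_MASK_CLR	0x08
#define FOO_ACK		0x0c

struct foo_pic {
	void __iomem *base;
};

static void foo_irq_mask(struct irq_data *data)
{
	struct foo_pic *pic = data->chip_data;

	writel(1 << (data->irq & 31), pic->base + FOO_MASK_SET);
}

static void foo_irq_unmask(struct irq_data *data)
{
	struct foo_pic *pic = data->chip_data;

	writel(1 << (data->irq & 31), pic->base + FOO_MASK_CLR);
}

static void foo_irq_ack(struct irq_data *data)
{
	struct foo_pic *pic = data->chip_data;

	writel(1 << (data->irq & 31), pic->base + FOO_ACK);
}

static struct irq_chip foo_irq_chip = {
	.name		= "FOO",
	.irq_mask	= foo_irq_mask,
	.irq_unmask	= foo_irq_unmask,
	.irq_ack	= foo_irq_ack,
};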
Signed-off-by: Thomas Gleixner Reviewed-by: Ingo Molnar --- include/linux/irq.h | 8 +++++++- kernel/irq/Kconfig | 4 ++++ kernel/irq/chip.c | 8 +++++++- kernel/irq/handle.c | 9 +++++++-- kernel/irq/internals.h | 10 ++++++++++ kernel/irq/spurious.c | 6 ++++-- 6 files changed, 39 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/include/linux/irq.h b/include/linux/irq.h index 0c83cbd2df4..82ed8231394 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -155,6 +155,7 @@ struct irq_data { */ struct irq_chip { const char *name; +#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED unsigned int (*startup)(unsigned int irq); void (*shutdown)(unsigned int irq); void (*enable)(unsigned int irq); @@ -175,7 +176,7 @@ struct irq_chip { void (*bus_lock)(unsigned int irq); void (*bus_sync_unlock)(unsigned int irq); - +#endif unsigned int (*irq_startup)(struct irq_data *data); void (*irq_shutdown)(struct irq_data *data); void (*irq_enable)(struct irq_data *data); @@ -225,6 +226,9 @@ struct irq_2_iommu; */ struct irq_desc { +#ifdef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED + struct irq_data irq_data; +#else /* * This union will go away, once we fixed the direct access to * irq_desc all over the place. The direct fields are a 1:1 @@ -247,6 +251,8 @@ struct irq_desc { #endif }; }; +#endif + struct timer_rand_state *timer_rand_state; unsigned int *kstat_irqs; irq_flow_handler_t handle_irq; diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig index e0fc6cd78aa..a42c0191d71 100644 --- a/kernel/irq/Kconfig +++ b/kernel/irq/Kconfig @@ -12,6 +12,10 @@ config GENERIC_HARDIRQS config GENERIC_HARDIRQS_NO__DO_IRQ def_bool y +# Select this to disable the deprecated stuff +config GENERIC_HARDIRQS_NO_DEPRECATED + def_bool n + # Options selectable by the architecture code config HAVE_SPARSE_IRQ def_bool n diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index f2c4d28c508..323547983f1 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -324,6 +324,7 @@ static void default_shutdown(struct irq_data *data) desc->status |= IRQ_MASKED; } +#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED /* Temporary migration helpers */ static void compat_irq_mask(struct irq_data *data) { @@ -400,12 +401,14 @@ static void compat_bus_sync_unlock(struct irq_data *data) { data->chip->bus_sync_unlock(data->irq); } +#endif /* * Fixup enable/disable function pointers */ void irq_chip_set_defaults(struct irq_chip *chip) { +#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED /* * Compat fixup functions need to be before we set the * defaults for enable/disable/startup/shutdown @@ -418,7 +421,7 @@ void irq_chip_set_defaults(struct irq_chip *chip) chip->irq_shutdown = compat_irq_shutdown; if (chip->startup) chip->irq_startup = compat_irq_startup; - +#endif /* * The real defaults */ @@ -437,6 +440,8 @@ void irq_chip_set_defaults(struct irq_chip *chip) if (!chip->irq_shutdown) chip->irq_shutdown = chip->irq_disable != default_disable ? 
chip->irq_disable : default_shutdown; + +#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED if (!chip->end) chip->end = dummy_irq_chip.end; @@ -465,6 +470,7 @@ void irq_chip_set_defaults(struct irq_chip *chip) chip->irq_set_wake = compat_irq_set_wake; if (chip->retrigger) chip->irq_retrigger = compat_irq_retrigger; +#endif } static inline void mask_ack_irq(struct irq_desc *desc) diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 8d0697f892a..3fcef37154a 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -309,7 +309,12 @@ static unsigned int noop_ret(struct irq_data *data) return 0; } +#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED static void compat_noop(unsigned int irq) { } +#define END_INIT .end = compat_noop +#else +#define END_INIT +#endif /* * Generic no controller implementation @@ -321,7 +326,7 @@ struct irq_chip no_irq_chip = { .irq_enable = noop, .irq_disable = noop, .irq_ack = ack_bad, - .end = compat_noop, + END_INIT }; /* @@ -337,7 +342,7 @@ struct irq_chip dummy_irq_chip = { .irq_ack = noop, .irq_mask = noop, .irq_unmask = noop, - .end = compat_noop, + END_INIT }; /* diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index ecafbfee5b1..b905f0ab1bb 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -42,6 +42,16 @@ extern int irq_select_affinity_usr(unsigned int irq); extern void irq_set_thread_affinity(struct irq_desc *desc); +#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED +static inline void irq_end(unsigned int irq, struct irq_desc *desc) +{ + if (desc->irq_data.chip && desc->irq_data.chip->end) + desc->irq_data.chip->end(irq); +} +#else +static inline void irq_end(unsigned int irq, struct irq_desc *desc) { } +#endif + /* Inline functions for support of irq chips on slow busses */ static inline void chip_bus_lock(struct irq_desc *desc) { diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index 9ee704d3a23..3089d3b9d5f 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c @@ -14,6 +14,8 @@ #include #include +#include "internals.h" + static int irqfixup __read_mostly; #define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10) @@ -78,8 +80,8 @@ static int try_one_irq(int irq, struct irq_desc *desc) * If we did actual work for the real IRQ line we must let the * IRQ controller clean up too */ - if (work && desc->irq_data.chip && desc->irq_data.chip->end) - desc->irq_data.chip->end(irq); + if (work) + irq_end(irq, desc); raw_spin_unlock(&desc->lock); return ok; -- cgit v1.2.3-70-g09d2 From db71922217a214e5c9268448e537b54fc1f301ea Mon Sep 17 00:00:00 2001 From: Jan Blunck Date: Sun, 15 Aug 2010 22:51:10 +0200 Subject: BKL: Explicitly add BKL around get_sb/fill_super This patch is a preparation necessary to remove the BKL from do_new_mount(). It explicitly adds calls to lock_kernel()/unlock_kernel() around get_sb/fill_super operations for filesystems that still use the BKL. I've read through all the code formerly covered by the BKL inside do_kern_mount() and have satisfied myself that it doesn't need the BKL any more. do_kern_mount() is already called without the BKL when mounting the rootfs and in nfsctl. do_kern_mount() calls vfs_kern_mount(), which is called from various places without BKL: simple_pin_fs(), nfs_do_clone_mount() through nfs_follow_mountpoint(), afs_mntpt_do_automount() through afs_mntpt_follow_link(). Both latter functions are actually the filesystems' follow_link inode operation. vfs_kern_mount() calls the specified get_sb function and lets the filesystem do its job by calling the given fill_super function.
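Schematically, each converted fill_super gains the shape sketched here -- an invented "foo" filesystem for illustration, not one of the hunks below; the one subtlety is that every error path must drop the BKL again:

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>	/* lock_kernel()/unlock_kernel() */

struct foo_sb_info { int dummy; };	/* hypothetical per-sb info */

static int foo_fill_super(struct super_block *sb, void *data, int silent)
{
	struct foo_sb_info *sbi;

	lock_kernel();		/* was taken by do_kern_mount() until now */

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi) {
		unlock_kernel();	/* error paths must unlock too */
		return -ENOMEM;
	}
	sb->s_fs_info = sbi;

	/* ... filesystem-specific superblock setup ... */

	unlock_kernel();
	return 0;
}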
Therefore I think it is safe to push down the BKL from the VFS to the low-level filesystems get_sb/fill_super operation. [arnd: do not add the BKL to those file systems that already don't use it elsewhere] Signed-off-by: Jan Blunck Signed-off-by: Arnd Bergmann Cc: Matthew Wilcox Cc: Christoph Hellwig --- fs/adfs/super.c | 8 +++++++- fs/affs/super.c | 9 ++++++++- fs/afs/super.c | 5 +++++ fs/bfs/inode.c | 8 +++++++- fs/cifs/cifsfs.c | 12 ++++++++++-- fs/coda/inode.c | 8 +++++++- fs/ecryptfs/main.c | 4 ++++ fs/ext2/super.c | 10 ++++++++-- fs/ext3/super.c | 9 ++++++++- fs/ext4/super.c | 8 ++++++-- fs/fat/namei_msdos.c | 7 ++++++- fs/fat/namei_vfat.c | 7 ++++++- fs/freevxfs/vxfs_super.c | 7 ++++++- fs/hfs/super.c | 8 +++++++- fs/hpfs/super.c | 8 +++++++- fs/isofs/inode.c | 8 +++++++- fs/jffs2/super.c | 11 +++++++++-- fs/jfs/super.c | 12 ++++++++++-- fs/nilfs2/super.c | 8 +++++++- fs/ntfs/super.c | 5 +++++ fs/ocfs2/dlmfs/dlmfs.c | 9 ++++++++- fs/ocfs2/super.c | 5 +++++ fs/qnx4/inode.c | 8 +++++++- fs/smbfs/inode.c | 5 +++++ fs/squashfs/super.c | 6 ++++++ fs/udf/super.c | 8 +++++++- fs/ufs/super.c | 5 +++++ kernel/cgroup.c | 4 ++++ 28 files changed, 187 insertions(+), 25 deletions(-) (limited to 'kernel') diff --git a/fs/adfs/super.c b/fs/adfs/super.c index 4a3af7075c1..d9803f73236 100644 --- a/fs/adfs/super.c +++ b/fs/adfs/super.c @@ -352,11 +352,15 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent) struct adfs_sb_info *asb; struct inode *root; + lock_kernel(); + sb->s_flags |= MS_NODIRATIME; asb = kzalloc(sizeof(*asb), GFP_KERNEL); - if (!asb) + if (!asb) { + unlock_kernel(); return -ENOMEM; + } sb->s_fs_info = asb; /* set default options */ @@ -474,6 +478,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent) goto error; } else sb->s_root->d_op = &adfs_dentry_operations; + unlock_kernel(); return 0; error_free_bh: @@ -481,6 +486,7 @@ error_free_bh: error: sb->s_fs_info = NULL; kfree(asb); + unlock_kernel(); return -EINVAL; } diff --git a/fs/affs/super.c b/fs/affs/super.c index 33c4e7eef47..3a6d1dee4a5 100644 --- a/fs/affs/super.c +++ b/fs/affs/super.c @@ -291,6 +291,8 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent) u8 sig[4]; int ret = -EINVAL; + lock_kernel(); + save_mount_options(sb, data); pr_debug("AFFS: read_super(%s)\n",data ? (const char *)data : "no options"); @@ -300,8 +302,10 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent) sb->s_flags |= MS_NODIRATIME; sbi = kzalloc(sizeof(struct affs_sb_info), GFP_KERNEL); - if (!sbi) + if (!sbi) { + unlock_kernel(); return -ENOMEM; + } sb->s_fs_info = sbi; mutex_init(&sbi->s_bmlock); spin_lock_init(&sbi->symlink_lock); @@ -312,6 +316,7 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent) printk(KERN_ERR "AFFS: Error parsing options\n"); kfree(sbi->s_prefix); kfree(sbi); + unlock_kernel(); return -EINVAL; } /* N.B. 
after this point s_prefix must be released */ @@ -482,6 +487,7 @@ got_root: sb->s_root->d_op = &affs_dentry_operations; pr_debug("AFFS: s_flags=%lX\n",sb->s_flags); + unlock_kernel(); return 0; /* @@ -496,6 +502,7 @@ out_error_noinode: kfree(sbi->s_prefix); kfree(sbi); sb->s_fs_info = NULL; + unlock_kernel(); return ret; } diff --git a/fs/afs/super.c b/fs/afs/super.c index 77e1e5a6115..6c2fef44d38 100644 --- a/fs/afs/super.c +++ b/fs/afs/super.c @@ -302,12 +302,15 @@ static int afs_fill_super(struct super_block *sb, void *data) struct inode *inode = NULL; int ret; + lock_kernel(); + _enter(""); /* allocate a superblock info record */ as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL); if (!as) { _leave(" = -ENOMEM"); + unlock_kernel(); return -ENOMEM; } @@ -341,6 +344,7 @@ static int afs_fill_super(struct super_block *sb, void *data) sb->s_root = root; _leave(" = 0"); + unlock_kernel(); return 0; error_inode: @@ -354,6 +358,7 @@ error: sb->s_fs_info = NULL; _leave(" = %d", ret); + unlock_kernel(); return ret; } diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c index c4daf0f5fc0..d2e09363dd9 100644 --- a/fs/bfs/inode.c +++ b/fs/bfs/inode.c @@ -322,9 +322,13 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) int ret = -EINVAL; unsigned long i_sblock, i_eblock, i_eoff, s_size; + lock_kernel(); + info = kzalloc(sizeof(*info), GFP_KERNEL); - if (!info) + if (!info) { + unlock_kernel(); return -ENOMEM; + } mutex_init(&info->bfs_lock); s->s_fs_info = info; @@ -439,6 +443,7 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) brelse(bh); brelse(sbh); dump_imap("read_super", s); + unlock_kernel(); return 0; out3: @@ -452,6 +457,7 @@ out: mutex_destroy(&info->bfs_lock); kfree(info); s->s_fs_info = NULL; + unlock_kernel(); return ret; } diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index b7431afdd76..070bf1aecd2 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -514,22 +514,30 @@ cifs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, struct vfsmount *mnt) { int rc; - struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL); + struct super_block *sb; + + lock_kernel(); + + sb = sget(fs_type, NULL, set_anon_super, NULL); cFYI(1, "Devname: %s flags: %d ", dev_name, flags); - if (IS_ERR(sb)) + if (IS_ERR(sb)) { + unlock_kernel(); return PTR_ERR(sb); + } sb->s_flags = flags; rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 
1 : 0); if (rc) { deactivate_locked_super(sb); + unlock_kernel(); return rc; } sb->s_flags |= MS_ACTIVE; simple_set_mnt(mnt, sb); + unlock_kernel(); return 0; } diff --git a/fs/coda/inode.c b/fs/coda/inode.c index 6526e6f21ec..bfe8179b129 100644 --- a/fs/coda/inode.c +++ b/fs/coda/inode.c @@ -148,6 +148,8 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent) int error; int idx; + lock_kernel(); + idx = get_device_index((struct coda_mount_data *) data); /* Ignore errors in data, for backward compatibility */ @@ -159,11 +161,13 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent) vc = &coda_comms[idx]; if (!vc->vc_inuse) { printk("coda_read_super: No pseudo device\n"); + unlock_kernel(); return -EINVAL; } if ( vc->vc_sb ) { printk("coda_read_super: Device already mounted\n"); + unlock_kernel(); return -EBUSY; } @@ -202,7 +206,8 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent) sb->s_root = d_alloc_root(root); if (!sb->s_root) goto error; - return 0; + unlock_kernel(); + return 0; error: bdi_destroy(&vc->bdi); @@ -212,6 +217,7 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent) if (vc) vc->vc_sb = NULL; + unlock_kernel(); return -EINVAL; } diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index cbd4e18adb2..c4af92fa12c 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c @@ -36,6 +36,7 @@ #include #include #include +#include /* For lock_kernel() */ #include "ecryptfs_kernel.h" /** @@ -550,6 +551,7 @@ static int ecryptfs_get_sb(struct file_system_type *fs_type, int flags, const char *err = "Getting sb failed"; int rc; + lock_kernel(); sbi = kmem_cache_zalloc(ecryptfs_sb_info_cache, GFP_KERNEL); if (!sbi) { rc = -ENOMEM; @@ -608,6 +610,7 @@ static int ecryptfs_get_sb(struct file_system_type *fs_type, int flags, goto out; } simple_set_mnt(mnt, s); + unlock_kernel(); return 0; out: @@ -616,6 +619,7 @@ out: kmem_cache_free(ecryptfs_sb_info_cache, sbi); } printk(KERN_ERR "%s; rc = [%d]\n", err, rc); + unlock_kernel(); return rc; } diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 1ec602673ea..f98c390caf1 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -747,15 +747,18 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) __le32 features; int err; + lock_kernel(); + + err = -ENOMEM; sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); if (!sbi) - return -ENOMEM; + goto failed_unlock; sbi->s_blockgroup_lock = kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL); if (!sbi->s_blockgroup_lock) { kfree(sbi); - return -ENOMEM; + goto failed_unlock; } sb->s_fs_info = sbi; sbi->s_sb_block = sb_block; @@ -1083,6 +1086,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) if (ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY)) sb->s_flags |= MS_RDONLY; ext2_write_super(sb); + unlock_kernel(); return 0; cantfind_ext2: @@ -1107,6 +1111,8 @@ failed_sbi: sb->s_fs_info = NULL; kfree(sbi->s_blockgroup_lock); kfree(sbi); +failed_unlock: + unlock_kernel(); return ret; } diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 5dbf4dba03c..41f9dcd73e9 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -1611,14 +1611,19 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) __le32 features; int err; + lock_kernel(); + sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); - if (!sbi) + if (!sbi) { + unlock_kernel(); return -ENOMEM; + } sbi->s_blockgroup_lock = kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL); if (!sbi->s_blockgroup_lock) { kfree(sbi); + 
unlock_kernel(); return -ENOMEM; } sb->s_fs_info = sbi; @@ -2026,6 +2031,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) "writeback"); lock_kernel(); + unlock_kernel(); return 0; cantfind_ext3: @@ -2056,6 +2062,7 @@ out_fail: kfree(sbi->s_blockgroup_lock); kfree(sbi); lock_kernel(); + unlock_kernel(); return ret; } diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 26147746c27..0f0021f4990 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -2568,6 +2568,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) int err; unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO; + lock_kernel(); + sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); if (!sbi) goto out_free_orig; @@ -3166,7 +3168,6 @@ no_journal: if (es->s_error_count) mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */ - lock_kernel(); kfree(orig_data); return 0; @@ -3213,8 +3214,11 @@ out_fail: sb->s_fs_info = NULL; kfree(sbi->s_blockgroup_lock); kfree(sbi); - lock_kernel(); + kfree(orig_data); + return ret; + out_free_orig: + unlock_kernel(); kfree(orig_data); return ret; } diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c index bbc94ae4fd7..e2b0b978340 100644 --- a/fs/fat/namei_msdos.c +++ b/fs/fat/namei_msdos.c @@ -9,6 +9,7 @@ #include #include #include +#include /* For lock_kernel() */ #include "fat.h" /* Characters that are undesirable in an MS-DOS file name */ @@ -662,12 +663,16 @@ static int msdos_fill_super(struct super_block *sb, void *data, int silent) { int res; + lock_kernel(); res = fat_fill_super(sb, data, silent, &msdos_dir_inode_operations, 0); - if (res) + if (res) { + unlock_kernel(); return res; + } sb->s_flags |= MS_NOATIME; sb->s_root->d_op = &msdos_dentry_operations; + unlock_kernel(); return 0; } diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c index 6fcc7e71fba..9006ad9c7b1 100644 --- a/fs/fat/namei_vfat.c +++ b/fs/fat/namei_vfat.c @@ -21,6 +21,7 @@ #include #include #include +#include /* For lock_kernel() */ #include "fat.h" /* @@ -1055,15 +1056,19 @@ static int vfat_fill_super(struct super_block *sb, void *data, int silent) { int res; + lock_kernel(); res = fat_fill_super(sb, data, silent, &vfat_dir_inode_operations, 1); - if (res) + if (res) { + unlock_kernel(); return res; + } if (MSDOS_SB(sb)->options.name_check != 's') sb->s_root->d_op = &vfat_ci_dentry_ops; else sb->s_root->d_op = &vfat_dentry_ops; + unlock_kernel(); return 0; } diff --git a/fs/freevxfs/vxfs_super.c b/fs/freevxfs/vxfs_super.c index dc0c041e85c..eb2b9e09c99 100644 --- a/fs/freevxfs/vxfs_super.c +++ b/fs/freevxfs/vxfs_super.c @@ -148,7 +148,7 @@ static int vxfs_remount(struct super_block *sb, int *flags, char *data) * The superblock on success, else %NULL. * * Locking: - * We are under the bkl and @sbp->s_lock. + * We are under @sbp->s_lock. 
*/ static int vxfs_fill_super(struct super_block *sbp, void *dp, int silent) { @@ -159,11 +159,14 @@ static int vxfs_fill_super(struct super_block *sbp, void *dp, int silent) struct inode *root; int ret = -EINVAL; + lock_kernel(); + sbp->s_flags |= MS_RDONLY; infp = kzalloc(sizeof(*infp), GFP_KERNEL); if (!infp) { printk(KERN_WARNING "vxfs: unable to allocate incore superblock\n"); + unlock_kernel(); return -ENOMEM; } @@ -236,6 +239,7 @@ static int vxfs_fill_super(struct super_block *sbp, void *dp, int silent) goto out_free_ilist; } + unlock_kernel(); return 0; out_free_ilist: @@ -245,6 +249,7 @@ out_free_ilist: out: brelse(bp); kfree(infp); + unlock_kernel(); return ret; } diff --git a/fs/hfs/super.c b/fs/hfs/super.c index 34235d4bf08..3069416fa8e 100644 --- a/fs/hfs/super.c +++ b/fs/hfs/super.c @@ -382,9 +382,13 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent) struct inode *root_inode; int res; + lock_kernel(); + sbi = kzalloc(sizeof(struct hfs_sb_info), GFP_KERNEL); - if (!sbi) + if (!sbi) { + unlock_kernel(); return -ENOMEM; + } sb->s_fs_info = sbi; INIT_HLIST_HEAD(&sbi->rsrc_inodes); @@ -435,6 +439,7 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent) sb->s_root->d_op = &hfs_dentry_operations; /* everything's okay */ + unlock_kernel(); return 0; bail_iput: @@ -443,6 +448,7 @@ bail_no_root: printk(KERN_ERR "hfs: get root inode failed.\n"); bail: hfs_mdb_put(sb); + unlock_kernel(); return res; } diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c index 2607010be2f..c969a1aa163 100644 --- a/fs/hpfs/super.c +++ b/fs/hpfs/super.c @@ -477,11 +477,15 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) int o; + lock_kernel(); + save_mount_options(s, options); sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); - if (!sbi) + if (!sbi) { + unlock_kernel(); return -ENOMEM; + } s->s_fs_info = sbi; sbi->sb_bmp_dir = NULL; @@ -666,6 +670,7 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) root->i_blocks = 5; hpfs_brelse4(&qbh); } + unlock_kernel(); return 0; bail4: brelse(bh2); @@ -677,6 +682,7 @@ bail0: kfree(sbi->sb_cp_table); s->s_fs_info = NULL; kfree(sbi); + unlock_kernel(); return -EINVAL; } diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index 5a44811b502..05baf7721e8 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -571,11 +571,15 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent) int table, error = -EINVAL; unsigned int vol_desc_start; + lock_kernel(); + save_mount_options(s, data); sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); - if (!sbi) + if (!sbi) { + unlock_kernel(); return -ENOMEM; + } s->s_fs_info = sbi; if (!parse_options((char *)data, &opt)) @@ -900,6 +904,7 @@ root_found: kfree(opt.iocharset); + unlock_kernel(); return 0; /* @@ -939,6 +944,7 @@ out_freesbi: kfree(opt.iocharset); kfree(sbi); s->s_fs_info = NULL; + unlock_kernel(); return error; } diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c index 662bba09950..58dd9cf0620 100644 --- a/fs/jffs2/super.c +++ b/fs/jffs2/super.c @@ -146,14 +146,19 @@ static const struct super_operations jffs2_super_operations = static int jffs2_fill_super(struct super_block *sb, void *data, int silent) { struct jffs2_sb_info *c; + int ret; + + lock_kernel(); D1(printk(KERN_DEBUG "jffs2_get_sb_mtd():" " New superblock for device %d (\"%s\")\n", sb->s_mtd->index, sb->s_mtd->name)); c = kzalloc(sizeof(*c), GFP_KERNEL); - if (!c) + if (!c) { + unlock_kernel(); return -ENOMEM; + } c->mtd = sb->s_mtd; c->os_priv = sb; @@ -175,7 
+180,9 @@ static int jffs2_fill_super(struct super_block *sb, void *data, int silent) #ifdef CONFIG_JFFS2_FS_POSIX_ACL sb->s_flags |= MS_POSIXACL; #endif - return jffs2_do_fill_super(sb, data, silent); + ret = jffs2_do_fill_super(sb, data, silent); + unlock_kernel(); + return ret; } static int jffs2_get_sb(struct file_system_type *fs_type, diff --git a/fs/jfs/super.c b/fs/jfs/super.c index ec8c3e4baca..eb31f677347 100644 --- a/fs/jfs/super.c +++ b/fs/jfs/super.c @@ -438,14 +438,20 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent) s64 newLVSize = 0; int flag, ret = -EINVAL; + lock_kernel(); + jfs_info("In jfs_read_super: s_flags=0x%lx", sb->s_flags); - if (!new_valid_dev(sb->s_bdev->bd_dev)) + if (!new_valid_dev(sb->s_bdev->bd_dev)) { + unlock_kernel(); return -EOVERFLOW; + } sbi = kzalloc(sizeof (struct jfs_sb_info), GFP_KERNEL); - if (!sbi) + if (!sbi) { + unlock_kernel(); return -ENOMEM; + } sb->s_fs_info = sbi; sbi->sb = sb; sbi->uid = sbi->gid = sbi->umask = -1; @@ -542,6 +548,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent) sb->s_maxbytes = min(((u64) PAGE_CACHE_SIZE << 32) - 1, (u64)sb->s_maxbytes); #endif sb->s_time_gran = 1; + unlock_kernel(); return 0; out_no_root: @@ -564,6 +571,7 @@ out_unload: unload_nls(sbi->nls_tab); out_kfree: kfree(sbi); + unlock_kernel(); return ret; } diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 922263393c7..0d573c2a686 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -1113,9 +1113,12 @@ nilfs_get_sb(struct file_system_type *fs_type, int flags, if (!(flags & MS_RDONLY)) mode |= FMODE_WRITE; + lock_kernel(); sd.bdev = open_bdev_exclusive(dev_name, mode, fs_type); - if (IS_ERR(sd.bdev)) + if (IS_ERR(sd.bdev)) { + unlock_kernel(); return PTR_ERR(sd.bdev); + } /* * To get mount instance using sget() vfs-routine, NILFS needs @@ -1198,6 +1201,7 @@ nilfs_get_sb(struct file_system_type *fs_type, int flags, if (need_to_close) close_bdev_exclusive(sd.bdev, mode); simple_set_mnt(mnt, s); + unlock_kernel(); return 0; failed_unlock: @@ -1206,6 +1210,7 @@ nilfs_get_sb(struct file_system_type *fs_type, int flags, failed: close_bdev_exclusive(sd.bdev, mode); + unlock_kernel(); return err; cancel_new: @@ -1218,6 +1223,7 @@ nilfs_get_sb(struct file_system_type *fs_type, int flags, * We must finish all post-cleaning before this call; * put_nilfs() needs the block device. */ + unlock_kernel(); return err; } diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c index 512806171bf..1f31e77fc41 100644 --- a/fs/ntfs/super.c +++ b/fs/ntfs/super.c @@ -2732,6 +2732,8 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent) struct inode *tmp_ino; int blocksize, result; + lock_kernel(); + /* * We do a pretty difficult piece of bootstrap by reading the * MFT (and other metadata) from disk into memory. We'll only @@ -2755,6 +2757,7 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent) ntfs_error(sb, "Allocation of NTFS volume structure " "failed. Aborting mount..."); lockdep_on(); + unlock_kernel(); return -ENOMEM; } /* Initialize ntfs_volume structure. 
*/ @@ -2942,6 +2945,7 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent) sb->s_export_op = &ntfs_export_ops; lock_kernel(); lockdep_on(); + unlock_kernel(); return 0; } ntfs_error(sb, "Failed to allocate root directory."); @@ -3062,6 +3066,7 @@ err_out_now: kfree(vol); ntfs_debug("Failed, returning -EINVAL."); lockdep_on(); + unlock_kernel(); return -EINVAL; } diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c index c2903b84bb7..667d7ceba8c 100644 --- a/fs/ocfs2/dlmfs/dlmfs.c +++ b/fs/ocfs2/dlmfs/dlmfs.c @@ -44,6 +44,7 @@ #include #include #include +#include #include @@ -588,21 +589,27 @@ static int dlmfs_fill_super(struct super_block * sb, struct inode * inode; struct dentry * root; + lock_kernel(); + sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; sb->s_magic = DLMFS_MAGIC; sb->s_op = &dlmfs_ops; inode = dlmfs_get_root_inode(sb); - if (!inode) + if (!inode) { + unlock_kernel(); return -ENOMEM; + } root = d_alloc_root(inode); if (!root) { iput(inode); + unlock_kernel(); return -ENOMEM; } sb->s_root = root; + unlock_kernel(); return 0; } diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index fa1be1b304d..b7e4f2d19d4 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -1002,6 +1002,8 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) char nodestr[8]; struct ocfs2_blockcheck_stats stats; + lock_kernel(); + mlog_entry("%p, %p, %i", sb, data, silent); if (!ocfs2_parse_options(sb, data, &parsed_options, 0)) { @@ -1179,6 +1181,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) atomic_set(&osb->vol_state, VOLUME_DISABLED); wake_up(&osb->osb_mount_event); mlog_exit(status); + unlock_kernel(); return status; } } @@ -1193,6 +1196,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) ocfs2_orphan_scan_start(osb); mlog_exit(status); + unlock_kernel(); return status; read_super_error: @@ -1208,6 +1212,7 @@ read_super_error: } mlog_exit(status); + unlock_kernel(); return status; } diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c index 16829722be9..86a7be1399a 100644 --- a/fs/qnx4/inode.c +++ b/fs/qnx4/inode.c @@ -234,9 +234,13 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent) struct qnx4_sb_info *qs; int ret = -EINVAL; + lock_kernel(); + qs = kzalloc(sizeof(struct qnx4_sb_info), GFP_KERNEL); - if (!qs) + if (!qs) { + unlock_kernel(); return -ENOMEM; + } s->s_fs_info = qs; sb_set_blocksize(s, QNX4_BLOCK_SIZE); @@ -284,6 +288,7 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent) brelse(bh); + unlock_kernel(); return 0; outi: @@ -293,6 +298,7 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent) outnobh: kfree(qs); s->s_fs_info = NULL; + unlock_kernel(); return ret; } diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c index 450c9194198..8fc5e50e142 100644 --- a/fs/smbfs/inode.c +++ b/fs/smbfs/inode.c @@ -501,6 +501,8 @@ static int smb_fill_super(struct super_block *sb, void *raw_data, int silent) void *mem; static int warn_count; + lock_kernel(); + if (warn_count < 5) { warn_count++; printk(KERN_EMERG "smbfs is deprecated and will be removed" @@ -621,6 +623,7 @@ static int smb_fill_super(struct super_block *sb, void *raw_data, int silent) smb_new_dentry(sb->s_root); + unlock_kernel(); return 0; out_no_root: @@ -643,9 +646,11 @@ out_wrong_data: out_no_data: printk(KERN_ERR "smb_fill_super: missing data argument\n"); out_fail: + unlock_kernel(); 
return -EINVAL; out_no_server: printk(KERN_ERR "smb_fill_super: cannot allocate struct smb_sb_info\n"); + unlock_kernel(); return -ENOMEM; } diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c index 88b4f860665..ab1a401a0e1 100644 --- a/fs/squashfs/super.c +++ b/fs/squashfs/super.c @@ -87,11 +87,14 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent) u64 lookup_table_start, xattr_id_table_start; int err; + lock_kernel(); + TRACE("Entered squashfs_fill_superblock\n"); sb->s_fs_info = kzalloc(sizeof(*msblk), GFP_KERNEL); if (sb->s_fs_info == NULL) { ERROR("Failed to allocate squashfs_sb_info\n"); + unlock_kernel(); return -ENOMEM; } msblk = sb->s_fs_info; @@ -301,6 +304,7 @@ allocate_root: TRACE("Leaving squashfs_fill_super\n"); kfree(sblk); + unlock_kernel(); return 0; failed_mount: @@ -315,11 +319,13 @@ failed_mount: kfree(sb->s_fs_info); sb->s_fs_info = NULL; kfree(sblk); + unlock_kernel(); return err; failure: kfree(sb->s_fs_info); sb->s_fs_info = NULL; + unlock_kernel(); return -ENOMEM; } diff --git a/fs/udf/super.c b/fs/udf/super.c index 65412d84a45..76f3d6d97b4 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -1880,6 +1880,8 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) struct kernel_lb_addr rootdir, fileset; struct udf_sb_info *sbi; + lock_kernel(); + uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT); uopt.uid = -1; uopt.gid = -1; @@ -1888,8 +1890,10 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) uopt.dmode = UDF_INVALID_MODE; sbi = kzalloc(sizeof(struct udf_sb_info), GFP_KERNEL); - if (!sbi) + if (!sbi) { + unlock_kernel(); return -ENOMEM; + } sb->s_fs_info = sbi; @@ -2035,6 +2039,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) goto error_out; } sb->s_maxbytes = MAX_LFS_FILESIZE; + unlock_kernel(); return 0; error_out: @@ -2055,6 +2060,7 @@ error_out: kfree(sbi); sb->s_fs_info = NULL; + unlock_kernel(); return -EINVAL; } diff --git a/fs/ufs/super.c b/fs/ufs/super.c index d510c1b9181..6b9be90dae7 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c @@ -696,6 +696,8 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent) unsigned maxsymlen; int ret = -EINVAL; + lock_kernel(); + uspi = NULL; ubh = NULL; flags = 0; @@ -1163,6 +1165,7 @@ magic_found: goto failed; UFSD("EXIT\n"); + unlock_kernel(); return 0; dalloc_failed: @@ -1174,10 +1177,12 @@ failed: kfree(sbi); sb->s_fs_info = NULL; UFSD("EXIT (FAILED)\n"); + unlock_kernel(); return ret; failed_nomem: UFSD("EXIT (NOMEM)\n"); + unlock_kernel(); return -ENOMEM; } diff --git a/kernel/cgroup.c b/kernel/cgroup.c index c9483d8f614..a7ba3bccadc 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1430,6 +1430,8 @@ static int cgroup_get_sb(struct file_system_type *fs_type, struct super_block *sb; struct cgroupfs_root *new_root; + lock_kernel(); + /* First find the desired set of subsystems */ mutex_lock(&cgroup_mutex); ret = parse_cgroupfs_options(data, &opts); @@ -1559,6 +1561,7 @@ static int cgroup_get_sb(struct file_system_type *fs_type, simple_set_mnt(mnt, sb); kfree(opts.release_agent); kfree(opts.name); + unlock_kernel(); return 0; drop_new_super: @@ -1568,6 +1571,7 @@ static int cgroup_get_sb(struct file_system_type *fs_type, out_err: kfree(opts.release_agent); kfree(opts.name); + unlock_kernel(); return ret; } -- cgit v1.2.3-70-g09d2 From 38d018dba3f725b969f196550d92a6ec1c092428 Mon Sep 17 00:00:00 2001 From: Jan Blunck Date: Wed, 24 Feb 2010 13:25:34 +0100 
Subject: BKL: Remove BKL from cgroup The BKL is only used in remount_fs and get_sb that are both protected by the superblocks s_umount rw_semaphore. Therefore it is safe to remove the BKL entirely. Signed-off-by: Jan Blunck Signed-off-by: Arnd Bergmann --- kernel/cgroup.c | 8 -------- 1 file changed, 8 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index a7ba3bccadc..304d2775994 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -52,7 +52,6 @@ #include #include #include -#include #include #include #include /* TODO: replace with more sophisticated array */ @@ -1222,7 +1221,6 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data) struct cgroup *cgrp = &root->top_cgroup; struct cgroup_sb_opts opts; - lock_kernel(); mutex_lock(&cgrp->dentry->d_inode->i_mutex); mutex_lock(&cgroup_mutex); @@ -1255,7 +1253,6 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data) kfree(opts.name); mutex_unlock(&cgroup_mutex); mutex_unlock(&cgrp->dentry->d_inode->i_mutex); - unlock_kernel(); return ret; } @@ -1430,8 +1427,6 @@ static int cgroup_get_sb(struct file_system_type *fs_type, struct super_block *sb; struct cgroupfs_root *new_root; - lock_kernel(); - /* First find the desired set of subsystems */ mutex_lock(&cgroup_mutex); ret = parse_cgroupfs_options(data, &opts); @@ -1561,7 +1556,6 @@ static int cgroup_get_sb(struct file_system_type *fs_type, simple_set_mnt(mnt, sb); kfree(opts.release_agent); kfree(opts.name); - unlock_kernel(); return 0; drop_new_super: @@ -1571,8 +1565,6 @@ static int cgroup_get_sb(struct file_system_type *fs_type, out_err: kfree(opts.release_agent); kfree(opts.name); - unlock_kernel(); - return ret; } -- cgit v1.2.3-70-g09d2 From 97bd234701b2b39a0e749c1fe0e44f1d14c94292 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 5 Oct 2010 10:41:14 +0200 Subject: workqueue: prepare for more tracepoints Define workqueue_work event class and use it for workqueue_execute_end trace point. Also, move trace/events/workqueue.h include downwards such that all struct definitions are visible to it. This is to prepare for more tracepoints and doesn't cause any functional change. Signed-off-by: Tejun Heo Cc: Frederic Weisbecker --- include/trace/events/workqueue.h | 32 +++++++++++++++++++------------- kernel/workqueue.c | 6 +++--- 2 files changed, 22 insertions(+), 16 deletions(-) (limited to 'kernel') diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h index 49682d7e9d6..ec9d7244eb9 100644 --- a/include/trace/events/workqueue.h +++ b/include/trace/events/workqueue.h @@ -7,13 +7,7 @@ #include #include -/** - * workqueue_execute_start - called immediately before the workqueue callback - * @work: pointer to struct work_struct - * - * Allows to track workqueue execution. - */ -TRACE_EVENT(workqueue_execute_start, +DECLARE_EVENT_CLASS(workqueue_work, TP_PROTO(struct work_struct *work), @@ -21,24 +15,22 @@ TRACE_EVENT(workqueue_execute_start, TP_STRUCT__entry( __field( void *, work ) - __field( void *, function) ), TP_fast_assign( __entry->work = work; - __entry->function = work->func; ), - TP_printk("work struct %p: function %pf", __entry->work, __entry->function) + TP_printk("work struct %p", __entry->work) ); /** - * workqueue_execute_end - called immediately before the workqueue callback + * workqueue_execute_start - called immediately before the workqueue callback * @work: pointer to struct work_struct * * Allows to track workqueue execution. 
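
In general form, the refactoring above follows the standard event-class pattern; a sketch as it would appear in a trace header under include/trace/events/ (my_class and my_event are illustrative names, and the usual TRACE_SYSTEM boilerplate is omitted):

/* Declare the shared layout and output format once... */
DECLARE_EVENT_CLASS(my_class,

	TP_PROTO(struct work_struct *work),

	TP_ARGS(work),

	TP_STRUCT__entry(
		__field( void *, work )
	),

	TP_fast_assign(
		__entry->work = work;
	),

	TP_printk("work struct %p", __entry->work)
);

/* ...then stamp out each tracepoint that shares that layout. */
DEFINE_EVENT(my_class, my_event,

	TP_PROTO(struct work_struct *work),

	TP_ARGS(work)
);
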
*/ -TRACE_EVENT(workqueue_execute_end, +TRACE_EVENT(workqueue_execute_start, TP_PROTO(struct work_struct *work), @@ -46,15 +38,29 @@ TRACE_EVENT(workqueue_execute_end, TP_STRUCT__entry( __field( void *, work ) + __field( void *, function) ), TP_fast_assign( __entry->work = work; + __entry->function = work->func; ), - TP_printk("work struct %p", __entry->work) + TP_printk("work struct %p: function %pf", __entry->work, __entry->function) ); +/** + * workqueue_execute_end - called immediately before the workqueue callback + * @work: pointer to struct work_struct + * + * Allows to track workqueue execution. + */ +DEFINE_EVENT(workqueue_work, workqueue_execute_end, + + TP_PROTO(struct work_struct *work), + + TP_ARGS(work) +); #endif /* _TRACE_WORKQUEUE_H */ diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 19e4bc15ee9..026f778e879 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -42,9 +42,6 @@ #include #include -#define CREATE_TRACE_POINTS -#include - #include "workqueue_sched.h" enum { @@ -257,6 +254,9 @@ EXPORT_SYMBOL_GPL(system_long_wq); EXPORT_SYMBOL_GPL(system_nrt_wq); EXPORT_SYMBOL_GPL(system_unbound_wq); +#define CREATE_TRACE_POINTS +#include + #define for_each_busy_worker(worker, i, pos, gcwq) \ for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) \ hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry) -- cgit v1.2.3-70-g09d2 From cdadf0097cdca06c497ffaeb5982e028c6e4ed38 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 5 Oct 2010 10:49:55 +0200 Subject: workqueue: add queue_work and activate_work trace points These two tracepoints allow tracking when and how a work is queued and activated. This patch is based on Frederic's patch to add queue_work trace point. Signed-off-by: Tejun Heo Cc: Frederic Weisbecker --- include/trace/events/workqueue.h | 53 ++++++++++++++++++++++++++++++++++++++++ kernel/workqueue.c | 3 +++ 2 files changed, 56 insertions(+) (limited to 'kernel') diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h index ec9d7244eb9..7d497291c85 100644 --- a/include/trace/events/workqueue.h +++ b/include/trace/events/workqueue.h @@ -24,6 +24,59 @@ DECLARE_EVENT_CLASS(workqueue_work, TP_printk("work struct %p", __entry->work) ); +/** + * workqueue_queue_work - called when a work gets queued + * @req_cpu: the requested cpu + * @cwq: pointer to struct cpu_workqueue_struct + * @work: pointer to struct work_struct + * + * This event occurs when a work is queued immediately or once a + * delayed work is actually queued on a workqueue (ie: once the delay + * has been reached). + */ +TRACE_EVENT(workqueue_queue_work, + + TP_PROTO(unsigned int req_cpu, struct cpu_workqueue_struct *cwq, + struct work_struct *work), + + TP_ARGS(req_cpu, cwq, work), + + TP_STRUCT__entry( + __field( void *, work ) + __field( void *, function) + __field( void *, workqueue) + __field( unsigned int, req_cpu ) + __field( unsigned int, cpu ) + ), + + TP_fast_assign( + __entry->work = work; + __entry->function = work->func; + __entry->workqueue = cwq->wq; + __entry->req_cpu = req_cpu; + __entry->cpu = cwq->gcwq->cpu; + ), + + TP_printk("work struct=%p function=%pf workqueue=%p req_cpu=%u cpu=%u", + __entry->work, __entry->function, __entry->workqueue, + __entry->req_cpu, __entry->cpu) +); + +/** + * workqueue_activate_work - called when a work gets activated + * @work: pointer to struct work_struct + * + * This event occurs when a queued work is put on the active queue, + * which happens immediately after queueing unless @max_active limit + * is reached. 
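
For orientation, nothing changes for users of the workqueue API; the probes fire from the ordinary submission path. A sketch (my_wq and my_work_fn are illustrative):

#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;	/* illustrative */
static struct work_struct my_work;

static void my_work_fn(struct work_struct *work)
{
	/* workqueue_execute_start/end fire around this callback */
}

static int __init my_init(void)
{
	my_wq = alloc_workqueue("my_wq", 0, 1);	/* @max_active == 1 */
	if (!my_wq)
		return -ENOMEM;

	INIT_WORK(&my_work, my_work_fn);

	/*
	 * workqueue_queue_work fires here; workqueue_activate_work
	 * follows immediately unless @max_active is already consumed,
	 * in which case it fires later from the delayed queue.
	 */
	queue_work(my_wq, &my_work);
	return 0;
}
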
+ */ +DEFINE_EVENT(workqueue_work, workqueue_activate_work, + + TP_PROTO(struct work_struct *work), + + TP_ARGS(work) +); + /** * workqueue_execute_start - called immediately before the workqueue callback * @work: pointer to struct work_struct diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 026f778e879..cb2ccfbed0c 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -997,6 +997,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, /* gcwq determined, get cwq and queue */ cwq = get_cwq(gcwq->cpu, wq); + trace_workqueue_queue_work(cpu, cwq, work); BUG_ON(!list_empty(&work->entry)); @@ -1004,6 +1005,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, work_flags = work_color_to_flags(cwq->work_color); if (likely(cwq->nr_active < cwq->max_active)) { + trace_workqueue_activate_work(work); cwq->nr_active++; worklist = gcwq_determine_ins_pos(gcwq, cwq); } else { @@ -1679,6 +1681,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) struct work_struct, entry); struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq); + trace_workqueue_activate_work(work); move_linked_works(work, pos, NULL); __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work)); cwq->nr_active++; -- cgit v1.2.3-70-g09d2 From 773e3f93577ffb493fb7c39b1a6ecf39b5748e87 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 5 Oct 2010 14:03:02 -0700 Subject: rcu: move check from rcu_dereference_bh to rcu_read_lock_bh_held As suggested by Linus, push the irqs_disabled() down to the rcu_read_lock_bh_held() level so that all callers get the benefit of the correct check. Signed-off-by: Paul E. McKenney --- include/linux/rcupdate.h | 2 +- kernel/rcupdate.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 83af1f8d8b7..9fbc54a2585 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -454,7 +454,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) * Makes rcu_dereference_check() do the dirty work. */ #define rcu_dereference_bh(p) \ - rcu_dereference_check(p, rcu_read_lock_bh_held() || irqs_disabled()) + rcu_dereference_check(p, rcu_read_lock_bh_held()) /** * rcu_dereference_sched - fetch RCU-protected pointer, checking for RCU-sched diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index 4d169835fb3..0af1dc70fec 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c @@ -86,7 +86,7 @@ int rcu_read_lock_bh_held(void) { if (!debug_lockdep_rcu_enabled()) return 1; - return in_softirq(); + return in_softirq() || irqs_disabled(); } EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held); -- cgit v1.2.3-70-g09d2 From b0a0f667a349247bd7f05f806b662a25653822bc Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 6 Oct 2010 17:32:51 -0700 Subject: sched: suppress RCU lockdep splat in task_fork_fair > =================================================== > [ INFO: suspicious rcu_dereference_check() usage. ] > --------------------------------------------------- > /home/greearb/git/linux.wireless-testing/kernel/sched.c:618 invoked rcu_dereference_check() without protection! > > other info that might help us debug this: > > rcu_scheduler_active = 1, debug_locks = 1 > 1 lock held by ifup/23517: > #0: (&rq->lock){-.-.-.}, at: [] task_fork_fair+0x3b/0x108 > > stack backtrace: > Pid: 23517, comm: ifup Not tainted 2.6.36-rc6-wl+ #5 > Call Trace: > [] ? 
printk+0xf/0x16 > [] lockdep_rcu_dereference+0x74/0x7d > [] task_group+0x6d/0x79 > [] set_task_rq+0xe/0x57 > [] task_fork_fair+0x57/0x108 > [] sched_fork+0x82/0xf9 > [] copy_process+0x569/0xe8e > [] do_fork+0x118/0x262 > [] ? do_page_fault+0x16a/0x2cf > [] ? up_read+0x16/0x2a > [] sys_clone+0x1b/0x20 > [] ptregs_clone+0x15/0x30 > [] ? sysenter_do_call+0x12/0x38 Here a newly created task is having its runqueue assigned. The new task is not yet on the tasklist, so cannot go away. This is therefore a false positive, suppress with an RCU read-side critical section. Reported-by: Ben Greear Tested-by: Ben Greear Date: Tue, 28 Sep 2010 16:32:43 +0800 Subject: rcu: using ACCESS_ONCE() to observe the jiffies_stall/rnp->qsmask value Using ACCESS_ONCE() to observe the jiffies_stall/rnp->qsmask value due to the caller didn't hold the root_rcu/rnp node's lock. Although use without ACCESS_ONCE() is safe due to the value loaded being used but once, the ACCESS_ONCE() is a good documentation aid -- the variables are being loaded without the services of a lock. Signed-off-by: Dongdong Deng CC: Dipankar Sarma CC: Paul E. McKenney Signed-off-by: Paul E. McKenney --- kernel/rcutree.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/rcutree.c b/kernel/rcutree.c index e75073504a3..ccdc04c4798 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -545,9 +545,9 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) if (rcu_cpu_stall_suppress) return; - delta = jiffies - rsp->jiffies_stall; + delta = jiffies - ACCESS_ONCE(rsp->jiffies_stall); rnp = rdp->mynode; - if ((rnp->qsmask & rdp->grpmask) && delta >= 0) { + if ((ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && delta >= 0) { /* We haven't checked in, so go dump stack. */ print_cpu_stall(rsp); -- cgit v1.2.3-70-g09d2 From 6506cf6ce68d78a5470a8360c965dafe8e4b78e3 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 16 Sep 2010 17:50:31 +0200 Subject: sched: fix RCU lockdep splat from task_group() This addresses the following RCU lockdep splat: [0.051203] CPU0: AMD QEMU Virtual CPU version 0.12.4 stepping 03 [0.052999] lockdep: fixing up alternatives. [0.054105] [0.054106] =================================================== [0.054999] [ INFO: suspicious rcu_dereference_check() usage. ] [0.054999] --------------------------------------------------- [0.054999] kernel/sched.c:616 invoked rcu_dereference_check() without protection! [0.054999] [0.054999] other info that might help us debug this: [0.054999] [0.054999] [0.054999] rcu_scheduler_active = 1, debug_locks = 1 [0.054999] 3 locks held by swapper/1: [0.054999] #0: (cpu_add_remove_lock){+.+.+.}, at: [] cpu_up+0x42/0x6a [0.054999] #1: (cpu_hotplug.lock){+.+.+.}, at: [] cpu_hotplug_begin+0x2a/0x51 [0.054999] #2: (&rq->lock){-.-...}, at: [] init_idle+0x2f/0x113 [0.054999] [0.054999] stack backtrace: [0.054999] Pid: 1, comm: swapper Not tainted 2.6.35 #1 [0.054999] Call Trace: [0.054999] [] lockdep_rcu_dereference+0x9b/0xa3 [0.054999] [] task_group+0x7b/0x8a [0.054999] [] set_task_rq+0x13/0x40 [0.054999] [] init_idle+0xd2/0x113 [0.054999] [] fork_idle+0xb8/0xc7 [0.054999] [] ? mark_held_locks+0x4d/0x6b [0.054999] [] do_fork_idle+0x17/0x2b [0.054999] [] native_cpu_up+0x1c1/0x724 [0.054999] [] ? do_fork_idle+0x0/0x2b [0.054999] [] _cpu_up+0xac/0x127 [0.054999] [] cpu_up+0x55/0x6a [0.054999] [] kernel_init+0xe1/0x1ff [0.054999] [] kernel_thread_helper+0x4/0x10 [0.054999] [] ? restore_args+0x0/0x30 [0.054999] [] ? 
kernel_init+0x0/0x1ff [0.054999] [] ? kernel_thread_helper+0x0/0x10 [0.056074] Booting Node 0, Processors #1lockdep: fixing up alternatives. [0.130045] #2lockdep: fixing up alternatives. [0.203089] #3 Ok. [0.275286] Brought up 4 CPUs [0.276005] Total of 4 processors activated (16017.17 BogoMIPS). The cgroup_subsys_state structures referenced by idle tasks are never freed, because the idle tasks should be part of the root cgroup, which is not removable. The problem is that while we do in-fact hold rq->lock, the newly spawned idle thread's cpu is not yet set to the correct cpu so the lockdep check in task_group(): lockdep_is_held(&task_rq(p)->lock) will fail. But this is a chicken and egg problem. Setting the CPU's runqueue requires that the CPU's runqueue already be set. ;-) So insert an RCU read-side critical section to avoid the complaint. Signed-off-by: Peter Zijlstra Signed-off-by: Paul E. McKenney --- kernel/sched.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index dc85ceb9083..ae8f75a5ceb 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -5337,7 +5337,19 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) idle->se.exec_start = sched_clock(); cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu)); + /* + * We're having a chicken and egg problem, even though we are + * holding rq->lock, the cpu isn't yet set to this cpu so the + * lockdep check in task_group() will fail. + * + * Similar case to sched_fork(). / Alternatively we could + * use task_rq_lock() here and obtain the other rq->lock. + * + * Silence PROVE_RCU + */ + rcu_read_lock(); __set_task_cpu(idle, cpu); + rcu_read_unlock(); rq->curr = rq->idle = idle; #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) -- cgit v1.2.3-70-g09d2 From 30310045dd20a286cf3800f063f79b468e132fb1 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 11 Oct 2010 11:51:57 +0200 Subject: workqueue: fix HIGHPRI handling in keep_working() The policy function keep_working() didn't check GCWQ_HIGHPRI_PENDING and could return %false with highpri work pending. This could lead to late execution of a highpri work which was delayed due to @max_active throttling if other works are actively consuming CPU cycles. For example, the following could happen. 1. Work W0 which burns CPU cycles. 2. Two works W1 and W2 are queued to a highpri wq w/ @max_active of 1. 3. W1 starts executing and W2 is put to delayed queue. W0 and W1 are both runnable. 4. W1 finishes which puts W2 to pending queue but keep_working() incorrectly returns %false and the worker goes to sleep. 5. W0 finishes and W2 starts execution. With this patch applied, W2 starts execution as soon as W1 finishes. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index cb2ccfbed0c..b57a8babdec 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -604,7 +604,9 @@ static bool keep_working(struct global_cwq *gcwq) { atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu); - return !list_empty(&gcwq->worklist) && atomic_read(nr_running) <= 1; + return !list_empty(&gcwq->worklist) && + (atomic_read(nr_running) <= 1 || + gcwq->flags & GCWQ_HIGHPRI_PENDING); } /* Do we need a new worker? Called from manager. 
*/ -- cgit v1.2.3-70-g09d2 From 6370a6ad3b53df90b4700977f7718118a2cd524a Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 11 Oct 2010 15:12:27 +0200 Subject: workqueue: add and use WQ_MEM_RECLAIM flag Add WQ_MEM_RECLAIM flag which currently maps to WQ_RESCUER, mark WQ_RESCUER as internal and replace all external WQ_RESCUER usages to WQ_MEM_RECLAIM. This makes the API users express the intent of the workqueue instead of indicating the internal mechanism used to guarantee forward progress. This is also to make it cleaner to add more semantics to WQ_MEM_RECLAIM. For example, if deemed necessary, memory reclaim workqueues can be made highpri. This patch doesn't introduce any functional change. Signed-off-by: Tejun Heo Cc: Jeff Garzik Cc: Dave Chinner Cc: Steven Whitehouse --- Documentation/workqueue.txt | 29 +++++++++++++++-------------- drivers/ata/libata-sff.c | 2 +- fs/gfs2/main.c | 2 +- fs/xfs/linux-2.6/xfs_buf.c | 2 +- include/linux/workqueue.h | 11 ++++++----- kernel/workqueue.c | 7 +++++++ 6 files changed, 31 insertions(+), 22 deletions(-) (limited to 'kernel') diff --git a/Documentation/workqueue.txt b/Documentation/workqueue.txt index e4498a2872c..996a27d9b8d 100644 --- a/Documentation/workqueue.txt +++ b/Documentation/workqueue.txt @@ -196,11 +196,11 @@ resources, scheduled and executed. suspend operations. Work items on the wq are drained and no new work item starts execution until thawed. - WQ_RESCUER + WQ_MEM_RECLAIM All wq which might be used in the memory reclaim paths _MUST_ - have this flag set. This reserves one worker exclusively for - the execution of this wq under memory pressure. + have this flag set. The wq is guaranteed to have at least one + execution context regardless of memory pressure. WQ_HIGHPRI @@ -356,11 +356,11 @@ If q1 has WQ_CPU_INTENSIVE set, 6. Guidelines -* Do not forget to use WQ_RESCUER if a wq may process work items which - are used during memory reclaim. Each wq with WQ_RESCUER set has one - rescuer thread reserved for it. If there is dependency among - multiple work items used during memory reclaim, they should be - queued to separate wq each with WQ_RESCUER. +* Do not forget to use WQ_MEM_RECLAIM if a wq may process work items + which are used during memory reclaim. Each wq with WQ_MEM_RECLAIM + set has an execution context reserved for it. If there is + dependency among multiple work items used during memory reclaim, + they should be queued to separate wq each with WQ_MEM_RECLAIM. * Unless strict ordering is required, there is no need to use ST wq. @@ -368,12 +368,13 @@ If q1 has WQ_CPU_INTENSIVE set, recommended. In most use cases, concurrency level usually stays well under the default limit. -* A wq serves as a domain for forward progress guarantee (WQ_RESCUER), - flush and work item attributes. Work items which are not involved - in memory reclaim and don't need to be flushed as a part of a group - of work items, and don't require any special attribute, can use one - of the system wq. There is no difference in execution - characteristics between using a dedicated wq and a system wq. +* A wq serves as a domain for forward progress guarantee + (WQ_MEM_RECLAIM, flush and work item attributes. Work items which + are not involved in memory reclaim and don't need to be flushed as a + part of a group of work items, and don't require any special + attribute, can use one of the system wq. There is no difference in + execution characteristics between using a dedicated wq and a system + wq. 
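
To make the flag's use concrete outside the converted callers, a sketch (my_reclaim_wq and my_setup are illustrative names):

#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_reclaim_wq;	/* illustrative */

static int __init my_setup(void)
{
	/*
	 * Callers now state the intent (WQ_MEM_RECLAIM); the core maps
	 * it to the internal WQ_RESCUER mechanism in
	 * __alloc_workqueue_key(), as the kernel/workqueue.c hunk below
	 * shows.
	 */
	my_reclaim_wq = alloc_workqueue("my_reclaim", WQ_MEM_RECLAIM, 1);
	return my_reclaim_wq ? 0 : -ENOMEM;
}
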
* Unless work items are expected to consume a huge amount of CPU cycles, using a bound wq is usually beneficial due to the increased diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index e30c537cce3..f5296bb19ec 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -3335,7 +3335,7 @@ void ata_sff_port_init(struct ata_port *ap) int __init ata_sff_init(void) { - ata_sff_wq = alloc_workqueue("ata_sff", WQ_RESCUER, WQ_MAX_ACTIVE); + ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE); if (!ata_sff_wq) return -ENOMEM; diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index b1e9630eb46..1c5f46075d5 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c @@ -140,7 +140,7 @@ static int __init init_gfs2_fs(void) error = -ENOMEM; gfs_recovery_wq = alloc_workqueue("gfs_recovery", - WQ_NON_REENTRANT | WQ_RESCUER, 0); + WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0); if (!gfs_recovery_wq) goto fail_wq; diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index 286e36e21da..6838aefca71 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -1933,7 +1933,7 @@ xfs_buf_init(void) goto out; xfslogd_workqueue = alloc_workqueue("xfslogd", - WQ_RESCUER | WQ_HIGHPRI, 1); + WQ_MEM_RECLAIM | WQ_HIGHPRI, 1); if (!xfslogd_workqueue) goto out_free_buf_zone; diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index e33ff4a9170..03bbe903e5c 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -243,11 +243,12 @@ enum { WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */ WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ WQ_FREEZEABLE = 1 << 2, /* freeze during suspend */ - WQ_RESCUER = 1 << 3, /* has an rescue worker */ + WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */ WQ_HIGHPRI = 1 << 4, /* high priority */ WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */ WQ_DYING = 1 << 6, /* internal: workqueue is dying */ + WQ_RESCUER = 1 << 7, /* internal: workqueue has rescuer */ WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */ @@ -309,7 +310,7 @@ __alloc_workqueue_key(const char *name, unsigned int flags, int max_active, /** * alloc_ordered_workqueue - allocate an ordered workqueue * @name: name of the workqueue - * @flags: WQ_* flags (only WQ_FREEZEABLE and WQ_RESCUER are meaningful) + * @flags: WQ_* flags (only WQ_FREEZEABLE and WQ_MEM_RECLAIM are meaningful) * * Allocate an ordered workqueue. An ordered workqueue executes at * most one work item at any given time in the queued order. 
They are @@ -325,11 +326,11 @@ alloc_ordered_workqueue(const char *name, unsigned int flags) } #define create_workqueue(name) \ - alloc_workqueue((name), WQ_RESCUER, 1) + alloc_workqueue((name), WQ_MEM_RECLAIM, 1) #define create_freezeable_workqueue(name) \ - alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_RESCUER, 1) + alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1) #define create_singlethread_workqueue(name) \ - alloc_workqueue((name), WQ_UNBOUND | WQ_RESCUER, 1) + alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1) extern void destroy_workqueue(struct workqueue_struct *wq); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index b57a8babdec..2c6871cbcbe 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2847,6 +2847,13 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name, struct workqueue_struct *wq; unsigned int cpu; + /* + * Workqueues which may be used during memory reclaim should + * have a rescuer to guarantee forward progress. + */ + if (flags & WQ_MEM_RECLAIM) + flags |= WQ_RESCUER; + /* * Unbound workqueues aren't concurrency managed and should be * dispatched to workers immediately. -- cgit v1.2.3-70-g09d2 From 84c7991059c9c4530cc911137c5bf508a41ed129 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Sun, 3 Oct 2010 21:41:13 +0100 Subject: perf: New helper function for pmu name Introduce perf_pmu_name() helper function that returns the name of the pmu. This gives us a generic way to get the name of a pmu regardless of how an architecture identifies it internally. Signed-off-by: Matt Fleming Acked-by: Peter Zijlstra Acked-by: Paul Mundt Signed-off-by: Robert Richter --- arch/sh/kernel/perf_event.c | 9 +++++++++ include/linux/perf_event.h | 1 + kernel/perf_event.c | 5 +++++ 3 files changed, 15 insertions(+) (limited to 'kernel') diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c index 2cb9ad59d4b..55fe89bbdfe 100644 --- a/arch/sh/kernel/perf_event.c +++ b/arch/sh/kernel/perf_event.c @@ -59,6 +59,15 @@ static inline int sh_pmu_initialized(void) return !!sh_pmu; } +const char *perf_pmu_name(void) +{ + if (!sh_pmu) + return NULL; + + return sh_pmu->name; +} +EXPORT_SYMBOL_GPL(perf_pmu_name); + int perf_num_counters(void) { if (!sh_pmu) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 1a021924718..33f08dafda2 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -850,6 +850,7 @@ extern int perf_max_events; extern const struct pmu *hw_perf_event_init(struct perf_event *event); extern int perf_num_counters(void); +extern const char *perf_pmu_name(void); extern void perf_event_task_sched_in(struct task_struct *task); extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next); extern void perf_event_task_tick(struct task_struct *task); diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 403d1804b19..e2534691db0 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -85,6 +85,11 @@ void __weak hw_perf_enable(void) { barrier(); } void __weak perf_event_print_debug(void) { } +extern __weak const char *perf_pmu_name(void) +{ + return "pmu"; +} + static DEFINE_PER_CPU(int, perf_disable_count); void perf_disable(void) -- cgit v1.2.3-70-g09d2 From e144710b302525de5b90b9c3ba43562458d8957f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 1 Oct 2010 16:03:45 +0200 Subject: genirq: Distangle irq.h Move irq_desc and internal functions out of irq.h Signed-off-by: Thomas Gleixner Reviewed-by: Ingo Molnar --- 
include/linux/irq.h | 292 +++--------------------------------------------- include/linux/irqdesc.h | 171 ++++++++++++++++++++++++++++ kernel/irq/internals.h | 100 +++++++++++++++++ 3 files changed, 284 insertions(+), 279 deletions(-) create mode 100644 include/linux/irqdesc.h (limited to 'kernel') diff --git a/include/linux/irq.h b/include/linux/irq.h index 82ed8231394..f5827abbc03 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -80,7 +80,6 @@ typedef void (*irq_flow_handler_t)(unsigned int irq, # define IRQ_NO_BALANCING_MASK IRQ_NO_BALANCING #endif -struct proc_dir_entry; struct msi_desc; /** @@ -202,152 +201,36 @@ struct irq_chip { #endif }; -struct timer_rand_state; -struct irq_2_iommu; -/** - * struct irq_desc - interrupt descriptor - * @irq_data: per irq and chip data passed down to chip functions - * @timer_rand_state: pointer to timer rand state struct - * @kstat_irqs: irq stats per cpu - * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()] - * @action: the irq action chain - * @status: status information - * @depth: disable-depth, for nested irq_disable() calls - * @wake_depth: enable depth, for multiple set_irq_wake() callers - * @irq_count: stats field to detect stalled irqs - * @last_unhandled: aging timer for unhandled count - * @irqs_unhandled: stats field for spurious unhandled interrupts - * @lock: locking for SMP - * @pending_mask: pending rebalanced interrupts - * @threads_active: number of irqaction threads currently running - * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers - * @dir: /proc/irq/ procfs entry - * @name: flow handler name for /proc/interrupts output - */ -struct irq_desc { - -#ifdef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED - struct irq_data irq_data; -#else - /* - * This union will go away, once we fixed the direct access to - * irq_desc all over the place. The direct fields are a 1:1 - * overlay of irq_data. 
- */ - union { - struct irq_data irq_data; - struct { - unsigned int irq; - unsigned int node; - struct irq_chip *chip; - void *handler_data; - void *chip_data; - struct msi_desc *msi_desc; -#ifdef CONFIG_SMP - cpumask_var_t affinity; -#endif -#ifdef CONFIG_INTR_REMAP - struct irq_2_iommu *irq_2_iommu; -#endif - }; - }; -#endif - - struct timer_rand_state *timer_rand_state; - unsigned int *kstat_irqs; - irq_flow_handler_t handle_irq; - struct irqaction *action; /* IRQ action list */ - unsigned int status; /* IRQ status */ - - unsigned int depth; /* nested irq disables */ - unsigned int wake_depth; /* nested wake enables */ - unsigned int irq_count; /* For detecting broken IRQs */ - unsigned long last_unhandled; /* Aging timer for unhandled count */ - unsigned int irqs_unhandled; - raw_spinlock_t lock; -#ifdef CONFIG_SMP - const struct cpumask *affinity_hint; -#ifdef CONFIG_GENERIC_PENDING_IRQ - cpumask_var_t pending_mask; -#endif -#endif - atomic_t threads_active; - wait_queue_head_t wait_for_threads; -#ifdef CONFIG_PROC_FS - struct proc_dir_entry *dir; -#endif - const char *name; -} ____cacheline_internodealigned_in_smp; - -extern void arch_init_copy_chip_data(struct irq_desc *old_desc, - struct irq_desc *desc, int node); -extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc); - -#ifndef CONFIG_SPARSE_IRQ -extern struct irq_desc irq_desc[NR_IRQS]; -#endif - -#ifdef CONFIG_NUMA_IRQ_DESC -extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node); -#else -static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) -{ - return desc; -} -#endif - -extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node); +/* This include will go away once we isolated irq_desc usage to core code */ +#include /* * Pick up the arch-dependent methods: */ #include +struct irqaction; extern int setup_irq(unsigned int irq, struct irqaction *new); extern void remove_irq(unsigned int irq, struct irqaction *act); #ifdef CONFIG_GENERIC_HARDIRQS #ifdef CONFIG_SMP - -#ifdef CONFIG_GENERIC_PENDING_IRQ - +# ifdef CONFIG_GENERIC_PENDING_IRQ void move_native_irq(int irq); void move_masked_irq(int irq); - -#else /* CONFIG_GENERIC_PENDING_IRQ */ - -static inline void move_irq(int irq) -{ -} - -static inline void move_native_irq(int irq) -{ -} - -static inline void move_masked_irq(int irq) -{ -} - -#endif /* CONFIG_GENERIC_PENDING_IRQ */ - -#else /* CONFIG_SMP */ - -#define move_native_irq(x) -#define move_masked_irq(x) - -#endif /* CONFIG_SMP */ +# else +static inline void move_irq(int irq) { } +static inline void move_native_irq(int irq) { } +static inline void move_masked_irq(int irq) { } +# endif +#else +static inline void move_native_irq(int irq) { } +static inline void move_masked_irq(int irq) { } +#endif extern int no_irq_affinity; -static inline int irq_balancing_disabled(unsigned int irq) -{ - struct irq_desc *desc; - - desc = irq_to_desc(irq); - return desc->status & IRQ_NO_BALANCING_MASK; -} - /* Handle irq action chains: */ extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action); @@ -363,42 +246,10 @@ extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc); extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); extern void handle_nested_irq(unsigned int irq); -/* - * Monolithic do_IRQ implementation. 
- */ -#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ -extern unsigned int __do_IRQ(unsigned int irq); -#endif - -/* - * Architectures call this to let the generic IRQ layer - * handle an interrupt. If the descriptor is attached to an - * irqchip-style controller then we call the ->handle_irq() handler, - * and it calls __do_IRQ() if it's attached to an irqtype-style controller. - */ -static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc) -{ -#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ - desc->handle_irq(irq, desc); -#else - if (likely(desc->handle_irq)) - desc->handle_irq(irq, desc); - else - __do_IRQ(irq); -#endif -} - -static inline void generic_handle_irq(unsigned int irq) -{ - generic_handle_irq_desc(irq, irq_to_desc(irq)); -} - /* Handling of unhandled and spurious interrupts: */ extern void note_interrupt(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret); -/* Resending of interrupts :*/ -void check_irq_resend(struct irq_desc *desc, unsigned int irq); /* Enable/disable irq debugging output: */ extern int noirqdebug_setup(char *str); @@ -421,16 +272,6 @@ extern void __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, const char *name); -/* caller has locked the irq_desc and both params are valid */ -static inline void __set_irq_handler_unlocked(int irq, - irq_flow_handler_t handler) -{ - struct irq_desc *desc; - - desc = irq_to_desc(irq); - desc->handle_irq = handler; -} - /* * Set a highlevel flow handler for a given IRQ: */ @@ -462,13 +303,6 @@ extern unsigned int create_irq_nr(unsigned int irq_want, int node); extern int create_irq(void); extern void destroy_irq(unsigned int irq); -/* Test to see if a driver has successfully requested an irq */ -static inline int irq_has_action(unsigned int irq) -{ - struct irq_desc *desc = irq_to_desc(irq); - return desc->action != NULL; -} - /* Dynamic irq helper functions */ extern void dynamic_irq_init(unsigned int irq); void dynamic_irq_init_keep_chip_data(unsigned int irq); @@ -487,108 +321,8 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry); #define get_irq_data(irq) (irq_to_desc(irq)->irq_data.handler_data) #define get_irq_msi(irq) (irq_to_desc(irq)->irq_data.msi_desc) -#define get_irq_desc_chip(desc) ((desc)->irq_data.chip) -#define get_irq_desc_chip_data(desc) ((desc)->irq_data.chip_data) -#define get_irq_desc_data(desc) ((desc)->irq_data.handler_data) -#define get_irq_desc_msi(desc) ((desc)->irq_data.msi_desc) - #endif /* CONFIG_GENERIC_HARDIRQS */ #endif /* !CONFIG_S390 */ -#ifdef CONFIG_SMP -/** - * alloc_desc_masks - allocate cpumasks for irq_desc - * @desc: pointer to irq_desc struct - * @node: node which will be handling the cpumasks - * @boot: true if need bootmem - * - * Allocates affinity and pending_mask cpumask if required. - * Returns true if successful (or not required). 
- */ -static inline bool alloc_desc_masks(struct irq_desc *desc, int node, - bool boot) -{ - gfp_t gfp = GFP_ATOMIC; - - if (boot) - gfp = GFP_NOWAIT; - -#ifdef CONFIG_CPUMASK_OFFSTACK - if (!alloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node)) - return false; - -#ifdef CONFIG_GENERIC_PENDING_IRQ - if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { - free_cpumask_var(desc->irq_data.affinity); - return false; - } -#endif -#endif - return true; -} - -static inline void init_desc_masks(struct irq_desc *desc) -{ - cpumask_setall(desc->irq_data.affinity); -#ifdef CONFIG_GENERIC_PENDING_IRQ - cpumask_clear(desc->pending_mask); -#endif -} - -/** - * init_copy_desc_masks - copy cpumasks for irq_desc - * @old_desc: pointer to old irq_desc struct - * @new_desc: pointer to new irq_desc struct - * - * Insures affinity and pending_masks are copied to new irq_desc. - * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the - * irq_desc struct so the copy is redundant. - */ - -static inline void init_copy_desc_masks(struct irq_desc *old_desc, - struct irq_desc *new_desc) -{ -#ifdef CONFIG_CPUMASK_OFFSTACK - cpumask_copy(new_desc->irq_data.affinity, old_desc->irq_data.affinity); - -#ifdef CONFIG_GENERIC_PENDING_IRQ - cpumask_copy(new_desc->pending_mask, old_desc->pending_mask); -#endif -#endif -} - -static inline void free_desc_masks(struct irq_desc *old_desc, - struct irq_desc *new_desc) -{ - free_cpumask_var(old_desc->irq_data.affinity); - -#ifdef CONFIG_GENERIC_PENDING_IRQ - free_cpumask_var(old_desc->pending_mask); -#endif -} - -#else /* !CONFIG_SMP */ - -static inline bool alloc_desc_masks(struct irq_desc *desc, int node, - bool boot) -{ - return true; -} - -static inline void init_desc_masks(struct irq_desc *desc) -{ -} - -static inline void init_copy_desc_masks(struct irq_desc *old_desc, - struct irq_desc *new_desc) -{ -} - -static inline void free_desc_masks(struct irq_desc *old_desc, - struct irq_desc *new_desc) -{ -} -#endif /* CONFIG_SMP */ - #endif /* _LINUX_IRQ_H */ diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h new file mode 100644 index 00000000000..22e426fdd30 --- /dev/null +++ b/include/linux/irqdesc.h @@ -0,0 +1,171 @@ +#ifndef _LINUX_IRQDESC_H +#define _LINUX_IRQDESC_H + +/* + * Core internal functions to deal with irq descriptors + * + * This include will move to kernel/irq once we cleaned up the tree. 
+ * For now it's included from + */ + +struct proc_dir_entry; +struct timer_rand_state; +struct irq_2_iommu; +/** + * struct irq_desc - interrupt descriptor + * @irq_data: per irq and chip data passed down to chip functions + * @timer_rand_state: pointer to timer rand state struct + * @kstat_irqs: irq stats per cpu + * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()] + * @action: the irq action chain + * @status: status information + * @depth: disable-depth, for nested irq_disable() calls + * @wake_depth: enable depth, for multiple set_irq_wake() callers + * @irq_count: stats field to detect stalled irqs + * @last_unhandled: aging timer for unhandled count + * @irqs_unhandled: stats field for spurious unhandled interrupts + * @lock: locking for SMP + * @pending_mask: pending rebalanced interrupts + * @threads_active: number of irqaction threads currently running + * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers + * @dir: /proc/irq/ procfs entry + * @name: flow handler name for /proc/interrupts output + */ +struct irq_desc { + +#ifdef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED + struct irq_data irq_data; +#else + /* + * This union will go away, once we fixed the direct access to + * irq_desc all over the place. The direct fields are a 1:1 + * overlay of irq_data. + */ + union { + struct irq_data irq_data; + struct { + unsigned int irq; + unsigned int node; + struct irq_chip *chip; + void *handler_data; + void *chip_data; + struct msi_desc *msi_desc; +#ifdef CONFIG_SMP + cpumask_var_t affinity; +#endif +#ifdef CONFIG_INTR_REMAP + struct irq_2_iommu *irq_2_iommu; +#endif + }; + }; +#endif + + struct timer_rand_state *timer_rand_state; + unsigned int *kstat_irqs; + irq_flow_handler_t handle_irq; + struct irqaction *action; /* IRQ action list */ + unsigned int status; /* IRQ status */ + + unsigned int depth; /* nested irq disables */ + unsigned int wake_depth; /* nested wake enables */ + unsigned int irq_count; /* For detecting broken IRQs */ + unsigned long last_unhandled; /* Aging timer for unhandled count */ + unsigned int irqs_unhandled; + raw_spinlock_t lock; +#ifdef CONFIG_SMP + const struct cpumask *affinity_hint; +#ifdef CONFIG_GENERIC_PENDING_IRQ + cpumask_var_t pending_mask; +#endif +#endif + atomic_t threads_active; + wait_queue_head_t wait_for_threads; +#ifdef CONFIG_PROC_FS + struct proc_dir_entry *dir; +#endif + const char *name; +} ____cacheline_internodealigned_in_smp; + +extern void arch_init_copy_chip_data(struct irq_desc *old_desc, + struct irq_desc *desc, int node); +extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc); + +#ifndef CONFIG_SPARSE_IRQ +extern struct irq_desc irq_desc[NR_IRQS]; +#endif + +#ifdef CONFIG_NUMA_IRQ_DESC +extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node); +#else +static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) +{ + return desc; +} +#endif + +extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node); + +#ifdef CONFIG_GENERIC_HARDIRQS + +#define get_irq_desc_chip(desc) ((desc)->irq_data.chip) +#define get_irq_desc_chip_data(desc) ((desc)->irq_data.chip_data) +#define get_irq_desc_data(desc) ((desc)->irq_data.handler_data) +#define get_irq_desc_msi(desc) ((desc)->irq_data.msi_desc) + +/* + * Monolithic do_IRQ implementation. + */ +#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ +extern unsigned int __do_IRQ(unsigned int irq); +#endif + +/* + * Architectures call this to let the generic IRQ layer + * handle an interrupt. 
If the descriptor is attached to an + * irqchip-style controller then we call the ->handle_irq() handler, + * and it calls __do_IRQ() if it's attached to an irqtype-style controller. + */ +static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc) +{ +#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ + desc->handle_irq(irq, desc); +#else + if (likely(desc->handle_irq)) + desc->handle_irq(irq, desc); + else + __do_IRQ(irq); +#endif +} + +static inline void generic_handle_irq(unsigned int irq) +{ + generic_handle_irq_desc(irq, irq_to_desc(irq)); +} + +/* Test to see if a driver has successfully requested an irq */ +static inline int irq_has_action(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + return desc->action != NULL; +} + +static inline int irq_balancing_disabled(unsigned int irq) +{ + struct irq_desc *desc; + + desc = irq_to_desc(irq); + return desc->status & IRQ_NO_BALANCING_MASK; +} + +/* caller has locked the irq_desc and both params are valid */ +static inline void __set_irq_handler_unlocked(int irq, + irq_flow_handler_t handler) +{ + struct irq_desc *desc; + + desc = irq_to_desc(irq); + desc->handle_irq = handler; +} +#endif + +#endif diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index b905f0ab1bb..e281e45fbb5 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -1,6 +1,7 @@ /* * IRQ subsystem internal functions and variables: */ +#include extern int noirqdebug; @@ -22,6 +23,9 @@ extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); extern void clear_kstat_irqs(struct irq_desc *desc); extern raw_spinlock_t sparse_irq_lock; +/* Resending of interrupts :*/ +void check_irq_resend(struct irq_desc *desc, unsigned int irq); + #ifdef CONFIG_SPARSE_IRQ void replace_irq_desc(unsigned int irq, struct irq_desc *desc); #endif @@ -105,3 +109,99 @@ static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) #undef P +/* Stuff below will be cleaned up after the sparse allocator is done */ + +#ifdef CONFIG_SMP +/** + * alloc_desc_masks - allocate cpumasks for irq_desc + * @desc: pointer to irq_desc struct + * @node: node which will be handling the cpumasks + * @boot: true if need bootmem + * + * Allocates affinity and pending_mask cpumask if required. + * Returns true if successful (or not required). + */ +static inline bool alloc_desc_masks(struct irq_desc *desc, int node, + bool boot) +{ + gfp_t gfp = GFP_ATOMIC; + + if (boot) + gfp = GFP_NOWAIT; + +#ifdef CONFIG_CPUMASK_OFFSTACK + if (!alloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node)) + return false; + +#ifdef CONFIG_GENERIC_PENDING_IRQ + if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { + free_cpumask_var(desc->irq_data.affinity); + return false; + } +#endif +#endif + return true; +} + +static inline void init_desc_masks(struct irq_desc *desc) +{ + cpumask_setall(desc->irq_data.affinity); +#ifdef CONFIG_GENERIC_PENDING_IRQ + cpumask_clear(desc->pending_mask); +#endif +} + +/** + * init_copy_desc_masks - copy cpumasks for irq_desc + * @old_desc: pointer to old irq_desc struct + * @new_desc: pointer to new irq_desc struct + * + * Insures affinity and pending_masks are copied to new irq_desc. + * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the + * irq_desc struct so the copy is redundant. 
+ */ + +static inline void init_copy_desc_masks(struct irq_desc *old_desc, + struct irq_desc *new_desc) +{ +#ifdef CONFIG_CPUMASK_OFFSTACK + cpumask_copy(new_desc->irq_data.affinity, old_desc->irq_data.affinity); + +#ifdef CONFIG_GENERIC_PENDING_IRQ + cpumask_copy(new_desc->pending_mask, old_desc->pending_mask); +#endif +#endif +} + +static inline void free_desc_masks(struct irq_desc *old_desc, + struct irq_desc *new_desc) +{ + free_cpumask_var(old_desc->irq_data.affinity); + +#ifdef CONFIG_GENERIC_PENDING_IRQ + free_cpumask_var(old_desc->pending_mask); +#endif +} + +#else /* !CONFIG_SMP */ + +static inline bool alloc_desc_masks(struct irq_desc *desc, int node, + bool boot) +{ + return true; +} + +static inline void init_desc_masks(struct irq_desc *desc) +{ +} + +static inline void init_copy_desc_masks(struct irq_desc *old_desc, + struct irq_desc *new_desc) +{ +} + +static inline void free_desc_masks(struct irq_desc *old_desc, + struct irq_desc *new_desc) +{ +} +#endif /* CONFIG_SMP */ -- cgit v1.2.3-70-g09d2 From 442471848f5abb55b99cba1229301655f67492b4 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 28 Sep 2010 10:40:18 +0200 Subject: genirq: Provide status modifier Provide a irq_desc.status modifier function to cleanup the direct access to irq_desc in arch and driver code. Signed-off-by: Thomas Gleixner Reviewed-by: Ingo Molnar --- include/linux/irq.h | 27 +++++++++++++++++++++++++-- kernel/irq/chip.c | 26 +++++++------------------- 2 files changed, 32 insertions(+), 21 deletions(-) (limited to 'kernel') diff --git a/include/linux/irq.h b/include/linux/irq.h index 80fdab208c1..e7e7ac83edd 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -72,6 +72,10 @@ typedef void (*irq_flow_handler_t)(unsigned int irq, #define IRQ_ONESHOT 0x08000000 /* IRQ is not unmasked after hardirq */ #define IRQ_NESTED_THREAD 0x10000000 /* IRQ is nested into another, no own handler thread */ +#define IRQF_MODIFY_MASK \ + (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ + IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL) + #ifdef CONFIG_IRQ_PER_CPU # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) # define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) @@ -289,8 +293,27 @@ set_irq_chained_handler(unsigned int irq, extern void set_irq_nested_thread(unsigned int irq, int nest); -extern void set_irq_noprobe(unsigned int irq); -extern void set_irq_probe(unsigned int irq); +void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set); + +static inline void irq_set_status_flags(unsigned int irq, unsigned long set) +{ + irq_modify_status(irq, 0, set); +} + +static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr) +{ + irq_modify_status(irq, clr, 0); +} + +static inline void set_irq_noprobe(unsigned int irq) +{ + irq_modify_status(irq, 0, IRQ_NOPROBE); +} + +static inline void set_irq_probe(unsigned int irq) +{ + irq_modify_status(irq, IRQ_NOPROBE, 0); +} /* Handle dynamic irq creation and destruction */ extern unsigned int create_irq_nr(unsigned int irq_want, int node); diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 323547983f1..2b1f6906b82 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -851,32 +851,20 @@ set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, __set_irq_handler(irq, handle, 0, name); } -void set_irq_noprobe(unsigned int irq) +void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set) { struct irq_desc *desc = irq_to_desc(irq); unsigned long flags; - if (!desc) { - 
printk(KERN_ERR "Trying to mark IRQ%d non-probeable\n", irq); + if (!desc) return; - } - - raw_spin_lock_irqsave(&desc->lock, flags); - desc->status |= IRQ_NOPROBE; - raw_spin_unlock_irqrestore(&desc->lock, flags); -} -void set_irq_probe(unsigned int irq) -{ - struct irq_desc *desc = irq_to_desc(irq); - unsigned long flags; - - if (!desc) { - printk(KERN_ERR "Trying to mark IRQ%d probeable\n", irq); - return; - } + /* Sanitize flags */ + set &= IRQF_MODIFY_MASK; + clr &= IRQF_MODIFY_MASK; raw_spin_lock_irqsave(&desc->lock, flags); - desc->status &= ~IRQ_NOPROBE; + desc->status &= ~clr; + desc->status |= set; raw_spin_unlock_irqrestore(&desc->lock, flags); } -- cgit v1.2.3-70-g09d2 From f303a6dd127b5ec6de90d1cd79ed19820c7e9658 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 28 Sep 2010 17:34:01 +0200 Subject: genirq: Sanitize irq_data accessors Get the data structure from the core and provide inline wrappers to access the irq_data members. Provide accessor inlines for irq_data as well. Signed-off-by: Thomas Gleixner Reviewed-by: Ingo Molnar --- include/linux/irq.h | 62 +++++++++++++++++++++++++++++++++++++++++++++++++---- kernel/irq/chip.c | 8 +++++++ 2 files changed, 66 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/include/linux/irq.h b/include/linux/irq.h index e7e7ac83edd..bea40556c5a 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -85,6 +85,7 @@ typedef void (*irq_flow_handler_t)(unsigned int irq, #endif struct msi_desc; +struct irq_2_iommu; /** * struct irq_data - per irq and irq chip data passed down to chip functions @@ -332,11 +333,64 @@ extern int set_irq_data(unsigned int irq, void *data); extern int set_irq_chip_data(unsigned int irq, void *data); extern int set_irq_type(unsigned int irq, unsigned int type); extern int set_irq_msi(unsigned int irq, struct msi_desc *entry); +extern struct irq_data *irq_get_irq_data(unsigned int irq); -#define get_irq_chip(irq) (irq_to_desc(irq)->irq_data.chip) -#define get_irq_chip_data(irq) (irq_to_desc(irq)->irq_data.chip_data) -#define get_irq_data(irq) (irq_to_desc(irq)->irq_data.handler_data) -#define get_irq_msi(irq) (irq_to_desc(irq)->irq_data.msi_desc) +static inline struct irq_chip *get_irq_chip(unsigned int irq) +{ + struct irq_data *d = irq_get_irq_data(irq); + return d ? d->chip : NULL; +} + +static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d) +{ + return d->chip; +} + +static inline void *get_irq_chip_data(unsigned int irq) +{ + struct irq_data *d = irq_get_irq_data(irq); + return d ? d->chip_data : NULL; +} + +static inline void *irq_data_get_irq_chip_data(struct irq_data *d) +{ + return d->chip_data; +} + +static inline void *get_irq_data(unsigned int irq) +{ + struct irq_data *d = irq_get_irq_data(irq); + return d ? d->handler_data : NULL; +} + +static inline void *irq_data_get_irq_data(struct irq_data *d) +{ + return d->handler_data; +} + +static inline struct msi_desc *get_irq_msi(unsigned int irq) +{ + struct irq_data *d = irq_get_irq_data(irq); + return d ? d->msi_desc : NULL; +} + +static inline struct msi_desc *irq_data_get_msi(struct irq_data *d) +{ + return d->msi_desc; +} + +#ifdef CONFIG_INTR_REMAP +static inline struct irq_2_iommu *get_irq_iommu(unsigned int irq) +{ + struct irq_data *d = irq_get_irq_data(irq); + return d ? 
d->irq_2_iommu : NULL; +} + +static inline struct irq_2_iommu *irq_data_get_iommu(struct irq_data *d) +{ + return d->irq_2_iommu; +} +#endif #endif /* CONFIG_GENERIC_HARDIRQS */ diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 2b1f6906b82..659be326c8e 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -256,6 +256,14 @@ int set_irq_chip_data(unsigned int irq, void *data) } EXPORT_SYMBOL(set_irq_chip_data); +struct irq_data *irq_get_irq_data(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + + return desc ? &desc->irq_data : NULL; +} +EXPORT_SYMBOL_GPL(irq_get_irq_data); + /** * set_irq_nested_thread - Set/Reset the IRQ_NESTED_THREAD flag of an irq * -- cgit v1.2.3-70-g09d2 From 3795de236d67a05994a1a12759db9d4dd9ffc42c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 22 Sep 2010 17:09:43 +0200 Subject: genirq: Distangle kernel/irq/handle.c kernel/irq/handle.c has become a dumpground for random code in random order. Split out the irq descriptor management and the dummy irq_chip implementation into separate files. Cleanup the include maze while at it. No code change. Signed-off-by: Thomas Gleixner Reviewed-by: Ingo Molnar --- kernel/irq/Makefile | 2 +- kernel/irq/dummychip.c | 68 ++++++++++ kernel/irq/handle.c | 333 +------------------------------------------------ kernel/irq/irqdesc.c | 269 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 340 insertions(+), 332 deletions(-) create mode 100644 kernel/irq/dummychip.c create mode 100644 kernel/irq/irqdesc.c (limited to 'kernel') diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile index 7d047808419..1eaab0da56d 100644 --- a/kernel/irq/Makefile +++ b/kernel/irq/Makefile @@ -1,5 +1,5 @@ -obj-y := handle.o manage.o spurious.o resend.o chip.o devres.o +obj-y := irqdesc.o handle.o manage.o spurious.o resend.o chip.o dummychip.o devres.o obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o diff --git a/kernel/irq/dummychip.c b/kernel/irq/dummychip.c new file mode 100644 index 00000000000..918dea9de9e --- /dev/null +++ b/kernel/irq/dummychip.c @@ -0,0 +1,68 @@ +/* + * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar + * Copyright (C) 2005-2006, Thomas Gleixner, Russell King + * + * This file contains the dummy interrupt chip implementation + */ +#include +#include + +#include "internals.h" + +/* + * What should we do if we get a hw irq event on an illegal vector? + * Each architecture has to answer this themself. 
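
A driver-side sketch of the accessor style these patches establish: chip callbacks receive struct irq_data and fetch their state through the new inlines instead of poking irq_desc directly (struct my_chip_data and the register layout are illustrative):

#include <linux/io.h>
#include <linux/irq.h>

struct my_chip_data {			/* illustrative per-chip state */
	void __iomem *mask_reg;
};

static void my_mask_irq(struct irq_data *d)
{
	/* chip data comes from irq_data via the accessor added above */
	struct my_chip_data *cd = irq_data_get_irq_chip_data(d);

	writel(1 << (d->irq & 31), cd->mask_reg);
}
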
+ */ +static void ack_bad(struct irq_data *data) +{ + struct irq_desc *desc = irq_data_to_desc(data); + + print_irq_desc(data->irq, desc); + ack_bad_irq(data->irq); +} + +/* + * NOP functions + */ +static void noop(struct irq_data *data) { } + +static unsigned int noop_ret(struct irq_data *data) +{ + return 0; +} + +#ifndef CONFIG_GENERIC_HARDIRQS_NO_CRUFT +static void compat_noop(unsigned int irq) { } +#define END_INIT .end = compat_noop +#else +#define END_INIT +#endif + +/* + * Generic no controller implementation + */ +struct irq_chip no_irq_chip = { + .name = "none", + .irq_startup = noop_ret, + .irq_shutdown = noop, + .irq_enable = noop, + .irq_disable = noop, + .irq_ack = ack_bad, + END_INIT +}; + +/* + * Generic dummy implementation which can be used for + * real dumb interrupt sources + */ +struct irq_chip dummy_irq_chip = { + .name = "dummy", + .irq_startup = noop_ret, + .irq_shutdown = noop, + .irq_enable = noop, + .irq_disable = noop, + .irq_ack = noop, + .irq_mask = noop, + .irq_unmask = noop, + END_INIT +}; diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 3fcef37154a..e2347eb6330 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -11,24 +11,15 @@ */ #include -#include -#include -#include #include +#include #include #include -#include -#include -#include + #include #include "internals.h" -/* - * lockdep: we want to handle all irq_desc locks as a single lock-class: - */ -struct lock_class_key irq_desc_lock_class; - /** * handle_bad_irq - handle spurious and unhandled irqs * @irq: the interrupt number @@ -43,308 +34,6 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc) ack_bad_irq(irq); } -#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) -static void __init init_irq_default_affinity(void) -{ - alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); - cpumask_setall(irq_default_affinity); -} -#else -static void __init init_irq_default_affinity(void) -{ -} -#endif - -/* - * Linux has a controller-independent interrupt architecture. - * Every controller has a 'controller-template', that is used - * by the main code to do the right thing. Each driver-visible - * interrupt source is transparently wired to the appropriate - * controller. Thus drivers need not be aware of the - * interrupt-controller. - * - * The code is designed to be easily extended with new/different - * interrupt controllers, without having to do assembly magic or - * having to touch the generic code. 
- * - * Controller mappings for all interrupt sources: - */ -int nr_irqs = NR_IRQS; -EXPORT_SYMBOL_GPL(nr_irqs); - -#ifdef CONFIG_SPARSE_IRQ - -static struct irq_desc irq_desc_init = { - .status = IRQ_DISABLED, - .handle_irq = handle_bad_irq, - .depth = 1, - .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock), -}; - -void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr) -{ - void *ptr; - - ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), - GFP_ATOMIC, node); - - /* - * don't overwite if can not get new one - * init_copy_kstat_irqs() could still use old one - */ - if (ptr) { - printk(KERN_DEBUG " alloc kstat_irqs on node %d\n", node); - desc->kstat_irqs = ptr; - } -} - -static void init_one_irq_desc(int irq, struct irq_desc *desc, int node) -{ - memcpy(desc, &irq_desc_init, sizeof(struct irq_desc)); - - raw_spin_lock_init(&desc->lock); - desc->irq_data.irq = irq; -#ifdef CONFIG_SMP - desc->irq_data.node = node; -#endif - lockdep_set_class(&desc->lock, &irq_desc_lock_class); - init_kstat_irqs(desc, node, nr_cpu_ids); - if (!desc->kstat_irqs) { - printk(KERN_ERR "can not alloc kstat_irqs\n"); - BUG_ON(1); - } - if (!alloc_desc_masks(desc, node, false)) { - printk(KERN_ERR "can not alloc irq_desc cpumasks\n"); - BUG_ON(1); - } - init_desc_masks(desc); - arch_init_chip_data(desc, node); -} - -/* - * Protect the sparse_irqs: - */ -DEFINE_RAW_SPINLOCK(sparse_irq_lock); - -static RADIX_TREE(irq_desc_tree, GFP_ATOMIC); - -static void set_irq_desc(unsigned int irq, struct irq_desc *desc) -{ - radix_tree_insert(&irq_desc_tree, irq, desc); -} - -struct irq_desc *irq_to_desc(unsigned int irq) -{ - return radix_tree_lookup(&irq_desc_tree, irq); -} - -void replace_irq_desc(unsigned int irq, struct irq_desc *desc) -{ - void **ptr; - - ptr = radix_tree_lookup_slot(&irq_desc_tree, irq); - if (ptr) - radix_tree_replace_slot(ptr, desc); -} - -static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = { - [0 ... 
NR_IRQS_LEGACY-1] = { - .status = IRQ_DISABLED, - .handle_irq = handle_bad_irq, - .depth = 1, - .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock), - } -}; - -static unsigned int *kstat_irqs_legacy; - -int __init early_irq_init(void) -{ - struct irq_desc *desc; - int legacy_count; - int node; - int i; - - init_irq_default_affinity(); - - /* initialize nr_irqs based on nr_cpu_ids */ - arch_probe_nr_irqs(); - printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs); - - desc = irq_desc_legacy; - legacy_count = ARRAY_SIZE(irq_desc_legacy); - node = first_online_node; - - /* allocate based on nr_cpu_ids */ - kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids * - sizeof(int), GFP_NOWAIT, node); - - irq_desc_init.irq_data.chip = &no_irq_chip; - - for (i = 0; i < legacy_count; i++) { - desc[i].irq_data.irq = i; - desc[i].irq_data.chip = &no_irq_chip; -#ifdef CONFIG_SMP - desc[i].irq_data.node = node; -#endif - desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids; - lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); - alloc_desc_masks(&desc[i], node, true); - init_desc_masks(&desc[i]); - set_irq_desc(i, &desc[i]); - } - - return arch_early_irq_init(); -} - -struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node) -{ - struct irq_desc *desc; - unsigned long flags; - - if (irq >= nr_irqs) { - WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n", - irq, nr_irqs); - return NULL; - } - - desc = irq_to_desc(irq); - if (desc) - return desc; - - raw_spin_lock_irqsave(&sparse_irq_lock, flags); - - /* We have to check it to avoid races with another CPU */ - desc = irq_to_desc(irq); - if (desc) - goto out_unlock; - - desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); - - printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node); - if (!desc) { - printk(KERN_ERR "can not alloc irq_desc\n"); - BUG_ON(1); - } - init_one_irq_desc(irq, desc, node); - - set_irq_desc(irq, desc); - -out_unlock: - raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); - - return desc; -} - -#else /* !CONFIG_SPARSE_IRQ */ - -struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { - [0 ... NR_IRQS-1] = { - .status = IRQ_DISABLED, - .handle_irq = handle_bad_irq, - .depth = 1, - .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), - } -}; - -static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS]; -int __init early_irq_init(void) -{ - struct irq_desc *desc; - int count; - int i; - - init_irq_default_affinity(); - - printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS); - - desc = irq_desc; - count = ARRAY_SIZE(irq_desc); - - for (i = 0; i < count; i++) { - desc[i].irq_data.irq = i; - desc[i].irq_data.chip = &no_irq_chip; - alloc_desc_masks(&desc[i], 0, true); - init_desc_masks(&desc[i]); - desc[i].kstat_irqs = kstat_irqs_all[i]; - } - return arch_early_irq_init(); -} - -struct irq_desc *irq_to_desc(unsigned int irq) -{ - return (irq < NR_IRQS) ? irq_desc + irq : NULL; -} - -struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node) -{ - return irq_to_desc(irq); -} -#endif /* !CONFIG_SPARSE_IRQ */ - -void clear_kstat_irqs(struct irq_desc *desc) -{ - memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs))); -} - -/* - * What should we do if we get a hw irq event on an illegal vector? - * Each architecture has to answer this themself. 
- */ -static void ack_bad(struct irq_data *data) -{ - struct irq_desc *desc = irq_data_to_desc(data); - - print_irq_desc(data->irq, desc); - ack_bad_irq(data->irq); -} - -/* - * NOP functions - */ -static void noop(struct irq_data *data) { } - -static unsigned int noop_ret(struct irq_data *data) -{ - return 0; -} - -#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED -static void compat_noop(unsigned int irq) { } -#define END_INIT .end = compat_noop -#else -#define END_INIT -#endif - -/* - * Generic no controller implementation - */ -struct irq_chip no_irq_chip = { - .name = "none", - .irq_startup = noop_ret, - .irq_shutdown = noop, - .irq_enable = noop, - .irq_disable = noop, - .irq_ack = ack_bad, - END_INIT -}; - -/* - * Generic dummy implementation which can be used for - * real dumb interrupt sources - */ -struct irq_chip dummy_irq_chip = { - .name = "dummy", - .irq_startup = noop_ret, - .irq_shutdown = noop, - .irq_enable = noop, - .irq_disable = noop, - .irq_ack = noop, - .irq_mask = noop, - .irq_unmask = noop, - END_INIT -}; - /* * Special, empty irq handler: */ @@ -540,21 +229,3 @@ out: return 1; } #endif - -void early_init_irq_lock_class(void) -{ - struct irq_desc *desc; - int i; - - for_each_irq_desc(i, desc) { - lockdep_set_class(&desc->lock, &irq_desc_lock_class); - } -} - -unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) -{ - struct irq_desc *desc = irq_to_desc(irq); - return desc ? desc->kstat_irqs[cpu] : 0; -} -EXPORT_SYMBOL(kstat_irqs_cpu); - diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c new file mode 100644 index 00000000000..fbf8cfa0051 --- /dev/null +++ b/kernel/irq/irqdesc.c @@ -0,0 +1,269 @@ +/* + * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar + * Copyright (C) 2005-2006, Thomas Gleixner, Russell King + * + * This file contains the interrupt descriptor management code + * + * Detailed information is available in Documentation/DocBook/genericirq + * + */ +#include +#include +#include +#include +#include +#include + +#include "internals.h" + +/* + * lockdep: we want to handle all irq_desc locks as a single lock-class: + */ +struct lock_class_key irq_desc_lock_class; + +#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) +static void __init init_irq_default_affinity(void) +{ + alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); + cpumask_setall(irq_default_affinity); +} +#else +static void __init init_irq_default_affinity(void) +{ +} +#endif + +int nr_irqs = NR_IRQS; +EXPORT_SYMBOL_GPL(nr_irqs); + +#ifdef CONFIG_SPARSE_IRQ + +static struct irq_desc irq_desc_init = { + .status = IRQ_DISABLED, + .handle_irq = handle_bad_irq, + .depth = 1, + .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock), +}; + +void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr) +{ + void *ptr; + + ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), + GFP_ATOMIC, node); + + /* + * don't overwite if can not get new one + * init_copy_kstat_irqs() could still use old one + */ + if (ptr) { + printk(KERN_DEBUG " alloc kstat_irqs on node %d\n", node); + desc->kstat_irqs = ptr; + } +} + +static void init_one_irq_desc(int irq, struct irq_desc *desc, int node) +{ + memcpy(desc, &irq_desc_init, sizeof(struct irq_desc)); + + raw_spin_lock_init(&desc->lock); + desc->irq_data.irq = irq; +#ifdef CONFIG_SMP + desc->irq_data.node = node; +#endif + lockdep_set_class(&desc->lock, &irq_desc_lock_class); + init_kstat_irqs(desc, node, nr_cpu_ids); + if (!desc->kstat_irqs) { + printk(KERN_ERR "can not alloc kstat_irqs\n"); + BUG_ON(1); + } + if (!alloc_desc_masks(desc, node, 
false)) { + printk(KERN_ERR "can not alloc irq_desc cpumasks\n"); + BUG_ON(1); + } + init_desc_masks(desc); + arch_init_chip_data(desc, node); +} + +/* + * Protect the sparse_irqs: + */ +DEFINE_RAW_SPINLOCK(sparse_irq_lock); + +static RADIX_TREE(irq_desc_tree, GFP_ATOMIC); + +static void set_irq_desc(unsigned int irq, struct irq_desc *desc) +{ + radix_tree_insert(&irq_desc_tree, irq, desc); +} + +struct irq_desc *irq_to_desc(unsigned int irq) +{ + return radix_tree_lookup(&irq_desc_tree, irq); +} + +void replace_irq_desc(unsigned int irq, struct irq_desc *desc) +{ + void **ptr; + + ptr = radix_tree_lookup_slot(&irq_desc_tree, irq); + if (ptr) + radix_tree_replace_slot(ptr, desc); +} + +static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = { + [0 ... NR_IRQS_LEGACY-1] = { + .status = IRQ_DISABLED, + .handle_irq = handle_bad_irq, + .depth = 1, + .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock), + } +}; + +static unsigned int *kstat_irqs_legacy; + +int __init early_irq_init(void) +{ + struct irq_desc *desc; + int legacy_count; + int node; + int i; + + init_irq_default_affinity(); + + /* initialize nr_irqs based on nr_cpu_ids */ + arch_probe_nr_irqs(); + printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs); + + desc = irq_desc_legacy; + legacy_count = ARRAY_SIZE(irq_desc_legacy); + node = first_online_node; + + /* allocate based on nr_cpu_ids */ + kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids * + sizeof(int), GFP_NOWAIT, node); + + irq_desc_init.irq_data.chip = &no_irq_chip; + + for (i = 0; i < legacy_count; i++) { + desc[i].irq_data.irq = i; + desc[i].irq_data.chip = &no_irq_chip; +#ifdef CONFIG_SMP + desc[i].irq_data.node = node; +#endif + desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids; + lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); + alloc_desc_masks(&desc[i], node, true); + init_desc_masks(&desc[i]); + set_irq_desc(i, &desc[i]); + } + + return arch_early_irq_init(); +} + +struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node) +{ + struct irq_desc *desc; + unsigned long flags; + + if (irq >= nr_irqs) { + WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n", + irq, nr_irqs); + return NULL; + } + + desc = irq_to_desc(irq); + if (desc) + return desc; + + raw_spin_lock_irqsave(&sparse_irq_lock, flags); + + /* We have to check it to avoid races with another CPU */ + desc = irq_to_desc(irq); + if (desc) + goto out_unlock; + + desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); + + printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node); + if (!desc) { + printk(KERN_ERR "can not alloc irq_desc\n"); + BUG_ON(1); + } + init_one_irq_desc(irq, desc, node); + + set_irq_desc(irq, desc); + +out_unlock: + raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); + + return desc; +} + +#else /* !CONFIG_SPARSE_IRQ */ + +struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { + [0 ... 
NR_IRQS-1] = { + .status = IRQ_DISABLED, + .handle_irq = handle_bad_irq, + .depth = 1, + .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), + } +}; + +static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS]; +int __init early_irq_init(void) +{ + struct irq_desc *desc; + int count; + int i; + + init_irq_default_affinity(); + + printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS); + + desc = irq_desc; + count = ARRAY_SIZE(irq_desc); + + for (i = 0; i < count; i++) { + desc[i].irq_data.irq = i; + desc[i].irq_data.chip = &no_irq_chip; + alloc_desc_masks(&desc[i], 0, true); + init_desc_masks(&desc[i]); + desc[i].kstat_irqs = kstat_irqs_all[i]; + } + return arch_early_irq_init(); +} + +struct irq_desc *irq_to_desc(unsigned int irq) +{ + return (irq < NR_IRQS) ? irq_desc + irq : NULL; +} + +struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node) +{ + return irq_to_desc(irq); +} +#endif /* !CONFIG_SPARSE_IRQ */ + +void clear_kstat_irqs(struct irq_desc *desc) +{ + memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs))); +} + +void early_init_irq_lock_class(void) +{ + struct irq_desc *desc; + int i; + + for_each_irq_desc(i, desc) { + lockdep_set_class(&desc->lock, &irq_desc_lock_class); + } +} + +unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) +{ + struct irq_desc *desc = irq_to_desc(irq); + return desc ? desc->kstat_irqs[cpu] : 0; +} +EXPORT_SYMBOL(kstat_irqs_cpu); -- cgit v1.2.3-70-g09d2 From 154cd387cdf0e5566ce523cbddf92dd2a062dfd6 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 22 Sep 2010 15:58:45 +0200 Subject: genirq: Remove early_init_irq_lock_class() early_init_irq_lock_class() is called way before anything touches the irq descriptors. In case of SPARSE_IRQ=y this is a NOP operation because the radix tree is empty at this point. For the SPARSE_IRQ=n case it's sufficient to set the lock class in early_init_irq(). Signed-off-by: Thomas Gleixner Reviewed-by: Ingo Molnar --- include/linux/lockdep.h | 8 -------- init/main.c | 1 - kernel/irq/irqdesc.c | 11 +---------- 3 files changed, 1 insertion(+), 19 deletions(-) (limited to 'kernel') diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 06aed8305bf..17d050ce7ab 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -424,14 +424,6 @@ do { \ #endif /* CONFIG_LOCKDEP */ -#ifdef CONFIG_GENERIC_HARDIRQS -extern void early_init_irq_lock_class(void); -#else -static inline void early_init_irq_lock_class(void) -{ -} -#endif - #ifdef CONFIG_TRACE_IRQFLAGS extern void early_boot_irqs_off(void); extern void early_boot_irqs_on(void); diff --git a/init/main.c b/init/main.c index 94ab488039a..9684c9670b4 100644 --- a/init/main.c +++ b/init/main.c @@ -556,7 +556,6 @@ asmlinkage void __init start_kernel(void) local_irq_disable(); early_boot_irqs_off(); - early_init_irq_lock_class(); /* * Interrupts are still disabled. 
Do necessary setups, then diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index fbf8cfa0051..0a7a0908afb 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -231,6 +231,7 @@ int __init early_irq_init(void) alloc_desc_masks(&desc[i], 0, true); init_desc_masks(&desc[i]); desc[i].kstat_irqs = kstat_irqs_all[i]; + lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); } return arch_early_irq_init(); } @@ -251,16 +252,6 @@ void clear_kstat_irqs(struct irq_desc *desc) memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs))); -void early_init_irq_lock_class(void) -{ - struct irq_desc *desc; - int i; - - for_each_irq_desc(i, desc) { - lockdep_set_class(&desc->lock, &irq_desc_lock_class); - } -} - unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) { struct irq_desc *desc = irq_to_desc(irq); -- cgit v1.2.3-70-g09d2 From d895f51ebb54cefe367bda135fcf2cd734d51d03 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 27 Sep 2010 17:45:49 +0200 Subject: genirq: Remove export of kstat_irqs_cpu The statistics accessor is only used by proc/stats and show_interrupts(). Both are compiled in. Signed-off-by: Thomas Gleixner Reviewed-by: Ingo Molnar --- kernel/irq/irqdesc.c | 1 - 1 file changed, 1 deletion(-) (limited to 'kernel') diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 0a7a0908afb..78ff426a6cb 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -257,4 +257,3 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) struct irq_desc *desc = irq_to_desc(irq); return desc ? desc->kstat_irqs[cpu] : 0; } -EXPORT_SYMBOL(kstat_irqs_cpu); -- cgit v1.2.3-70-g09d2 From 1318a481fc37c503a901b96ae06b692ca2b21af5 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 27 Sep 2010 21:01:37 +0200 Subject: genirq: Provide default irq init flags Arch code sets its own irq_desc.status flags right after boot and for dynamically allocated interrupts. That might involve iterating over a huge array. Allow ARCH_IRQ_INIT_FLAGS to set separate flags aside from IRQ_DISABLED, which is the default.
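For illustration, a hypothetical architecture override might look like the following sketch (the chosen flags are an assumption for the example; they mirror the IRQ_NOREQUEST | IRQ_NOPROBE bits that arm's init_IRQ() loop sets by hand):

/*
 * Sketch only: an arch-provided define visible to include/linux/irq.h.
 * Every fresh descriptor then starts out as
 * IRQ_DISABLED | IRQ_NOREQUEST | IRQ_NOPROBE, instead of the arch
 * looping over the whole descriptor array at boot to set the extra bits.
 */
#define ARCH_IRQ_INIT_FLAGS	(IRQ_NOREQUEST | IRQ_NOPROBE)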
Signed-off-by: Thomas Gleixner Reviewed-by: Ingo Molnar --- include/linux/irq.h | 6 ++++++ kernel/irq/chip.c | 2 +- kernel/irq/irqdesc.c | 6 +++--- 3 files changed, 10 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/include/linux/irq.h b/include/linux/irq.h index bea40556c5a..30a300991ed 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -214,6 +214,12 @@ struct irq_chip { */ #include +#ifndef ARCH_IRQ_INIT_FLAGS +# define ARCH_IRQ_INIT_FLAGS 0 +#endif + +#define IRQ_DEFAULT_INIT_FLAGS (IRQ_DISABLED | ARCH_IRQ_INIT_FLAGS) + struct irqaction; extern int setup_irq(unsigned int irq, struct irqaction *new); extern void remove_irq(unsigned int irq, struct irqaction *act); diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 659be326c8e..3405761d622 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -31,7 +31,7 @@ static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data) /* Ensure we don't have left over values from a previous use of this irq */ raw_spin_lock_irqsave(&desc->lock, flags); - desc->status = IRQ_DISABLED; + desc->status = IRQ_DEFAULT_INIT_FLAGS; desc->irq_data.chip = &no_irq_chip; desc->handle_irq = handle_bad_irq; desc->depth = 1; diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 78ff426a6cb..29963f99f24 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(nr_irqs); #ifdef CONFIG_SPARSE_IRQ static struct irq_desc irq_desc_init = { - .status = IRQ_DISABLED, + .status = IRQ_DEFAULT_INIT_FLAGS, .handle_irq = handle_bad_irq, .depth = 1, .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock), @@ -113,7 +113,7 @@ void replace_irq_desc(unsigned int irq, struct irq_desc *desc) static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = { [0 ... NR_IRQS_LEGACY-1] = { - .status = IRQ_DISABLED, + .status = IRQ_DEFAULT_INIT_FLAGS, .handle_irq = handle_bad_irq, .depth = 1, .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock), @@ -204,7 +204,7 @@ out_unlock: struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { [0 ... NR_IRQS-1] = { - .status = IRQ_DISABLED, + .status = IRQ_DEFAULT_INIT_FLAGS, .handle_irq = handle_bad_irq, .depth = 1, .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), -- cgit v1.2.3-70-g09d2 From 1f5a5b87f78fade3ae48dfd55e8765d1d622ea4e Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 27 Sep 2010 17:48:26 +0200 Subject: genirq: Implement a sane sparse_irq allocator The current sparse_irq allocator has several shortcomings due to failures in the design or the lack of one: - Requires iteration over the number of active irqs to find a free slot (Some architectures have grown their own workarounds for this) - Removal of entries is not possible - Racy between create_irq_nr and destroy_irq (plugged by horrible callbacks) - Migration of active irq descriptors is not possible - No bulk allocation of irq ranges - Sprinkled irq_desc references all over the place outside of kernel/irq/ (The previous chip functions series is addressing this issue) Implement a sane allocator which fixes the above shortcomings (though migration of active descriptors needs a full tree-wide cleanup of the direct and mostly unlocked access to irq_desc). The new allocator still uses a radix_tree, but uses a bitmap for keeping track of allocated irq numbers.
That allows: - Fast lookup of a free slot - Allows the removal of descriptors - Prevents the create/destroy race - Bulk allocation of consecutive irq ranges - Basic design is ready for migration of life descriptors after further cleanups The bitmap is also used in the SPARSE_IRQ=n case for lookup and raceless (de)allocation of irq numbers. So it removes the requirement for looping through the descriptor array to find slots. Right now it uses sparse_irq_lock to protect the bitmap and the radix tree, but after cleaning up all users we should be able convert that to a mutex and to switch the radix_tree and decriptor allocations to GFP_KERNEL. [ Folded in a bugfix from Yinghai Lu ] Signed-off-by: Thomas Gleixner Reviewed-by: Ingo Molnar --- include/linux/irq.h | 23 +++++ kernel/irq/irqdesc.c | 231 +++++++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 246 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/include/linux/irq.h b/include/linux/irq.h index 30a300991ed..cefacf928b3 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -398,6 +398,29 @@ static inline struct irq_2_iommu *irq_data_get_iommu(struct irq_data *d) } #endif +int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node); +void irq_free_descs(unsigned int irq, unsigned int cnt); + +static inline int irq_alloc_desc(int node) +{ + return irq_alloc_descs(-1, 0, 1, node); +} + +static inline int irq_alloc_desc_at(unsigned int at, int node) +{ + return irq_alloc_descs(at, at, 1, node); +} + +static inline int irq_alloc_desc_from(unsigned int from, int node) +{ + return irq_alloc_descs(-1, from, 1, node); +} + +static inline void irq_free_desc(unsigned int irq) +{ + irq_free_descs(irq, 1); +} + #endif /* CONFIG_GENERIC_HARDIRQS */ #endif /* !CONFIG_S390 */ diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 29963f99f24..4eea48b4f57 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "internals.h" @@ -33,9 +34,54 @@ static void __init init_irq_default_affinity(void) } #endif +#ifdef CONFIG_SMP +static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) +{ + if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node)) + return -ENOMEM; + +#ifdef CONFIG_GENERIC_PENDING_IRQ + if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { + free_cpumask_var(desc->irq_data.affinity); + return -ENOMEM; + } +#endif + return 0; +} + +static void desc_smp_init(struct irq_desc *desc, int node) +{ + desc->node = node; + cpumask_copy(desc->irq_data.affinity, irq_default_affinity); +} + +#else +static inline int +alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; } +static inline void desc_smp_init(struct irq_desc *desc, int node) { } +#endif + +static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node) +{ + desc->irq_data.irq = irq; + desc->irq_data.chip = &no_irq_chip; + desc->irq_data.chip_data = NULL; + desc->irq_data.handler_data = NULL; + desc->irq_data.msi_desc = NULL; + desc->status = IRQ_DEFAULT_INIT_FLAGS; + desc->handle_irq = handle_bad_irq; + desc->depth = 1; + desc->name = NULL; + memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs))); + desc_smp_init(desc, node); +} + int nr_irqs = NR_IRQS; EXPORT_SYMBOL_GPL(nr_irqs); +DEFINE_RAW_SPINLOCK(sparse_irq_lock); +static DECLARE_BITMAP(allocated_irqs, NR_IRQS); + #ifdef CONFIG_SPARSE_IRQ static struct irq_desc irq_desc_init = { @@ -85,14 +131,9 @@ static void init_one_irq_desc(int irq, 
struct irq_desc *desc, int node) arch_init_chip_data(desc, node); } -/* - * Protect the sparse_irqs: - */ -DEFINE_RAW_SPINLOCK(sparse_irq_lock); - static RADIX_TREE(irq_desc_tree, GFP_ATOMIC); -static void set_irq_desc(unsigned int irq, struct irq_desc *desc) +static void irq_insert_desc(unsigned int irq, struct irq_desc *desc) { radix_tree_insert(&irq_desc_tree, irq, desc); } @@ -111,6 +152,94 @@ void replace_irq_desc(unsigned int irq, struct irq_desc *desc) radix_tree_replace_slot(ptr, desc); } +static void delete_irq_desc(unsigned int irq) +{ + radix_tree_delete(&irq_desc_tree, irq); +} + +#ifdef CONFIG_SMP +static void free_masks(struct irq_desc *desc) +{ +#ifdef CONFIG_GENERIC_PENDING_IRQ + free_cpumask_var(desc->pending_mask); +#endif + free_cpumask_var(desc->affinity); +} +#else +static inline void free_masks(struct irq_desc *desc) { } +#endif + +static struct irq_desc *alloc_desc(int irq, int node) +{ + struct irq_desc *desc; + gfp_t gfp = GFP_KERNEL; + + desc = kzalloc_node(sizeof(*desc), gfp, node); + if (!desc) + return NULL; + /* allocate based on nr_cpu_ids */ + desc->kstat_irqs = kzalloc_node(nr_cpu_ids * sizeof(*desc->kstat_irqs), + gfp, node); + if (!desc->kstat_irqs) + goto err_desc; + + if (alloc_masks(desc, gfp, node)) + goto err_kstat; + + raw_spin_lock_init(&desc->lock); + lockdep_set_class(&desc->lock, &irq_desc_lock_class); + + desc_set_defaults(irq, desc, node); + + return desc; + +err_kstat: + kfree(desc->kstat_irqs); +err_desc: + kfree(desc); + return NULL; +} + +static void free_desc(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + + raw_spin_lock_irqsave(&sparse_irq_lock, flags); + delete_irq_desc(irq); + raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); + + free_masks(desc); + kfree(desc->kstat_irqs); + kfree(desc); +} + +static int alloc_descs(unsigned int start, unsigned int cnt, int node) +{ + struct irq_desc *desc; + unsigned long flags; + int i; + + for (i = 0; i < cnt; i++) { + desc = alloc_desc(start + i, node); + if (!desc) + goto err; + raw_spin_lock_irqsave(&sparse_irq_lock, flags); + irq_insert_desc(start + i, desc); + raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); + } + return start; + +err: + for (i--; i >= 0; i--) + free_desc(start + i); + + raw_spin_lock_irqsave(&sparse_irq_lock, flags); + bitmap_clear(allocated_irqs, start, cnt); + raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); + return -ENOMEM; +} + static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = { [0 ... 
NR_IRQS_LEGACY-1] = { .status = IRQ_DEFAULT_INIT_FLAGS, @@ -155,7 +284,7 @@ int __init early_irq_init(void) lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); alloc_desc_masks(&desc[i], node, true); init_desc_masks(&desc[i]); - set_irq_desc(i, &desc[i]); + irq_insert_desc(i, &desc[i]); } return arch_early_irq_init(); @@ -192,7 +321,7 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node) } init_one_irq_desc(irq, desc, node); - set_irq_desc(irq, desc); + irq_insert_desc(irq, desc); out_unlock: raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); @@ -245,8 +374,94 @@ struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node) { return irq_to_desc(irq); } + +#ifdef CONFIG_SMP +static inline int desc_node(struct irq_desc *desc) +{ + return desc->irq_data.node; +} +#else +static inline int desc_node(struct irq_desc *desc) { return 0; } +#endif + +static void free_desc(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + + raw_spin_lock_irqsave(&desc->lock, flags); + desc_set_defaults(irq, desc, desc_node(desc)); + raw_spin_unlock_irqrestore(&desc->lock, flags); +} + +static inline int alloc_descs(unsigned int start, unsigned int cnt, int node) +{ + return start; +} #endif /* !CONFIG_SPARSE_IRQ */ +/* Dynamic interrupt handling */ + +/** + * irq_free_descs - free irq descriptors + * @from: Start of descriptor range + * @cnt: Number of consecutive irqs to free + */ +void irq_free_descs(unsigned int from, unsigned int cnt) +{ + unsigned long flags; + int i; + + if (from >= nr_irqs || (from + cnt) > nr_irqs) + return; + + for (i = 0; i < cnt; i++) + free_desc(from + i); + + raw_spin_lock_irqsave(&sparse_irq_lock, flags); + bitmap_clear(allocated_irqs, from, cnt); + raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); +} + +/** + * irq_alloc_descs - allocate and initialize a range of irq descriptors + * @irq: Allocate for specific irq number if irq >= 0 + * @from: Start the search from this irq number + * @cnt: Number of consecutive irqs to allocate. + * @node: Preferred node on which the irq descriptor should be allocated + * + * Returns the first irq number or error code + */ +int __ref +irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node) +{ + unsigned long flags; + int start, ret; + + if (!cnt) + return -EINVAL; + + raw_spin_lock_irqsave(&sparse_irq_lock, flags); + + start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0); + ret = -EEXIST; + if (irq >=0 && start != irq) + goto err; + + ret = -ENOMEM; + if (start >= nr_irqs) + goto err; + + bitmap_set(allocated_irqs, start, cnt); + raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); + return alloc_descs(start, cnt, node); + +err: + raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); + return ret; +} + +/* Statistics access */ void clear_kstat_irqs(struct irq_desc *desc) { memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs))); -- cgit v1.2.3-70-g09d2 From 13bfe99e09123ef5edb6acb81ba337d2db600b53 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 30 Sep 2010 02:46:07 +0200 Subject: genirq: Prepare proc for real sparse irq support /proc/irq never removes any entries, but when irq descriptors can be freed for real this is necessary. 
Otherwise we'd reference a freed descriptor in /proc/irq/N Signed-off-by: Thomas Gleixner Reviewed-by: Ingo Molnar --- kernel/irq/internals.h | 2 ++ kernel/irq/irqdesc.c | 2 ++ kernel/irq/proc.c | 18 ++++++++++++++++++ 3 files changed, 22 insertions(+) (limited to 'kernel') diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index e281e45fbb5..8eb01e379cc 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -32,10 +32,12 @@ void replace_irq_desc(unsigned int irq, struct irq_desc *desc); #ifdef CONFIG_PROC_FS extern void register_irq_proc(unsigned int irq, struct irq_desc *desc); +extern void unregister_irq_proc(unsigned int irq, struct irq_desc *desc); extern void register_handler_proc(unsigned int irq, struct irqaction *action); extern void unregister_handler_proc(unsigned int irq, struct irqaction *action); #else static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { } +static inline void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) { } static inline void register_handler_proc(unsigned int irq, struct irqaction *action) { } static inline void unregister_handler_proc(unsigned int irq, diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 4eea48b4f57..6312a2c8397 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -205,6 +205,8 @@ static void free_desc(unsigned int irq) struct irq_desc *desc = irq_to_desc(irq); unsigned long flags; + unregister_irq_proc(irq, desc); + raw_spin_lock_irqsave(&sparse_irq_lock, flags); delete_irq_desc(irq); raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index d9fddf918b4..01b1d3a8898 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c @@ -297,6 +297,24 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc) &irq_spurious_proc_fops, (void *)(long)irq); } +void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) +{ + char name [MAX_NAMELEN]; + + if (!root_irq_dir || !desc->dir) + return; +#ifdef CONFIG_SMP + remove_proc_entry("smp_affinity", desc->dir); + remove_proc_entry("affinity_hint", desc->dir); + remove_proc_entry("node", desc->dir); +#endif + remove_proc_entry("spurious", desc->dir); + + memset(name, 0, MAX_NAMELEN); + sprintf(name, "%u", irq); + remove_proc_entry(name, root_irq_dir); +} + #undef MAX_NAMELEN void unregister_handler_proc(unsigned int irq, struct irqaction *action) -- cgit v1.2.3-70-g09d2 From a98d24b71b6e229965f18dc00d28dc71cb8fe324 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 30 Sep 2010 10:45:07 +0200 Subject: genirq: Implement sane enumeration Use the allocator bitmap to lookup active interrupts. 
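For illustration, a minimal sketch of the new enumerator in use (the helper below is hypothetical, not part of the patch):

#include <linux/irqnr.h>
#include <linux/kernel.h>

/*
 * Walks only irq numbers whose bit is set in the allocator bitmap,
 * instead of visiting all nr_irqs slots and skipping the NULL
 * irq_to_desc() results.
 */
static void example_dump_active_irqs(void)
{
        unsigned int irq;

        for_each_active_irq(irq)
                pr_info("irq %u is allocated\n", irq);
}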
Signed-off-by: Thomas Gleixner Reviewed-by: Ingo Molnar --- include/linux/irqnr.h | 5 +++++ kernel/irq/irqdesc.c | 11 +++++++++++ 2 files changed, 16 insertions(+) (limited to 'kernel') diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h index 7bf89bc8cbc..05aa8c23483 100644 --- a/include/linux/irqnr.h +++ b/include/linux/irqnr.h @@ -25,6 +25,7 @@ extern int nr_irqs; extern struct irq_desc *irq_to_desc(unsigned int irq); +unsigned int irq_get_next_irq(unsigned int offset); # define for_each_irq_desc(irq, desc) \ for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \ @@ -47,6 +48,10 @@ extern struct irq_desc *irq_to_desc(unsigned int irq); #define irq_node(irq) 0 #endif +# define for_each_active_irq(irq) \ + for (irq = irq_get_next_irq(0); irq < nr_irqs; \ + irq = irq_get_next_irq(irq + 1)) + #endif /* CONFIG_GENERIC_HARDIRQS */ #define for_each_irq_nr(irq) \ diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 6312a2c8397..2e7e94ef64d 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -463,6 +463,17 @@ err: return ret; } +/** + * irq_get_next_irq - get next allocated irq number + * @offset: where to start the search + * + * Returns next irq number after offset or nr_irqs if none is found. + */ +unsigned int irq_get_next_irq(unsigned int offset) +{ + return find_next_bit(allocated_irqs, nr_irqs, offset); +} + /* Statistics access */ void clear_kstat_irqs(struct irq_desc *desc) { -- cgit v1.2.3-70-g09d2 From 06f6c3399e9f9ff6eafc200e80f9226c3cee0eaf Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 12 Oct 2010 12:31:46 +0200 Subject: genirq: Implement irq reservation Mark a range of interrupts as allocated. In the SPARSE_IRQ=n case we need this to update the bitmap for the legacy irqs so the enumerator via irq_get_next_irq() works. 
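A usage sketch, assuming an architecture with 16 legacy vectors (the caller below is hypothetical):

#include <linux/irq.h>
#include <linux/kernel.h>

static int __init example_reserve_legacy_irqs(void)
{
        /*
         * Mark irqs 0-15 allocated so irq_alloc_descs() cannot hand
         * them out and for_each_active_irq() enumerates them even
         * with SPARSE_IRQ=n.
         */
        int ret = irq_reserve_irqs(0, 16);

        if (ret)
                pr_err("legacy irq range already in use (%d)\n", ret);
        return ret;
}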
Signed-off-by: Thomas Gleixner --- include/linux/irq.h | 1 + kernel/irq/irqdesc.c | 26 ++++++++++++++++++++++++++ 2 files changed, 27 insertions(+) (limited to 'kernel') diff --git a/include/linux/irq.h b/include/linux/irq.h index cefacf928b3..096b74d5d0d 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -400,6 +400,7 @@ static inline struct irq_2_iommu *irq_data_get_iommu(struct irq_data *d) int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node); void irq_free_descs(unsigned int irq, unsigned int cnt); +int irq_reserve_irqs(unsigned int from, unsigned int cnt); static inline int irq_alloc_desc(int node) { diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 2e7e94ef64d..35d9052901b 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -463,6 +463,32 @@ err: return ret; } +/** + * irq_reserve_irqs - mark irqs allocated + * @from: mark from irq number + * @cnt: number of irqs to mark + * + * Returns 0 on success or an appropriate error code + */ +int irq_reserve_irqs(unsigned int from, unsigned int cnt) +{ + unsigned long flags; + unsigned int start; + int ret = 0; + + if (!cnt || (from + cnt) > nr_irqs) + return -EINVAL; + + raw_spin_lock_irqsave(&sparse_irq_lock, flags); + start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0); + if (start == from) + bitmap_set(allocated_irqs, start, cnt); + else + ret = -EEXIST; + raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); + return ret; +} + /** * irq_get_next_irq - get next allocated irq number * @offset: where to start the search -- cgit v1.2.3-70-g09d2 From aa99ec0f3f26bf2bcd0fa5176de93598427f1e5e Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 27 Sep 2010 20:02:56 +0200 Subject: genirq: Use sane sparse allocator Make irq_to_desc_alloc_node() a wrapper around the new allocator. 
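To make the wrapper's semantics concrete, a sketch of the underlying allocator interface (caller and irq numbers are invented for the example):

#include <linux/irq.h>

static int example_grab_irq_block(int node)
{
        int base;

        /*
         * Search upwards from irq 64 for four consecutive free
         * numbers; descriptors, kstat counters and cpumasks are
         * allocated on the given node. Returns the first irq number
         * or a negative error code.
         */
        base = irq_alloc_descs(-1, 64, 4, node);
        if (base < 0)
                return base;

        /* ... install chips and handlers for base..base+3 ... */

        irq_free_descs(base, 4);        /* paired teardown */
        return 0;
}

irq_to_desc_alloc_node(irq, node) is then simply the fixed-number, cnt=1 case of the same call, as the hunk below shows.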
Signed-off-by: Thomas Gleixner Reviewed-by: Ingo Molnar --- kernel/irq/irqdesc.c | 129 ++++++++------------------------------------------- 1 file changed, 20 insertions(+), 109 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 35d9052901b..7cbe4f93e2f 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -51,7 +51,7 @@ static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) static void desc_smp_init(struct irq_desc *desc, int node) { - desc->node = node; + desc->irq_data.node = node; cpumask_copy(desc->irq_data.affinity, irq_default_affinity); } @@ -84,13 +84,6 @@ static DECLARE_BITMAP(allocated_irqs, NR_IRQS); #ifdef CONFIG_SPARSE_IRQ -static struct irq_desc irq_desc_init = { - .status = IRQ_DEFAULT_INIT_FLAGS, - .handle_irq = handle_bad_irq, - .depth = 1, - .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock), -}; - void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr) { void *ptr; @@ -108,29 +101,6 @@ void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr) } } -static void init_one_irq_desc(int irq, struct irq_desc *desc, int node) -{ - memcpy(desc, &irq_desc_init, sizeof(struct irq_desc)); - - raw_spin_lock_init(&desc->lock); - desc->irq_data.irq = irq; -#ifdef CONFIG_SMP - desc->irq_data.node = node; -#endif - lockdep_set_class(&desc->lock, &irq_desc_lock_class); - init_kstat_irqs(desc, node, nr_cpu_ids); - if (!desc->kstat_irqs) { - printk(KERN_ERR "can not alloc kstat_irqs\n"); - BUG_ON(1); - } - if (!alloc_desc_masks(desc, node, false)) { - printk(KERN_ERR "can not alloc irq_desc cpumasks\n"); - BUG_ON(1); - } - init_desc_masks(desc); - arch_init_chip_data(desc, node); -} - static RADIX_TREE(irq_desc_tree, GFP_ATOMIC); static void irq_insert_desc(unsigned int irq, struct irq_desc *desc) @@ -171,8 +141,9 @@ static inline void free_masks(struct irq_desc *desc) { } static struct irq_desc *alloc_desc(int irq, int node) { + /* Temporary hack until we can switch to GFP_KERNEL */ + gfp_t gfp = gfp_allowed_mask == GFP_BOOT_MASK ? GFP_NOWAIT : GFP_ATOMIC; struct irq_desc *desc; - gfp_t gfp = GFP_KERNEL; desc = kzalloc_node(sizeof(*desc), gfp, node); if (!desc) @@ -226,6 +197,8 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node) desc = alloc_desc(start + i, node); if (!desc) goto err; + /* temporary until I fixed x86 madness */ + arch_init_chip_data(desc, node); raw_spin_lock_irqsave(&sparse_irq_lock, flags); irq_insert_desc(start + i, desc); raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); @@ -242,23 +215,19 @@ err: return -ENOMEM; } -static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = { - [0 ... 
NR_IRQS_LEGACY-1] = { - .status = IRQ_DEFAULT_INIT_FLAGS, - .handle_irq = handle_bad_irq, - .depth = 1, - .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock), - } -}; +struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node) +{ + int res = irq_alloc_descs(irq, irq, 1, node); -static unsigned int *kstat_irqs_legacy; + if (res == -EEXIST || res == irq) + return irq_to_desc(irq); + return NULL; +} int __init early_irq_init(void) { + int i, node = first_online_node; struct irq_desc *desc; - int legacy_count; - int node; - int i; init_irq_default_affinity(); @@ -266,71 +235,14 @@ int __init early_irq_init(void) arch_probe_nr_irqs(); printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs); - desc = irq_desc_legacy; - legacy_count = ARRAY_SIZE(irq_desc_legacy); - node = first_online_node; - - /* allocate based on nr_cpu_ids */ - kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids * - sizeof(int), GFP_NOWAIT, node); - - irq_desc_init.irq_data.chip = &no_irq_chip; - - for (i = 0; i < legacy_count; i++) { - desc[i].irq_data.irq = i; - desc[i].irq_data.chip = &no_irq_chip; -#ifdef CONFIG_SMP - desc[i].irq_data.node = node; -#endif - desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids; - lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); - alloc_desc_masks(&desc[i], node, true); - init_desc_masks(&desc[i]); - irq_insert_desc(i, &desc[i]); + for (i = 0; i < NR_IRQS_LEGACY; i++) { + desc = alloc_desc(i, node); + set_bit(i, allocated_irqs); + irq_insert_desc(i, desc); } - return arch_early_irq_init(); } -struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node) -{ - struct irq_desc *desc; - unsigned long flags; - - if (irq >= nr_irqs) { - WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n", - irq, nr_irqs); - return NULL; - } - - desc = irq_to_desc(irq); - if (desc) - return desc; - - raw_spin_lock_irqsave(&sparse_irq_lock, flags); - - /* We have to check it to avoid races with another CPU */ - desc = irq_to_desc(irq); - if (desc) - goto out_unlock; - - desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); - - printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node); - if (!desc) { - printk(KERN_ERR "can not alloc irq_desc\n"); - BUG_ON(1); - } - init_one_irq_desc(irq, desc, node); - - irq_insert_desc(irq, desc); - -out_unlock: - raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); - - return desc; -} - #else /* !CONFIG_SPARSE_IRQ */ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { @@ -345,9 +257,8 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS]; int __init early_irq_init(void) { + int count, i, node = first_online_node; struct irq_desc *desc; - int count; - int i; init_irq_default_affinity(); @@ -359,9 +270,9 @@ int __init early_irq_init(void) for (i = 0; i < count; i++) { desc[i].irq_data.irq = i; desc[i].irq_data.chip = &no_irq_chip; - alloc_desc_masks(&desc[i], 0, true); - init_desc_masks(&desc[i]); desc[i].kstat_irqs = kstat_irqs_all[i]; + alloc_masks(desc + i, GFP_KERNEL, node); + desc_smp_init(desc + i, node); lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); } return arch_early_irq_init(); -- cgit v1.2.3-70-g09d2 From b683de2b3cb17bb10fa6fd4af614dc75b5749fe0 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 27 Sep 2010 20:55:03 +0200 Subject: genirq: Query arch for number of early descriptors sparse irq sets up NR_IRQS_LEGACY irq descriptors and archs then go ahead and allocate more. 
Use the unused return value of arch_probe_nr_irqs() to let the architecture return the number of early allocations. Fix up all users. Signed-off-by: Thomas Gleixner Reviewed-by: Ingo Molnar --- arch/arm/kernel/irq.c | 6 ++---- arch/sh/kernel/irq.c | 2 +- arch/x86/kernel/apic/io_apic.c | 2 +- include/linux/irq.h | 4 ++++ kernel/irq/irqdesc.c | 10 +++++----- kernel/softirq.c | 4 +++- 6 files changed, 16 insertions(+), 12 deletions(-) (limited to 'kernel') diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index c0d5c3b3a76..5456d11d6ae 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -157,10 +157,8 @@ void __init init_IRQ(void) struct irq_desc *desc; int irq; - for (irq = 0; irq < nr_irqs; irq++) { - desc = irq_to_desc_alloc_node(irq, 0); + for (irq = 0; irq < nr_irqs; irq++) desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE; - } init_arch_irq(); } @@ -169,7 +167,7 @@ void __init init_IRQ(void) int __init arch_probe_nr_irqs(void) { nr_irqs = arch_nr_irqs ? arch_nr_irqs : NR_IRQS; - return 0; + return nr_irqs; } #endif diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index 257de1f0692..ae5bac39b89 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c @@ -290,7 +290,7 @@ void __init init_IRQ(void) int __init arch_probe_nr_irqs(void) { nr_irqs = sh_mv.mv_nr_irqs; - return 0; + return NR_IRQS_LEGACY; } #endif diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index f1efebaf551..5aee1d1a306 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -3880,7 +3880,7 @@ int __init arch_probe_nr_irqs(void) if (nr < nr_irqs) nr_irqs = nr; - return 0; + return NR_IRQS_LEGACY; } #endif diff --git a/include/linux/irq.h b/include/linux/irq.h index 096b74d5d0d..ef878823ee3 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -214,6 +214,10 @@ struct irq_chip { */ #include +#ifndef NR_IRQS_LEGACY +# define NR_IRQS_LEGACY 0 +#endif + #ifndef ARCH_IRQ_INIT_FLAGS # define ARCH_IRQ_INIT_FLAGS 0 #endif diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 7cbe4f93e2f..a1fbd1d347a 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -226,16 +226,16 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node) int __init early_irq_init(void) { - int i, node = first_online_node; + int i, initcnt, node = first_online_node; struct irq_desc *desc; init_irq_default_affinity(); - /* initialize nr_irqs based on nr_cpu_ids */ - arch_probe_nr_irqs(); - printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs); + /* Let arch update nr_irqs and return the nr of preallocated irqs */ + initcnt = arch_probe_nr_irqs(); + printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt); - for (i = 0; i < NR_IRQS_LEGACY; i++) { + for (i = 0; i < initcnt; i++) { desc = alloc_desc(i, node); set_bit(i, allocated_irqs); irq_insert_desc(i, desc); diff --git a/kernel/softirq.c b/kernel/softirq.c index 07b4f1b1a73..14a7b80b2cc 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -886,9 +886,10 @@ int __init __weak early_irq_init(void) return 0; } +#ifdef CONFIG_GENERIC_HARDIRQS int __init __weak arch_probe_nr_irqs(void) { - return 0; + return NR_IRQS_LEGACY; } int __init __weak arch_early_irq_init(void) @@ -900,3 +901,4 @@ int __weak arch_init_chip_data(struct irq_desc *desc, int node) { return 0; } +#endif -- cgit v1.2.3-70-g09d2 From b7d0d8258a9f71949b810e0f82a3d75088f4d364 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 29 Sep 2010 18:44:23 +0200 Subject: genirq: Remove 
arch_init_chip_data() This function should have not been there in the first place. Signed-off-by: Thomas Gleixner Reviewed-by: Ingo Molnar --- include/linux/interrupt.h | 3 --- kernel/irq/irqdesc.c | 2 -- kernel/softirq.c | 5 ----- 3 files changed, 10 deletions(-) (limited to 'kernel') diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index a0384a4d1e6..19988983aea 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -641,11 +641,8 @@ static inline void init_irq_proc(void) struct seq_file; int show_interrupts(struct seq_file *p, void *v); -struct irq_desc; - extern int early_irq_init(void); extern int arch_probe_nr_irqs(void); extern int arch_early_irq_init(void); -extern int arch_init_chip_data(struct irq_desc *desc, int node); #endif diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index a1fbd1d347a..6c71f8ea5d7 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -197,8 +197,6 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node) desc = alloc_desc(start + i, node); if (!desc) goto err; - /* temporary until I fixed x86 madness */ - arch_init_chip_data(desc, node); raw_spin_lock_irqsave(&sparse_irq_lock, flags); irq_insert_desc(start + i, desc); raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); diff --git a/kernel/softirq.c b/kernel/softirq.c index 14a7b80b2cc..d19b1c9aa7c 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -896,9 +896,4 @@ int __init __weak arch_early_irq_init(void) { return 0; } - -int __weak arch_init_chip_data(struct irq_desc *desc, int node) -{ - return 0; -} #endif -- cgit v1.2.3-70-g09d2 From b7b29338dc7111ed8bd4d6555d84afae13ebe752 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 29 Sep 2010 18:46:55 +0200 Subject: genirq: Sanitize dynamic irq handling Use the cleanup functions of the dynamic allocator. No need to have separate implementations. Signed-off-by: Thomas Gleixner Reviewed-by: Ingo Molnar --- include/linux/irq.h | 12 ++++-- kernel/irq/chip.c | 102 ------------------------------------------------- kernel/irq/internals.h | 1 - kernel/irq/irqdesc.c | 41 +++++++++++--------- 4 files changed, 31 insertions(+), 125 deletions(-) (limited to 'kernel') diff --git a/include/linux/irq.h b/include/linux/irq.h index 49702b22883..e9639115dff 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -326,11 +326,15 @@ extern unsigned int create_irq_nr(unsigned int irq_want, int node); extern int create_irq(void); extern void destroy_irq(unsigned int irq); -/* Dynamic irq helper functions */ -extern void dynamic_irq_init(unsigned int irq); -void dynamic_irq_init_keep_chip_data(unsigned int irq); +/* + * Dynamic irq helper functions. Obsolete. Use irq_alloc_desc* and + * irq_free_desc instead. 
+ */ extern void dynamic_irq_cleanup(unsigned int irq); -void dynamic_irq_cleanup_keep_chip_data(unsigned int irq); +static inline void dynamic_irq_init(unsigned int irq) +{ + dynamic_irq_cleanup(irq); +} /* Set/get chip/data for an IRQ: */ extern int set_irq_chip(unsigned int irq, struct irq_chip *chip); diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 3405761d622..baa5c4acad8 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -18,108 +18,6 @@ #include "internals.h" -static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data) -{ - struct irq_desc *desc; - unsigned long flags; - - desc = irq_to_desc(irq); - if (!desc) { - WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq); - return; - } - - /* Ensure we don't have left over values from a previous use of this irq */ - raw_spin_lock_irqsave(&desc->lock, flags); - desc->status = IRQ_DEFAULT_INIT_FLAGS; - desc->irq_data.chip = &no_irq_chip; - desc->handle_irq = handle_bad_irq; - desc->depth = 1; - desc->irq_data.msi_desc = NULL; - desc->irq_data.handler_data = NULL; - if (!keep_chip_data) - desc->irq_data.chip_data = NULL; - desc->action = NULL; - desc->irq_count = 0; - desc->irqs_unhandled = 0; -#ifdef CONFIG_SMP - cpumask_setall(desc->irq_data.affinity); -#ifdef CONFIG_GENERIC_PENDING_IRQ - cpumask_clear(desc->pending_mask); -#endif -#endif - raw_spin_unlock_irqrestore(&desc->lock, flags); -} - -/** - * dynamic_irq_init - initialize a dynamically allocated irq - * @irq: irq number to initialize - */ -void dynamic_irq_init(unsigned int irq) -{ - dynamic_irq_init_x(irq, false); -} - -/** - * dynamic_irq_init_keep_chip_data - initialize a dynamically allocated irq - * @irq: irq number to initialize - * - * does not set irq_to_desc(irq)->irq_data.chip_data to NULL - */ -void dynamic_irq_init_keep_chip_data(unsigned int irq) -{ - dynamic_irq_init_x(irq, true); -} - -static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data) -{ - struct irq_desc *desc = irq_to_desc(irq); - unsigned long flags; - - if (!desc) { - WARN(1, KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq); - return; - } - - raw_spin_lock_irqsave(&desc->lock, flags); - if (desc->action) { - raw_spin_unlock_irqrestore(&desc->lock, flags); - WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n", - irq); - return; - } - desc->irq_data.msi_desc = NULL; - desc->irq_data.handler_data = NULL; - if (!keep_chip_data) - desc->irq_data.chip_data = NULL; - desc->handle_irq = handle_bad_irq; - desc->irq_data.chip = &no_irq_chip; - desc->name = NULL; - clear_kstat_irqs(desc); - raw_spin_unlock_irqrestore(&desc->lock, flags); -} - -/** - * dynamic_irq_cleanup - cleanup a dynamically allocated irq - * @irq: irq number to initialize - */ -void dynamic_irq_cleanup(unsigned int irq) -{ - dynamic_irq_cleanup_x(irq, false); -} - -/** - * dynamic_irq_cleanup_keep_chip_data - cleanup a dynamically allocated irq - * @irq: irq number to initialize - * - * does not set irq_to_desc(irq)->irq_data.chip_data to NULL - */ -void dynamic_irq_cleanup_keep_chip_data(unsigned int irq) -{ - dynamic_irq_cleanup_x(irq, true); -} - - /** * set_irq_chip - set the irq chip for an irq * @irq: irq number diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 8eb01e379cc..f444203a772 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -20,7 +20,6 @@ extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume); extern struct lock_class_key irq_desc_lock_class; extern void init_kstat_irqs(struct irq_desc *desc, int node, int 
nr); -extern void clear_kstat_irqs(struct irq_desc *desc); extern raw_spinlock_t sparse_irq_lock; /* Resending of interrupts :*/ diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 6c71f8ea5d7..c9d5a1c1287 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -53,12 +53,21 @@ static void desc_smp_init(struct irq_desc *desc, int node) { desc->irq_data.node = node; cpumask_copy(desc->irq_data.affinity, irq_default_affinity); +#ifdef CONFIG_GENERIC_PENDING_IRQ + cpumask_clear(desc->pending_mask); +#endif +} + +static inline int desc_node(struct irq_desc *desc) +{ + return desc->irq_data.node; } #else static inline int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; } static inline void desc_smp_init(struct irq_desc *desc, int node) { } +static inline int desc_node(struct irq_desc *desc) { return 0; } #endif static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node) @@ -71,6 +80,8 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node) desc->status = IRQ_DEFAULT_INIT_FLAGS; desc->handle_irq = handle_bad_irq; desc->depth = 1; + desc->irq_count = 0; + desc->irqs_unhandled = 0; desc->name = NULL; memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs))); desc_smp_init(desc, node); @@ -286,23 +297,9 @@ struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node) return irq_to_desc(irq); } -#ifdef CONFIG_SMP -static inline int desc_node(struct irq_desc *desc) -{ - return desc->irq_data.node; -} -#else -static inline int desc_node(struct irq_desc *desc) { return 0; } -#endif - static void free_desc(unsigned int irq) { - struct irq_desc *desc = irq_to_desc(irq); - unsigned long flags; - - raw_spin_lock_irqsave(&desc->lock, flags); - desc_set_defaults(irq, desc, desc_node(desc)); - raw_spin_unlock_irqrestore(&desc->lock, flags); + dynamic_irq_cleanup(irq); } static inline int alloc_descs(unsigned int start, unsigned int cnt, int node) @@ -409,10 +406,18 @@ unsigned int irq_get_next_irq(unsigned int offset) return find_next_bit(allocated_irqs, nr_irqs, offset); } -/* Statistics access */ -void clear_kstat_irqs(struct irq_desc *desc) +/** + * dynamic_irq_cleanup - cleanup a dynamically allocated irq + * @irq: irq number to initialize + */ +void dynamic_irq_cleanup(unsigned int irq) { - memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs))); + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + + raw_spin_lock_irqsave(&desc->lock, flags); + desc_set_defaults(irq, desc, desc_node(desc)); + raw_spin_unlock_irqrestore(&desc->lock, flags); } unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) -- cgit v1.2.3-70-g09d2 From 78f90d91f395cd0dc1ef3f21e0c5cd6fd50d202c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 29 Sep 2010 17:18:47 +0200 Subject: genirq: Remove the now unused sparse irq leftovers The move_irq_desc() function was only used due to the problem that the allocator did not free the old descriptors. So the descriptors had to be moved in create_irq_nr(). That's history. The code would have never been able to move active interrupt descriptors on affinity settings. That can be done in a completely different way w/o all this horror. Remove all of it. 
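Taken together with the obsolescence note on the dynamic irq helpers in the previous patch, the replacement path looks roughly like this (hypothetical caller; the starting offset of 32 is an assumption for the example):

#include <linux/irq.h>

static int example_create_irq(int node)
{
        /*
         * One call picks the first free number >= 32 and allocates a
         * descriptor with defaults already set - no separate
         * create_irq_nr() scan plus dynamic_irq_init() pass needed.
         */
        int irq = irq_alloc_desc_from(32, node);

        if (irq < 0)
                return irq;
        /* set_irq_chip()/handler setup would go here */
        return irq;
}

static void example_destroy_irq(unsigned int irq)
{
        /*
         * Resets the descriptor; with SPARSE_IRQ=y it is freed
         * entirely and its number returned to the allocator bitmap.
         */
        irq_free_desc(irq);
}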
Signed-off-by: Thomas Gleixner Reviewed-by: Ingo Molnar --- include/linux/irqdesc.h | 12 +---- kernel/irq/Kconfig | 5 -- kernel/irq/Makefile | 1 - kernel/irq/internals.h | 102 --------------------------------------- kernel/irq/irqdesc.c | 30 +----------- kernel/irq/numa_migrate.c | 120 ---------------------------------------------- 6 files changed, 4 insertions(+), 266 deletions(-) delete mode 100644 kernel/irq/numa_migrate.c (limited to 'kernel') diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index f77dc5618d7..979c68cc745 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -82,24 +82,16 @@ struct irq_desc { const char *name; } ____cacheline_internodealigned_in_smp; -extern void arch_init_copy_chip_data(struct irq_desc *old_desc, - struct irq_desc *desc, int node); -extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc); - #ifndef CONFIG_SPARSE_IRQ extern struct irq_desc irq_desc[NR_IRQS]; #endif -#ifdef CONFIG_NUMA_IRQ_DESC -extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node); -#else +/* Will be removed once the last users in power and sh are gone */ +extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node); static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) { return desc; } -#endif - -extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node); #ifdef CONFIG_GENERIC_HARDIRQS diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig index a42c0191d71..31d766bf5d2 100644 --- a/kernel/irq/Kconfig +++ b/kernel/irq/Kconfig @@ -26,11 +26,6 @@ config GENERIC_IRQ_PROBE config GENERIC_PENDING_IRQ def_bool n -if SPARSE_IRQ && NUMA -config NUMA_IRQ_DESC - def_bool n -endif - config AUTO_IRQ_AFFINITY def_bool n diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile index 1eaab0da56d..54329cd7b3e 100644 --- a/kernel/irq/Makefile +++ b/kernel/irq/Makefile @@ -3,5 +3,4 @@ obj-y := irqdesc.o handle.o manage.o spurious.o resend.o chip.o dummychip.o devr obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o -obj-$(CONFIG_NUMA_IRQ_DESC) += numa_migrate.o obj-$(CONFIG_PM_SLEEP) += pm.o diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index f444203a772..4571ae7e085 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -18,17 +18,11 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp); extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume); -extern struct lock_class_key irq_desc_lock_class; extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); -extern raw_spinlock_t sparse_irq_lock; /* Resending of interrupts :*/ void check_irq_resend(struct irq_desc *desc, unsigned int irq); -#ifdef CONFIG_SPARSE_IRQ -void replace_irq_desc(unsigned int irq, struct irq_desc *desc); -#endif - #ifdef CONFIG_PROC_FS extern void register_irq_proc(unsigned int irq, struct irq_desc *desc); extern void unregister_irq_proc(unsigned int irq, struct irq_desc *desc); @@ -110,99 +104,3 @@ static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) #undef P -/* Stuff below will be cleaned up after the sparse allocator is done */ - -#ifdef CONFIG_SMP -/** - * alloc_desc_masks - allocate cpumasks for irq_desc - * @desc: pointer to irq_desc struct - * @node: node which will be handling the cpumasks - * @boot: true if need bootmem - * - * Allocates affinity 
and pending_mask cpumask if required. - * Returns true if successful (or not required). - */ -static inline bool alloc_desc_masks(struct irq_desc *desc, int node, - bool boot) -{ - gfp_t gfp = GFP_ATOMIC; - - if (boot) - gfp = GFP_NOWAIT; - -#ifdef CONFIG_CPUMASK_OFFSTACK - if (!alloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node)) - return false; - -#ifdef CONFIG_GENERIC_PENDING_IRQ - if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { - free_cpumask_var(desc->irq_data.affinity); - return false; - } -#endif -#endif - return true; -} - -static inline void init_desc_masks(struct irq_desc *desc) -{ - cpumask_setall(desc->irq_data.affinity); -#ifdef CONFIG_GENERIC_PENDING_IRQ - cpumask_clear(desc->pending_mask); -#endif -} - -/** - * init_copy_desc_masks - copy cpumasks for irq_desc - * @old_desc: pointer to old irq_desc struct - * @new_desc: pointer to new irq_desc struct - * - * Insures affinity and pending_masks are copied to new irq_desc. - * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the - * irq_desc struct so the copy is redundant. - */ - -static inline void init_copy_desc_masks(struct irq_desc *old_desc, - struct irq_desc *new_desc) -{ -#ifdef CONFIG_CPUMASK_OFFSTACK - cpumask_copy(new_desc->irq_data.affinity, old_desc->irq_data.affinity); - -#ifdef CONFIG_GENERIC_PENDING_IRQ - cpumask_copy(new_desc->pending_mask, old_desc->pending_mask); -#endif -#endif -} - -static inline void free_desc_masks(struct irq_desc *old_desc, - struct irq_desc *new_desc) -{ - free_cpumask_var(old_desc->irq_data.affinity); - -#ifdef CONFIG_GENERIC_PENDING_IRQ - free_cpumask_var(old_desc->pending_mask); -#endif -} - -#else /* !CONFIG_SMP */ - -static inline bool alloc_desc_masks(struct irq_desc *desc, int node, - bool boot) -{ - return true; -} - -static inline void init_desc_masks(struct irq_desc *desc) -{ -} - -static inline void init_copy_desc_masks(struct irq_desc *old_desc, - struct irq_desc *new_desc) -{ -} - -static inline void free_desc_masks(struct irq_desc *old_desc, - struct irq_desc *new_desc) -{ -} -#endif /* CONFIG_SMP */ diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index c9d5a1c1287..4f0b9c9d5c4 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -20,7 +20,7 @@ /* * lockdep: we want to handle all irq_desc locks as a single lock-class: */ -struct lock_class_key irq_desc_lock_class; +static struct lock_class_key irq_desc_lock_class; #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) static void __init init_irq_default_affinity(void) @@ -90,28 +90,11 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node) int nr_irqs = NR_IRQS; EXPORT_SYMBOL_GPL(nr_irqs); -DEFINE_RAW_SPINLOCK(sparse_irq_lock); +static DEFINE_RAW_SPINLOCK(sparse_irq_lock); static DECLARE_BITMAP(allocated_irqs, NR_IRQS); #ifdef CONFIG_SPARSE_IRQ -void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr) -{ - void *ptr; - - ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), - GFP_ATOMIC, node); - - /* - * don't overwite if can not get new one - * init_copy_kstat_irqs() could still use old one - */ - if (ptr) { - printk(KERN_DEBUG " alloc kstat_irqs on node %d\n", node); - desc->kstat_irqs = ptr; - } -} - static RADIX_TREE(irq_desc_tree, GFP_ATOMIC); static void irq_insert_desc(unsigned int irq, struct irq_desc *desc) @@ -124,15 +107,6 @@ struct irq_desc *irq_to_desc(unsigned int irq) return radix_tree_lookup(&irq_desc_tree, irq); } -void replace_irq_desc(unsigned int irq, struct irq_desc *desc) -{ - void **ptr; - - ptr = 
radix_tree_lookup_slot(&irq_desc_tree, irq); - if (ptr) - radix_tree_replace_slot(ptr, desc); -} - static void delete_irq_desc(unsigned int irq) { radix_tree_delete(&irq_desc_tree, irq); diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c deleted file mode 100644 index e7f1f16402c..00000000000 --- a/kernel/irq/numa_migrate.c +++ /dev/null @@ -1,120 +0,0 @@ -/* - * NUMA irq-desc migration code - * - * Migrate IRQ data structures (irq_desc, chip_data, etc.) over to - * the new "home node" of the IRQ. - */ - -#include -#include -#include -#include -#include -#include - -#include "internals.h" - -static void init_copy_kstat_irqs(struct irq_desc *old_desc, - struct irq_desc *desc, - int node, int nr) -{ - init_kstat_irqs(desc, node, nr); - - if (desc->kstat_irqs != old_desc->kstat_irqs) - memcpy(desc->kstat_irqs, old_desc->kstat_irqs, - nr * sizeof(*desc->kstat_irqs)); -} - -static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc) -{ - if (old_desc->kstat_irqs == desc->kstat_irqs) - return; - - kfree(old_desc->kstat_irqs); - old_desc->kstat_irqs = NULL; -} - -static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc, - struct irq_desc *desc, int node) -{ - memcpy(desc, old_desc, sizeof(struct irq_desc)); - if (!alloc_desc_masks(desc, node, false)) { - printk(KERN_ERR "irq %d: can not get new irq_desc cpumask " - "for migration.\n", irq); - return false; - } - raw_spin_lock_init(&desc->lock); - desc->irq_data.node = node; - lockdep_set_class(&desc->lock, &irq_desc_lock_class); - init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids); - init_copy_desc_masks(old_desc, desc); - arch_init_copy_chip_data(old_desc, desc, node); - return true; -} - -static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc) -{ - free_kstat_irqs(old_desc, desc); - free_desc_masks(old_desc, desc); - arch_free_chip_data(old_desc, desc); -} - -static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, - int node) -{ - struct irq_desc *desc; - unsigned int irq; - unsigned long flags; - - irq = old_desc->irq_data.irq; - - raw_spin_lock_irqsave(&sparse_irq_lock, flags); - - /* We have to check it to avoid races with another CPU */ - desc = irq_to_desc(irq); - - if (desc && old_desc != desc) - goto out_unlock; - - desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); - if (!desc) { - printk(KERN_ERR "irq %d: can not get new irq_desc " - "for migration.\n", irq); - /* still use old one */ - desc = old_desc; - goto out_unlock; - } - if (!init_copy_one_irq_desc(irq, old_desc, desc, node)) { - /* still use old one */ - kfree(desc); - desc = old_desc; - goto out_unlock; - } - - replace_irq_desc(irq, desc); - raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); - - /* free the old one */ - free_one_irq_desc(old_desc, desc); - kfree(old_desc); - - return desc; - -out_unlock: - raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); - - return desc; -} - -struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) -{ - /* those static or target node is -1, do not move them */ - if (desc->irq_data.irq < NR_IRQS_LEGACY || node == -1) - return desc; - - if (desc->irq_data.node != node) - desc = __real_move_irq_desc(desc, node); - - return desc; -} - -- cgit v1.2.3-70-g09d2 From a05a900a51c7622ebd8ddb41f14f8bf9db599d8d Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 8 Oct 2010 12:47:53 +0200 Subject: genirq: Make sparse_lock a mutex No callers from atomic regions. 
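Schematically, the conversion in the hunks below is (illustrative excerpt, mirroring the free_desc() change):

	/* before: usable from atomic context, but disables interrupts */
	raw_spin_lock_irqsave(&sparse_irq_lock, flags);
	delete_irq_desc(irq);
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);

	/* after: may sleep, which is fine because every caller runs
	 * in process context */
	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);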
Signed-off-by: Thomas Gleixner Reviewed-by: Ingo Molnar --- kernel/irq/irqdesc.c | 33 ++++++++++++++------------------- 1 file changed, 14 insertions(+), 19 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 4f0b9c9d5c4..0e302f90d2e 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -90,7 +90,7 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node) int nr_irqs = NR_IRQS; EXPORT_SYMBOL_GPL(nr_irqs); -static DEFINE_RAW_SPINLOCK(sparse_irq_lock); +static DEFINE_MUTEX(sparse_irq_lock); static DECLARE_BITMAP(allocated_irqs, NR_IRQS); #ifdef CONFIG_SPARSE_IRQ @@ -159,13 +159,12 @@ err_desc: static void free_desc(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); - unsigned long flags; unregister_irq_proc(irq, desc); - raw_spin_lock_irqsave(&sparse_irq_lock, flags); + mutex_lock(&sparse_irq_lock); delete_irq_desc(irq); - raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); + mutex_unlock(&sparse_irq_lock); free_masks(desc); kfree(desc->kstat_irqs); @@ -175,16 +174,15 @@ static void free_desc(unsigned int irq) static int alloc_descs(unsigned int start, unsigned int cnt, int node) { struct irq_desc *desc; - unsigned long flags; int i; for (i = 0; i < cnt; i++) { desc = alloc_desc(start + i, node); if (!desc) goto err; - raw_spin_lock_irqsave(&sparse_irq_lock, flags); + mutex_lock(&sparse_irq_lock); irq_insert_desc(start + i, desc); - raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); + mutex_unlock(&sparse_irq_lock); } return start; @@ -192,9 +190,9 @@ err: for (i--; i >= 0; i--) free_desc(start + i); - raw_spin_lock_irqsave(&sparse_irq_lock, flags); + mutex_lock(&sparse_irq_lock); bitmap_clear(allocated_irqs, start, cnt); - raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); + mutex_unlock(&sparse_irq_lock); return -ENOMEM; } @@ -291,7 +289,6 @@ static inline int alloc_descs(unsigned int start, unsigned int cnt, int node) */ void irq_free_descs(unsigned int from, unsigned int cnt) { - unsigned long flags; int i; if (from >= nr_irqs || (from + cnt) > nr_irqs) @@ -300,9 +297,9 @@ void irq_free_descs(unsigned int from, unsigned int cnt) for (i = 0; i < cnt; i++) free_desc(from + i); - raw_spin_lock_irqsave(&sparse_irq_lock, flags); + mutex_lock(&sparse_irq_lock); bitmap_clear(allocated_irqs, from, cnt); - raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); + mutex_unlock(&sparse_irq_lock); } /** @@ -317,13 +314,12 @@ void irq_free_descs(unsigned int from, unsigned int cnt) int __ref irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node) { - unsigned long flags; int start, ret; if (!cnt) return -EINVAL; - raw_spin_lock_irqsave(&sparse_irq_lock, flags); + mutex_lock(&sparse_irq_lock); start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0); ret = -EEXIST; @@ -335,11 +331,11 @@ irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node) goto err; bitmap_set(allocated_irqs, start, cnt); - raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); + mutex_unlock(&sparse_irq_lock); return alloc_descs(start, cnt, node); err: - raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); + mutex_unlock(&sparse_irq_lock); return ret; } @@ -352,20 +348,19 @@ err: */ int irq_reserve_irqs(unsigned int from, unsigned int cnt) { - unsigned long flags; unsigned int start; int ret = 0; if (!cnt || (from + cnt) > nr_irqs) return -EINVAL; - raw_spin_lock_irqsave(&sparse_irq_lock, flags); + mutex_lock(&sparse_irq_lock); start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, 
cnt, 0); if (start == from) bitmap_set(allocated_irqs, start, cnt); else ret = -EEXIST; - raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); + mutex_unlock(&sparse_irq_lock); return ret; } -- cgit v1.2.3-70-g09d2 From baa0d233afe765daa6dc01ff233aea8c5944f534 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 5 Oct 2010 15:14:35 +0200 Subject: genirq: Switch sparse_irq allocator to GFP_KERNEL The allocator functions are now called outside of preempt disabled regions. Switch to GFP_KERNEL. Signed-off-by: Thomas Gleixner Reviewed-by: Ingo Molnar --- kernel/irq/irqdesc.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 0e302f90d2e..f6f660cef7a 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -95,7 +95,7 @@ static DECLARE_BITMAP(allocated_irqs, NR_IRQS); #ifdef CONFIG_SPARSE_IRQ -static RADIX_TREE(irq_desc_tree, GFP_ATOMIC); +static RADIX_TREE(irq_desc_tree, GFP_KERNEL); static void irq_insert_desc(unsigned int irq, struct irq_desc *desc) { @@ -126,9 +126,8 @@ static inline void free_masks(struct irq_desc *desc) { } static struct irq_desc *alloc_desc(int irq, int node) { - /* Temporary hack until we can switch to GFP_KERNEL */ - gfp_t gfp = gfp_allowed_mask == GFP_BOOT_MASK ? GFP_NOWAIT : GFP_ATOMIC; struct irq_desc *desc; + gfp_t gfp = GFP_KERNEL; desc = kzalloc_node(sizeof(*desc), gfp, node); if (!desc) -- cgit v1.2.3-70-g09d2 From 5b8c4f23c54edda640a4850bc9b81dee70acb525 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 7 Sep 2010 14:33:43 +0000 Subject: printk: Make console_sem a semaphore not a pseudo mutex It needs to be investigated whether it can be replaced by a real mutex, but that needs more thought. Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Cc: Christoph Hellwig LKML-Reference: <20100907125057.179587334@linutronix.de> --- kernel/printk.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/printk.c b/kernel/printk.c index 8fe465ac008..2531017795f 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -85,7 +85,7 @@ EXPORT_SYMBOL(oops_in_progress); * provides serialisation for access to the entire console * driver system. */ -static DECLARE_MUTEX(console_sem); +static DEFINE_SEMAPHORE(console_sem); struct console *console_drivers; EXPORT_SYMBOL_GPL(console_drivers); @@ -556,7 +556,7 @@ static void zap_locks(void) /* If a crash is occurring, make sure we can't deadlock */ spin_lock_init(&logbuf_lock); /* And make sure that we print immediately */ - init_MUTEX(&console_sem); + sema_init(&console_sem, 1); } #if defined(CONFIG_PRINTK_TIME) -- cgit v1.2.3-70-g09d2 From c0a19ebc018222ffd1dd93af5b53d9efd779c19b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 12 Oct 2010 21:58:27 +0200 Subject: genirq: Fix CONFIG_GENIRQ_NO_DEPRECATED=y build This option can be set to verify the full conversion to the new chip functions. Fix the fallout of the patch rework, so the core code compiles and works with it. 
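For context, "the new chip functions" are the irq_data based irq_chip callbacks; the deprecated ones take a bare irq number. A rough sketch of the two styles (foo_* names are made up, only the signatures are taken from the code below):

/* deprecated style, compiled out when GENERIC_HARDIRQS_NO_DEPRECATED is set */
static void foo_mask(unsigned int irq)
{
	/* had to look up per-irq state by hand from the irq number */
}

/* new style: chip data and friends hang off the passed irq_data */
static void foo_irq_mask(struct irq_data *d)
{
	struct foo_chip *fc = d->chip_data;	/* foo_chip is hypothetical */

	/* ... use fc to mask d->irq in hardware ... */
}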
Signed-off-by: Thomas Gleixner --- kernel/irq/dummychip.c | 2 +- kernel/irq/irqdesc.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/dummychip.c b/kernel/irq/dummychip.c index 918dea9de9e..20dc5474947 100644 --- a/kernel/irq/dummychip.c +++ b/kernel/irq/dummychip.c @@ -31,7 +31,7 @@ static unsigned int noop_ret(struct irq_data *data) return 0; } -#ifndef CONFIG_GENERIC_HARDIRQS_NO_CRUFT +#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED static void compat_noop(unsigned int irq) { } #define END_INIT .end = compat_noop #else diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index f6f660cef7a..9d917ff7267 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -118,7 +118,7 @@ static void free_masks(struct irq_desc *desc) #ifdef CONFIG_GENERIC_PENDING_IRQ free_cpumask_var(desc->pending_mask); #endif - free_cpumask_var(desc->affinity); + free_cpumask_var(desc->irq_data.affinity); } #else static inline void free_masks(struct irq_desc *desc) { } -- cgit v1.2.3-70-g09d2 From 14cae9bd2faf6d0d75702c2e107e75207bcdfec1 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Wed, 29 Sep 2010 10:08:23 +0200 Subject: tracing: Fix function-graph build warning on 32-bit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix kernel/trace/trace_functions_graph.c: In function ‘trace_print_graph_duration’: kernel/trace/trace_functions_graph.c:652: warning: comparison of distinct pointer types lacks a cast when building 36-rc6 on a 32-bit kernel, due to the strict type check failing in the min() macro. Signed-off-by: Borislav Petkov Cc: Chase Douglas Cc: Steven Rostedt Cc: Ingo Molnar LKML-Reference: <20100929080823.GA13595@liondog.tnic> Signed-off-by: Frederic Weisbecker --- kernel/trace/trace_functions_graph.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 02c708ae0d4..ef49e9370b2 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -665,8 +665,9 @@ trace_print_graph_duration(unsigned long long duration, struct trace_seq *s) /* Print nsecs (we don't want to exceed 7 numbers) */ if (len < 7) { - snprintf(nsecs_str, min(sizeof(nsecs_str), 8UL - len), "%03lu", - nsecs_rem); + size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len); + + snprintf(nsecs_str, slen, "%03lu", nsecs_rem); ret = trace_seq_printf(s, ".%s", nsecs_str); if (!ret) return TRACE_TYPE_PARTIAL_LINE; -- cgit v1.2.3-70-g09d2 From fd02e6f7ae085840d43d780149fcf95a614eca5e Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 14 Oct 2010 12:10:24 +0900 Subject: kprobes: Fix selftest to clear flags field for reusing probes Fix selftest to clear flags field for reusing probes because the flags field can be modified by Kprobes. This also sets kprobe.addr to NULL instead of 0.
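Outside the selftest, the same rule applies to any probe that is registered again after unregister_kprobe(); a sketch (the probed symbol is arbitrary):

static struct kprobe kp = {
	.symbol_name = "do_fork",	/* stands in for any probed symbol */
};

/* ... register_kprobe(&kp); ... unregister_kprobe(&kp); ... */

static int reuse_probe(void)
{
	kp.addr = NULL;	/* re-resolved from symbol_name on registration */
	kp.flags = 0;	/* kprobes may set flags, e.g. KPROBE_FLAG_DISABLED */
	return register_kprobe(&kp);
}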
Signed-off-by: Masami Hiramatsu Cc: Rusty Russell Cc: Ananth N Mavinakayanahalli Cc: 2nddept-manager@sdl.hitachi.co.jp LKML-Reference: <20101014031024.4100.50107.stgit@ltc236.sdl.hitachi.co.jp> Signed-off-by: Ingo Molnar --- kernel/test_kprobes.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/test_kprobes.c b/kernel/test_kprobes.c index 4f104515a19..f8b11a28317 100644 --- a/kernel/test_kprobes.c +++ b/kernel/test_kprobes.c @@ -115,7 +115,9 @@ static int test_kprobes(void) int ret; struct kprobe *kps[2] = {&kp, &kp2}; - kp.addr = 0; /* addr should be cleard for reusing kprobe. */ + /* addr and flags should be cleard for reusing kprobe. */ + kp.addr = NULL; + kp.flags = 0; ret = register_kprobes(kps, 2); if (ret < 0) { printk(KERN_ERR "Kprobe smoke test failed: " @@ -210,7 +212,9 @@ static int test_jprobes(void) int ret; struct jprobe *jps[2] = {&jp, &jp2}; - jp.kp.addr = 0; /* addr should be cleard for reusing kprobe. */ + /* addr and flags should be cleard for reusing kprobe. */ + jp.kp.addr = NULL; + jp.kp.flags = 0; ret = register_jprobes(jps, 2); if (ret < 0) { printk(KERN_ERR "Kprobe smoke test failed: " @@ -323,7 +327,9 @@ static int test_kretprobes(void) int ret; struct kretprobe *rps[2] = {&rp, &rp2}; - rp.kp.addr = 0; /* addr should be cleard for reusing kprobe. */ + /* addr and flags should be cleard for reusing kprobe. */ + rp.kp.addr = NULL; + rp.kp.flags = 0; ret = register_kretprobes(rps, 2); if (ret < 0) { printk(KERN_ERR "Kprobe smoke test failed: " -- cgit v1.2.3-70-g09d2 From fb62db2ba943b1683f1d7181bb2988fce4c60870 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 13 Oct 2010 11:02:34 -0700 Subject: futex: Fix kernel-doc notation & typos Convert futex_requeue() function parameters to use @name kernel-doc notation and add @fshared & @cmpval to prevent kernel-doc warnings. Add @list to struct futex_q. Fix a few typos. Signed-off-by: Randy Dunlap Acked-by: Rusty Russell LKML-Reference: <20101013110234.89b06043.randy.dunlap@oracle.com> Signed-off-by: Ingo Molnar --- kernel/futex.c | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/kernel/futex.c b/kernel/futex.c index 92a31d4cd56..9b9fda73ba2 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -91,6 +91,7 @@ struct futex_pi_state { /** * struct futex_q - The hashed futex queue entry, one per waiting task + * @list: priority-sorted list of tasks waiting on this futex * @task: the task waiting on the futex * @lock_ptr: the hash bucket lock * @key: the key the futex is hashed on @@ -104,7 +105,7 @@ struct futex_pi_state { * * A futex_q has a woken state, just like tasks have TASK_RUNNING. * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0. - * The order of wakup is always to make the first condition true, then + * The order of wakeup is always to make the first condition true, then * the second. * * PI futexes are typically woken before they are removed from the hash list via @@ -295,7 +296,7 @@ void put_futex_key(int fshared, union futex_key *key) * Slow path to fixup the fault we just took in the atomic write * access to @uaddr. * - * We have no generic implementation of a non destructive write to the + * We have no generic implementation of a non-destructive write to the * user address. We know that we faulted in the atomic pagefault * disabled section so we can as well avoid the #PF overhead by * calling get_user_pages() right away. 
@@ -515,7 +516,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, */ pi_state = this->pi_state; /* - * Userspace might have messed up non PI and PI futexes + * Userspace might have messed up non-PI and PI futexes */ if (unlikely(!pi_state)) return -EINVAL; @@ -736,8 +737,8 @@ static void wake_futex(struct futex_q *q) /* * We set q->lock_ptr = NULL _before_ we wake up the task. If - * a non futex wake up happens on another CPU then the task - * might exit and p would dereference a non existing task + * a non-futex wake up happens on another CPU then the task + * might exit and p would dereference a non-existing task * struct. Prevent this by holding a reference on p across the * wake up. */ @@ -1131,11 +1132,13 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex, /** * futex_requeue() - Requeue waiters from uaddr1 to uaddr2 - * uaddr1: source futex user address - * uaddr2: target futex user address - * nr_wake: number of waiters to wake (must be 1 for requeue_pi) - * nr_requeue: number of waiters to requeue (0-INT_MAX) - * requeue_pi: if we are attempting to requeue from a non-pi futex to a + * @uaddr1: source futex user address + * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED + * @uaddr2: target futex user address + * @nr_wake: number of waiters to wake (must be 1 for requeue_pi) + * @nr_requeue: number of waiters to requeue (0-INT_MAX) + * @cmpval: @uaddr1 expected value (or %NULL) + * @requeue_pi: if we are attempting to requeue from a non-pi futex to a * pi futex (pi to pi requeue is not supported) * * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire @@ -2651,7 +2654,7 @@ static int __init futex_init(void) * of the complex code paths. Also we want to prevent * registration of robust lists in that case. NULL is * guaranteed to fault and we get -EFAULT on functional - * implementation, the non functional ones will return + * implementation, the non-functional ones will return * -ENOSYS. */ curval = cmpxchg_futex_value_locked(NULL, 0, 0); -- cgit v1.2.3-70-g09d2 From 864616ee6785d9fac7a2cd80c01a2da89579f2e4 Mon Sep 17 00:00:00 2001 From: Takuya Yoshikawa Date: Thu, 14 Oct 2010 16:09:13 +0900 Subject: sched: Comment updates: fix default latency and granularity numbers Targeted preemption latency and minimal preemption granularity for CPU-bound tasks have been changed. This patch updates the comments about these values. 
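For reference, a worked example of those comments, assuming the default logarithmic tunable scaling where the factor is 1 + ilog2(ncpus):

	on an 8-CPU machine:	factor = 1 + ilog2(8) = 4
	sched_latency		= 6 ms    * 4 = 24 ms
	sched_min_granularity	= 0.75 ms * 4 =  3 ms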
Signed-off-by: Takuya Yoshikawa Cc: Peter Zijlstra Cc: Mike Galbraith LKML-Reference: <20101014160913.eb24fef4.yoshikawa.takuya@oss.ntt.co.jp> Signed-off-by: Ingo Molnar --- kernel/sched_fair.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 623e9aceef8..bf87192e97f 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -25,7 +25,7 @@ /* * Targeted preemption latency for CPU-bound tasks: - * (default: 5ms * (1 + ilog(ncpus)), units: nanoseconds) + * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds) * * NOTE: this latency value is not the same as the concept of * 'timeslice length' - timeslices in CFS are of variable length @@ -52,7 +52,7 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling /* * Minimal preemption granularity for CPU-bound tasks: - * (default: 2 msec * (1 + ilog(ncpus)), units: nanoseconds) + * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds) */ unsigned int sysctl_sched_min_granularity = 750000ULL; unsigned int normalized_sysctl_sched_min_granularity = 750000ULL; -- cgit v1.2.3-70-g09d2 From 72441cb1fd77d092f09ddfac748955703884c9a7 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 13 Oct 2010 17:12:30 -0400 Subject: ftrace/x86: Add support for C version of recordmcount This patch adds support for the C version of recordmcount, and compile times show a ~12% improvement. After verifying this works, other archs can add HAVE_C_MCOUNT_RECORD in their Kconfig and they will use the C version of recordmcount instead of the Perl version. Cc: Cc: Michal Marek Cc: linux-kbuild@vger.kernel.org Cc: John Reiser Signed-off-by: Steven Rostedt --- Makefile | 6 ++++++ arch/x86/Kconfig | 1 + kernel/trace/Kconfig | 5 +++++ scripts/Makefile | 1 + scripts/Makefile.build | 4 ++++ 5 files changed, 17 insertions(+) (limited to 'kernel') diff --git a/Makefile b/Makefile index 534c09c255d..0dd3a8d9313 100644 --- a/Makefile +++ b/Makefile @@ -568,6 +568,12 @@ endif ifdef CONFIG_FUNCTION_TRACER KBUILD_CFLAGS += -pg +ifdef CONFIG_DYNAMIC_FTRACE + ifdef CONFIG_HAVE_C_MCOUNT_RECORD + BUILD_C_RECORDMCOUNT := y + export BUILD_C_RECORDMCOUNT + endif +endif endif # We trigger additional mismatches with less inlining diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index c14d8b4d2f7..788b50ef5fc 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -33,6 +33,7 @@ config X86 select HAVE_KRETPROBES select HAVE_OPTPROBES select HAVE_FTRACE_MCOUNT_RECORD + select HAVE_C_MCOUNT_RECORD select HAVE_DYNAMIC_FTRACE select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_GRAPH_TRACER diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 538501c6ea5..df00fbbaf60 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -49,6 +49,11 @@ config HAVE_SYSCALL_TRACEPOINTS help See Documentation/trace/ftrace-design.txt +config HAVE_C_MCOUNT_RECORD + bool + help + C version of recordmcount available?
+ config TRACER_MAX_TRACE bool diff --git a/scripts/Makefile b/scripts/Makefile index 842dbc2d5ae..2e088109fbd 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -11,6 +11,7 @@ hostprogs-$(CONFIG_KALLSYMS) += kallsyms hostprogs-$(CONFIG_LOGO) += pnmtologo hostprogs-$(CONFIG_VT) += conmakehash hostprogs-$(CONFIG_IKCONFIG) += bin2c +hostprogs-$(BUILD_C_RECORDMCOUNT) += recordmcount always := $(hostprogs-y) $(hostprogs-m) diff --git a/scripts/Makefile.build b/scripts/Makefile.build index a1a5cf95a68..4d03a7efc68 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -209,12 +209,16 @@ cmd_modversions = \ endif ifdef CONFIG_FTRACE_MCOUNT_RECORD +ifdef BUILD_C_RECORDMCOUNT +cmd_record_mcount = $(srctree)/scripts/recordmcount "$(@)"; +else cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \ "$(if $(CONFIG_CPU_BIG_ENDIAN),big,little)" \ "$(if $(CONFIG_64BIT),64,32)" \ "$(OBJDUMP)" "$(OBJCOPY)" "$(CC)" "$(LD)" "$(NM)" "$(RM)" "$(MV)" \ "$(if $(part-of-module),1,0)" "$(@)"; endif +endif define rule_cc_o_c $(call echo-cmd,checksrc) $(cmd_checksrc) \ -- cgit v1.2.3-70-g09d2 From cf4db2597ae93b60efc0a7a4ec08690b75d629b1 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 14 Oct 2010 23:32:44 -0400 Subject: ftrace: Rename config option HAVE_C_MCOUNT_RECORD to HAVE_C_RECORDMCOUNT The config option used by archs to let the build system know that the C version of the recordmcount works for said arch is currently called HAVE_C_MCOUNT_RECORD which enables BUILD_C_RECORDMCOUNT. To be more consistent with the name that all archs may use, it has been renamed to HAVE_C_RECORDMCOUNT. This will be less confusing since we are building a C recordmcount and not a mcount_record. Suggested-by: Ingo Molnar Cc: Cc: Michal Marek Cc: linux-kbuild@vger.kernel.org Cc: John Reiser Signed-off-by: Steven Rostedt --- Makefile | 2 +- arch/x86/Kconfig | 2 +- kernel/trace/Kconfig | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/Makefile b/Makefile index 0dd3a8d9313..c2362c27300 100644 --- a/Makefile +++ b/Makefile @@ -569,7 +569,7 @@ endif ifdef CONFIG_FUNCTION_TRACER KBUILD_CFLAGS += -pg ifdef CONFIG_DYNAMIC_FTRACE - ifdef CONFIG_HAVE_C_MCOUNT_RECORD + ifdef CONFIG_HAVE_C_RECORDMCOUNT BUILD_C_RECORDMCOUNT := y export BUILD_C_RECORDMCOUNT endif diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 788b50ef5fc..9815221976a 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -33,7 +33,7 @@ config X86 select HAVE_KRETPROBES select HAVE_OPTPROBES select HAVE_FTRACE_MCOUNT_RECORD - select HAVE_C_MCOUNT_RECORD + select HAVE_C_RECORDMCOUNT select HAVE_DYNAMIC_FTRACE select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_GRAPH_TRACER diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index df00fbbaf60..e550d2eda1d 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -49,7 +49,7 @@ config HAVE_SYSCALL_TRACEPOINTS help See Documentation/trace/ftrace-design.txt -config HAVE_C_MCOUNT_RECORD +config HAVE_C_RECORDMCOUNT bool help C version of recordmcount available? -- cgit v1.2.3-70-g09d2 From 6038f373a3dc1f1c26496e60b6c40b164716f07e Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Sun, 15 Aug 2010 18:52:59 +0200 Subject: llseek: automatically add .llseek fop All file_operations should get a .llseek operation so we can make nonseekable_open the default for future file operations without a .llseek pointer. The three cases that we can automatically detect are no_llseek, seq_lseek and default_llseek. 
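As an example of the second of those cases, a seq_file based interface ends up like this after the semantic patch below runs (foo_* names invented; the added line corresponds to the "we have seq_read" rule):

static const struct file_operations foo_fops = {
	.open    = foo_open,		/* typically calls single_open() */
	.read    = seq_read,
	.release = single_release,
	.llseek  = seq_lseek,		/* added: seq_read maintains f_pos */
};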
For cases where we can automatically prove that the file offset is always ignored, we use noop_llseek, which maintains the current behavior of not returning an error from a seek. New drivers should normally not use noop_llseek but instead use no_llseek and call nonseekable_open at open time. Existing drivers can be converted to do the same when the maintainer knows for certain that no user code relies on calling seek on the device file. The generated code is often incorrectly indented and right now contains comments that clarify for each added line why a specific variant was chosen. In the version that gets submitted upstream, the comments will be gone and I will manually fix the indentation, because there does not seem to be a way to do that using coccinelle. Some amount of new code is currently sitting in linux-next that should get the same modifications, which I will do at the end of the merge window. Many thanks to Julia Lawall for helping me learn to write a semantic patch that does all this. ===== begin semantic patch ===== // This adds an llseek= method to all file operations, // as a preparation for making no_llseek the default. // // The rules are // - use no_llseek explicitly if we do nonseekable_open // - use seq_lseek for sequential files // - use default_llseek if we know we access f_pos // - use noop_llseek if we know we don't access f_pos, // but we still want to allow users to call lseek // @ open1 exists @ identifier nested_open; @@ nested_open(...) { <+... nonseekable_open(...) ...+> } @ open exists@ identifier open_f; identifier i, f; identifier open1.nested_open; @@ int open_f(struct inode *i, struct file *f) { <+... ( nonseekable_open(...) | nested_open(...) ) ...+> } @ read disable optional_qualifier exists @ identifier read_f; identifier f, p, s, off; type ssize_t, size_t, loff_t; expression E; identifier func; @@ ssize_t read_f(struct file *f, char *p, size_t s, loff_t *off) { <+... ( *off = E | *off += E | func(..., off, ...) | E = *off ) ...+> } @ read_no_fpos disable optional_qualifier exists @ identifier read_f; identifier f, p, s, off; type ssize_t, size_t, loff_t; @@ ssize_t read_f(struct file *f, char *p, size_t s, loff_t *off) { ... when != off } @ write @ identifier write_f; identifier f, p, s, off; type ssize_t, size_t, loff_t; expression E; identifier func; @@ ssize_t write_f(struct file *f, const char *p, size_t s, loff_t *off) { <+... ( *off = E | *off += E | func(..., off, ...) | E = *off ) ...+> } @ write_no_fpos @ identifier write_f; identifier f, p, s, off; type ssize_t, size_t, loff_t; @@ ssize_t write_f(struct file *f, const char *p, size_t s, loff_t *off) { ... when != off } @ fops0 @ identifier fops; @@ struct file_operations fops = { ... }; @ has_llseek depends on fops0 @ identifier fops0.fops; identifier llseek_f; @@ struct file_operations fops = { ... .llseek = llseek_f, ... }; @ has_read depends on fops0 @ identifier fops0.fops; identifier read_f; @@ struct file_operations fops = { ... .read = read_f, ... }; @ has_write depends on fops0 @ identifier fops0.fops; identifier write_f; @@ struct file_operations fops = { ... .write = write_f, ... }; @ has_open depends on fops0 @ identifier fops0.fops; identifier open_f; @@ struct file_operations fops = { ... .open = open_f, ... }; // use no_llseek if we call nonseekable_open //////////////////////////////////////////// @ nonseekable1 depends on !has_llseek && has_open @ identifier fops0.fops; identifier nso ~= "nonseekable_open"; @@ struct file_operations fops = { ... .open = nso, ...
+.llseek = no_llseek, /* nonseekable */ }; @ nonseekable2 depends on !has_llseek @ identifier fops0.fops; identifier open.open_f; @@ struct file_operations fops = { ... .open = open_f, ... +.llseek = no_llseek, /* open uses nonseekable */ }; // use seq_lseek for sequential files ///////////////////////////////////// @ seq depends on !has_llseek @ identifier fops0.fops; identifier sr ~= "seq_read"; @@ struct file_operations fops = { ... .read = sr, ... +.llseek = seq_lseek, /* we have seq_read */ }; // use default_llseek if there is a readdir /////////////////////////////////////////// @ fops1 depends on !has_llseek && !nonseekable1 && !nonseekable2 && !seq @ identifier fops0.fops; identifier readdir_e; @@ // any other fop is used that changes pos struct file_operations fops = { ... .readdir = readdir_e, ... +.llseek = default_llseek, /* readdir is present */ }; // use default_llseek if at least one of read/write touches f_pos ///////////////////////////////////////////////////////////////// @ fops2 depends on !fops1 && !has_llseek && !nonseekable1 && !nonseekable2 && !seq @ identifier fops0.fops; identifier read.read_f; @@ // read fops use offset struct file_operations fops = { ... .read = read_f, ... +.llseek = default_llseek, /* read accesses f_pos */ }; @ fops3 depends on !fops1 && !fops2 && !has_llseek && !nonseekable1 && !nonseekable2 && !seq @ identifier fops0.fops; identifier write.write_f; @@ // write fops use offset struct file_operations fops = { ... .write = write_f, ... + .llseek = default_llseek, /* write accesses f_pos */ }; // Use noop_llseek if neither read nor write accesses f_pos /////////////////////////////////////////////////////////// @ fops4 depends on !fops1 && !fops2 && !fops3 && !has_llseek && !nonseekable1 && !nonseekable2 && !seq @ identifier fops0.fops; identifier read_no_fpos.read_f; identifier write_no_fpos.write_f; @@ // write fops use offset struct file_operations fops = { ... .write = write_f, .read = read_f, ... +.llseek = noop_llseek, /* read and write both use no f_pos */ }; @ depends on has_write && !has_read && !fops1 && !fops2 && !has_llseek && !nonseekable1 && !nonseekable2 && !seq @ identifier fops0.fops; identifier write_no_fpos.write_f; @@ struct file_operations fops = { ... .write = write_f, ... +.llseek = noop_llseek, /* write uses no f_pos */ }; @ depends on has_read && !has_write && !fops1 && !fops2 && !has_llseek && !nonseekable1 && !nonseekable2 && !seq @ identifier fops0.fops; identifier read_no_fpos.read_f; @@ struct file_operations fops = { ... .read = read_f, ... +.llseek = noop_llseek, /* read uses no f_pos */ }; @ depends on !has_read && !has_write && !fops1 && !fops2 && !has_llseek && !nonseekable1 && !nonseekable2 && !seq @ identifier fops0.fops; @@ struct file_operations fops = { ... 
+.llseek = noop_llseek, /* no read or write fn */ }; ===== End semantic patch ===== Signed-off-by: Arnd Bergmann Cc: Julia Lawall Cc: Christoph Hellwig --- arch/arm/kernel/etm.c | 1 + arch/arm/mach-msm/last_radio_log.c | 3 ++- arch/arm/mach-msm/smd_debug.c | 1 + arch/arm/plat-mxc/audmux-v2.c | 1 + arch/avr32/boards/mimc200/fram.c | 1 + arch/blackfin/kernel/kgdb_test.c | 1 + arch/blackfin/mach-bf561/coreb.c | 1 + arch/cris/arch-v10/drivers/ds1302.c | 1 + arch/cris/arch-v10/drivers/gpio.c | 1 + arch/cris/arch-v10/drivers/i2c.c | 1 + arch/cris/arch-v10/drivers/pcf8563.c | 1 + arch/cris/arch-v10/drivers/sync_serial.c | 3 ++- arch/cris/arch-v32/drivers/cryptocop.c | 3 ++- arch/cris/arch-v32/drivers/i2c.c | 1 + arch/cris/arch-v32/drivers/mach-a3/gpio.c | 1 + arch/cris/arch-v32/drivers/mach-fs/gpio.c | 1 + arch/cris/arch-v32/drivers/pcf8563.c | 1 + arch/cris/arch-v32/drivers/sync_serial.c | 3 ++- arch/cris/kernel/profile.c | 1 + arch/ia64/kernel/salinfo.c | 2 ++ arch/ia64/sn/kernel/sn2/sn_hwperf.c | 1 + arch/m68k/bvme6000/rtc.c | 1 + arch/m68k/mvme16x/rtc.c | 1 + arch/mips/kernel/rtlx.c | 3 ++- arch/mips/kernel/vpe.c | 3 ++- arch/mips/sibyte/common/sb_tbprof.c | 1 + arch/powerpc/kernel/lparcfg.c | 1 + arch/powerpc/kernel/rtas_flash.c | 3 +++ arch/powerpc/kernel/rtasd.c | 1 + arch/powerpc/platforms/iseries/mf.c | 1 + arch/powerpc/platforms/pseries/reconfig.c | 3 ++- arch/powerpc/platforms/pseries/scanlog.c | 1 + arch/s390/crypto/prng.c | 1 + arch/s390/hypfs/hypfs_diag.c | 1 + arch/s390/hypfs/hypfs_vm.c | 1 + arch/s390/hypfs/inode.c | 1 + arch/s390/kernel/debug.c | 1 + arch/sh/boards/mach-landisk/gio.c | 1 + arch/sparc/kernel/apc.c | 1 + arch/sparc/kernel/mdesc.c | 1 + arch/tile/kernel/hardwall.c | 1 + arch/um/drivers/harddog_kern.c | 1 + arch/um/drivers/mconsole_kern.c | 1 + arch/um/drivers/mmapper_kern.c | 1 + arch/um/drivers/random.c | 1 + arch/x86/kernel/apm_32.c | 1 + arch/x86/kernel/cpu/mcheck/mce-severity.c | 1 + arch/x86/kernel/cpu/mcheck/mce.c | 1 + arch/x86/kernel/kdebugfs.c | 1 + arch/x86/kernel/microcode_core.c | 1 + arch/x86/kernel/tlb_uv.c | 1 + arch/x86/xen/debugfs.c | 1 + block/bsg.c | 1 + drivers/acpi/apei/erst-dbg.c | 1 + drivers/acpi/debugfs.c | 1 + drivers/acpi/ec_sys.c | 1 + drivers/acpi/event.c | 1 + drivers/block/DAC960.c | 3 ++- drivers/block/aoe/aoechr.c | 1 + drivers/block/paride/pg.c | 1 + drivers/block/paride/pt.c | 1 + drivers/block/pktcdvd.c | 1 + drivers/bluetooth/btmrvl_debugfs.c | 10 +++++++ drivers/bluetooth/hci_vhci.c | 1 + drivers/char/apm-emulation.c | 1 + drivers/char/bfin-otp.c | 1 + drivers/char/briq_panel.c | 1 + drivers/char/bsr.c | 1 + drivers/char/cs5535_gpio.c | 3 ++- drivers/char/ds1302.c | 1 + drivers/char/ds1620.c | 1 + drivers/char/dsp56k.c | 1 + drivers/char/dtlk.c | 1 + drivers/char/genrtc.c | 1 + drivers/char/hw_random/core.c | 1 + drivers/char/ip2/ip2main.c | 1 + drivers/char/ipmi/ipmi_devintf.c | 1 + drivers/char/ipmi/ipmi_watchdog.c | 1 + drivers/char/istallion.c | 1 + drivers/char/lp.c | 1 + drivers/char/mem.c | 3 +++ drivers/char/misc.c | 1 + drivers/char/mmtimer.c | 1 + drivers/char/mspec.c | 9 ++++--- drivers/char/mwave/mwavedd.c | 3 ++- drivers/char/nwbutton.c | 1 + drivers/char/pc8736x_gpio.c | 1 + drivers/char/pcmcia/cm4000_cs.c | 1 + drivers/char/pcmcia/cm4040_cs.c | 1 + drivers/char/random.c | 2 ++ drivers/char/rio/rio_linux.c | 1 + drivers/char/scx200_gpio.c | 1 + drivers/char/snsc.c | 1 + drivers/char/stallion.c | 1 + drivers/char/sx.c | 1 + drivers/char/sysrq.c | 1 + drivers/char/tb0219.c | 1 + drivers/char/tlclk.c | 1 + 
drivers/char/toshiba.c | 1 + drivers/char/uv_mmtimer.c | 1 + drivers/char/xilinx_hwicap/xilinx_hwicap.c | 1 + drivers/dma/coh901318.c | 1 + drivers/firewire/nosy.c | 1 + drivers/gpu/drm/drm_drv.c | 3 ++- drivers/gpu/drm/i810/i810_dma.c | 1 + drivers/gpu/drm/i830/i830_dma.c | 1 + drivers/gpu/drm/i915/i915_debugfs.c | 1 + drivers/gpu/vga/vgaarb.c | 1 + drivers/hid/hid-debug.c | 1 + drivers/hid/hid-roccat.c | 1 + drivers/hid/hidraw.c | 1 + drivers/hid/usbhid/hiddev.c | 1 + drivers/hwmon/asus_atk0110.c | 1 + drivers/ide/ide-tape.c | 1 + drivers/idle/i7300_idle.c | 1 + drivers/infiniband/hw/ipath/ipath_diag.c | 4 ++- drivers/infiniband/hw/ipath/ipath_file_ops.c | 3 ++- drivers/infiniband/hw/ipath/ipath_fs.c | 3 +++ drivers/infiniband/hw/qib/qib_diag.c | 4 ++- drivers/infiniband/hw/qib/qib_file_ops.c | 3 ++- drivers/infiniband/hw/qib/qib_fs.c | 1 + drivers/input/evdev.c | 3 ++- drivers/input/input.c | 1 + drivers/input/joydev.c | 1 + drivers/input/misc/uinput.c | 1 + drivers/input/mousedev.c | 1 + drivers/input/serio/serio_raw.c | 1 + drivers/isdn/mISDN/timerdev.c | 1 + drivers/lguest/lguest_user.c | 1 + drivers/macintosh/ans-lcd.c | 1 + drivers/macintosh/via-pmu.c | 1 + drivers/md/dm-ioctl.c | 1 + drivers/media/IR/imon.c | 6 +++-- drivers/media/IR/lirc_dev.c | 1 + drivers/media/dvb/bt8xx/dst_ca.c | 3 ++- drivers/media/dvb/dvb-core/dmxdev.c | 2 ++ drivers/media/dvb/dvb-core/dvb_ca_en50221.c | 1 + drivers/media/dvb/dvb-core/dvb_frontend.c | 3 ++- drivers/media/dvb/dvb-core/dvb_net.c | 1 + drivers/media/dvb/dvb-core/dvbdev.c | 1 + drivers/media/dvb/firewire/firedtv-ci.c | 1 + drivers/media/dvb/ttpci/av7110.c | 1 + drivers/media/dvb/ttpci/av7110_av.c | 2 ++ drivers/media/dvb/ttpci/av7110_ca.c | 1 + drivers/media/dvb/ttpci/av7110_ir.c | 1 + drivers/mfd/ab3100-core.c | 1 + drivers/misc/hpilo.c | 1 + drivers/misc/phantom.c | 1 + drivers/misc/sgi-gru/grufile.c | 1 + drivers/mmc/core/debugfs.c | 1 + drivers/mtd/ubi/cdev.c | 1 + drivers/net/caif/caif_spi.c | 6 +++-- drivers/net/cxgb4/cxgb4_main.c | 1 + drivers/net/ppp_generic.c | 3 ++- drivers/net/wimax/i2400m/debugfs.c | 2 ++ drivers/net/wireless/airo.c | 24 +++++++++++------ drivers/net/wireless/ath/ath5k/debug.c | 7 +++++ drivers/net/wireless/ath/ath9k/debug.c | 33 ++++++++++++++++-------- drivers/net/wireless/ath/ath9k/htc_drv_main.c | 9 ++++--- drivers/net/wireless/iwlwifi/iwl-3945-rs.c | 1 + drivers/net/wireless/iwlwifi/iwl-agn-rs.c | 3 +++ drivers/net/wireless/iwmc3200wifi/debugfs.c | 4 +++ drivers/net/wireless/iwmc3200wifi/sdio.c | 1 + drivers/net/wireless/libertas/debugfs.c | 1 + drivers/net/wireless/ray_cs.c | 2 ++ drivers/net/wireless/rt2x00/rt2x00debug.c | 4 +++ drivers/net/wireless/wl12xx/wl1251_debugfs.c | 2 ++ drivers/net/wireless/wl12xx/wl1271_debugfs.c | 4 ++- drivers/oprofile/oprofile_files.c | 8 +++++- drivers/oprofile/oprofilefs.c | 3 +++ drivers/pci/pcie/aer/aer_inject.c | 1 + drivers/platform/x86/sony-laptop.c | 1 + drivers/rtc/rtc-m41t80.c | 1 + drivers/s390/block/dasd_eer.c | 1 + drivers/s390/char/fs3270.c | 1 + drivers/s390/char/monreader.c | 1 + drivers/s390/char/monwriter.c | 1 + drivers/s390/char/tape_char.c | 1 + drivers/s390/char/vmcp.c | 1 + drivers/s390/char/vmlogrdr.c | 1 + drivers/s390/char/vmwatchdog.c | 1 + drivers/s390/char/zcore.c | 2 ++ drivers/s390/cio/chsc_sch.c | 1 + drivers/s390/cio/css.c | 1 + drivers/s390/crypto/zcrypt_api.c | 3 ++- drivers/s390/scsi/zfcp_cfdc.c | 3 ++- drivers/sbus/char/display7seg.c | 1 + drivers/sbus/char/envctrl.c | 1 + drivers/scsi/3w-9xxx.c | 3 ++- drivers/scsi/3w-sas.c | 3 ++- 
drivers/scsi/3w-xxxx.c | 3 ++- drivers/scsi/aacraid/linit.c | 1 + drivers/scsi/ch.c | 1 + drivers/scsi/dpt_i2o.c | 1 + drivers/scsi/gdth.c | 1 + drivers/scsi/megaraid.c | 1 + drivers/scsi/megaraid/megaraid_mm.c | 1 + drivers/scsi/megaraid/megaraid_sas.c | 1 + drivers/scsi/mpt2sas/mpt2sas_ctl.c | 1 + drivers/scsi/osd/osd_uld.c | 1 + drivers/scsi/pmcraid.c | 1 + drivers/scsi/qla2xxx/qla_os.c | 1 + drivers/scsi/scsi_tgt_if.c | 1 + drivers/scsi/sg.c | 1 + drivers/serial/mfd.c | 2 ++ drivers/spi/dw_spi.c | 1 + drivers/spi/spidev.c | 1 + drivers/staging/comedi/comedi_fops.c | 1 + drivers/staging/crystalhd/crystalhd_lnx.c | 1 + drivers/staging/dream/camera/msm_camera.c | 3 +++ drivers/staging/dream/pmem.c | 2 ++ drivers/staging/dream/qdsp5/adsp_driver.c | 1 + drivers/staging/dream/qdsp5/audio_aac.c | 1 + drivers/staging/dream/qdsp5/audio_amrnb.c | 1 + drivers/staging/dream/qdsp5/audio_evrc.c | 1 + drivers/staging/dream/qdsp5/audio_in.c | 2 ++ drivers/staging/dream/qdsp5/audio_mp3.c | 1 + drivers/staging/dream/qdsp5/audio_out.c | 2 ++ drivers/staging/dream/qdsp5/audio_qcelp.c | 1 + drivers/staging/dream/qdsp5/evlog.h | 1 + drivers/staging/dream/qdsp5/snd.c | 1 + drivers/staging/frontier/alphatrack.c | 1 + drivers/staging/frontier/tranzport.c | 1 + drivers/staging/iio/industrialio-core.c | 1 + drivers/staging/iio/industrialio-ring.c | 1 + drivers/staging/lirc/lirc_imon.c | 3 ++- drivers/staging/lirc/lirc_it87.c | 1 + drivers/staging/lirc/lirc_sasem.c | 1 + drivers/staging/memrar/memrar_handler.c | 1 + drivers/staging/panel/panel.c | 1 + drivers/staging/tidspbridge/rmgr/drv_interface.c | 1 + drivers/telephony/ixj.c | 3 ++- drivers/telephony/phonedev.c | 1 + drivers/uio/uio.c | 1 + drivers/usb/class/cdc-wdm.c | 3 ++- drivers/usb/class/usblp.c | 1 + drivers/usb/class/usbtmc.c | 1 + drivers/usb/core/file.c | 1 + drivers/usb/gadget/f_hid.c | 1 + drivers/usb/gadget/printer.c | 3 ++- drivers/usb/host/ehci-dbg.c | 4 +++ drivers/usb/host/ohci-dbg.c | 3 +++ drivers/usb/image/mdc800.c | 1 + drivers/usb/misc/adutux.c | 1 + drivers/usb/misc/idmouse.c | 1 + drivers/usb/misc/iowarrior.c | 1 + drivers/usb/misc/ldusb.c | 1 + drivers/usb/misc/rio500.c | 1 + drivers/usb/misc/usblcd.c | 1 + drivers/usb/usb-skeleton.c | 1 + drivers/vhost/net.c | 1 + drivers/video/fbmem.c | 1 + drivers/video/mbx/mbxdebugfs.c | 6 +++++ drivers/watchdog/ar7_wdt.c | 1 + drivers/watchdog/cpwd.c | 1 + drivers/watchdog/ep93xx_wdt.c | 1 + drivers/watchdog/omap_wdt.c | 1 + drivers/xen/evtchn.c | 1 + drivers/xen/xenfs/super.c | 1 + drivers/xen/xenfs/xenbus.c | 1 + fs/afs/mntpt.c | 1 + fs/autofs4/dev-ioctl.c | 1 + fs/binfmt_misc.c | 3 +++ fs/btrfs/super.c | 1 + fs/cachefiles/daemon.c | 1 + fs/char_dev.c | 1 + fs/coda/pioctl.c | 1 + fs/coda/psdev.c | 1 + fs/debugfs/file.c | 3 +++ fs/dlm/debug_fs.c | 3 ++- fs/dlm/plock.c | 3 ++- fs/dlm/user.c | 3 +++ fs/ecryptfs/file.c | 1 + fs/ecryptfs/miscdev.c | 1 + fs/eventfd.c | 1 + fs/eventpoll.c | 3 ++- fs/fifo.c | 1 + fs/fuse/control.c | 4 +++ fs/fuse/cuse.c | 1 + fs/gfs2/file.c | 2 ++ fs/hppfs/hppfs.c | 1 + fs/hugetlbfs/inode.c | 1 + fs/logfs/dir.c | 1 + fs/nfsd/nfsctl.c | 1 + fs/no-block.c | 1 + fs/notify/fanotify/fanotify_user.c | 1 + fs/notify/inotify/inotify_user.c | 1 + fs/ocfs2/dlmfs/dlmfs.c | 1 + fs/ocfs2/stack_user.c | 1 + fs/proc/base.c | 8 ++++++ fs/proc/proc_sysctl.c | 1 + fs/proc/root.c | 1 + fs/proc/task_mmu.c | 1 + fs/romfs/super.c | 1 + fs/signalfd.c | 1 + fs/squashfs/dir.c | 3 ++- fs/timerfd.c | 1 + fs/ubifs/debug.c | 1 + ipc/mqueue.c | 1 + ipc/shm.c | 2 ++ kernel/configs.c | 1 + 
kernel/gcov/fs.c | 1 + kernel/kprobes.c | 1 + kernel/pm_qos_params.c | 1 + kernel/profile.c | 1 + kernel/trace/blktrace.c | 2 ++ kernel/trace/ftrace.c | 2 ++ kernel/trace/ring_buffer.c | 1 + kernel/trace/trace_events.c | 6 +++++ kernel/trace/trace_stack.c | 1 + lib/dma-debug.c | 1 + net/atm/proc.c | 1 + net/dccp/probe.c | 1 + net/ipv4/tcp_probe.c | 1 + net/mac80211/debugfs.c | 19 +++++++++----- net/mac80211/rate.c | 1 + net/mac80211/rc80211_minstrel_debugfs.c | 1 + net/mac80211/rc80211_pid_debugfs.c | 1 + net/netfilter/xt_recent.c | 1 + net/nonet.c | 1 + net/rfkill/core.c | 1 + net/sctp/probe.c | 1 + net/socket.c | 1 + net/sunrpc/cache.c | 2 ++ net/wireless/debugfs.c | 1 + samples/kfifo/bytestream-example.c | 1 + samples/kfifo/inttype-example.c | 1 + samples/kfifo/record-example.c | 1 + samples/tracepoints/tracepoint-sample.c | 1 + security/apparmor/apparmorfs.c | 9 ++++--- security/inode.c | 1 + security/smack/smackfs.c | 5 ++++ sound/core/seq/oss/seq_oss.c | 1 + sound/core/sound.c | 3 ++- sound/oss/msnd_pinnacle.c | 1 + sound/soc/soc-core.c | 1 + sound/soc/soc-dapm.c | 1 + sound/sound_core.c | 1 + virt/kvm/kvm_main.c | 3 +++ 339 files changed, 538 insertions(+), 73 deletions(-) (limited to 'kernel') diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c index 33c7077174d..30b8878f120 100644 --- a/arch/arm/kernel/etm.c +++ b/arch/arm/kernel/etm.c @@ -314,6 +314,7 @@ static const struct file_operations etb_fops = { .read = etb_read, .open = etb_open, .release = etb_release, + .llseek = no_llseek, }; static struct miscdevice etb_miscdev = { diff --git a/arch/arm/mach-msm/last_radio_log.c b/arch/arm/mach-msm/last_radio_log.c index b64ba5a9868..1e243f46a96 100644 --- a/arch/arm/mach-msm/last_radio_log.c +++ b/arch/arm/mach-msm/last_radio_log.c @@ -48,7 +48,8 @@ static ssize_t last_radio_log_read(struct file *file, char __user *buf, } static struct file_operations last_radio_log_fops = { - .read = last_radio_log_read + .read = last_radio_log_read, + .llseek = default_llseek, }; void msm_init_last_radio_log(struct module *owner) diff --git a/arch/arm/mach-msm/smd_debug.c b/arch/arm/mach-msm/smd_debug.c index 3b2dd717b78..f91c3b7bc65 100644 --- a/arch/arm/mach-msm/smd_debug.c +++ b/arch/arm/mach-msm/smd_debug.c @@ -212,6 +212,7 @@ static int debug_open(struct inode *inode, struct file *file) static const struct file_operations debug_ops = { .read = debug_read, .open = debug_open, + .llseek = default_llseek, }; static void debug_create(const char *name, mode_t mode, diff --git a/arch/arm/plat-mxc/audmux-v2.c b/arch/arm/plat-mxc/audmux-v2.c index f9e7cdbd000..25ad95ba92a 100644 --- a/arch/arm/plat-mxc/audmux-v2.c +++ b/arch/arm/plat-mxc/audmux-v2.c @@ -137,6 +137,7 @@ static ssize_t audmux_read_file(struct file *file, char __user *user_buf, static const struct file_operations audmux_debugfs_fops = { .open = audmux_open_file, .read = audmux_read_file, + .llseek = default_llseek, }; static void audmux_debugfs_init(void) diff --git a/arch/avr32/boards/mimc200/fram.c b/arch/avr32/boards/mimc200/fram.c index 54fbd95cee9..9764a1a1073 100644 --- a/arch/avr32/boards/mimc200/fram.c +++ b/arch/avr32/boards/mimc200/fram.c @@ -41,6 +41,7 @@ static int fram_mmap(struct file *filp, struct vm_area_struct *vma) static const struct file_operations fram_fops = { .owner = THIS_MODULE, .mmap = fram_mmap, + .llseek = noop_llseek, }; #define FRAM_MINOR 0 diff --git a/arch/blackfin/kernel/kgdb_test.c b/arch/blackfin/kernel/kgdb_test.c index 9a4b0759438..08c0236acf3 100644 --- a/arch/blackfin/kernel/kgdb_test.c +++ 
b/arch/blackfin/kernel/kgdb_test.c @@ -88,6 +88,7 @@ static const struct file_operations kgdb_test_proc_fops = { .owner = THIS_MODULE, .read = kgdb_test_proc_read, .write = kgdb_test_proc_write, + .llseek = noop_llseek, }; static int __init kgdbtest_init(void) diff --git a/arch/blackfin/mach-bf561/coreb.c b/arch/blackfin/mach-bf561/coreb.c index deb2271d09a..c6a4c8f2d37 100644 --- a/arch/blackfin/mach-bf561/coreb.c +++ b/arch/blackfin/mach-bf561/coreb.c @@ -51,6 +51,7 @@ coreb_ioctl(struct file *file, unsigned int cmd, unsigned long arg) static const struct file_operations coreb_fops = { .owner = THIS_MODULE, .unlocked_ioctl = coreb_ioctl, + .llseek = noop_llseek, }; static struct miscdevice coreb_dev = { diff --git a/arch/cris/arch-v10/drivers/ds1302.c b/arch/cris/arch-v10/drivers/ds1302.c index 884275629ef..95aadb4a8cf 100644 --- a/arch/cris/arch-v10/drivers/ds1302.c +++ b/arch/cris/arch-v10/drivers/ds1302.c @@ -387,6 +387,7 @@ print_rtc_status(void) static const struct file_operations rtc_fops = { .owner = THIS_MODULE, .unlocked_ioctl = rtc_unlocked_ioctl, + .llseek = noop_llseek, }; /* Probe for the chip by writing something to its RAM and try reading it back. */ diff --git a/arch/cris/arch-v10/drivers/gpio.c b/arch/cris/arch-v10/drivers/gpio.c index a07b6d25b0c..a276f081173 100644 --- a/arch/cris/arch-v10/drivers/gpio.c +++ b/arch/cris/arch-v10/drivers/gpio.c @@ -745,6 +745,7 @@ static const struct file_operations gpio_fops = { .write = gpio_write, .open = gpio_open, .release = gpio_release, + .llseek = noop_llseek, }; static void ioif_watcher(const unsigned int gpio_in_available, diff --git a/arch/cris/arch-v10/drivers/i2c.c b/arch/cris/arch-v10/drivers/i2c.c index 77a94181381..c413539d420 100644 --- a/arch/cris/arch-v10/drivers/i2c.c +++ b/arch/cris/arch-v10/drivers/i2c.c @@ -617,6 +617,7 @@ static const struct file_operations i2c_fops = { .unlocked_ioctl = i2c_ioctl, .open = i2c_open, .release = i2c_release, + .llseek = noop_llseek, }; int __init diff --git a/arch/cris/arch-v10/drivers/pcf8563.c b/arch/cris/arch-v10/drivers/pcf8563.c index 7dcb1f85f42..7aa6ff00a11 100644 --- a/arch/cris/arch-v10/drivers/pcf8563.c +++ b/arch/cris/arch-v10/drivers/pcf8563.c @@ -64,6 +64,7 @@ static int voltage_low; static const struct file_operations pcf8563_fops = { .owner = THIS_MODULE, .unlocked_ioctl = pcf8563_unlocked_ioctl, + .llseek = noop_llseek, }; unsigned char diff --git a/arch/cris/arch-v10/drivers/sync_serial.c b/arch/cris/arch-v10/drivers/sync_serial.c index ee2dd4323da..e501632fa8c 100644 --- a/arch/cris/arch-v10/drivers/sync_serial.c +++ b/arch/cris/arch-v10/drivers/sync_serial.c @@ -250,7 +250,8 @@ static const struct file_operations sync_serial_fops = { .poll = sync_serial_poll, .unlocked_ioctl = sync_serial_ioctl, .open = sync_serial_open, - .release = sync_serial_release + .release = sync_serial_release, + .llseek = noop_llseek, }; static int __init etrax_sync_serial_init(void) diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c index b07646a3050..dcb43fddfb9 100644 --- a/arch/cris/arch-v32/drivers/cryptocop.c +++ b/arch/cris/arch-v32/drivers/cryptocop.c @@ -281,7 +281,8 @@ const struct file_operations cryptocop_fops = { .owner = THIS_MODULE, .open = cryptocop_open, .release = cryptocop_release, - .unlocked_ioctl = cryptocop_ioctl + .unlocked_ioctl = cryptocop_ioctl, + .llseek = noop_llseek, }; diff --git a/arch/cris/arch-v32/drivers/i2c.c b/arch/cris/arch-v32/drivers/i2c.c index 5a3e900c9a7..ddb23996f11 100644 --- 
diff --git a/arch/cris/arch-v32/drivers/i2c.c b/arch/cris/arch-v32/drivers/i2c.c
index 5a3e900c9a7..ddb23996f11 100644
--- a/arch/cris/arch-v32/drivers/i2c.c
+++ b/arch/cris/arch-v32/drivers/i2c.c
@@ -698,6 +698,7 @@ static const struct file_operations i2c_fops = {
 	.unlocked_ioctl = i2c_ioctl,
 	.open = i2c_open,
 	.release = i2c_release,
+	.llseek = noop_llseek,
 };
 
 static int __init i2c_init(void)
diff --git a/arch/cris/arch-v32/drivers/mach-a3/gpio.c b/arch/cris/arch-v32/drivers/mach-a3/gpio.c
index 2dcd27adbad..06b24ebda41 100644
--- a/arch/cris/arch-v32/drivers/mach-a3/gpio.c
+++ b/arch/cris/arch-v32/drivers/mach-a3/gpio.c
@@ -893,6 +893,7 @@ static const struct file_operations gpio_fops = {
 	.write = gpio_write,
 	.open = gpio_open,
 	.release = gpio_release,
+	.llseek = noop_llseek,
 };
 
 #ifdef CONFIG_ETRAX_VIRTUAL_GPIO
diff --git a/arch/cris/arch-v32/drivers/mach-fs/gpio.c b/arch/cris/arch-v32/drivers/mach-fs/gpio.c
index 5ec8a7d4e7d..0649c8bea67 100644
--- a/arch/cris/arch-v32/drivers/mach-fs/gpio.c
+++ b/arch/cris/arch-v32/drivers/mach-fs/gpio.c
@@ -870,6 +870,7 @@ static const struct file_operations gpio_fops = {
 	.write = gpio_write,
 	.open = gpio_open,
 	.release = gpio_release,
+	.llseek = noop_llseek,
 };
 
 #ifdef CONFIG_ETRAX_VIRTUAL_GPIO
diff --git a/arch/cris/arch-v32/drivers/pcf8563.c b/arch/cris/arch-v32/drivers/pcf8563.c
index bef6eb53b15..0f7b101ee5e 100644
--- a/arch/cris/arch-v32/drivers/pcf8563.c
+++ b/arch/cris/arch-v32/drivers/pcf8563.c
@@ -60,6 +60,7 @@ static int voltage_low;
 static const struct file_operations pcf8563_fops = {
 	.owner = THIS_MODULE,
 	.unlocked_ioctl = pcf8563_unlocked_ioctl,
+	.llseek = noop_llseek,
 };
 
 unsigned char
diff --git a/arch/cris/arch-v32/drivers/sync_serial.c b/arch/cris/arch-v32/drivers/sync_serial.c
index ca248f3adb8..a2e8a8c3985 100644
--- a/arch/cris/arch-v32/drivers/sync_serial.c
+++ b/arch/cris/arch-v32/drivers/sync_serial.c
@@ -247,7 +247,8 @@ static const struct file_operations sync_serial_fops = {
 	.poll = sync_serial_poll,
 	.unlocked_ioctl = sync_serial_ioctl,
 	.open = sync_serial_open,
-	.release = sync_serial_release
+	.release = sync_serial_release,
+	.llseek = noop_llseek,
 };
 
 static int __init etrax_sync_serial_init(void)
diff --git a/arch/cris/kernel/profile.c b/arch/cris/kernel/profile.c
index 195ec5fa0dd..b82e08615d1 100644
--- a/arch/cris/kernel/profile.c
+++ b/arch/cris/kernel/profile.c
@@ -59,6 +59,7 @@ write_cris_profile(struct file *file, const char __user *buf,
 static const struct file_operations cris_proc_profile_operations = {
 	.read = read_cris_profile,
 	.write = write_cris_profile,
+	.llseek = default_llseek,
 };
 
 static int __init init_cris_profile(void)
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index aa8b5fa1a8d..6f22c404163 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -354,6 +354,7 @@ retry:
 static const struct file_operations salinfo_event_fops = {
 	.open = salinfo_event_open,
 	.read = salinfo_event_read,
+	.llseek = noop_llseek,
 };
 
 static int
@@ -571,6 +572,7 @@ static const struct file_operations salinfo_data_fops = {
 	.release = salinfo_log_release,
 	.read = salinfo_log_read,
 	.write = salinfo_log_write,
+	.llseek = default_llseek,
 };
 
 static int __cpuinit
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index fa1eceed0d2..30862c0358c 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -860,6 +860,7 @@ error:
 static const struct file_operations sn_hwperf_fops = {
 	.unlocked_ioctl = sn_hwperf_ioctl,
+	.llseek = noop_llseek,
 };
 
 static struct miscdevice sn_hwperf_dev = {
diff --git a/arch/m68k/bvme6000/rtc.c b/arch/m68k/bvme6000/rtc.c
index cb8617bb194..1c4d4c7bf4d 100644
--- a/arch/m68k/bvme6000/rtc.c
+++ b/arch/m68k/bvme6000/rtc.c
@@ -155,6 +155,7 @@ static const struct file_operations rtc_fops = {
 	.unlocked_ioctl = rtc_ioctl,
 	.open = rtc_open,
 	.release = rtc_release,
+	.llseek = noop_llseek,
 };
 
 static struct miscdevice rtc_dev = {
diff --git a/arch/m68k/mvme16x/rtc.c b/arch/m68k/mvme16x/rtc.c
index 11ac6f63967..39c79ebcd18 100644
--- a/arch/m68k/mvme16x/rtc.c
+++ b/arch/m68k/mvme16x/rtc.c
@@ -144,6 +144,7 @@ static const struct file_operations rtc_fops = {
 	.unlocked_ioctl = rtc_ioctl,
 	.open = rtc_open,
 	.release = rtc_release,
+	.llseek = noop_llseek,
 };
 
 static struct miscdevice rtc_dev=
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index 26f9b9ab19c..557ef72472e 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -468,7 +468,8 @@ static const struct file_operations rtlx_fops = {
 	.release = file_release,
 	.write = file_write,
 	.read = file_read,
-	.poll = file_poll
+	.poll = file_poll,
+	.llseek = noop_llseek,
 };
 
 static struct irqaction rtlx_irq = {
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 2bd2151c586..3eb3cde2f66 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -1192,7 +1192,8 @@ static const struct file_operations vpe_fops = {
 	.owner = THIS_MODULE,
 	.open = vpe_open,
 	.release = vpe_release,
-	.write = vpe_write
+	.write = vpe_write,
+	.llseek = noop_llseek,
 };
 
 /* module wrapper entry points */
diff --git a/arch/mips/sibyte/common/sb_tbprof.c b/arch/mips/sibyte/common/sb_tbprof.c
index d4ed7a9156f..ca35b730d18 100644
--- a/arch/mips/sibyte/common/sb_tbprof.c
+++ b/arch/mips/sibyte/common/sb_tbprof.c
@@ -545,6 +545,7 @@ static const struct file_operations sbprof_tb_fops = {
 	.unlocked_ioctl = sbprof_tb_ioctl,
 	.compat_ioctl = sbprof_tb_ioctl,
 	.mmap = NULL,
+	.llseek = default_llseek,
 };
 
 static struct class *tb_class;
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 50362b6ef6e..b1dd962e247 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -780,6 +780,7 @@ static const struct file_operations lparcfg_fops = {
 	.write = lparcfg_write,
 	.open = lparcfg_open,
 	.release = single_release,
+	.llseek = seq_lseek,
 };
 
 static int __init lparcfg_init(void)
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 67a84d8f118..2b442e6c21e 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -716,6 +716,7 @@ static const struct file_operations rtas_flash_operations = {
 	.write = rtas_flash_write,
 	.open = rtas_excl_open,
 	.release = rtas_flash_release,
+	.llseek = default_llseek,
 };
 
 static const struct file_operations manage_flash_operations = {
@@ -724,6 +725,7 @@
 	.write = manage_flash_write,
 	.open = rtas_excl_open,
 	.release = rtas_excl_release,
+	.llseek = default_llseek,
 };
 
 static const struct file_operations validate_flash_operations = {
@@ -732,6 +734,7 @@
 	.write = validate_flash_write,
 	.open = rtas_excl_open,
 	.release = validate_flash_release,
+	.llseek = default_llseek,
 };
 
 static int __init rtas_flash_init(void)
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 638883e23e3..0438f819fe6 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -354,6 +354,7 @@ static const struct file_operations proc_rtas_log_operations = {
 	.poll = rtas_log_poll,
 	.open = rtas_log_open,
 	.release = rtas_log_release,
+	.llseek = noop_llseek,
 };
 
 static int enable_surveillance(int timeout)
diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c
index 33e5fc7334f..42d0a886de0 100644
--- a/arch/powerpc/platforms/iseries/mf.c
+++ b/arch/powerpc/platforms/iseries/mf.c
@@ -1249,6 +1249,7 @@ out:
 static const struct file_operations proc_vmlinux_operations = {
 	.write = proc_mf_change_vmlinux,
+	.llseek = default_llseek,
 };
 
 static int __init mf_proc_init(void)
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 57ddbb43b33..1de2cbb9230 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -539,7 +539,8 @@ out:
 }
 
 static const struct file_operations ofdt_fops = {
-	.write = ofdt_write
+	.write = ofdt_write,
+	.llseek = noop_llseek,
 };
 
 /* create /proc/powerpc/ofdt write-only by root */
diff --git a/arch/powerpc/platforms/pseries/scanlog.c b/arch/powerpc/platforms/pseries/scanlog.c
index 80e9e7652a4..554457294a2 100644
--- a/arch/powerpc/platforms/pseries/scanlog.c
+++ b/arch/powerpc/platforms/pseries/scanlog.c
@@ -170,6 +170,7 @@ const struct file_operations scanlog_fops = {
 	.write = scanlog_write,
 	.open = scanlog_open,
 	.release = scanlog_release,
+	.llseek = noop_llseek,
 };
 
 static int __init scanlog_init(void)
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index aa819dac236..975e3ab13cb 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -152,6 +152,7 @@ static const struct file_operations prng_fops = {
 	.open = &prng_open,
 	.release = NULL,
 	.read = &prng_read,
+	.llseek = noop_llseek,
 };
 
 static struct miscdevice prng_dev = {
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index 1211bb1d2f2..020e51c063d 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -618,6 +618,7 @@ static const struct file_operations dbfs_d204_ops = {
 	.open = dbfs_d204_open,
 	.read = dbfs_d204_read,
 	.release = dbfs_d204_release,
+	.llseek = no_llseek,
 };
 
 static int hypfs_dbfs_init(void)
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
index ee5ab1a578e..26cf177f6a3 100644
--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -275,6 +275,7 @@ static const struct file_operations dbfs_d2fc_ops = {
 	.open = dbfs_d2fc_open,
 	.read = dbfs_d2fc_read,
 	.release = dbfs_d2fc_release,
+	.llseek = no_llseek,
 };
 
 int hypfs_vm_init(void)
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 98a4a4c267a..74d98670be2 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -449,6 +449,7 @@ static const struct file_operations hypfs_file_ops = {
 	.write = do_sync_write,
 	.aio_read = hypfs_aio_read,
 	.aio_write = hypfs_aio_write,
+	.llseek = no_llseek,
 };
 
 static struct file_system_type hypfs_type = {
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 98192261491..5ad6bc078bf 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -174,6 +174,7 @@ static const struct file_operations debug_file_ops = {
 	.write = debug_input,
 	.open = debug_open,
 	.release = debug_close,
+	.llseek = no_llseek,
 };
 
 static struct dentry *debug_debugfs_root_entry;
diff --git a/arch/sh/boards/mach-landisk/gio.c b/arch/sh/boards/mach-landisk/gio.c
index 01e6abb769b..8132dff078f 100644
--- a/arch/sh/boards/mach-landisk/gio.c
+++ b/arch/sh/boards/mach-landisk/gio.c
@@ -128,6 +128,7 @@ static const struct file_operations gio_fops = {
 	.open = gio_open,		/* open */
 	.release = gio_close,		/* release */
 	.unlocked_ioctl = gio_ioctl,
+	.llseek = noop_llseek,
 };
 
 static int __init gio_init(void)
diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c
index 2c0046ecc71..52de4a9424e 100644
--- a/arch/sparc/kernel/apc.c
+++ b/arch/sparc/kernel/apc.c
@@ -132,6 +132,7 @@ static const struct file_operations apc_fops = {
 	.unlocked_ioctl = apc_ioctl,
 	.open = apc_open,
 	.release = apc_release,
+	.llseek = noop_llseek,
 };
 
 static struct miscdevice apc_miscdev = { APC_MINOR, APC_DEVNAME, &apc_fops };
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 83e85c2e802..6addb914fcc 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -890,6 +890,7 @@ static ssize_t mdesc_read(struct file *file, char __user *buf,
 static const struct file_operations mdesc_fops = {
 	.read = mdesc_read,
 	.owner = THIS_MODULE,
+	.llseek = noop_llseek,
 };
 
 static struct miscdevice mdesc_misc = {
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index 584b965dc82..1e54a784341 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -774,6 +774,7 @@ static const struct file_operations dev_hardwall_fops = {
 #endif
 	.flush = hardwall_flush,
 	.release = hardwall_release,
+	.llseek = noop_llseek,
 };
 
 static struct cdev hardwall_dev;
diff --git a/arch/um/drivers/harddog_kern.c b/arch/um/drivers/harddog_kern.c
index cfcac1ff4cf..dd1e6f871fe 100644
--- a/arch/um/drivers/harddog_kern.c
+++ b/arch/um/drivers/harddog_kern.c
@@ -166,6 +166,7 @@ static const struct file_operations harddog_fops = {
 	.unlocked_ioctl = harddog_ioctl,
 	.open = harddog_open,
 	.release = harddog_release,
+	.llseek = no_llseek,
 };
 
 static struct miscdevice harddog_miscdev = {
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index ebc680717e5..975613b23dc 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -843,6 +843,7 @@ static ssize_t mconsole_proc_write(struct file *file,
 static const struct file_operations mconsole_proc_fops = {
 	.owner = THIS_MODULE,
 	.write = mconsole_proc_write,
+	.llseek = noop_llseek,
 };
 
 static int create_proc_mconsole(void)
diff --git a/arch/um/drivers/mmapper_kern.c b/arch/um/drivers/mmapper_kern.c
index 7158393b679..8501e7d0015 100644
--- a/arch/um/drivers/mmapper_kern.c
+++ b/arch/um/drivers/mmapper_kern.c
@@ -93,6 +93,7 @@ static const struct file_operations mmapper_fops = {
 	.mmap = mmapper_mmap,
 	.open = mmapper_open,
 	.release = mmapper_release,
+	.llseek = default_llseek,
 };
 
 /*
diff --git a/arch/um/drivers/random.c b/arch/um/drivers/random.c
index 4949044773b..981085a93f3 100644
--- a/arch/um/drivers/random.c
+++ b/arch/um/drivers/random.c
@@ -100,6 +100,7 @@ static const struct file_operations rng_chrdev_ops = {
 	.owner = THIS_MODULE,
 	.open = rng_dev_open,
 	.read = rng_dev_read,
+	.llseek = noop_llseek,
 };
 
 /* rng_init shouldn't be called more than once at boot time */
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 4c9c67bf09b..fbbc4dadecc 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -1926,6 +1926,7 @@ static const struct file_operations apm_bios_fops = {
 	.unlocked_ioctl	= do_ioctl,
 	.open		= do_open,
 	.release	= do_release,
+	.llseek		= noop_llseek,
 };
 
 static struct miscdevice apm_device = {
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index 8a85dd1b1aa..1e8d66c1336 100644
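The transformation itself is entirely mechanical. As an illustration only (the "foo" driver and its callbacks are hypothetical, not taken from this patch), the shape of the change is:

#include <linux/fs.h>
#include <linux/module.h>

/* Hypothetical driver callbacks, stubbed so the example stands alone. */
static int foo_open(struct inode *inode, struct file *file)
{
	return 0;
}

static long foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return 0;
}

/* Before this series the .llseek slot was simply omitted, meaning
 * default_llseek; afterwards the fallback is spelled out as a choice. */
static const struct file_operations foo_fops = {
	.owner          = THIS_MODULE,
	.open           = foo_open,
	.unlocked_ioctl = foo_ioctl,
	.llseek         = noop_llseek,	/* explicit: seeks succeed, move nothing */
};

Making the member explicit costs one line per struct but lets each maintainer later audit whether the read/write paths really ignore the file position.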
diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
index 0e869b3f81c..411620ef84c 100644
--- a/drivers/acpi/ec_sys.c
+++ b/drivers/acpi/ec_sys.c
@@ -101,6 +101,7 @@ static struct file_operations acpi_ec_io_ops = {
 	.open  = acpi_ec_open_io,
 	.read  = acpi_ec_read_io,
 	.write = acpi_ec_write_io,
+	.llseek = default_llseek,
 };
 
 int acpi_ec_add_debugfs(struct acpi_ec *ec, unsigned int ec_device_count)
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index d439314a75d..85d90899380 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -110,6 +110,7 @@ static const struct file_operations acpi_system_event_ops = {
 	.read = acpi_system_read_event,
 	.release = acpi_system_close_event,
 	.poll = acpi_system_poll_event,
+	.llseek = default_llseek,
 };
 #endif	/* CONFIG_ACPI_PROC_EVENT */
 
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 4e2c367fec1..dfcb33e8d40 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -7062,7 +7062,8 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
 static const struct file_operations DAC960_gam_fops = {
 	.owner		= THIS_MODULE,
-	.unlocked_ioctl	= DAC960_gam_ioctl
+	.unlocked_ioctl	= DAC960_gam_ioctl,
+	.llseek		= noop_llseek,
 };
 
 static struct miscdevice DAC960_gam_dev = {
diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
index 4a1b9e7464a..32b484ba21b 100644
--- a/drivers/block/aoe/aoechr.c
+++ b/drivers/block/aoe/aoechr.c
@@ -265,6 +265,7 @@ static const struct file_operations aoe_fops = {
 	.open = aoechr_open,
 	.release = aoechr_rel,
 	.owner = THIS_MODULE,
+	.llseek = noop_llseek,
 };
 
 static char *aoe_devnode(struct device *dev, mode_t *mode)
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
index c397b3ddba9..aa27cd84f63 100644
--- a/drivers/block/paride/pg.c
+++ b/drivers/block/paride/pg.c
@@ -234,6 +234,7 @@ static const struct file_operations pg_fops = {
 	.write = pg_write,
 	.open = pg_open,
 	.release = pg_release,
+	.llseek = noop_llseek,
 };
 
 static void pg_init_units(void)
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
index bc5825fdeaa..c372c32e0db 100644
--- a/drivers/block/paride/pt.c
+++ b/drivers/block/paride/pt.c
@@ -239,6 +239,7 @@ static const struct file_operations pt_fops = {
 	.unlocked_ioctl = pt_ioctl,
 	.open = pt_open,
 	.release = pt_release,
+	.llseek = noop_llseek,
 };
 
 /* sysfs class support */
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index b1cbeb59bb7..6a4642dd828 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -3046,6 +3046,7 @@ static const struct file_operations pkt_ctl_fops = {
 	.compat_ioctl	= pkt_ctl_compat_ioctl,
 #endif
 	.owner		= THIS_MODULE,
+	.llseek		= no_llseek,
 };
 
 static struct miscdevice pkt_misc = {
diff --git a/drivers/bluetooth/btmrvl_debugfs.c b/drivers/bluetooth/btmrvl_debugfs.c
index 54739b08c30..fd6305bf953 100644
--- a/drivers/bluetooth/btmrvl_debugfs.c
+++ b/drivers/bluetooth/btmrvl_debugfs.c
@@ -92,6 +92,7 @@ static const struct file_operations btmrvl_hscfgcmd_fops = {
 	.read = btmrvl_hscfgcmd_read,
 	.write = btmrvl_hscfgcmd_write,
 	.open = btmrvl_open_generic,
+	.llseek = default_llseek,
 };
 
 static ssize_t btmrvl_psmode_write(struct file *file, const char __user *ubuf,
@@ -130,6 +131,7 @@ static const struct file_operations btmrvl_psmode_fops = {
 	.read = btmrvl_psmode_read,
 	.write = btmrvl_psmode_write,
 	.open = btmrvl_open_generic,
+	.llseek = default_llseek,
 };
 
 static ssize_t btmrvl_pscmd_write(struct file *file, const char __user *ubuf,
@@ -173,6 +175,7 @@ static const struct file_operations btmrvl_pscmd_fops = {
 	.read = btmrvl_pscmd_read,
 	.write = btmrvl_pscmd_write,
 	.open = btmrvl_open_generic,
+	.llseek = default_llseek,
 };
 
 static ssize_t btmrvl_gpiogap_write(struct file *file, const char __user *ubuf,
@@ -211,6 +214,7 @@ static const struct file_operations btmrvl_gpiogap_fops = {
 	.read = btmrvl_gpiogap_read,
 	.write = btmrvl_gpiogap_write,
 	.open = btmrvl_open_generic,
+	.llseek = default_llseek,
 };
 
 static ssize_t btmrvl_hscmd_write(struct file *file, const char __user *ubuf,
@@ -252,6 +256,7 @@ static const struct file_operations btmrvl_hscmd_fops = {
 	.read = btmrvl_hscmd_read,
 	.write = btmrvl_hscmd_write,
 	.open = btmrvl_open_generic,
+	.llseek = default_llseek,
 };
 
 static ssize_t btmrvl_hsmode_write(struct file *file, const char __user *ubuf,
@@ -289,6 +294,7 @@ static const struct file_operations btmrvl_hsmode_fops = {
 	.read = btmrvl_hsmode_read,
 	.write = btmrvl_hsmode_write,
 	.open = btmrvl_open_generic,
+	.llseek = default_llseek,
 };
 
 static ssize_t btmrvl_curpsmode_read(struct file *file, char __user *userbuf,
@@ -306,6 +312,7 @@ static ssize_t btmrvl_curpsmode_read(struct file *file, char __user *userbuf,
 static const struct file_operations btmrvl_curpsmode_fops = {
 	.read = btmrvl_curpsmode_read,
 	.open = btmrvl_open_generic,
+	.llseek = default_llseek,
 };
 
 static ssize_t btmrvl_psstate_read(struct file *file, char __user * userbuf,
@@ -323,6 +330,7 @@ static ssize_t btmrvl_psstate_read(struct file *file, char __user * userbuf,
 static const struct file_operations btmrvl_psstate_fops = {
 	.read = btmrvl_psstate_read,
 	.open = btmrvl_open_generic,
+	.llseek = default_llseek,
 };
 
 static ssize_t btmrvl_hsstate_read(struct file *file, char __user *userbuf,
@@ -340,6 +348,7 @@ static ssize_t btmrvl_hsstate_read(struct file *file, char __user *userbuf,
 static const struct file_operations btmrvl_hsstate_fops = {
 	.read = btmrvl_hsstate_read,
 	.open = btmrvl_open_generic,
+	.llseek = default_llseek,
 };
 
 static ssize_t btmrvl_txdnldready_read(struct file *file, char __user *userbuf,
@@ -358,6 +367,7 @@ static ssize_t btmrvl_txdnldready_read(struct file *file, char __user *userbuf,
 static const struct file_operations btmrvl_txdnldready_fops = {
 	.read = btmrvl_txdnldready_read,
 	.open = btmrvl_open_generic,
+	.llseek = default_llseek,
 };
 
 void btmrvl_debugfs_init(struct hci_dev *hdev)
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 3aa7b2a54b6..67c180c2c1e 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -282,6 +282,7 @@ static const struct file_operations vhci_fops = {
 	.poll		= vhci_poll,
 	.open		= vhci_open,
 	.release	= vhci_release,
+	.llseek		= no_llseek,
 };
 
 static struct miscdevice vhci_miscdev= {
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index 033e1505fca..5ffa6904ea6 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -402,6 +402,7 @@ static const struct file_operations apm_bios_fops = {
 	.unlocked_ioctl	= apm_ioctl,
 	.open		= apm_open,
 	.release	= apm_release,
+	.llseek		= noop_llseek,
 };
 
 static struct miscdevice apm_device = {
diff --git a/drivers/char/bfin-otp.c b/drivers/char/bfin-otp.c
index 836d4f0a876..44660f1c484 100644
--- a/drivers/char/bfin-otp.c
+++ b/drivers/char/bfin-otp.c
@@ -222,6 +222,7 @@ static const struct file_operations bfin_otp_fops = {
 	.unlocked_ioctl	= bfin_otp_ioctl,
 	.read		= bfin_otp_read,
 	.write		= bfin_otp_write,
+	.llseek		= default_llseek,
 };
 
 static struct miscdevice bfin_otp_misc_device = {
diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
index d5fa113afe3..f6718f05dad 100644
--- a/drivers/char/briq_panel.c
+++ b/drivers/char/briq_panel.c
@@ -186,6 +186,7 @@ static const struct file_operations briq_panel_fops = {
 	.write		= briq_panel_write,
 	.open		= briq_panel_open,
 	.release	= briq_panel_release,
+	.llseek		= noop_llseek,
 };
 
 static struct miscdevice briq_panel_miscdev = {
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
index 91917133ae0..a4a6c2f044b 100644
--- a/drivers/char/bsr.c
+++ b/drivers/char/bsr.c
@@ -155,6 +155,7 @@ static const struct file_operations bsr_fops = {
 	.owner = THIS_MODULE,
 	.mmap	= bsr_mmap,
 	.open = bsr_open,
+	.llseek = noop_llseek,
 };
 
 static void bsr_cleanup_devs(void)
diff --git a/drivers/char/cs5535_gpio.c b/drivers/char/cs5535_gpio.c
index 4d830dc482e..0cf1e5fad9a 100644
--- a/drivers/char/cs5535_gpio.c
+++ b/drivers/char/cs5535_gpio.c
@@ -169,7 +169,8 @@ static const struct file_operations cs5535_gpio_fops = {
 	.owner	= THIS_MODULE,
 	.write	= cs5535_gpio_write,
 	.read	= cs5535_gpio_read,
-	.open	= cs5535_gpio_open
+	.open	= cs5535_gpio_open,
+	.llseek = no_llseek,
 };
 
 static int __init cs5535_gpio_init(void)
diff --git a/drivers/char/ds1302.c b/drivers/char/ds1302.c
index 170693c93c7..4f7aa364167 100644
--- a/drivers/char/ds1302.c
+++ b/drivers/char/ds1302.c
@@ -288,6 +288,7 @@ get_rtc_status(char *buf)
 static const struct file_operations rtc_fops = {
 	.owner		= THIS_MODULE,
 	.unlocked_ioctl	= rtc_ioctl,
+	.llseek		= noop_llseek,
 };
 
 /* Probe for the chip by writing something to its RAM and try reading it back. */
diff --git a/drivers/char/ds1620.c b/drivers/char/ds1620.c
index dbee8688f75..50462b63e51 100644
--- a/drivers/char/ds1620.c
+++ b/drivers/char/ds1620.c
@@ -357,6 +357,7 @@ static const struct file_operations ds1620_fops = {
 	.open		= ds1620_open,
 	.read		= ds1620_read,
 	.unlocked_ioctl	= ds1620_unlocked_ioctl,
+	.llseek		= no_llseek,
 };
 
 static struct miscdevice ds1620_miscdev = {
diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c
index 8a1b28a10ef..353be4707d9 100644
--- a/drivers/char/dsp56k.c
+++ b/drivers/char/dsp56k.c
@@ -482,6 +482,7 @@ static const struct file_operations dsp56k_fops = {
 	.unlocked_ioctl	= dsp56k_ioctl,
 	.open		= dsp56k_open,
 	.release	= dsp56k_release,
+	.llseek		= noop_llseek,
 };
 
 diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c
index e3859d4eaea..007eca3ed77 100644
--- a/drivers/char/dtlk.c
+++ b/drivers/char/dtlk.c
@@ -105,6 +105,7 @@ static const struct file_operations dtlk_fops =
 	.unlocked_ioctl	= dtlk_ioctl,
 	.open		= dtlk_open,
 	.release	= dtlk_release,
+	.llseek		= no_llseek,
 };
 
 /* local prototypes */
diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
index b6c2cc167c1..eaa0e4264e1 100644
--- a/drivers/char/genrtc.c
+++ b/drivers/char/genrtc.c
@@ -497,6 +497,7 @@ static const struct file_operations gen_rtc_fops = {
 	.unlocked_ioctl	= gen_rtc_unlocked_ioctl,
 	.open		= gen_rtc_open,
 	.release	= gen_rtc_release,
+	.llseek		= noop_llseek,
 };
 
 static struct miscdevice rtc_gen_dev =
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 3d9c61e5acb..788da05190c 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -170,6 +170,7 @@ static const struct file_operations rng_chrdev_ops = {
 	.owner		= THIS_MODULE,
 	.open		= rng_dev_open,
 	.read		= rng_dev_read,
+	.llseek		= noop_llseek,
 };
 
 static struct miscdevice rng_miscdev = {
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index d4b71e8d0d2..2e49e515d8e 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -236,6 +236,7 @@ static const struct file_operations ip2_ipl = {
 	.write		= ip2_ipl_write,
 	.unlocked_ioctl	= ip2_ipl_ioctl,
 	.open		= ip2_ipl_open,
+	.llseek		= noop_llseek,
 };
 
 static unsigned long irq_counter;
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index d8ec92a3898..c6709d6b0fd 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -850,6 +850,7 @@ static const struct file_operations ipmi_fops = {
 	.release	= ipmi_release,
 	.fasync		= ipmi_fasync,
 	.poll		= ipmi_poll,
+	.llseek		= noop_llseek,
 };
 
 #define DEVICE_NAME "ipmidev"
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 654d566ca57..da041f88f64 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -909,6 +909,7 @@ static const struct file_operations ipmi_wdog_fops = {
 	.open    = ipmi_open,
 	.release = ipmi_close,
 	.fasync  = ipmi_fasync,
+	.llseek  = no_llseek,
 };
 
 static struct miscdevice ipmi_wdog_miscdev = {
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index be28391adb7..667abd23ad6 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -704,6 +704,7 @@ static const struct file_operations stli_fsiomem = {
 	.read		= stli_memread,
 	.write		= stli_memwrite,
 	.unlocked_ioctl	= stli_memioctl,
+	.llseek		= default_llseek,
 };
 
 /*****************************************************************************/
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 938a3a27388..d2344117eaf 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -748,6 +748,7 @@ static const struct file_operations lp_fops = {
 #ifdef CONFIG_PARPORT_1284
 	.read		= lp_read,
 #endif
+	.llseek		= noop_llseek,
 };
 
 /* --- support for console on the line printer ----------------- */
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index a398ecdbd75..8ebd232132f 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -804,6 +804,7 @@ static const struct file_operations full_fops = {
 static const struct file_operations oldmem_fops = {
 	.read	= read_oldmem,
 	.open	= open_oldmem,
+	.llseek = default_llseek,
 };
 #endif
 
@@ -830,6 +831,7 @@ static ssize_t kmsg_write(struct file *file, const char __user *buf,
 static const struct file_operations kmsg_fops = {
 	.write = kmsg_write,
+	.llseek = noop_llseek,
 };
 
 static const struct memdev {
@@ -881,6 +883,7 @@ static int memory_open(struct inode *inode, struct file *filp)
 static const struct file_operations memory_fops = {
 	.open = memory_open,
+	.llseek = noop_llseek,
 };
 
 static char *mem_devnode(struct device *dev, mode_t *mode)
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index abdafd48898..778273c9324 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -162,6 +162,7 @@ static struct class *misc_class;
 static const struct file_operations misc_fops = {
 	.owner		= THIS_MODULE,
 	.open		= misc_open,
+	.llseek		= noop_llseek,
 };
 
 /**
diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
index ea7c99fa978..1c4070ced06 100644
--- a/drivers/char/mmtimer.c
+++ b/drivers/char/mmtimer.c
@@ -72,6 +72,7 @@ static const struct file_operations mmtimer_fops = {
 	.owner = THIS_MODULE,
 	.mmap =	mmtimer_mmap,
 	.unlocked_ioctl = mmtimer_ioctl,
+	.llseek = noop_llseek,
 };
 
 /*
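What no_llseek means for userspace is easy to demonstrate without a driver: lseek() simply fails with ESPIPE, the same error an ordinary pipe produces. A small, runnable stand-in (a pipe substitutes here for a char device whose fops use no_llseek):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fds[2];

	if (pipe(fds) != 0) {
		perror("pipe");
		return 1;
	}
	/* Pipes are unseekable, exactly like a device using no_llseek. */
	if (lseek(fds[0], 0, SEEK_SET) == -1)
		printf("lseek: %s\n", strerror(errno)); /* "Illegal seek" (ESPIPE) */
	return 0;
}

With noop_llseek, by contrast, the same lseek() call would return the current position and report success while moving nothing.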
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index ecb89d798e3..966a95bc974 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -316,7 +316,8 @@ uncached_mmap(struct file *file, struct vm_area_struct *vma)
 static const struct file_operations fetchop_fops = {
 	.owner = THIS_MODULE,
-	.mmap = fetchop_mmap
+	.mmap = fetchop_mmap,
+	.llseek = noop_llseek,
 };
 
 static struct miscdevice fetchop_miscdev = {
@@ -327,7 +328,8 @@ static struct miscdevice fetchop_miscdev = {
 static const struct file_operations cached_fops = {
 	.owner = THIS_MODULE,
-	.mmap = cached_mmap
+	.mmap = cached_mmap,
+	.llseek = noop_llseek,
 };
 
 static struct miscdevice cached_miscdev = {
@@ -338,7 +340,8 @@ static struct miscdevice cached_miscdev = {
 static const struct file_operations uncached_fops = {
 	.owner = THIS_MODULE,
-	.mmap = uncached_mmap
+	.mmap = uncached_mmap,
+	.llseek = noop_llseek,
 };
 
 static struct miscdevice uncached_miscdev = {
diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c
index a4ec50c9507..0464822eac5 100644
--- a/drivers/char/mwave/mwavedd.c
+++ b/drivers/char/mwave/mwavedd.c
@@ -479,7 +479,8 @@ static const struct file_operations mwave_fops = {
 	.write		= mwave_write,
 	.unlocked_ioctl	= mwave_ioctl,
 	.open		= mwave_open,
-	.release	= mwave_close
+	.release	= mwave_close,
+	.llseek		= default_llseek,
 };
 
 diff --git a/drivers/char/nwbutton.c b/drivers/char/nwbutton.c
index 2604246501e..8994ce32e6c 100644
--- a/drivers/char/nwbutton.c
+++ b/drivers/char/nwbutton.c
@@ -182,6 +182,7 @@ static int button_read (struct file *filp, char __user *buffer,
 static const struct file_operations button_fops = {
 	.owner		= THIS_MODULE,
 	.read		= button_read,
+	.llseek		= noop_llseek,
 };
 
 /*
diff --git a/drivers/char/pc8736x_gpio.c b/drivers/char/pc8736x_gpio.c
index 8ecbcc174c1..b304ec05250 100644
--- a/drivers/char/pc8736x_gpio.c
+++ b/drivers/char/pc8736x_gpio.c
@@ -234,6 +234,7 @@ static const struct file_operations pc8736x_gpio_fileops = {
 	.open	= pc8736x_gpio_open,
 	.write	= nsc_gpio_write,
 	.read	= nsc_gpio_read,
+	.llseek = no_llseek,
 };
 
 static void __init pc8736x_init_shadow(void)
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index ec73d9f6d9e..c99f6997e5e 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -1880,6 +1880,7 @@ static const struct file_operations cm4000_fops = {
 	.unlocked_ioctl	= cmm_ioctl,
 	.open	= cmm_open,
 	.release= cmm_close,
+	.llseek = no_llseek,
 };
 
 static struct pcmcia_device_id cm4000_ids[] = {
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index 815cde1d057..9ecc58baa8f 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -650,6 +650,7 @@ static const struct file_operations reader_fops = {
 	.open		= cm4040_open,
 	.release	= cm4040_close,
 	.poll		= cm4040_poll,
+	.llseek		= no_llseek,
 };
 
 static struct pcmcia_device_id cm4040_ids[] = {
diff --git a/drivers/char/random.c b/drivers/char/random.c
index caef35a4689..5a1aa64f4e7 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1165,6 +1165,7 @@ const struct file_operations random_fops = {
 	.poll  = random_poll,
 	.unlocked_ioctl = random_ioctl,
 	.fasync = random_fasync,
+	.llseek = noop_llseek,
 };
 
 const struct file_operations urandom_fops = {
@@ -1172,6 +1173,7 @@
 	.write = random_write,
 	.unlocked_ioctl = random_ioctl,
 	.fasync = random_fasync,
+	.llseek = noop_llseek,
 };
 
 /***************************************************************
diff --git a/drivers/char/rio/rio_linux.c b/drivers/char/rio/rio_linux.c
index d58c2eb07f0..1e87a93164b 100644
--- a/drivers/char/rio/rio_linux.c
+++ b/drivers/char/rio/rio_linux.c
@@ -241,6 +241,7 @@ static struct real_driver rio_real_driver = {
 static const struct file_operations rio_fw_fops = {
 	.owner = THIS_MODULE,
 	.unlocked_ioctl = rio_fw_ioctl,
+	.llseek = noop_llseek,
 };
 
 static struct miscdevice rio_fw_device = {
diff --git a/drivers/char/scx200_gpio.c b/drivers/char/scx200_gpio.c
index 99e5272e3c5..0bc135b9b16 100644
--- a/drivers/char/scx200_gpio.c
+++ b/drivers/char/scx200_gpio.c
@@ -67,6 +67,7 @@ static const struct file_operations scx200_gpio_fileops = {
 	.read    = nsc_gpio_read,
 	.open    = scx200_gpio_open,
 	.release = scx200_gpio_release,
+	.llseek  = no_llseek,
 };
 
 static struct cdev scx200_gpio_cdev;  /* use 1 cdev for all pins */
diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c
index 32b74de18f5..444ce17ac72 100644
--- a/drivers/char/snsc.c
+++ b/drivers/char/snsc.c
@@ -357,6 +357,7 @@ static const struct file_operations scdrv_fops = {
 	.poll =		scdrv_poll,
 	.open =		scdrv_open,
 	.release =	scdrv_release,
+	.llseek =	noop_llseek,
 };
 
 static struct class *snsc_class;
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index f2167f8e5aa..8ef16490810 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -608,6 +608,7 @@ static unsigned int sc26198_baudtable[] = {
 static const struct file_operations stl_fsiomem = {
 	.owner		= THIS_MODULE,
 	.unlocked_ioctl	= stl_memioctl,
+	.llseek		= noop_llseek,
 };
 
 static struct class *stallion_class;
diff --git a/drivers/char/sx.c b/drivers/char/sx.c
index 5b24db4ff7f..e53f1686539 100644
--- a/drivers/char/sx.c
+++ b/drivers/char/sx.c
@@ -397,6 +397,7 @@ static struct real_driver sx_real_driver = {
 static const struct file_operations sx_fw_fops = {
 	.owner = THIS_MODULE,
 	.unlocked_ioctl = sx_fw_ioctl,
+	.llseek = noop_llseek,
 };
 
 static struct miscdevice sx_fw_device = {
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index ef31bb81e84..f3019f53e87 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -772,6 +772,7 @@ static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
 static const struct file_operations proc_sysrq_trigger_operations = {
 	.write		= write_sysrq_trigger,
+	.llseek		= noop_llseek,
 };
 
 static void sysrq_init_procfs(void)
diff --git a/drivers/char/tb0219.c b/drivers/char/tb0219.c
index cad4eb65f13..ad264185eb1 100644
--- a/drivers/char/tb0219.c
+++ b/drivers/char/tb0219.c
@@ -261,6 +261,7 @@ static const struct file_operations tb0219_fops = {
 	.write		= tanbac_tb0219_write,
 	.open		= tanbac_tb0219_open,
 	.release	= tanbac_tb0219_release,
+	.llseek		= no_llseek,
 };
 
 static void tb0219_restart(char *command)
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index 80ea6bcfffd..d087532b29d 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -267,6 +267,7 @@ static const struct file_operations tlclk_fops = {
 	.read = tlclk_read,
 	.open = tlclk_open,
 	.release = tlclk_release,
+	.llseek = noop_llseek,
 };
 
 diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c
index f8bc79f6de3..3d6e9617245 100644
--- a/drivers/char/toshiba.c
+++ b/drivers/char/toshiba.c
@@ -95,6 +95,7 @@ static long tosh_ioctl(struct file *, unsigned int,
 static const struct file_operations tosh_fops = {
 	.owner		= THIS_MODULE,
 	.unlocked_ioctl	= tosh_ioctl,
+	.llseek		= noop_llseek,
 };
 
 static struct miscdevice tosh_device = {
diff --git a/drivers/char/uv_mmtimer.c b/drivers/char/uv_mmtimer.c
index c7072ba14f4..493b47a0d51 100644
--- a/drivers/char/uv_mmtimer.c
+++ b/drivers/char/uv_mmtimer.c
@@ -52,6 +52,7 @@ static const struct file_operations uv_mmtimer_fops = {
 	.owner = THIS_MODULE,
 	.mmap =	uv_mmtimer_mmap,
 	.unlocked_ioctl = uv_mmtimer_ioctl,
+	.llseek = noop_llseek,
 };
 
 /**
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index b663d573aad..be6d6fb47cc 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -567,6 +567,7 @@ static const struct file_operations hwicap_fops = {
 	.read = hwicap_read,
 	.open = hwicap_open,
 	.release = hwicap_release,
+	.llseek = noop_llseek,
 };
 
 static int __devinit hwicap_setup(struct device *dev, int id,
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 557e2272e5b..ae2b8714d19 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -157,6 +157,7 @@ static const struct file_operations coh901318_debugfs_status_operations = {
 	.owner		= THIS_MODULE,
 	.open		= coh901318_debugfs_open,
 	.read		= coh901318_debugfs_read,
+	.llseek		= default_llseek,
 };
 
 diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index 8528b10763e..bf184fb59a5 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -405,6 +405,7 @@ static const struct file_operations nosy_ops = {
 	.poll =			nosy_poll,
 	.open =			nosy_open,
 	.release =		nosy_release,
+	.llseek =		noop_llseek,
 };
 
 #define PHY_PACKET_SIZE 12 /* 1 payload, 1 inverse, 1 ack = 3 quadlets */
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 84da748555b..ff6690f4fc8 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -284,7 +284,8 @@ EXPORT_SYMBOL(drm_exit);
 /** File operations structure */
 static const struct file_operations drm_stub_fops = {
 	.owner = THIS_MODULE,
-	.open = drm_stub_open
+	.open = drm_stub_open,
+	.llseek = noop_llseek,
 };
 
 static int __init drm_core_init(void)
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 61b4caf220f..00f1bdaa65c 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -119,6 +119,7 @@ static const struct file_operations i810_buffer_fops = {
 	.unlocked_ioctl = drm_ioctl,
 	.mmap = i810_mmap_buffers,
 	.fasync = drm_fasync,
+	.llseek = noop_llseek,
 };
 
 static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
index 671aa18415a..5c6eb65f4e5 100644
--- a/drivers/gpu/drm/i830/i830_dma.c
+++ b/drivers/gpu/drm/i830/i830_dma.c
@@ -121,6 +121,7 @@ static const struct file_operations i830_buffer_fops = {
 	.unlocked_ioctl = drm_ioctl,
 	.mmap = i830_mmap_buffers,
 	.fasync = drm_fasync,
+	.llseek = noop_llseek,
 };
 
 static int i830_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 5e43d707678..048149748fd 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -782,6 +782,7 @@ static const struct file_operations i915_wedged_fops = {
 	.open = i915_wedged_open,
 	.read = i915_wedged_read,
 	.write = i915_wedged_write,
+	.llseek = default_llseek,
 };
 
 /* As the drm_debugfs_init() routines are called before dev->dev_private is
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index b87569e96b1..3b7e0acf816 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -1211,6 +1211,7 @@ static const struct file_operations vga_arb_device_fops = {
 	.poll = vga_arb_fpoll,
 	.open = vga_arb_open,
 	.release = vga_arb_release,
+	.llseek = noop_llseek,
 };
 
 static struct miscdevice vga_arb_device = {
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 850d02a7a92..61a3e572224 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -1051,6 +1051,7 @@ static const struct file_operations hid_debug_events_fops = {
 	.read           = hid_debug_events_read,
 	.poll		= hid_debug_events_poll,
 	.release        = hid_debug_events_release,
+	.llseek         = noop_llseek,
 };
 
 diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c
index f6e80c7ca61..5a6879e235a 100644
--- a/drivers/hid/hid-roccat.c
+++ b/drivers/hid/hid-roccat.c
@@ -384,6 +384,7 @@ static const struct file_operations roccat_ops = {
 	.poll = roccat_poll,
 	.open = roccat_open,
 	.release = roccat_release,
+	.llseek = noop_llseek,
 };
 
 static int __init roccat_init(void)
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 47d70c523d9..bb98fa87aa8 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -329,6 +329,7 @@ static const struct file_operations hidraw_ops = {
 	.open =         hidraw_open,
 	.release =      hidraw_release,
 	.unlocked_ioctl = hidraw_ioctl,
+	.llseek =	noop_llseek,
 };
 
 void hidraw_report_event(struct hid_device *hid, u8 *data, int len)
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 0a29c51114a..e8f45d7ef3b 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -847,6 +847,7 @@ static const struct file_operations hiddev_fops = {
 #ifdef CONFIG_COMPAT
 	.compat_ioctl	= hiddev_compat_ioctl,
 #endif
+	.llseek		= noop_llseek,
 };
 
 static char *hiddev_devnode(struct device *dev, mode_t *mode)
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 653db1bda93..23b8555215d 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -762,6 +762,7 @@ static const struct file_operations atk_debugfs_ggrp_fops = {
 	.read		= atk_debugfs_ggrp_read,
 	.open		= atk_debugfs_ggrp_open,
 	.release	= atk_debugfs_ggrp_release,
+	.llseek		= no_llseek,
 };
 
 static void atk_debugfs_init(struct atk_data *data)
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 6d622cb5ac8..23d1d1c5587 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -1903,6 +1903,7 @@ static const struct file_operations idetape_fops = {
 	.unlocked_ioctl	= idetape_chrdev_ioctl,
 	.open		= idetape_chrdev_open,
 	.release	= idetape_chrdev_release,
+	.llseek		= noop_llseek,
 };
 
 static int idetape_open(struct block_device *bdev, fmode_t mode)
diff --git a/drivers/idle/i7300_idle.c b/drivers/idle/i7300_idle.c
index 15341fc1c68..c976285d313 100644
--- a/drivers/idle/i7300_idle.c
+++ b/drivers/idle/i7300_idle.c
@@ -536,6 +536,7 @@ static ssize_t stats_read_ul(struct file *fp, char __user *ubuf, size_t count,
 static const struct file_operations idle_fops = {
 	.open	= stats_open_generic,
 	.read	= stats_read_ul,
+	.llseek = default_llseek,
 };
 
 struct debugfs_file_info {
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
index d4ce8b63e19..daef61d5e5b 100644
--- a/drivers/infiniband/hw/ipath/ipath_diag.c
+++ b/drivers/infiniband/hw/ipath/ipath_diag.c
@@ -65,7 +65,8 @@ static const struct file_operations diag_file_ops = {
 	.write = ipath_diag_write,
 	.read = ipath_diag_read,
 	.open = ipath_diag_open,
-	.release = ipath_diag_release
+	.release = ipath_diag_release,
+	.llseek = default_llseek,
 };
 
 static ssize_t ipath_diagpkt_write(struct file *fp,
@@ -75,6 +76,7 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
 static const struct file_operations diagpkt_file_ops = {
 	.owner = THIS_MODULE,
 	.write = ipath_diagpkt_write,
+	.llseek = noop_llseek,
 };
 
 static atomic_t diagpkt_count = ATOMIC_INIT(0);
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 65eb8929db2..6078992da3f 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -63,7 +63,8 @@ static const struct file_operations ipath_file_ops = {
 	.open = ipath_open,
 	.release = ipath_close,
 	.poll = ipath_poll,
-	.mmap = ipath_mmap
+	.mmap = ipath_mmap,
+	.llseek = noop_llseek,
 };
 
 /*
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index 2fca70836da..d13e72685dc 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -103,6 +103,7 @@ static ssize_t atomic_stats_read(struct file *file, char __user *buf,
 static const struct file_operations atomic_stats_ops = {
 	.read = atomic_stats_read,
+	.llseek = default_llseek,
 };
 
 static ssize_t atomic_counters_read(struct file *file, char __user *buf,
@@ -120,6 +121,7 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf,
 static const struct file_operations atomic_counters_ops = {
 	.read = atomic_counters_read,
+	.llseek = default_llseek,
 };
 
 static ssize_t flash_read(struct file *file, char __user *buf,
@@ -224,6 +226,7 @@ bail:
 static const struct file_operations flash_ops = {
 	.read = flash_read,
 	.write = flash_write,
+	.llseek = default_llseek,
 };
 
 static int create_device_files(struct super_block *sb,
diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c
index 05dcf0d9a7d..204c4dd9dce 100644
--- a/drivers/infiniband/hw/qib/qib_diag.c
+++ b/drivers/infiniband/hw/qib/qib_diag.c
@@ -136,7 +136,8 @@ static const struct file_operations diag_file_ops = {
 	.write = qib_diag_write,
 	.read = qib_diag_read,
 	.open = qib_diag_open,
-	.release = qib_diag_release
+	.release = qib_diag_release,
+	.llseek = default_llseek,
 };
 
 static atomic_t diagpkt_count = ATOMIC_INIT(0);
@@ -149,6 +150,7 @@ static ssize_t qib_diagpkt_write(struct file *fp, const char __user *data,
 static const struct file_operations diagpkt_file_ops = {
 	.owner = THIS_MODULE,
 	.write = qib_diagpkt_write,
+	.llseek = noop_llseek,
 };
 
 int qib_diag_add(struct qib_devdata *dd)
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 6b11645edf3..aa2be214270 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -63,7 +63,8 @@ static const struct file_operations qib_file_ops = {
 	.open = qib_open,
 	.release = qib_close,
 	.poll = qib_poll,
-	.mmap = qib_mmapf
+	.mmap = qib_mmapf,
+	.llseek = noop_llseek,
 };
 
 /*
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index 9f989c0ba9d..a0e6613e8be 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -367,6 +367,7 @@ bail:
 static const struct file_operations flash_ops = {
 	.read = flash_read,
 	.write = flash_write,
+	.llseek = default_llseek,
 };
 
 static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd)
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index c908c5f8364..51330363c0e 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -761,7 +761,8 @@ static const struct file_operations evdev_fops = {
 	.compat_ioctl	= evdev_ioctl_compat,
 #endif
 	.fasync		= evdev_fasync,
-	.flush		= evdev_flush
+	.flush		= evdev_flush,
+	.llseek		= no_llseek,
 };
 
 static int evdev_install_chrdev(struct evdev *evdev)
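The choice of helper tracks how the file actually reads. Hunks such as lparcfg.c and mce-severity.c above pick seq_lseek because those files go through the seq_file machinery, whose read path keeps a real file position. A minimal sketch of that pairing (the foo_* names are hypothetical, not from this patch):

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static int foo_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello\n");
	return 0;
}

static int foo_open(struct inode *inode, struct file *file)
{
	return single_open(file, foo_show, NULL);
}

/* seq_file-backed files pair the stock seq_read/seq_lseek helpers,
 * so an explicit .llseek = seq_lseek is the natural choice here. */
static const struct file_operations foo_fops = {
	.owner   = THIS_MODULE,
	.open    = foo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

Files whose read/write honor *ppos keep default_llseek; files where position is meaningless get noop_llseek or no_llseek, as the remaining hunks show.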
= { diff --git a/drivers/media/IR/imon.c b/drivers/media/IR/imon.c index c185422ef28..faed5a332c7 100644 --- a/drivers/media/IR/imon.c +++ b/drivers/media/IR/imon.c @@ -151,7 +151,8 @@ static const struct file_operations vfd_fops = { .owner = THIS_MODULE, .open = &display_open, .write = &vfd_write, - .release = &display_close + .release = &display_close, + .llseek = noop_llseek, }; /* lcd character device file operations */ @@ -159,7 +160,8 @@ static const struct file_operations lcd_fops = { .owner = THIS_MODULE, .open = &display_open, .write = &lcd_write, - .release = &display_close + .release = &display_close, + .llseek = noop_llseek, }; enum { diff --git a/drivers/media/IR/lirc_dev.c b/drivers/media/IR/lirc_dev.c index 5b145e8672f..0acf6396e06 100644 --- a/drivers/media/IR/lirc_dev.c +++ b/drivers/media/IR/lirc_dev.c @@ -163,6 +163,7 @@ static struct file_operations fops = { .unlocked_ioctl = lirc_dev_fop_ioctl, .open = lirc_dev_fop_open, .release = lirc_dev_fop_close, + .llseek = noop_llseek, }; static int lirc_cdev_add(struct irctl *ir) diff --git a/drivers/media/dvb/bt8xx/dst_ca.c b/drivers/media/dvb/bt8xx/dst_ca.c index cf870516284..7ed74dfc6b9 100644 --- a/drivers/media/dvb/bt8xx/dst_ca.c +++ b/drivers/media/dvb/bt8xx/dst_ca.c @@ -694,7 +694,8 @@ static const struct file_operations dst_ca_fops = { .open = dst_ca_open, .release = dst_ca_release, .read = dst_ca_read, - .write = dst_ca_write + .write = dst_ca_write, + .llseek = noop_llseek, }; static struct dvb_device dvbdev_ca = { diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c index 0042306ea11..75c20ac82c0 100644 --- a/drivers/media/dvb/dvb-core/dmxdev.c +++ b/drivers/media/dvb/dvb-core/dmxdev.c @@ -1150,6 +1150,7 @@ static const struct file_operations dvb_demux_fops = { .open = dvb_demux_open, .release = dvb_demux_release, .poll = dvb_demux_poll, + .llseek = default_llseek, }; static struct dvb_device dvbdev_demux = { @@ -1225,6 +1226,7 @@ static const struct file_operations dvb_dvr_fops = { .open = dvb_dvr_open, .release = dvb_dvr_release, .poll = dvb_dvr_poll, + .llseek = default_llseek, }; static struct dvb_device dvbdev_dvr = { diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c index cb97e6b8543..ff8921dd737 100644 --- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c +++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c @@ -1628,6 +1628,7 @@ static const struct file_operations dvb_ca_fops = { .open = dvb_ca_en50221_io_open, .release = dvb_ca_en50221_io_release, .poll = dvb_ca_en50221_io_poll, + .llseek = noop_llseek, }; static struct dvb_device dvbdev_ca = { diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c index 4d45b7d6b3f..970c9b8882d 100644 --- a/drivers/media/dvb/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb/dvb-core/dvb_frontend.c @@ -2034,7 +2034,8 @@ static const struct file_operations dvb_frontend_fops = { .unlocked_ioctl = dvb_generic_ioctl, .poll = dvb_frontend_poll, .open = dvb_frontend_open, - .release = dvb_frontend_release + .release = dvb_frontend_release, + .llseek = noop_llseek, }; int dvb_register_frontend(struct dvb_adapter* dvb, diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c index 6c3a8a06cca..82636f517b9 100644 --- a/drivers/media/dvb/dvb-core/dvb_net.c +++ b/drivers/media/dvb/dvb-core/dvb_net.c @@ -1475,6 +1475,7 @@ static const struct file_operations dvb_net_fops = { .unlocked_ioctl = dvb_net_ioctl, .open = dvb_generic_open, 
.release = dvb_net_close, + .llseek = noop_llseek, }; static struct dvb_device dvbdev_net = { diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c index b915c39d782..774b40e4f58 100644 --- a/drivers/media/dvb/dvb-core/dvbdev.c +++ b/drivers/media/dvb/dvb-core/dvbdev.c @@ -105,6 +105,7 @@ static const struct file_operations dvb_device_fops = { .owner = THIS_MODULE, .open = dvb_device_open, + .llseek = noop_llseek, }; static struct cdev dvb_device_cdev; diff --git a/drivers/media/dvb/firewire/firedtv-ci.c b/drivers/media/dvb/firewire/firedtv-ci.c index d3c2cf60de7..8ffb565f070 100644 --- a/drivers/media/dvb/firewire/firedtv-ci.c +++ b/drivers/media/dvb/firewire/firedtv-ci.c @@ -220,6 +220,7 @@ static const struct file_operations fdtv_ca_fops = { .open = dvb_generic_open, .release = dvb_generic_release, .poll = fdtv_ca_io_poll, + .llseek = noop_llseek, }; static struct dvb_device fdtv_ca = { diff --git a/drivers/media/dvb/ttpci/av7110.c b/drivers/media/dvb/ttpci/av7110.c index a6be529eec5..893fbc57c72 100644 --- a/drivers/media/dvb/ttpci/av7110.c +++ b/drivers/media/dvb/ttpci/av7110.c @@ -730,6 +730,7 @@ static const struct file_operations dvb_osd_fops = { .unlocked_ioctl = dvb_generic_ioctl, .open = dvb_generic_open, .release = dvb_generic_release, + .llseek = noop_llseek, }; static struct dvb_device dvbdev_osd = { diff --git a/drivers/media/dvb/ttpci/av7110_av.c b/drivers/media/dvb/ttpci/av7110_av.c index 13efba942da..6ef3996565a 100644 --- a/drivers/media/dvb/ttpci/av7110_av.c +++ b/drivers/media/dvb/ttpci/av7110_av.c @@ -1521,6 +1521,7 @@ static const struct file_operations dvb_video_fops = { .open = dvb_video_open, .release = dvb_video_release, .poll = dvb_video_poll, + .llseek = noop_llseek, }; static struct dvb_device dvbdev_video = { @@ -1539,6 +1540,7 @@ static const struct file_operations dvb_audio_fops = { .open = dvb_audio_open, .release = dvb_audio_release, .poll = dvb_audio_poll, + .llseek = noop_llseek, }; static struct dvb_device dvbdev_audio = { diff --git a/drivers/media/dvb/ttpci/av7110_ca.c b/drivers/media/dvb/ttpci/av7110_ca.c index 4eba35a018e..43f61f2eca9 100644 --- a/drivers/media/dvb/ttpci/av7110_ca.c +++ b/drivers/media/dvb/ttpci/av7110_ca.c @@ -353,6 +353,7 @@ static const struct file_operations dvb_ca_fops = { .open = dvb_ca_open, .release = dvb_generic_release, .poll = dvb_ca_poll, + .llseek = default_llseek, }; static struct dvb_device dvbdev_ca = { diff --git a/drivers/media/dvb/ttpci/av7110_ir.c b/drivers/media/dvb/ttpci/av7110_ir.c index b070e88d8c6..908f272fe26 100644 --- a/drivers/media/dvb/ttpci/av7110_ir.c +++ b/drivers/media/dvb/ttpci/av7110_ir.c @@ -312,6 +312,7 @@ static ssize_t av7110_ir_proc_write(struct file *file, const char __user *buffer static const struct file_operations av7110_ir_proc_fops = { .owner = THIS_MODULE, .write = av7110_ir_proc_write, + .llseek = noop_llseek, }; /* interrupt handler */ diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c index 66379b41390..b048ecc56db 100644 --- a/drivers/mfd/ab3100-core.c +++ b/drivers/mfd/ab3100-core.c @@ -583,6 +583,7 @@ static ssize_t ab3100_get_set_reg(struct file *file, static const struct file_operations ab3100_get_set_reg_fops = { .open = ab3100_get_set_reg_open_file, .write = ab3100_get_set_reg, + .llseek = noop_llseek, }; static struct dentry *ab3100_dir; diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c index 557a8c2a733..69c1f2fca14 100644 --- a/drivers/misc/hpilo.c +++ b/drivers/misc/hpilo.c @@ -640,6 +640,7 @@ static const struct 
file_operations ilo_fops = { .poll = ilo_poll, .open = ilo_open, .release = ilo_close, + .llseek = noop_llseek, }; static irqreturn_t ilo_isr(int irq, void *data) diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c index 75ee0d3f6f4..6b38b596429 100644 --- a/drivers/misc/phantom.c +++ b/drivers/misc/phantom.c @@ -279,6 +279,7 @@ static const struct file_operations phantom_file_ops = { .unlocked_ioctl = phantom_ioctl, .compat_ioctl = phantom_compat_ioctl, .poll = phantom_poll, + .llseek = no_llseek, }; static irqreturn_t phantom_isr(int irq, void *data) diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c index cb3b4d22847..28852dfa310 100644 --- a/drivers/misc/sgi-gru/grufile.c +++ b/drivers/misc/sgi-gru/grufile.c @@ -587,6 +587,7 @@ static const struct file_operations gru_fops = { .owner = THIS_MODULE, .unlocked_ioctl = gru_file_unlocked_ioctl, .mmap = gru_file_mmap, + .llseek = noop_llseek, }; static struct miscdevice gru_miscdev = { diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c index 53cb380c098..46bc6d7551a 100644 --- a/drivers/mmc/core/debugfs.c +++ b/drivers/mmc/core/debugfs.c @@ -245,6 +245,7 @@ static const struct file_operations mmc_dbg_ext_csd_fops = { .open = mmc_ext_csd_open, .read = mmc_ext_csd_read, .release = mmc_ext_csd_release, + .llseek = default_llseek, }; void mmc_add_card_debugfs(struct mmc_card *card) diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c index 3d2d1a69e9a..af9fb0ff821 100644 --- a/drivers/mtd/ubi/cdev.c +++ b/drivers/mtd/ubi/cdev.c @@ -1100,4 +1100,5 @@ const struct file_operations ubi_ctrl_cdev_operations = { .owner = THIS_MODULE, .unlocked_ioctl = ctrl_cdev_ioctl, .compat_ioctl = ctrl_cdev_compat_ioctl, + .llseek = noop_llseek, }; diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c index f5058ff2b21..8427533fe31 100644 --- a/drivers/net/caif/caif_spi.c +++ b/drivers/net/caif/caif_spi.c @@ -240,13 +240,15 @@ static ssize_t dbgfs_frame(struct file *file, char __user *user_buf, static const struct file_operations dbgfs_state_fops = { .open = dbgfs_open, .read = dbgfs_state, - .owner = THIS_MODULE + .owner = THIS_MODULE, + .llseek = default_llseek, }; static const struct file_operations dbgfs_frame_fops = { .open = dbgfs_open, .read = dbgfs_frame, - .owner = THIS_MODULE + .owner = THIS_MODULE, + .llseek = default_llseek, }; static inline void dev_debugfs_add(struct cfspi *cfspi) diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c index c327527fbbc..e2bf10d90ad 100644 --- a/drivers/net/cxgb4/cxgb4_main.c +++ b/drivers/net/cxgb4/cxgb4_main.c @@ -2026,6 +2026,7 @@ static const struct file_operations mem_debugfs_fops = { .owner = THIS_MODULE, .open = mem_open, .read = mem_read, + .llseek = default_llseek, }; static void __devinit add_debugfs_mem(struct adapter *adap, const char *name, diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 6695a51e09e..323e81e9e80 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c @@ -856,7 +856,8 @@ static const struct file_operations ppp_device_fops = { .poll = ppp_poll, .unlocked_ioctl = ppp_ioctl, .open = ppp_open, - .release = ppp_release + .release = ppp_release, + .llseek = noop_llseek, }; static __net_init int ppp_init_net(struct net *net) diff --git a/drivers/net/wimax/i2400m/debugfs.c b/drivers/net/wimax/i2400m/debugfs.c index b1aec3e1892..9c70b5fa3f5 100644 --- a/drivers/net/wimax/i2400m/debugfs.c +++ b/drivers/net/wimax/i2400m/debugfs.c @@ -119,6 +119,7 @@ const struct 
 .open = i2400m_stats_open,
 .read = i2400m_rx_stats_read,
 .write = i2400m_rx_stats_write,
+ .llseek = default_llseek,
 };
@@ -171,6 +172,7 @@ const struct file_operations i2400m_tx_stats_fops = {
 .open = i2400m_stats_open,
 .read = i2400m_tx_stats_read,
 .write = i2400m_tx_stats_write,
+ .llseek = default_llseek,
 };

diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 1d05445d4ba..ce77575e88b 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -4430,21 +4430,24 @@ static const struct file_operations proc_statsdelta_ops = {
 .owner = THIS_MODULE,
 .read = proc_read,
 .open = proc_statsdelta_open,
- .release = proc_close
+ .release = proc_close,
+ .llseek = default_llseek,
 };

 static const struct file_operations proc_stats_ops = {
 .owner = THIS_MODULE,
 .read = proc_read,
 .open = proc_stats_open,
- .release = proc_close
+ .release = proc_close,
+ .llseek = default_llseek,
 };

 static const struct file_operations proc_status_ops = {
 .owner = THIS_MODULE,
 .read = proc_read,
 .open = proc_status_open,
- .release = proc_close
+ .release = proc_close,
+ .llseek = default_llseek,
 };

 static const struct file_operations proc_SSID_ops = {
@@ -4452,7 +4455,8 @@ static const struct file_operations proc_SSID_ops = {
 .read = proc_read,
 .write = proc_write,
 .open = proc_SSID_open,
- .release = proc_close
+ .release = proc_close,
+ .llseek = default_llseek,
 };

 static const struct file_operations proc_BSSList_ops = {
@@ -4460,7 +4464,8 @@ static const struct file_operations proc_BSSList_ops = {
 .read = proc_read,
 .write = proc_write,
 .open = proc_BSSList_open,
- .release = proc_close
+ .release = proc_close,
+ .llseek = default_llseek,
 };

 static const struct file_operations proc_APList_ops = {
@@ -4468,7 +4473,8 @@ static const struct file_operations proc_APList_ops = {
 .read = proc_read,
 .write = proc_write,
 .open = proc_APList_open,
- .release = proc_close
+ .release = proc_close,
+ .llseek = default_llseek,
 };

 static const struct file_operations proc_config_ops = {
@@ -4476,7 +4482,8 @@ static const struct file_operations proc_config_ops = {
 .read = proc_read,
 .write = proc_write,
 .open = proc_config_open,
- .release = proc_close
+ .release = proc_close,
+ .llseek = default_llseek,
 };

 static const struct file_operations proc_wepkey_ops = {
@@ -4484,7 +4491,8 @@ static const struct file_operations proc_wepkey_ops = {
 .read = proc_read,
 .write = proc_write,
 .open = proc_wepkey_open,
- .release = proc_close
+ .release = proc_close,
+ .llseek = default_llseek,
 };

 static struct proc_dir_entry *airo_entry;

diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 4cccc29964f..fb339c3852e 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -271,6 +271,7 @@ static const struct file_operations fops_beacon = {
 .write = write_file_beacon,
 .open = ath5k_debugfs_open,
 .owner = THIS_MODULE,
+ .llseek = default_llseek,
 };
@@ -290,6 +291,7 @@ static const struct file_operations fops_reset = {
 .write = write_file_reset,
 .open = ath5k_debugfs_open,
 .owner = THIS_MODULE,
+ .llseek = noop_llseek,
 };
@@ -369,6 +371,7 @@ static const struct file_operations fops_debug = {
 .write = write_file_debug,
 .open = ath5k_debugfs_open,
 .owner = THIS_MODULE,
+ .llseek = default_llseek,
 };
@@ -480,6 +483,7 @@ static const struct file_operations fops_antenna = {
 .write = write_file_antenna,
 .open = ath5k_debugfs_open,
 .owner = THIS_MODULE,
+ .llseek = default_llseek,
 };
@@ -591,6 +595,7 @@ static const struct file_operations fops_frameerrors = {
 .write = write_file_frameerrors,
 .open = ath5k_debugfs_open,
 .owner = THIS_MODULE,
+ .llseek = default_llseek,
 };
@@ -748,6 +753,7 @@ static const struct file_operations fops_ani = {
 .write = write_file_ani,
 .open = ath5k_debugfs_open,
 .owner = THIS_MODULE,
+ .llseek = default_llseek,
 };
@@ -811,6 +817,7 @@ static const struct file_operations fops_queue = {
 .write = write_file_queue,
 .open = ath5k_debugfs_open,
 .owner = THIS_MODULE,
+ .llseek = default_llseek,
 };

diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 54aae931424..cf500bf25ad 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -71,7 +71,8 @@ static const struct file_operations fops_debug = {
 .read = read_file_debug,
 .write = write_file_debug,
 .open = ath9k_debugfs_open,
- .owner = THIS_MODULE
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
 };

 #endif
@@ -116,7 +117,8 @@ static const struct file_operations fops_tx_chainmask = {
 .read = read_file_tx_chainmask,
 .write = write_file_tx_chainmask,
 .open = ath9k_debugfs_open,
- .owner = THIS_MODULE
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
 };
@@ -158,7 +160,8 @@ static const struct file_operations fops_rx_chainmask = {
 .read = read_file_rx_chainmask,
 .write = write_file_rx_chainmask,
 .open = ath9k_debugfs_open,
- .owner = THIS_MODULE
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
 };
@@ -259,7 +262,8 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
 static const struct file_operations fops_dma = {
 .read = read_file_dma,
 .open = ath9k_debugfs_open,
- .owner = THIS_MODULE
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
 };
@@ -375,7 +379,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
 static const struct file_operations fops_interrupt = {
 .read = read_file_interrupt,
 .open = ath9k_debugfs_open,
- .owner = THIS_MODULE
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
 };

 void ath_debug_stat_rc(struct ath_softc *sc, int final_rate)
@@ -464,7 +469,8 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
 static const struct file_operations fops_rcstat = {
 .read = read_file_rcstat,
 .open = ath9k_debugfs_open,
- .owner = THIS_MODULE
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
 };

 static const char * ath_wiphy_state_str(enum ath_wiphy_state state)
@@ -623,7 +629,8 @@ static const struct file_operations fops_wiphy = {
 .read = read_file_wiphy,
 .write = write_file_wiphy,
 .open = ath9k_debugfs_open,
- .owner = THIS_MODULE
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
 };

 #define PR(str, elem) \
@@ -702,7 +709,8 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
 static const struct file_operations fops_xmit = {
 .read = read_file_xmit,
 .open = ath9k_debugfs_open,
- .owner = THIS_MODULE
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
 };

 static ssize_t read_file_recv(struct file *file, char __user *user_buf,
@@ -814,7 +822,8 @@ void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
 static const struct file_operations fops_recv = {
 .read = read_file_recv,
 .open = ath9k_debugfs_open,
- .owner = THIS_MODULE
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
 };

 static ssize_t read_file_regidx(struct file *file, char __user *user_buf,
@@ -852,7 +861,8 @@ static const struct file_operations fops_regidx = {
 .read = read_file_regidx,
 .write = write_file_regidx,
 .open = ath9k_debugfs_open,
- .owner = THIS_MODULE
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
 };

 static ssize_t read_file_regval(struct file *file, char __user *user_buf,
@@ -894,7 +904,8 @@ static const struct file_operations fops_regval = {
 .read = read_file_regval,
 .write = write_file_regval,
 .open = ath9k_debugfs_open,
- .owner = THIS_MODULE
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
 };

 int ath9k_init_debug(struct ath_hw *ah)

diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 7d09b4b17bb..bc2ca7d898e 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -536,7 +536,8 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
 static const struct file_operations fops_tgt_stats = {
 .read = read_file_tgt_stats,
 .open = ath9k_debugfs_open,
- .owner = THIS_MODULE
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
 };

 static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
@@ -584,7 +585,8 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
 static const struct file_operations fops_xmit = {
 .read = read_file_xmit,
 .open = ath9k_debugfs_open,
- .owner = THIS_MODULE
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
 };

 static ssize_t read_file_recv(struct file *file, char __user *user_buf,
@@ -613,7 +615,8 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
 static const struct file_operations fops_recv = {
 .read = read_file_recv,
 .open = ath9k_debugfs_open,
- .owner = THIS_MODULE
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
 };

 int ath9k_htc_init_debug(struct ath_hw *ah)

diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index 8e84a08ff95..293e1dbc166 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -873,6 +873,7 @@ static ssize_t iwl3945_sta_dbgfs_stats_table_read(struct file *file,
 static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
 .read = iwl3945_sta_dbgfs_stats_table_read,
 .open = iwl3945_open_file_generic,
+ .llseek = default_llseek,
 };

 static void iwl3945_add_debugfs(void *priv, void *priv_sta,

diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 23e5c42e7d7..a4378ba31ef 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -2873,6 +2873,7 @@ static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
 .write = rs_sta_dbgfs_scale_table_write,
 .read = rs_sta_dbgfs_scale_table_read,
 .open = open_file_generic,
+ .llseek = default_llseek,
 };

 static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
 char __user *user_buf, size_t count, loff_t *ppos)
@@ -2915,6 +2916,7 @@ static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
 static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
 .read = rs_sta_dbgfs_stats_table_read,
 .open = open_file_generic,
+ .llseek = default_llseek,
 };

 static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file,
@@ -2946,6 +2948,7 @@ static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
 .read = rs_sta_dbgfs_rate_scale_data_read,
 .open = open_file_generic,
+ .llseek = default_llseek,
 };

 static void rs_add_debugfs(void *priv, void *priv_sta,

diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
index 53b0b7711f0..0a0cc9667cd 100644
--- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
+++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
@@ -402,24 +402,28 @@ static const struct file_operations iwm_debugfs_txq_fops = {
 .owner = THIS_MODULE,
 .open = iwm_generic_open,
 .read = iwm_debugfs_txq_read,
+ .llseek = default_llseek,
 };

 static const struct file_operations iwm_debugfs_tx_credit_fops = {
 .owner = THIS_MODULE,
 .open = iwm_generic_open,
 .read = iwm_debugfs_tx_credit_read,
+ .llseek = default_llseek,
 };

 static const struct file_operations iwm_debugfs_rx_ticket_fops = {
 .owner = THIS_MODULE,
 .open = iwm_generic_open,
 .read = iwm_debugfs_rx_ticket_read,
+ .llseek = default_llseek,
 };

 static const struct file_operations iwm_debugfs_fw_err_fops = {
 .owner = THIS_MODULE,
 .open = iwm_generic_open,
 .read = iwm_debugfs_fw_err_read,
+ .llseek = default_llseek,
 };

 void iwm_debugfs_init(struct iwm_priv *iwm)

diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.c b/drivers/net/wireless/iwmc3200wifi/sdio.c
index edcb52330cf..56383e7be83 100644
--- a/drivers/net/wireless/iwmc3200wifi/sdio.c
+++ b/drivers/net/wireless/iwmc3200wifi/sdio.c
@@ -364,6 +364,7 @@ static const struct file_operations iwm_debugfs_sdio_fops = {
 .owner = THIS_MODULE,
 .open = iwm_debugfs_sdio_open,
 .read = iwm_debugfs_sdio_read,
+ .llseek = default_llseek,
 };

 static void if_sdio_debugfs_init(struct iwm_priv *iwm, struct dentry *parent_dir)

diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index 74e94cc10e0..fbf3b0332bb 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -962,6 +962,7 @@ static const struct file_operations lbs_debug_fops = {
 .open = open_file_generic,
 .write = lbs_debugfs_write,
 .read = lbs_debugfs_read,
+ .llseek = default_llseek,
 };

 /**

diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 88560d0ae50..dab30a8c747 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -2802,6 +2802,7 @@ static ssize_t ray_cs_essid_proc_write(struct file *file,
 static const struct file_operations ray_cs_essid_proc_fops = {
 .owner = THIS_MODULE,
 .write = ray_cs_essid_proc_write,
+ .llseek = noop_llseek,
 };

 static ssize_t int_proc_write(struct file *file, const char __user *buffer,
@@ -2835,6 +2836,7 @@ static ssize_t int_proc_write(struct file *file, const char __user *buffer,
 static const struct file_operations int_proc_fops = {
 .owner = THIS_MODULE,
 .write = int_proc_write,
+ .llseek = noop_llseek,
 };

 #endif

diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 7d6f19a2805..cea81e4c5c8 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -315,6 +315,7 @@ static const struct file_operations rt2x00debug_fop_queue_dump = {
 .poll = rt2x00debug_poll_queue_dump,
 .open = rt2x00debug_open_queue_dump,
 .release = rt2x00debug_release_queue_dump,
+ .llseek = default_llseek,
 };

 static ssize_t rt2x00debug_read_queue_stats(struct file *file,
@@ -371,6 +372,7 @@ static const struct file_operations rt2x00debug_fop_queue_stats = {
 .read = rt2x00debug_read_queue_stats,
 .open = rt2x00debug_file_open,
 .release = rt2x00debug_file_release,
+ .llseek = default_llseek,
 };

 #ifdef CONFIG_RT2X00_LIB_CRYPTO
@@ -423,6 +425,7 @@ static const struct file_operations rt2x00debug_fop_crypto_stats = {
 .read = rt2x00debug_read_crypto_stats,
 .open = rt2x00debug_file_open,
 .release = rt2x00debug_file_release,
+ .llseek = default_llseek,
 };

 #endif
@@ -543,6 +546,7 @@ static const struct file_operations rt2x00debug_fop_dev_flags = {
 .read = rt2x00debug_read_dev_flags,
 .open = rt2x00debug_file_open,
 .release = rt2x00debug_file_release,
+ .llseek = default_llseek,
 };

 static struct dentry *rt2x00debug_create_file_driver(const char *name,

diff --git a/drivers/net/wireless/wl12xx/wl1251_debugfs.c b/drivers/net/wireless/wl12xx/wl1251_debugfs.c
index a4ae7c4d94b..fa620a5e530 100644
--- a/drivers/net/wireless/wl12xx/wl1251_debugfs.c
+++ b/drivers/net/wireless/wl12xx/wl1251_debugfs.c
@@ -238,6 +238,7 @@ static ssize_t tx_queue_len_read(struct file *file, char __user *userbuf,
 static const struct file_operations tx_queue_len_ops = {
 .read = tx_queue_len_read,
 .open = wl1251_open_file_generic,
+ .llseek = generic_file_llseek,
 };

 static ssize_t tx_queue_status_read(struct file *file, char __user *userbuf,
@@ -259,6 +260,7 @@ static const struct file_operations tx_queue_status_ops = {
 .read = tx_queue_status_read,
 .open = wl1251_open_file_generic,
+ .llseek = generic_file_llseek,
 };

 static void wl1251_debugfs_delete_files(struct wl1251 *wl)

diff --git a/drivers/net/wireless/wl12xx/wl1271_debugfs.c b/drivers/net/wireless/wl12xx/wl1271_debugfs.c
index 6e25303a8e7..66c2b90ddfd 100644
--- a/drivers/net/wireless/wl12xx/wl1271_debugfs.c
+++ b/drivers/net/wireless/wl12xx/wl1271_debugfs.c
@@ -239,6 +239,7 @@ static ssize_t tx_queue_len_read(struct file *file, char __user *userbuf,
 static const struct file_operations tx_queue_len_ops = {
 .read = tx_queue_len_read,
 .open = wl1271_open_file_generic,
+ .llseek = default_llseek,
 };

 static ssize_t gpio_power_read(struct file *file, char __user *user_buf,
@@ -293,7 +294,8 @@ out:
 static const struct file_operations gpio_power_ops = {
 .read = gpio_power_read,
 .write = gpio_power_write,
- .open = wl1271_open_file_generic
+ .open = wl1271_open_file_generic,
+ .llseek = default_llseek,
 };

 static void wl1271_debugfs_delete_files(struct wl1271 *wl)

diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
index bbd7516e086..9bb4d4bdc23 100644
--- a/drivers/oprofile/oprofile_files.c
+++ b/drivers/oprofile/oprofile_files.c
@@ -59,6 +59,7 @@ static ssize_t timeout_write(struct file *file, char const __user *buf,
 static const struct file_operations timeout_fops = {
 .read = timeout_read,
 .write = timeout_write,
+ .llseek = default_llseek,
 };

 #endif
@@ -93,7 +94,8 @@ static ssize_t depth_write(struct file *file, char const __user *buf, size_t cou
 static const struct file_operations depth_fops = {
 .read = depth_read,
- .write = depth_write
+ .write = depth_write,
+ .llseek = default_llseek,
 };
@@ -105,6 +107,7 @@ static ssize_t pointer_size_read(struct file *file, char __user *buf, size_t cou
 static const struct file_operations pointer_size_fops = {
 .read = pointer_size_read,
+ .llseek = default_llseek,
 };
@@ -116,6 +119,7 @@ static ssize_t cpu_type_read(struct file *file, char __user *buf, size_t count,
 static const struct file_operations cpu_type_fops = {
 .read = cpu_type_read,
+ .llseek = default_llseek,
 };
@@ -151,6 +155,7 @@ static ssize_t enable_write(struct file *file, char const __user *buf, size_t co
 static const struct file_operations enable_fops = {
 .read = enable_read,
 .write = enable_write,
+ .llseek = default_llseek,
 };
@@ -163,6 +168,7 @@ static ssize_t dump_write(struct file *file, char const __user *buf, size_t coun
 static const struct file_operations dump_fops = {
 .write = dump_write,
+ .llseek = noop_llseek,
 };

 void oprofile_create_files(struct super_block *sb, struct dentry *root)

diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index 2766a6d3c2e..6b6a1f71957 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -117,12 +117,14 @@ static const struct file_operations ulong_fops = {
 .read = ulong_read_file,
 .write = ulong_write_file,
 .open = default_open,
+ .llseek = default_llseek,
 };

 static const struct file_operations ulong_ro_fops = {
 .read = ulong_read_file,
 .open = default_open,
+ .llseek = default_llseek,
 };
@@ -183,6 +185,7 @@ static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t coun
 static const struct file_operations atomic_ro_fops = {
 .read = atomic_read_file,
 .open = default_open,
+ .llseek = default_llseek,
 };

diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index 909924692b8..b3cf6223f63 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -472,6 +472,7 @@ static ssize_t aer_inject_write(struct file *filp, const char __user *ubuf,
 static const struct file_operations aer_inject_fops = {
 .write = aer_inject_write,
 .owner = THIS_MODULE,
+ .llseek = noop_llseek,
 };

 static struct miscdevice aer_inject_device = {

diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index e3154ff7a39..f200677851b 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -2360,6 +2360,7 @@ static const struct file_operations sonypi_misc_fops = {
 .release = sonypi_misc_release,
 .fasync = sonypi_misc_fasync,
 .unlocked_ioctl = sonypi_misc_ioctl,
+ .llseek = noop_llseek,
 };

 static struct miscdevice sonypi_misc_device = {

diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index d60557cae8e..ef151ce0443 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -748,6 +748,7 @@ static const struct file_operations wdt_fops = {
 .write = wdt_write,
 .open = wdt_open,
 .release = wdt_release,
+ .llseek = no_llseek,
 };

 static struct miscdevice wdt_dev = {

diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 7158f9528ec..c71d89dba30 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -670,6 +670,7 @@ static const struct file_operations dasd_eer_fops = {
 .read = &dasd_eer_read,
 .poll = &dasd_eer_poll,
 .owner = THIS_MODULE,
+ .llseek = noop_llseek,
 };

 static struct miscdevice *dasd_eer_dev = NULL;

diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 857dfcb7b35..eb28fb01a38 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -520,6 +520,7 @@ static const struct file_operations fs3270_fops = {
 .compat_ioctl = fs3270_ioctl, /* ioctl */
 .open = fs3270_open, /* open */
 .release = fs3270_close, /* release */
+ .llseek = no_llseek,
 };

 /*

diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index e021ec663ef..5b8b8592d31 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -447,6 +447,7 @@ static const struct file_operations mon_fops = {
 .release = &mon_close,
 .read = &mon_read,
 .poll = &mon_poll,
+ .llseek = noop_llseek,
 };

 static struct miscdevice mon_dev = {

diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index 572a1e7fd09..e0702d3ea33 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -274,6 +274,7 @@ static const struct file_operations monwrite_fops = {
 .open = &monwrite_open,
 .release = &monwrite_close,
 .write = &monwrite_write,
+ .llseek = noop_llseek,
 };

 static struct miscdevice mon_dev = {

diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 539045acaad..883e2db02bd 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -53,6 +53,7 @@ static const struct file_operations tape_fops =
 #endif
 .open = tapechar_open,
 .release = tapechar_release,
+ .llseek = no_llseek,
 };

 static int tapechar_major = TAPECHAR_MAJOR;

diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 04e532eec03..0e7cb1a8415 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -177,6 +177,7 @@ static const struct file_operations vmcp_fops = {
 .write = vmcp_write,
 .unlocked_ioctl = vmcp_ioctl,
 .compat_ioctl = vmcp_ioctl,
+ .llseek = no_llseek,
 };

 static struct miscdevice vmcp_dev = {

diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index e40a1b89286..0d6dc4b92cc 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -97,6 +97,7 @@ static const struct file_operations vmlogrdr_fops = {
 .open = vmlogrdr_open,
 .release = vmlogrdr_release,
 .read = vmlogrdr_read,
+ .llseek = no_llseek,
 };

diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index e13508c98b1..12ef9121d4f 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -297,6 +297,7 @@ static const struct file_operations vmwdt_fops = {
 .unlocked_ioctl = &vmwdt_ioctl,
 .write = &vmwdt_write,
 .owner = THIS_MODULE,
+ .llseek = noop_llseek,
 };

 static struct miscdevice vmwdt_dev = {

diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index f5ea3384a4b..3b94044027c 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -459,6 +459,7 @@ static const struct file_operations zcore_memmap_fops = {
 .read = zcore_memmap_read,
 .open = zcore_memmap_open,
 .release = zcore_memmap_release,
+ .llseek = no_llseek,
 };

 static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
@@ -486,6 +487,7 @@ static const struct file_operations zcore_reipl_fops = {
 .write = zcore_reipl_write,
 .open = zcore_reipl_open,
 .release = zcore_reipl_release,
+ .llseek = no_llseek,
 };

 #ifdef CONFIG_32BIT

diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index a83877c664a..f2b77e7bfc6 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -806,6 +806,7 @@ static const struct file_operations chsc_fops = {
 .open = nonseekable_open,
 .unlocked_ioctl = chsc_ioctl,
 .compat_ioctl = chsc_ioctl,
+ .llseek = no_llseek,
 };

 static struct miscdevice chsc_misc_device = {

diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index ac94ac75145..ca8e1c240c3 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1067,6 +1067,7 @@ static ssize_t cio_settle_write(struct file *file, const char __user *buf,
 static const struct file_operations cio_settle_proc_fops = {
 .open = nonseekable_open,
 .write = cio_settle_write,
+ .llseek = no_llseek,
 };

 static int __init cio_settle_init(void)

diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 41e0aaefafd..f5221749d18 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -897,7 +897,8 @@ static const struct file_operations zcrypt_fops = {
 .compat_ioctl = zcrypt_compat_ioctl,
 #endif
 .open = zcrypt_open,
- .release = zcrypt_release
+ .release = zcrypt_release,
+ .llseek = no_llseek,
 };

 /*

diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index fcbd2b756da..1838cda68ba 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -251,8 +251,9 @@ static const struct file_operations zfcp_cfdc_fops = {
 .open = nonseekable_open,
 .unlocked_ioctl = zfcp_cfdc_dev_ioctl,
 #ifdef CONFIG_COMPAT
- .compat_ioctl = zfcp_cfdc_dev_ioctl
+ .compat_ioctl = zfcp_cfdc_dev_ioctl,
 #endif
+ .llseek = no_llseek,
 };

 struct miscdevice zfcp_cfdc_misc = {

diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index 1690e53fb84..55f71ea9c41 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -162,6 +162,7 @@ static const struct file_operations d7s_fops = {
 .compat_ioctl = d7s_ioctl,
 .open = d7s_open,
 .release = d7s_release,
+ .llseek = noop_llseek,
 };

 static struct miscdevice d7s_miscdev = {

diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index 078e5f4520e..8ce414e3948 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -720,6 +720,7 @@ static const struct file_operations envctrl_fops = {
 #endif
 .open = envctrl_open,
 .release = envctrl_release,
+ .llseek = noop_llseek,
 };

 static struct miscdevice envctrl_dev = {

diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index e20b7bdd4c7..67aad69cfbc 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -222,7 +222,8 @@ static const struct file_operations twa_fops = {
 .owner = THIS_MODULE,
 .unlocked_ioctl = twa_chrdev_ioctl,
 .open = twa_chrdev_open,
- .release = NULL
+ .release = NULL,
+ .llseek = noop_llseek,
 };

 /* This function will complete an aen request from the isr */

diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index f481e734aad..7afac93d889 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -890,7 +890,8 @@ static const struct file_operations twl_fops = {
 .owner = THIS_MODULE,
 .unlocked_ioctl = twl_chrdev_ioctl,
 .open = twl_chrdev_open,
- .release = NULL
+ .release = NULL,
+ .llseek = noop_llseek,
 };

 /* This function passes sense data from firmware to scsi layer */

diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 30d735ad35b..5a233730603 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1059,7 +1059,8 @@ static const struct file_operations tw_fops = {
 .owner = THIS_MODULE,
 .unlocked_ioctl = tw_chrdev_ioctl,
 .open = tw_chrdev_open,
- .release = NULL
+ .release = NULL,
+ .llseek = noop_llseek,
 };

 /* This function will free up device extension resources */

diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index cad6f9abaeb..13af86eec96 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1039,6 +1039,7 @@ static const struct file_operations aac_cfg_fops = {
 .compat_ioctl = aac_compat_cfg_ioctl,
 #endif
 .open = aac_cfg_open,
+ .llseek = noop_llseek,
 };

 static struct scsi_host_template aac_driver_template = {

diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index d6532187f61..e40c9f7a002 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -981,6 +981,7 @@ static const struct file_operations changer_fops = {
 #ifdef CONFIG_COMPAT
 .compat_ioctl = ch_ioctl_compat,
 #endif
+ .llseek = noop_llseek,
 };

 static int __init init_ch_module(void)

diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index ffc1edf5e80..7a61206ed1c 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -126,6 +126,7 @@ static const struct file_operations adpt_fops = {
 #ifdef CONFIG_COMPAT
 .compat_ioctl = compat_adpt_ioctl,
 #endif
+ .llseek = noop_llseek,
 };

 /* Structures and definitions for synchronous message posting.

diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index b860d650a56..9886586df01 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -372,6 +372,7 @@ static const struct file_operations gdth_fops = {
 .unlocked_ioctl = gdth_unlocked_ioctl,
 .open = gdth_open,
 .release = gdth_close,
+ .llseek = noop_llseek,
 };

 #include "gdth_proc.h"

diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 0b6e3228610..aac80f7bb99 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -101,6 +101,7 @@ static const struct file_operations megadev_fops = {
 .owner = THIS_MODULE,
 .unlocked_ioctl = megadev_unlocked_ioctl,
 .open = megadev_open,
+ .llseek = noop_llseek,
 };

 /*

diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index 41f82f76d88..ab801232d77 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -75,6 +75,7 @@ static const struct file_operations lsi_fops = {
 .compat_ioctl = mraid_mm_compat_ioctl,
 #endif
 .owner = THIS_MODULE,
+ .llseek = noop_llseek,
 };

 static struct miscdevice megaraid_mm_dev = {

diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 99e4478c3f3..209cc87b9a3 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -3957,6 +3957,7 @@ static const struct file_operations megasas_mgmt_fops = {
 #ifdef CONFIG_COMPAT
 .compat_ioctl = megasas_mgmt_compat_ioctl,
 #endif
+ .llseek = noop_llseek,
 };

 /*

diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index b774973f076..9a4e584ae3c 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -2952,6 +2952,7 @@ static const struct file_operations ctl_fops = {
 #ifdef CONFIG_COMPAT
 .compat_ioctl = _ctl_ioctl_compat,
 #endif
+ .llseek = noop_llseek,
 };

 static struct miscdevice ctl_dev = {

diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c
index ffdd9fdb999..b31a8e3841d 100644
--- a/drivers/scsi/osd/osd_uld.c
+++ b/drivers/scsi/osd/osd_uld.c
@@ -182,6 +182,7 @@ static const struct file_operations osd_fops = {
 .open = osd_uld_open,
 .release = osd_uld_release,
 .unlocked_ioctl = osd_uld_ioctl,
+ .llseek = noop_llseek,
 };

 struct osd_dev *osduld_path_lookup(const char *name)

diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index ecc45c8b4e6..4b8765785ae 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -4165,6 +4165,7 @@ static const struct file_operations pmcraid_fops = {
 #ifdef CONFIG_COMPAT
 .compat_ioctl = pmcraid_chr_ioctl,
 #endif
+ .llseek = noop_llseek,
 };

diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 1e4bff69525..9946fac5425 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3948,6 +3948,7 @@ static struct pci_driver qla2xxx_pci_driver = {
 static struct file_operations apidev_fops = {
 .owner = THIS_MODULE,
+ .llseek = noop_llseek,
 };

 /**

diff --git a/drivers/scsi/scsi_tgt_if.c b/drivers/scsi/scsi_tgt_if.c
index a87e21c35ef..bbb02f6e917 100644
--- a/drivers/scsi/scsi_tgt_if.c
+++ b/drivers/scsi/scsi_tgt_if.c
@@ -333,6 +333,7 @@ static const struct file_operations tgt_fops = {
 .poll = tgt_poll,
 .write = tgt_write,
 .mmap = tgt_mmap,
+ .llseek = noop_llseek,
 };

 static struct miscdevice tgt_miscdev = {
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 78d616315d8..00baea1e2db 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1351,6 +1351,7 @@ static const struct file_operations sg_fops = {
 .mmap = sg_mmap,
 .release = sg_release,
 .fasync = sg_fasync,
+ .llseek = no_llseek,
 };

 static struct class *sg_sysfs_class;

diff --git a/drivers/serial/mfd.c b/drivers/serial/mfd.c
index bc9af503907..6703f3e802a 100644
--- a/drivers/serial/mfd.c
+++ b/drivers/serial/mfd.c
@@ -227,12 +227,14 @@ static const struct file_operations port_regs_ops = {
 .owner = THIS_MODULE,
 .open = hsu_show_regs_open,
 .read = port_show_regs,
+ .llseek = default_llseek,
 };

 static const struct file_operations dma_regs_ops = {
 .owner = THIS_MODULE,
 .open = hsu_show_regs_open,
 .read = dma_show_regs,
+ .llseek = default_llseek,
 };

 static int hsu_debugfs_init(struct hsu_port *hsu)

diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c
index d256cb00604..a32aa66d66b 100644
--- a/drivers/spi/dw_spi.c
+++ b/drivers/spi/dw_spi.c
@@ -131,6 +131,7 @@ static const struct file_operations mrst_spi_regs_ops = {
 .owner = THIS_MODULE,
 .open = spi_show_regs_open,
 .read = spi_show_regs,
+ .llseek = default_llseek,
 };

 static int mrst_spi_debugfs_init(struct dw_spi *dws)

diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index ea1bec3c9a1..4e6245e6799 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -545,6 +545,7 @@ static const struct file_operations spidev_fops = {
 .unlocked_ioctl = spidev_ioctl,
 .open = spidev_open,
 .release = spidev_release,
+ .llseek = no_llseek,
 };

 /*-------------------------------------------------------------------------*/

diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 14091313ceb..fecb89e8c66 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -1922,6 +1922,7 @@ const struct file_operations comedi_fops = {
 .mmap = comedi_mmap,
 .poll = comedi_poll,
 .fasync = comedi_fasync,
+ .llseek = noop_llseek,
 };

 struct class *comedi_class;

diff --git a/drivers/staging/crystalhd/crystalhd_lnx.c b/drivers/staging/crystalhd/crystalhd_lnx.c
index fbb80f09a3d..af258991fe7 100644
--- a/drivers/staging/crystalhd/crystalhd_lnx.c
+++ b/drivers/staging/crystalhd/crystalhd_lnx.c
@@ -351,6 +351,7 @@ static const struct file_operations chd_dec_fops = {
 .unlocked_ioctl = chd_dec_ioctl,
 .open = chd_dec_open,
 .release = chd_dec_close,
+ .llseek = noop_llseek,
 };

 static int __devinit chd_dec_init_chdev(struct crystalhd_adp *adp)

diff --git a/drivers/staging/dream/camera/msm_camera.c b/drivers/staging/dream/camera/msm_camera.c
index 81bd71fd816..de4ab61efd4 100644
--- a/drivers/staging/dream/camera/msm_camera.c
+++ b/drivers/staging/dream/camera/msm_camera.c
@@ -1941,6 +1941,7 @@ static const struct file_operations msm_fops_config = {
 .open = msm_open,
 .unlocked_ioctl = msm_ioctl_config,
 .release = msm_release_config,
+ .llseek = no_llseek,
 };

 static const struct file_operations msm_fops_control = {
@@ -1948,6 +1949,7 @@ static const struct file_operations msm_fops_control = {
 .open = msm_open_control,
 .unlocked_ioctl = msm_ioctl_control,
 .release = msm_release_control,
+ .llseek = no_llseek,
 };

 static const struct file_operations msm_fops_frame = {
@@ -1956,6 +1958,7 @@ static const struct file_operations msm_fops_frame = {
 .unlocked_ioctl = msm_ioctl_frame,
 .release = msm_release_frame,
 .poll = msm_poll_frame,
+ .llseek = no_llseek,
 };

 static int msm_setup_cdev(struct msm_device *msm,
diff --git a/drivers/staging/dream/pmem.c b/drivers/staging/dream/pmem.c
index 7d6bbadd7fc..3640d1f2376 100644
--- a/drivers/staging/dream/pmem.c
+++ b/drivers/staging/dream/pmem.c
@@ -180,6 +180,7 @@ const struct file_operations pmem_fops = {
 .mmap = pmem_mmap,
 .open = pmem_open,
 .unlocked_ioctl = pmem_ioctl,
+ .llseek = noop_llseek,
 };

 static int get_id(struct file *file)
@@ -1204,6 +1205,7 @@ static ssize_t debug_read(struct file *file, char __user *buf, size_t count,
 static struct file_operations debug_fops = {
 .read = debug_read,
 .open = debug_open,
+ .llseek = default_llseek,
 };

 #endif

diff --git a/drivers/staging/dream/qdsp5/adsp_driver.c b/drivers/staging/dream/qdsp5/adsp_driver.c
index 8197765aae1..28a6f8da947 100644
--- a/drivers/staging/dream/qdsp5/adsp_driver.c
+++ b/drivers/staging/dream/qdsp5/adsp_driver.c
@@ -582,6 +582,7 @@ static struct file_operations adsp_fops = {
 .open = adsp_open,
 .unlocked_ioctl = adsp_ioctl,
 .release = adsp_release,
+ .llseek = no_llseek,
 };

 static void adsp_create(struct adsp_device *adev, const char *name,

diff --git a/drivers/staging/dream/qdsp5/audio_aac.c b/drivers/staging/dream/qdsp5/audio_aac.c
index a373f352238..45f4c78ab6e 100644
--- a/drivers/staging/dream/qdsp5/audio_aac.c
+++ b/drivers/staging/dream/qdsp5/audio_aac.c
@@ -1030,6 +1030,7 @@ static struct file_operations audio_aac_fops = {
 .read = audio_read,
 .write = audio_write,
 .unlocked_ioctl = audio_ioctl,
+ .llseek = noop_llseek,
 };

 struct miscdevice audio_aac_misc = {

diff --git a/drivers/staging/dream/qdsp5/audio_amrnb.c b/drivers/staging/dream/qdsp5/audio_amrnb.c
index 07b79d5836e..402bbc13281 100644
--- a/drivers/staging/dream/qdsp5/audio_amrnb.c
+++ b/drivers/staging/dream/qdsp5/audio_amrnb.c
@@ -841,6 +841,7 @@ static struct file_operations audio_amrnb_fops = {
 .read = audamrnb_read,
 .write = audamrnb_write,
 .unlocked_ioctl = audamrnb_ioctl,
+ .llseek = noop_llseek,
 };

 struct miscdevice audio_amrnb_misc = {

diff --git a/drivers/staging/dream/qdsp5/audio_evrc.c b/drivers/staging/dream/qdsp5/audio_evrc.c
index ad989ee8769..24a89264737 100644
--- a/drivers/staging/dream/qdsp5/audio_evrc.c
+++ b/drivers/staging/dream/qdsp5/audio_evrc.c
@@ -813,6 +813,7 @@ static struct file_operations audio_evrc_fops = {
 .read = audevrc_read,
 .write = audevrc_write,
 .unlocked_ioctl = audevrc_ioctl,
+ .llseek = noop_llseek,
 };

 struct miscdevice audio_evrc_misc = {

diff --git a/drivers/staging/dream/qdsp5/audio_in.c b/drivers/staging/dream/qdsp5/audio_in.c
index 6ae48e72d14..b51fa096074 100644
--- a/drivers/staging/dream/qdsp5/audio_in.c
+++ b/drivers/staging/dream/qdsp5/audio_in.c
@@ -921,12 +921,14 @@ static struct file_operations audio_fops = {
 .read = audio_in_read,
 .write = audio_in_write,
 .unlocked_ioctl = audio_in_ioctl,
+ .llseek = noop_llseek,
 };

 static struct file_operations audpre_fops = {
 .owner = THIS_MODULE,
 .open = audpre_open,
 .unlocked_ioctl = audpre_ioctl,
+ .llseek = noop_llseek,
 };

 struct miscdevice audio_in_misc = {

diff --git a/drivers/staging/dream/qdsp5/audio_mp3.c b/drivers/staging/dream/qdsp5/audio_mp3.c
index 530e1f35eed..409a19ce603 100644
--- a/drivers/staging/dream/qdsp5/audio_mp3.c
+++ b/drivers/staging/dream/qdsp5/audio_mp3.c
@@ -948,6 +948,7 @@ static struct file_operations audio_mp3_fops = {
 .read = audio_read,
 .write = audio_write,
 .unlocked_ioctl = audio_ioctl,
+ .llseek = noop_llseek,
 };

 struct miscdevice audio_mp3_misc = {

diff --git a/drivers/staging/dream/qdsp5/audio_out.c b/drivers/staging/dream/qdsp5/audio_out.c
index 76d7fa5667d..d20e8954156 100644
--- a/drivers/staging/dream/qdsp5/audio_out.c
+++ b/drivers/staging/dream/qdsp5/audio_out.c
@@ -807,12 +807,14 @@ static struct file_operations audio_fops = {
 .read = audio_read,
 .write = audio_write,
 .unlocked_ioctl = audio_ioctl,
+ .llseek = noop_llseek,
 };

 static struct file_operations audpp_fops = {
 .owner = THIS_MODULE,
 .open = audpp_open,
 .unlocked_ioctl = audpp_ioctl,
+ .llseek = noop_llseek,
 };

 struct miscdevice audio_misc = {

diff --git a/drivers/staging/dream/qdsp5/audio_qcelp.c b/drivers/staging/dream/qdsp5/audio_qcelp.c
index effa96f34fd..911bab416b8 100644
--- a/drivers/staging/dream/qdsp5/audio_qcelp.c
+++ b/drivers/staging/dream/qdsp5/audio_qcelp.c
@@ -824,6 +824,7 @@ static struct file_operations audio_qcelp_fops = {
 .read = audqcelp_read,
 .write = audqcelp_write,
 .unlocked_ioctl = audqcelp_ioctl,
+ .llseek = noop_llseek,
 };

 struct miscdevice audio_qcelp_misc = {

diff --git a/drivers/staging/dream/qdsp5/evlog.h b/drivers/staging/dream/qdsp5/evlog.h
index 922ce670a32..e5ab86b9dd7 100644
--- a/drivers/staging/dream/qdsp5/evlog.h
+++ b/drivers/staging/dream/qdsp5/evlog.h
@@ -123,6 +123,7 @@ static int ev_log_open(struct inode *inode, struct file *file)
 static const struct file_operations ev_log_ops = {
 .read = ev_log_read,
 .open = ev_log_open,
+ .llseek = default_llseek,
 };

 static int ev_log_init(struct ev_log *log)

diff --git a/drivers/staging/dream/qdsp5/snd.c b/drivers/staging/dream/qdsp5/snd.c
index 037d7ffb7e6..e0f2f7bca29 100644
--- a/drivers/staging/dream/qdsp5/snd.c
+++ b/drivers/staging/dream/qdsp5/snd.c
@@ -247,6 +247,7 @@ static struct file_operations snd_fops = {
 .open = snd_open,
 .release = snd_release,
 .unlocked_ioctl = snd_ioctl,
+ .llseek = noop_llseek,
 };

 struct miscdevice snd_misc = {

diff --git a/drivers/staging/frontier/alphatrack.c b/drivers/staging/frontier/alphatrack.c
index 4e52105e607..689099b57fd 100644
--- a/drivers/staging/frontier/alphatrack.c
+++ b/drivers/staging/frontier/alphatrack.c
@@ -641,6 +641,7 @@ static const struct file_operations usb_alphatrack_fops = {
 .open = usb_alphatrack_open,
 .release = usb_alphatrack_release,
 .poll = usb_alphatrack_poll,
+ .llseek = no_llseek,
 };

 /*

diff --git a/drivers/staging/frontier/tranzport.c b/drivers/staging/frontier/tranzport.c
index eed74f0fe0b..3d12c1737ed 100644
--- a/drivers/staging/frontier/tranzport.c
+++ b/drivers/staging/frontier/tranzport.c
@@ -767,6 +767,7 @@ static const struct file_operations usb_tranzport_fops = {
 .open = usb_tranzport_open,
 .release = usb_tranzport_release,
 .poll = usb_tranzport_poll,
+ .llseek = no_llseek,
 };

 /*

diff --git a/drivers/staging/iio/industrialio-core.c b/drivers/staging/iio/industrialio-core.c
index dd4d87a8bca..92a212f064b 100644
--- a/drivers/staging/iio/industrialio-core.c
+++ b/drivers/staging/iio/industrialio-core.c
@@ -349,6 +349,7 @@ static const struct file_operations iio_event_chrdev_fileops = {
 .release = iio_event_chrdev_release,
 .open = iio_event_chrdev_open,
 .owner = THIS_MODULE,
+ .llseek = noop_llseek,
 };

 static void iio_event_dev_release(struct device *dev)

diff --git a/drivers/staging/iio/industrialio-ring.c b/drivers/staging/iio/industrialio-ring.c
index 6ab578e4f5f..1c5f67253b8 100644
--- a/drivers/staging/iio/industrialio-ring.c
+++ b/drivers/staging/iio/industrialio-ring.c
@@ -133,6 +133,7 @@ static const struct file_operations iio_ring_fileops = {
 .release = iio_ring_release,
 .open = iio_ring_open,
 .owner = THIS_MODULE,
+ .llseek = noop_llseek,
 };

 /**

diff --git a/drivers/staging/lirc/lirc_imon.c b/drivers/staging/lirc/lirc_imon.c
index 66493253042..ed5c5fe022c 100644
--- a/drivers/staging/lirc/lirc_imon.c
+++ b/drivers/staging/lirc/lirc_imon.c
@@ -115,7 +115,8 @@ static const struct file_operations display_fops = {
 .owner = THIS_MODULE,
 .open = &display_open,
 .write = &vfd_write,
- .release = &display_close
+ .release = &display_close,
+ .llseek = noop_llseek,
 };

 /*

diff --git a/drivers/staging/lirc/lirc_it87.c b/drivers/staging/lirc/lirc_it87.c
index ec11c0e949a..543c5c3bf90 100644
--- a/drivers/staging/lirc/lirc_it87.c
+++ b/drivers/staging/lirc/lirc_it87.c
@@ -342,6 +342,7 @@ static const struct file_operations lirc_fops = {
 .unlocked_ioctl = lirc_ioctl,
 .open = lirc_open,
 .release = lirc_close,
+ .llseek = noop_llseek,
 };

 static int set_use_inc(void *data)

diff --git a/drivers/staging/lirc/lirc_sasem.c b/drivers/staging/lirc/lirc_sasem.c
index 73166c3f581..8f72a84f34e 100644
--- a/drivers/staging/lirc/lirc_sasem.c
+++ b/drivers/staging/lirc/lirc_sasem.c
@@ -125,6 +125,7 @@ static const struct file_operations vfd_fops = {
 .write = &vfd_write,
 .unlocked_ioctl = &vfd_ioctl,
 .release = &vfd_close,
+ .llseek = noop_llseek,
 };

 /* USB Device ID for Sasem USB Control Board */

diff --git a/drivers/staging/memrar/memrar_handler.c b/drivers/staging/memrar/memrar_handler.c
index a98b3f1f11e..cfcaa8e5b8e 100644
--- a/drivers/staging/memrar/memrar_handler.c
+++ b/drivers/staging/memrar/memrar_handler.c
@@ -890,6 +890,7 @@ static const struct file_operations memrar_fops = {
 .mmap = memrar_mmap,
 .open = memrar_open,
 .release = memrar_release,
+ .llseek = no_llseek,
 };

 static struct miscdevice memrar_miscdev = {

diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
index 3221814a856..6885f9a4660 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/staging/panel/panel.c
@@ -1631,6 +1631,7 @@ static const struct file_operations keypad_fops = {
 .read = keypad_read, /* read */
 .open = keypad_open, /* open */
 .release = keypad_release, /* close */
+ .llseek = default_llseek,
 };

 static struct miscdevice keypad_dev = {

diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index 7ee89492a75..7b3a7d04a10 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -144,6 +144,7 @@ static const struct file_operations bridge_fops = {
 .release = bridge_release,
 .unlocked_ioctl = bridge_ioctl,
 .mmap = bridge_mmap,
+ .llseek = noop_llseek,
 };

 #ifdef CONFIG_PM

diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
index b53deee25d7..5c6239e5aa2 100644
--- a/drivers/telephony/ixj.c
+++ b/drivers/telephony/ixj.c
@@ -6676,7 +6676,8 @@ static const struct file_operations ixj_fops =
 .poll = ixj_poll,
 .unlocked_ioctl = ixj_ioctl,
 .release = ixj_release,
- .fasync = ixj_fasync
+ .fasync = ixj_fasync,
+ .llseek = default_llseek,
 };

 static int ixj_linetest(IXJ *j)

diff --git a/drivers/telephony/phonedev.c b/drivers/telephony/phonedev.c
index f3873f650bb..1915af20117 100644
--- a/drivers/telephony/phonedev.c
+++ b/drivers/telephony/phonedev.c
@@ -130,6 +130,7 @@ static const struct file_operations phone_fops = {
 .owner = THIS_MODULE,
 .open = phone_open,
+ .llseek = noop_llseek,
 };

 /*

diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index bff1afbde5a..4d3a6fd1a15 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -740,6 +740,7 @@ static const struct file_operations uio_fops = {
 .mmap = uio_mmap,
 .poll = uio_poll,
 .fasync = uio_fasync,
+ .llseek = noop_llseek,
 };

 static int uio_major_init(void)

diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 094c76b5de1..6ee4451bfe2 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -584,7 +584,8 @@ static const struct file_operations wdm_fops = {
 .open = wdm_open,
 .flush = wdm_flush,
 .release = wdm_release,
- .poll = wdm_poll
+ .poll = wdm_poll,
+ .llseek = noop_llseek,
 };

 static struct usb_class_driver wdm_class = {

diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index e325162859b..9eca4053312 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -1043,6 +1043,7 @@ static const struct file_operations usblp_fops = {
 .compat_ioctl = usblp_ioctl,
 .open = usblp_open,
 .release = usblp_release,
+ .llseek = noop_llseek,
 };

 static char *usblp_devnode(struct device *dev, mode_t *mode)

diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 3e7c1b800eb..6a54634ab82 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -987,6 +987,7 @@ static const struct file_operations fops = {
 .open = usbtmc_open,
 .release = usbtmc_release,
 .unlocked_ioctl = usbtmc_ioctl,
+ .llseek = default_llseek,
 };

 static struct usb_class_driver usbtmc_class = {

diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index f06f5dbc8cd..580bcd39683 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -59,6 +59,7 @@ static int usb_open(struct inode * inode, struct file * file)
 static const struct file_operations usb_fops = {
 .owner = THIS_MODULE,
 .open = usb_open,
+ .llseek = noop_llseek,
 };

 static struct usb_class {

diff --git a/drivers/usb/gadget/f_hid.c b/drivers/usb/gadget/f_hid.c
index 53e120208e9..2b98bd26364 100644
--- a/drivers/usb/gadget/f_hid.c
+++ b/drivers/usb/gadget/f_hid.c
@@ -451,6 +451,7 @@ const struct file_operations f_hidg_fops = {
 .write = f_hidg_write,
 .read = f_hidg_read,
 .poll = f_hidg_poll,
+ .llseek = noop_llseek,
 };

 static int __init hidg_bind(struct usb_configuration *c, struct usb_function *f)

diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index cf241c371a7..327a92a137b 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -884,7 +884,8 @@ static const struct file_operations printer_io_operations = {
 .fsync = printer_fsync,
 .poll = printer_poll,
 .unlocked_ioctl = printer_ioctl,
- .release = printer_close
+ .release = printer_close,
+ .llseek = noop_llseek,
 };

 /*-------------------------------------------------------------------------*/

diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 76b7fd2d838..86afdc73322 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -369,18 +369,21 @@ static const struct file_operations debug_async_fops = {
 .open = debug_async_open,
 .read = debug_output,
 .release = debug_close,
+ .llseek = default_llseek,
 };

 static const struct file_operations debug_periodic_fops = {
 .owner = THIS_MODULE,
 .open = debug_periodic_open,
 .read = debug_output,
 .release = debug_close,
+ .llseek = default_llseek,
 };

 static const struct file_operations debug_registers_fops = {
 .owner = THIS_MODULE,
 .open = debug_registers_open,
 .read = debug_output,
 .release = debug_close,
+ .llseek = default_llseek,
 };

 static const struct file_operations debug_lpm_fops = {
 .owner = THIS_MODULE,
@@ -388,6 +391,7 @@ static const struct file_operations debug_lpm_fops = {
 .read = debug_lpm_read,
 .write = debug_lpm_write,
 .release = debug_lpm_close,
+ .llseek = noop_llseek,
 };

 static struct dentry *ehci_debug_root;
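[Editorial aside, not part of the patch: the hunks in this series choose between a few llseek helpers, and the choice follows the file's semantics. noop_llseek lets lseek() succeed without moving the position (safe for ioctl-style and debugfs nodes); no_llseek fails with -ESPIPE, making the file explicitly unseekable and pairing naturally with nonseekable_open; default_llseek and generic_file_llseek perform real SEEK_SET/SEEK_CUR/SEEK_END arithmetic for files whose contents can be re-read at an offset. A minimal sketch of a character-device fops making this choice explicit follows; the example_* names are invented for illustration and the member semantics are as described above, assuming the helpers declared in linux/fs.h at this point in the series.

#include <linux/fs.h>
#include <linux/module.h>

static ssize_t example_read(struct file *file, char __user *buf,
                            size_t count, loff_t *ppos)
{
        /* copy device state to the user buffer; stateless, so any
         * file position is as good as any other */
        return 0;
}

static const struct file_operations example_fops = {
        .owner  = THIS_MODULE,
        .read   = example_read,
        /*
         * Explicit choice instead of relying on the old "NULL llseek
         * means default_llseek" fallback this series is removing:
         *   noop_llseek    - lseek() succeeds, position is unchanged
         *   no_llseek      - lseek() fails with -ESPIPE
         *   default_llseek - classic seek arithmetic against i_size
         */
        .llseek = noop_llseek,
};
]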
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
index 36abd2baa3e..d7d34492934 100644
--- a/drivers/usb/host/ohci-dbg.c
+++ b/drivers/usb/host/ohci-dbg.c
@@ -413,18 +413,21 @@ static const struct file_operations debug_async_fops = {
 .open = debug_async_open,
 .read = debug_output,
 .release = debug_close,
+ .llseek = default_llseek,
 };

 static const struct file_operations debug_periodic_fops = {
 .owner = THIS_MODULE,
 .open = debug_periodic_open,
 .read = debug_output,
 .release = debug_close,
+ .llseek = default_llseek,
 };

 static const struct file_operations debug_registers_fops = {
 .owner = THIS_MODULE,
 .open = debug_registers_open,
 .read = debug_output,
 .release = debug_close,
+ .llseek = default_llseek,
 };

 static struct dentry *ohci_debug_root;

diff --git a/drivers/usb/image/mdc800.c b/drivers/usb/image/mdc800.c
index e192e8f7c56..575b56c79e9 100644
--- a/drivers/usb/image/mdc800.c
+++ b/drivers/usb/image/mdc800.c
@@ -963,6 +963,7 @@ static const struct file_operations mdc800_device_ops =
 .write = mdc800_device_write,
 .open = mdc800_device_open,
 .release = mdc800_device_release,
+ .llseek = noop_llseek,
 };

diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index 801324af947..44f8b922505 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -679,6 +679,7 @@ static const struct file_operations adu_fops = {
 .write = adu_write,
 .open = adu_open,
 .release = adu_release,
+ .llseek = noop_llseek,
 };

 /*

diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index a54c3cb804c..c6184b4d169 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -105,6 +105,7 @@ static const struct file_operations idmouse_fops = {
 .read = idmouse_read,
 .open = idmouse_open,
 .release = idmouse_release,
+ .llseek = default_llseek,
 };

 /* class driver information */

diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index bc88c79875a..9b50db25701 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -730,6 +730,7 @@ static const struct file_operations iowarrior_fops = {
 .open = iowarrior_open,
 .release = iowarrior_release,
 .poll = iowarrior_poll,
+ .llseek = noop_llseek,
 };

 static char *iowarrior_devnode(struct device *dev, mode_t *mode)

diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index dd41d871004..edffef64233 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -613,6 +613,7 @@ static const struct file_operations ld_usb_fops = {
 .open = ld_usb_open,
 .release = ld_usb_release,
 .poll = ld_usb_poll,
+ .llseek = no_llseek,
 };

 /*

diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
index cc13ae61712..4e23d3841b4 100644
--- a/drivers/usb/misc/rio500.c
+++ b/drivers/usb/misc/rio500.c
@@ -439,6 +439,7 @@ static const struct file_operations usb_rio_fops = {
 .unlocked_ioctl = ioctl_rio,
 .open = open_rio,
 .release = close_rio,
+ .llseek = noop_llseek,
 };

 static struct usb_class_driver usb_rio_class = {

diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c
index d00dde19194..51648154bb4 100644
--- a/drivers/usb/misc/usblcd.c
+++ b/drivers/usb/misc/usblcd.c
@@ -282,6 +282,7 @@ static const struct file_operations lcd_fops = {
 .open = lcd_open,
 .unlocked_ioctl = lcd_ioctl,
 .release = lcd_release,
+ .llseek = noop_llseek,
 };

 /*

diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index 552679b8dbd..e24ce312307 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -507,6 +507,7 @@ static const struct file_operations skel_fops = {
 .open = skel_open,
 .release = skel_release,
 .flush = skel_flush,
+ .llseek = noop_llseek,
 };

 /*

diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 29e850a7a2f..c8523ce2e4c 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -869,6 +869,7 @@ static const struct file_operations vhost_net_fops = {
 .compat_ioctl = vhost_net_compat_ioctl,
 #endif
 .open = vhost_net_open,
+ .llseek = noop_llseek,
 };

 static struct miscdevice vhost_net_misc = {

diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index b06647517c0..42e303ff862 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1439,6 +1439,7 @@ static const struct file_operations fb_fops = {
 #ifdef CONFIG_FB_DEFERRED_IO
 .fsync = fb_deferred_io_fsync,
 #endif
+ .llseek = default_llseek,
 };

 struct class *fb_class;

diff --git a/drivers/video/mbx/mbxdebugfs.c b/drivers/video/mbx/mbxdebugfs.c
index ecad9652457..12dec7634c5 100644
--- a/drivers/video/mbx/mbxdebugfs.c
+++ b/drivers/video/mbx/mbxdebugfs.c
@@ -175,36 +175,42 @@ static const struct file_operations sysconf_fops = {
 .read = sysconf_read_file,
 .write = write_file_dummy,
 .open = open_file_generic,
+ .llseek = default_llseek,
 };

 static const struct file_operations clock_fops = {
 .read = clock_read_file,
 .write = write_file_dummy,
 .open = open_file_generic,
+ .llseek = default_llseek,
 };

 static const struct file_operations display_fops = {
 .read = display_read_file,
 .write = write_file_dummy,
 .open = open_file_generic,
+ .llseek = default_llseek,
 };

 static const struct file_operations gsctl_fops = {
 .read = gsctl_read_file,
 .write = write_file_dummy,
 .open = open_file_generic,
+ .llseek = default_llseek,
 };

 static const struct file_operations sdram_fops = {
 .read = sdram_read_file,
 .write = write_file_dummy,
 .open = open_file_generic,
+ .llseek = default_llseek,
 };

 static const struct file_operations misc_fops = {
 .read = misc_read_file,
 .write = write_file_dummy,
 .open = open_file_generic,
+ .llseek = default_llseek,
 };

 static void __devinit mbxfb_debugfs_init(struct fb_info *fbi)

diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c
index c764c52412e..b2922178359 100644
--- a/drivers/watchdog/ar7_wdt.c
+++ b/drivers/watchdog/ar7_wdt.c
@@ -267,6 +267,7 @@ static const struct file_operations ar7_wdt_fops = {
 .unlocked_ioctl = ar7_wdt_ioctl,
 .open = ar7_wdt_open,
 .release = ar7_wdt_release,
+ .llseek = no_llseek,
 };

 static struct miscdevice ar7_wdt_miscdev = {

diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index 566343b3c13..51ae4e84f4f 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -524,6 +524,7 @@ static const struct file_operations cpwd_fops = {
 .write = cpwd_write,
 .read = cpwd_read,
 .release = cpwd_release,
+ .llseek = no_llseek,
 };

 static int __devinit cpwd_probe(struct platform_device *op,

diff --git a/drivers/watchdog/ep93xx_wdt.c b/drivers/watchdog/ep93xx_wdt.c
index 59359c9a5e0..726b7df61fd 100644
--- a/drivers/watchdog/ep93xx_wdt.c
+++ b/drivers/watchdog/ep93xx_wdt.c
@@ -188,6 +188,7 @@ static const struct file_operations ep93xx_wdt_fops = {
 .unlocked_ioctl = ep93xx_wdt_ioctl,
 .open = ep93xx_wdt_open,
 .release = ep93xx_wdt_release,
+ .llseek = no_llseek,
 };

 static struct miscdevice ep93xx_wdt_miscdev = {

diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index 76b58abf445..81e3d610089 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -258,6 +258,7 @@ static const struct file_operations omap_wdt_fops = {
 .unlocked_ioctl = omap_wdt_ioctl,
 .open = omap_wdt_open,
 .release = omap_wdt_release,
+ .llseek = no_llseek,
 };

 static int __devinit omap_wdt_probe(struct platform_device *pdev)

diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 66e185cfe92..fec6ba3c08a 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -467,6 +467,7 @@ static const struct file_operations evtchn_fops = {
 .fasync = evtchn_fasync,
 .open = evtchn_open,
 .release = evtchn_release,
+ .llseek = noop_llseek,
 };

 static struct miscdevice evtchn_miscdev = {

diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
index 78bfab0700b..bd96340063c 100644
--- a/drivers/xen/xenfs/super.c
+++ b/drivers/xen/xenfs/super.c
@@ -35,6 +35,7 @@ static ssize_t capabilities_read(struct file *file, char __user *buf,
 static const struct file_operations capabilities_file_ops = {
 .read = capabilities_read,
+ .llseek = default_llseek,
 };

 static int xenfs_fill_super(struct super_block *sb, void *data, int silent)

diff --git a/drivers/xen/xenfs/xenbus.c b/drivers/xen/xenfs/xenbus.c
index 3b39c3752e2..1c1236087f7 100644
--- a/drivers/xen/xenfs/xenbus.c
+++ b/drivers/xen/xenfs/xenbus.c
@@ -594,4 +594,5 @@ const struct file_operations xenbus_file_ops = {
 .open = xenbus_file_open,
 .release = xenbus_file_release,
 .poll = xenbus_file_poll,
+ .llseek = no_llseek,
 };

diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 6d552686c49..6153417caf5 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -29,6 +29,7 @@ static void afs_mntpt_expiry_timed_out(struct work_struct *work);
 const struct file_operations afs_mntpt_file_operations = {
 .open = afs_mntpt_open,
+ .llseek = noop_llseek,
 };

 const struct inode_operations afs_mntpt_inode_operations = {

diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index ba4a38b9c22..eff9a419469 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -724,6 +724,7 @@ static const struct file_operations _dev_ioctl_fops = {
 .unlocked_ioctl = autofs_dev_ioctl,
 .compat_ioctl = autofs_dev_ioctl_compat,
 .owner = THIS_MODULE,
+ .llseek = noop_llseek,
 };

 static struct miscdevice _autofs_dev_ioctl_misc = {

diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index fd0cc0bf9a4..139fc8083f5 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -576,6 +576,7 @@ static ssize_t bm_entry_write(struct file *file, const char __user *buffer,
 static const struct file_operations bm_entry_operations = {
 .read = bm_entry_read,
 .write = bm_entry_write,
+ .llseek = default_llseek,
 };

 /* /register */
@@ -643,6 +644,7 @@ out:
 static const struct file_operations bm_register_operations = {
 .write = bm_register_write,
+ .llseek = noop_llseek,
 };

 /* /status */
@@ -680,6 +682,7 @@ static ssize_t bm_status_write(struct file * file, const char __user * buffer,
 static const struct file_operations bm_status_operations = {
 .read = bm_status_read,
 .write = bm_status_write,
+ .llseek = default_llseek,
 };

 /* Superblock handling */

diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 1776dbd8dc9..144f8a5730f 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -815,6 +815,7 @@ static const struct file_operations btrfs_ctl_fops = {
 .unlocked_ioctl = btrfs_control_ioctl,
 .compat_ioctl = btrfs_control_ioctl,
 .owner = THIS_MODULE,
+ .llseek = noop_llseek,
 };

 static struct miscdevice btrfs_misc = {

diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
index 727caedcdd9..0a1467b1551 100644
--- a/fs/cachefiles/daemon.c
+++ b/fs/cachefiles/daemon.c
@@ -55,6 +55,7 @@ const struct file_operations cachefiles_daemon_fops = {
{ .read = cachefiles_daemon_read, .write = cachefiles_daemon_write, .poll = cachefiles_daemon_poll, + .llseek = noop_llseek, }; struct cachefiles_daemon_cmd { diff --git a/fs/char_dev.c b/fs/char_dev.c index f80a4f25123..e4a3318b812 100644 --- a/fs/char_dev.c +++ b/fs/char_dev.c @@ -454,6 +454,7 @@ static void cdev_purge(struct cdev *cdev) */ const struct file_operations def_chr_fops = { .open = chrdev_open, + .llseek = noop_llseek, }; static struct kobject *exact_match(dev_t dev, int *part, void *data) diff --git a/fs/coda/pioctl.c b/fs/coda/pioctl.c index ca25d96d45c..028a9a0f588 100644 --- a/fs/coda/pioctl.c +++ b/fs/coda/pioctl.c @@ -39,6 +39,7 @@ const struct inode_operations coda_ioctl_inode_operations = { const struct file_operations coda_ioctl_operations = { .owner = THIS_MODULE, .unlocked_ioctl = coda_pioctl, + .llseek = noop_llseek, }; /* the coda pioctl inode ops */ diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c index de89645777c..9fa280bfcff 100644 --- a/fs/coda/psdev.c +++ b/fs/coda/psdev.c @@ -346,6 +346,7 @@ static const struct file_operations coda_psdev_fops = { .unlocked_ioctl = coda_psdev_ioctl, .open = coda_psdev_open, .release = coda_psdev_release, + .llseek = noop_llseek, }; static int init_coda_psdev(void) diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c index 0210898458b..89d394d8fe2 100644 --- a/fs/debugfs/file.c +++ b/fs/debugfs/file.c @@ -43,6 +43,7 @@ const struct file_operations debugfs_file_operations = { .read = default_read_file, .write = default_write_file, .open = default_open, + .llseek = noop_llseek, }; static void *debugfs_follow_link(struct dentry *dentry, struct nameidata *nd) @@ -454,6 +455,7 @@ static const struct file_operations fops_bool = { .read = read_file_bool, .write = write_file_bool, .open = default_open, + .llseek = default_llseek, }; /** @@ -498,6 +500,7 @@ static ssize_t read_file_blob(struct file *file, char __user *user_buf, static const struct file_operations fops_blob = { .read = read_file_blob, .open = default_open, + .llseek = default_llseek, }; /** diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c index c6cf2515874..6b42ba807df 100644 --- a/fs/dlm/debug_fs.c +++ b/fs/dlm/debug_fs.c @@ -643,7 +643,8 @@ static ssize_t waiters_read(struct file *file, char __user *userbuf, static const struct file_operations waiters_fops = { .owner = THIS_MODULE, .open = waiters_open, - .read = waiters_read + .read = waiters_read, + .llseek = default_llseek, }; void dlm_delete_debug_file(struct dlm_ls *ls) diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c index d45c02db694..30d8b85febb 100644 --- a/fs/dlm/plock.c +++ b/fs/dlm/plock.c @@ -412,7 +412,8 @@ static const struct file_operations dev_fops = { .read = dev_read, .write = dev_write, .poll = dev_poll, - .owner = THIS_MODULE + .owner = THIS_MODULE, + .llseek = noop_llseek, }; static struct miscdevice plock_dev_misc = { diff --git a/fs/dlm/user.c b/fs/dlm/user.c index b6272853130..66d6c16bf44 100644 --- a/fs/dlm/user.c +++ b/fs/dlm/user.c @@ -1009,6 +1009,7 @@ static const struct file_operations device_fops = { .write = device_write, .poll = device_poll, .owner = THIS_MODULE, + .llseek = noop_llseek, }; static const struct file_operations ctl_device_fops = { @@ -1017,6 +1018,7 @@ static const struct file_operations ctl_device_fops = { .read = device_read, .write = device_write, .owner = THIS_MODULE, + .llseek = noop_llseek, }; static struct miscdevice ctl_device = { @@ -1029,6 +1031,7 @@ static const struct file_operations monitor_device_fops = { .open = monitor_device_open, .release = 
monitor_device_close, .owner = THIS_MODULE, + .llseek = noop_llseek, }; static struct miscdevice monitor_device = { diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index 622c9514080..86e9da1c787 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c @@ -332,6 +332,7 @@ const struct file_operations ecryptfs_dir_fops = { .fsync = ecryptfs_fsync, .fasync = ecryptfs_fasync, .splice_read = generic_file_splice_read, + .llseek = default_llseek, }; const struct file_operations ecryptfs_main_fops = { diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c index 00208c3d7e9..940a82e63dc 100644 --- a/fs/ecryptfs/miscdev.c +++ b/fs/ecryptfs/miscdev.c @@ -482,6 +482,7 @@ static const struct file_operations ecryptfs_miscdev_fops = { .read = ecryptfs_miscdev_read, .write = ecryptfs_miscdev_write, .release = ecryptfs_miscdev_release, + .llseek = noop_llseek, }; static struct miscdevice ecryptfs_miscdev = { diff --git a/fs/eventfd.c b/fs/eventfd.c index 6bd3f76fdf8..e0194b3e14d 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c @@ -293,6 +293,7 @@ static const struct file_operations eventfd_fops = { .poll = eventfd_poll, .read = eventfd_read, .write = eventfd_write, + .llseek = noop_llseek, }; /** diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 3817149919c..256bb7bb102 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -674,7 +674,8 @@ static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait) /* File callbacks that implement the eventpoll file behaviour */ static const struct file_operations eventpoll_fops = { .release = ep_eventpoll_release, - .poll = ep_eventpoll_poll + .poll = ep_eventpoll_poll, + .llseek = noop_llseek, }; /* Fast test to see if the file is an evenpoll file */ diff --git a/fs/fifo.c b/fs/fifo.c index 5d6606ffc2d..4e303c22d5e 100644 --- a/fs/fifo.c +++ b/fs/fifo.c @@ -151,4 +151,5 @@ err_nocleanup: */ const struct file_operations def_fifo_fops = { .open = fifo_open, /* will set read_ or write_pipefifo_fops */ + .llseek = noop_llseek, }; diff --git a/fs/fuse/control.c b/fs/fuse/control.c index 3773fd63d2f..7367e177186 100644 --- a/fs/fuse/control.c +++ b/fs/fuse/control.c @@ -179,23 +179,27 @@ static ssize_t fuse_conn_congestion_threshold_write(struct file *file, static const struct file_operations fuse_ctl_abort_ops = { .open = nonseekable_open, .write = fuse_conn_abort_write, + .llseek = no_llseek, }; static const struct file_operations fuse_ctl_waiting_ops = { .open = nonseekable_open, .read = fuse_conn_waiting_read, + .llseek = no_llseek, }; static const struct file_operations fuse_conn_max_background_ops = { .open = nonseekable_open, .read = fuse_conn_max_background_read, .write = fuse_conn_max_background_write, + .llseek = no_llseek, }; static const struct file_operations fuse_conn_congestion_threshold_ops = { .open = nonseekable_open, .read = fuse_conn_congestion_threshold_read, .write = fuse_conn_congestion_threshold_write, + .llseek = no_llseek, }; static struct dentry *fuse_ctl_add_dentry(struct dentry *parent, diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c index e1f8171278b..3e87cce5837 100644 --- a/fs/fuse/cuse.c +++ b/fs/fuse/cuse.c @@ -182,6 +182,7 @@ static const struct file_operations cuse_frontend_fops = { .unlocked_ioctl = cuse_file_ioctl, .compat_ioctl = cuse_file_compat_ioctl, .poll = fuse_file_poll, + .llseek = noop_llseek, }; diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index 4edd662c823..55d25a68b49 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -771,6 +771,7 @@ const struct file_operations gfs2_dir_fops = { .fsync = gfs2_fsync, .lock = 
gfs2_lock, .flock = gfs2_flock, + .llseek = default_llseek, }; #endif /* CONFIG_GFS2_FS_LOCKING_DLM */ @@ -797,5 +798,6 @@ const struct file_operations gfs2_dir_fops_nolock = { .open = gfs2_open, .release = gfs2_close, .fsync = gfs2_fsync, + .llseek = default_llseek, }; diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c index 7b027720d82..4e2a45ea614 100644 --- a/fs/hppfs/hppfs.c +++ b/fs/hppfs/hppfs.c @@ -598,6 +598,7 @@ static const struct file_operations hppfs_dir_fops = { .readdir = hppfs_readdir, .open = hppfs_dir_open, .fsync = hppfs_fsync, + .llseek = default_llseek, }; static int hppfs_statfs(struct dentry *dentry, struct kstatfs *sf) diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 6e5bd42f386..113eba3d3c3 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -674,6 +674,7 @@ const struct file_operations hugetlbfs_file_operations = { .mmap = hugetlbfs_file_mmap, .fsync = noop_fsync, .get_unmapped_area = hugetlb_get_unmapped_area, + .llseek = default_llseek, }; static const struct inode_operations hugetlbfs_dir_inode_operations = { diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c index 9777eb5b552..1eb4e89e045 100644 --- a/fs/logfs/dir.c +++ b/fs/logfs/dir.c @@ -827,4 +827,5 @@ const struct file_operations logfs_dir_fops = { .unlocked_ioctl = logfs_ioctl, .readdir = logfs_readdir, .read = generic_read_dir, + .llseek = default_llseek, }; diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index b53b1d042f1..06fa87e52e8 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -137,6 +137,7 @@ static const struct file_operations transaction_ops = { .write = nfsctl_transaction_write, .read = nfsctl_transaction_read, .release = simple_transaction_release, + .llseek = default_llseek, }; static int exports_open(struct inode *inode, struct file *file) diff --git a/fs/no-block.c b/fs/no-block.c index d269a93d346..6e40e42a43d 100644 --- a/fs/no-block.c +++ b/fs/no-block.c @@ -19,4 +19,5 @@ static int no_blkdev_open(struct inode * inode, struct file * filp) const struct file_operations def_blk_fops = { .open = no_blkdev_open, + .llseek = noop_llseek, }; diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 5ed8e58d7bf..bbcb98e7fcc 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -433,6 +433,7 @@ static const struct file_operations fanotify_fops = { .release = fanotify_release, .unlocked_ioctl = fanotify_ioctl, .compat_ioctl = fanotify_ioctl, + .llseek = noop_llseek, }; static void fanotify_free_mark(struct fsnotify_mark *fsn_mark) diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index bf7f6d776c3..24edc1185d5 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c @@ -344,6 +344,7 @@ static const struct file_operations inotify_fops = { .release = inotify_release, .unlocked_ioctl = inotify_ioctl, .compat_ioctl = inotify_ioctl, + .llseek = noop_llseek, }; diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c index c2903b84bb7..a7ebd9d42dc 100644 --- a/fs/ocfs2/dlmfs/dlmfs.c +++ b/fs/ocfs2/dlmfs/dlmfs.c @@ -612,6 +612,7 @@ static const struct file_operations dlmfs_file_operations = { .poll = dlmfs_file_poll, .read = dlmfs_file_read, .write = dlmfs_file_write, + .llseek = default_llseek, }; static const struct inode_operations dlmfs_dir_inode_operations = { diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c index 2dc57bca068..0dbc6dae1de 100644 --- a/fs/ocfs2/stack_user.c +++ b/fs/ocfs2/stack_user.c @@ -628,6 +628,7 @@ static 
const struct file_operations ocfs2_control_fops = { .read = ocfs2_control_read, .write = ocfs2_control_write, .owner = THIS_MODULE, + .llseek = default_llseek, }; static struct miscdevice ocfs2_control_device = { diff --git a/fs/proc/base.c b/fs/proc/base.c index a1c43e7c8a7..bc307d7a5b7 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1151,6 +1151,7 @@ static ssize_t oom_score_adj_write(struct file *file, const char __user *buf, static const struct file_operations proc_oom_score_adj_operations = { .read = oom_score_adj_read, .write = oom_score_adj_write, + .llseek = default_llseek, }; #ifdef CONFIG_AUDITSYSCALL @@ -2039,11 +2040,13 @@ static ssize_t proc_fdinfo_read(struct file *file, char __user *buf, static const struct file_operations proc_fdinfo_file_operations = { .open = nonseekable_open, .read = proc_fdinfo_read, + .llseek = no_llseek, }; static const struct file_operations proc_fd_operations = { .read = generic_read_dir, .readdir = proc_readfd, + .llseek = default_llseek, }; /* @@ -2112,6 +2115,7 @@ static int proc_readfdinfo(struct file *filp, void *dirent, filldir_t filldir) static const struct file_operations proc_fdinfo_operations = { .read = generic_read_dir, .readdir = proc_readfdinfo, + .llseek = default_llseek, }; /* @@ -2343,6 +2347,7 @@ static int proc_attr_dir_readdir(struct file * filp, static const struct file_operations proc_attr_dir_operations = { .read = generic_read_dir, .readdir = proc_attr_dir_readdir, + .llseek = default_llseek, }; static struct dentry *proc_attr_dir_lookup(struct inode *dir, @@ -2751,6 +2756,7 @@ static int proc_tgid_base_readdir(struct file * filp, static const struct file_operations proc_tgid_base_operations = { .read = generic_read_dir, .readdir = proc_tgid_base_readdir, + .llseek = default_llseek, }; static struct dentry *proc_tgid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd){ @@ -3088,6 +3094,7 @@ static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *den static const struct file_operations proc_tid_base_operations = { .read = generic_read_dir, .readdir = proc_tid_base_readdir, + .llseek = default_llseek, }; static const struct inode_operations proc_tid_base_inode_operations = { @@ -3324,4 +3331,5 @@ static const struct inode_operations proc_task_inode_operations = { static const struct file_operations proc_task_operations = { .read = generic_read_dir, .readdir = proc_task_readdir, + .llseek = default_llseek, }; diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 5be436ea088..2fc52552271 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -364,6 +364,7 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct static const struct file_operations proc_sys_file_operations = { .read = proc_sys_read, .write = proc_sys_write, + .llseek = default_llseek, }; static const struct file_operations proc_sys_dir_file_operations = { diff --git a/fs/proc/root.c b/fs/proc/root.c index 4258384ed22..93d99b31632 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c @@ -179,6 +179,7 @@ static int proc_root_readdir(struct file * filp, static const struct file_operations proc_root_operations = { .read = generic_read_dir, .readdir = proc_root_readdir, + .llseek = default_llseek, }; /* diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 271afc48b9a..aa56920df33 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -539,6 +539,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf, const struct file_operations 
proc_clear_refs_operations = { .write = clear_refs_write, + .llseek = noop_llseek, }; struct pagemapread { diff --git a/fs/romfs/super.c b/fs/romfs/super.c index 42d21354689..268580535c9 100644 --- a/fs/romfs/super.c +++ b/fs/romfs/super.c @@ -282,6 +282,7 @@ error: static const struct file_operations romfs_dir_operations = { .read = generic_read_dir, .readdir = romfs_readdir, + .llseek = default_llseek, }; static const struct inode_operations romfs_dir_inode_operations = { diff --git a/fs/signalfd.c b/fs/signalfd.c index 1c5a6add779..74047304b01 100644 --- a/fs/signalfd.c +++ b/fs/signalfd.c @@ -206,6 +206,7 @@ static const struct file_operations signalfd_fops = { .release = signalfd_release, .poll = signalfd_poll, .read = signalfd_read, + .llseek = noop_llseek, }; SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask, diff --git a/fs/squashfs/dir.c b/fs/squashfs/dir.c index 12b933ac658..0dc340aa2be 100644 --- a/fs/squashfs/dir.c +++ b/fs/squashfs/dir.c @@ -230,5 +230,6 @@ failed_read: const struct file_operations squashfs_dir_ops = { .read = generic_read_dir, - .readdir = squashfs_readdir + .readdir = squashfs_readdir, + .llseek = default_llseek, }; diff --git a/fs/timerfd.c b/fs/timerfd.c index b86ab8eff79..8c4fc1425b3 100644 --- a/fs/timerfd.c +++ b/fs/timerfd.c @@ -144,6 +144,7 @@ static const struct file_operations timerfd_fops = { .release = timerfd_release, .poll = timerfd_poll, .read = timerfd_read, + .llseek = noop_llseek, }; static struct file *timerfd_fget(int fd) diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index c2a68baa782..c6c553fd0b3 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -2625,6 +2625,7 @@ static const struct file_operations dfs_fops = { .open = open_debugfs_file, .write = write_debugfs_file, .owner = THIS_MODULE, + .llseek = default_llseek, }; /** diff --git a/ipc/mqueue.c b/ipc/mqueue.c index c60e519e291..e1e7b9635f5 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -1219,6 +1219,7 @@ static const struct file_operations mqueue_file_operations = { .flush = mqueue_flush_file, .poll = mqueue_poll_file, .read = mqueue_read_file, + .llseek = default_llseek, }; static const struct super_operations mqueue_super_ops = { diff --git a/ipc/shm.c b/ipc/shm.c index 52ed77eb971..7bc46a9fe1f 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -298,6 +298,7 @@ static const struct file_operations shm_file_operations = { #ifndef CONFIG_MMU .get_unmapped_area = shm_get_unmapped_area, #endif + .llseek = noop_llseek, }; static const struct file_operations shm_file_operations_huge = { @@ -305,6 +306,7 @@ static const struct file_operations shm_file_operations_huge = { .fsync = shm_fsync, .release = shm_release, .get_unmapped_area = shm_get_unmapped_area, + .llseek = noop_llseek, }; int is_file_shm_hugepages(struct file *file) diff --git a/kernel/configs.c b/kernel/configs.c index abaee684ecb..b4066b44a99 100644 --- a/kernel/configs.c +++ b/kernel/configs.c @@ -66,6 +66,7 @@ ikconfig_read_current(struct file *file, char __user *buf, static const struct file_operations ikconfig_file_ops = { .owner = THIS_MODULE, .read = ikconfig_read_current, + .llseek = default_llseek, }; static int __init ikconfig_init(void) diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c index f83972b1656..9bd0934f6c3 100644 --- a/kernel/gcov/fs.c +++ b/kernel/gcov/fs.c @@ -561,6 +561,7 @@ static ssize_t reset_read(struct file *file, char __user *addr, size_t len, static const struct file_operations gcov_reset_fops = { .write = reset_write, .read = reset_read, + .llseek = noop_llseek, }; /* diff --git 
a/kernel/kprobes.c b/kernel/kprobes.c index 282035f3ae9..8b5ff2655ae 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1992,6 +1992,7 @@ static ssize_t write_enabled_file_bool(struct file *file, static const struct file_operations fops_kp = { .read = read_enabled_file_bool, .write = write_enabled_file_bool, + .llseek = default_llseek, }; static int __kprobes debugfs_kprobe_init(void) diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c index 645e541a45f..a96b850ba08 100644 --- a/kernel/pm_qos_params.c +++ b/kernel/pm_qos_params.c @@ -110,6 +110,7 @@ static const struct file_operations pm_qos_power_fops = { .write = pm_qos_power_write, .open = pm_qos_power_open, .release = pm_qos_power_release, + .llseek = noop_llseek, }; /* unlocked internal variant */ diff --git a/kernel/profile.c b/kernel/profile.c index b22a899934c..66f841b7fbd 100644 --- a/kernel/profile.c +++ b/kernel/profile.c @@ -555,6 +555,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf, static const struct file_operations proc_profile_operations = { .read = read_profile, .write = write_profile, + .llseek = default_llseek, }; #ifdef CONFIG_SMP diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 959f8d6c8cc..2d5f3a75731 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -326,6 +326,7 @@ static const struct file_operations blk_dropped_fops = { .owner = THIS_MODULE, .open = blk_dropped_open, .read = blk_dropped_read, + .llseek = default_llseek, }; static int blk_msg_open(struct inode *inode, struct file *filp) @@ -365,6 +366,7 @@ static const struct file_operations blk_msg_fops = { .owner = THIS_MODULE, .open = blk_msg_open, .write = blk_msg_write, + .llseek = noop_llseek, }; /* diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index fa7ece649fe..5e1ad476309 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -800,6 +800,7 @@ static const struct file_operations ftrace_profile_fops = { .open = tracing_open_generic, .read = ftrace_profile_read, .write = ftrace_profile_write, + .llseek = default_llseek, }; /* used to initialize the real stat files */ @@ -2632,6 +2633,7 @@ static const struct file_operations ftrace_graph_fops = { .read = seq_read, .write = ftrace_graph_write, .release = ftrace_graph_release, + .llseek = seq_lseek, }; #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 492197e2f86..3aea966d16d 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -3965,6 +3965,7 @@ static const struct file_operations rb_simple_fops = { .open = tracing_open_generic, .read = rb_simple_read, .write = rb_simple_write, + .llseek = default_llseek, }; diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 4c758f14632..0369c5e0998 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -951,6 +951,7 @@ static const struct file_operations ftrace_enable_fops = { .open = tracing_open_generic, .read = event_enable_read, .write = event_enable_write, + .llseek = default_llseek, }; static const struct file_operations ftrace_event_format_fops = { @@ -963,29 +964,34 @@ static const struct file_operations ftrace_event_format_fops = { static const struct file_operations ftrace_event_id_fops = { .open = tracing_open_generic, .read = event_id_read, + .llseek = default_llseek, }; static const struct file_operations ftrace_event_filter_fops = { .open = tracing_open_generic, .read = event_filter_read, .write = event_filter_write, + 
.llseek = default_llseek, }; static const struct file_operations ftrace_subsystem_filter_fops = { .open = tracing_open_generic, .read = subsystem_filter_read, .write = subsystem_filter_write, + .llseek = default_llseek, }; static const struct file_operations ftrace_system_enable_fops = { .open = tracing_open_generic, .read = system_enable_read, .write = system_enable_write, + .llseek = default_llseek, }; static const struct file_operations ftrace_show_header_fops = { .open = tracing_open_generic, .read = show_header, + .llseek = default_llseek, }; static struct dentry *event_trace_events_dir(void) diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index a6b7e0e0f3e..4c5dead0c23 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c @@ -195,6 +195,7 @@ static const struct file_operations stack_max_size_fops = { .open = tracing_open_generic, .read = stack_max_size_read, .write = stack_max_size_write, + .llseek = default_llseek, }; static void * diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 01e64270e24..4bfb0471f10 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -590,6 +590,7 @@ out_unlock: static const struct file_operations filter_fops = { .read = filter_read, .write = filter_write, + .llseek = default_llseek, }; static int dma_debug_fs_init(void) diff --git a/net/atm/proc.c b/net/atm/proc.c index 6262aeae398..f85da0779e5 100644 --- a/net/atm/proc.c +++ b/net/atm/proc.c @@ -38,6 +38,7 @@ static ssize_t proc_dev_atm_read(struct file *file, char __user *buf, static const struct file_operations proc_atm_dev_ops = { .owner = THIS_MODULE, .read = proc_dev_atm_read, + .llseek = noop_llseek, }; static void add_stats(struct seq_file *seq, const char *aal, diff --git a/net/dccp/probe.c b/net/dccp/probe.c index 078e48d442f..33d0e6297c2 100644 --- a/net/dccp/probe.c +++ b/net/dccp/probe.c @@ -149,6 +149,7 @@ static const struct file_operations dccpprobe_fops = { .owner = THIS_MODULE, .open = dccpprobe_open, .read = dccpprobe_read, + .llseek = noop_llseek, }; static __init int dccpprobe_init(void) diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c index f8efada580e..6211e211417 100644 --- a/net/ipv4/tcp_probe.c +++ b/net/ipv4/tcp_probe.c @@ -214,6 +214,7 @@ static const struct file_operations tcpprobe_fops = { .owner = THIS_MODULE, .open = tcpprobe_open, .read = tcpprobe_read, + .llseek = noop_llseek, }; static __init int tcpprobe_init(void) diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index 4a4d35c750c..b8b0ae79a74 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c @@ -102,7 +102,8 @@ static ssize_t tsf_write(struct file *file, static const struct file_operations tsf_ops = { .read = tsf_read, .write = tsf_write, - .open = mac80211_open_file_generic + .open = mac80211_open_file_generic, + .llseek = default_llseek, }; static ssize_t reset_write(struct file *file, const char __user *user_buf, @@ -121,6 +122,7 @@ static ssize_t reset_write(struct file *file, const char __user *user_buf, static const struct file_operations reset_ops = { .write = reset_write, .open = mac80211_open_file_generic, + .llseek = noop_llseek, }; static ssize_t noack_read(struct file *file, char __user *user_buf, @@ -156,7 +158,8 @@ static ssize_t noack_write(struct file *file, static const struct file_operations noack_ops = { .read = noack_read, .write = noack_write, - .open = mac80211_open_file_generic + .open = mac80211_open_file_generic, + .llseek = default_llseek, }; static ssize_t uapsd_queues_read(struct file *file, char __user *user_buf, @@ 
-202,7 +205,8 @@ static ssize_t uapsd_queues_write(struct file *file, static const struct file_operations uapsd_queues_ops = { .read = uapsd_queues_read, .write = uapsd_queues_write, - .open = mac80211_open_file_generic + .open = mac80211_open_file_generic, + .llseek = default_llseek, }; static ssize_t uapsd_max_sp_len_read(struct file *file, char __user *user_buf, @@ -248,7 +252,8 @@ static ssize_t uapsd_max_sp_len_write(struct file *file, static const struct file_operations uapsd_max_sp_len_ops = { .read = uapsd_max_sp_len_read, .write = uapsd_max_sp_len_write, - .open = mac80211_open_file_generic + .open = mac80211_open_file_generic, + .llseek = default_llseek, }; static ssize_t channel_type_read(struct file *file, char __user *user_buf, @@ -280,7 +285,8 @@ static ssize_t channel_type_read(struct file *file, char __user *user_buf, static const struct file_operations channel_type_ops = { .read = channel_type_read, - .open = mac80211_open_file_generic + .open = mac80211_open_file_generic, + .llseek = default_llseek, }; static ssize_t queues_read(struct file *file, char __user *user_buf, @@ -303,7 +309,8 @@ static ssize_t queues_read(struct file *file, char __user *user_buf, static const struct file_operations queues_ops = { .read = queues_read, - .open = mac80211_open_file_generic + .open = mac80211_open_file_generic, + .llseek = default_llseek, }; /* statistics stuff */ diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index be04d46110f..334cbd3d2aa 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c @@ -145,6 +145,7 @@ static ssize_t rcname_read(struct file *file, char __user *userbuf, static const struct file_operations rcname_ops = { .read = rcname_read, .open = mac80211_open_file_generic, + .llseek = default_llseek, }; #endif diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c index 241e76f3fdf..a290ad231d7 100644 --- a/net/mac80211/rc80211_minstrel_debugfs.c +++ b/net/mac80211/rc80211_minstrel_debugfs.c @@ -122,6 +122,7 @@ static const struct file_operations minstrel_stat_fops = { .open = minstrel_stats_open, .read = minstrel_stats_read, .release = minstrel_stats_release, + .llseek = default_llseek, }; void diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c index 47438b4a9af..7905f79cc2e 100644 --- a/net/mac80211/rc80211_pid_debugfs.c +++ b/net/mac80211/rc80211_pid_debugfs.c @@ -206,6 +206,7 @@ static const struct file_operations rc_pid_fop_events = { .poll = rate_control_pid_events_poll, .open = rate_control_pid_events_open, .release = rate_control_pid_events_release, + .llseek = noop_llseek, }; void rate_control_pid_add_sta_debugfs(void *priv, void *priv_sta, diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index 76aec6a4476..d2ff15a2412 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c @@ -567,6 +567,7 @@ static const struct file_operations recent_mt_fops = { .write = recent_mt_proc_write, .release = seq_release_private, .owner = THIS_MODULE, + .llseek = seq_lseek, }; static int __net_init recent_proc_net_init(struct net *net) diff --git a/net/nonet.c b/net/nonet.c index 92e76640c7c..b1a73fda9c1 100644 --- a/net/nonet.c +++ b/net/nonet.c @@ -22,4 +22,5 @@ static int sock_no_open(struct inode *irrelevant, struct file *dontcare) const struct file_operations bad_sock_fops = { .owner = THIS_MODULE, .open = sock_no_open, + .llseek = noop_llseek, }; diff --git a/net/rfkill/core.c b/net/rfkill/core.c index 51875a0c5d4..04f599089e6 100644 --- a/net/rfkill/core.c 
+++ b/net/rfkill/core.c @@ -1241,6 +1241,7 @@ static const struct file_operations rfkill_fops = { .unlocked_ioctl = rfkill_fop_ioctl, .compat_ioctl = rfkill_fop_ioctl, #endif + .llseek = no_llseek, }; static struct miscdevice rfkill_miscdev = { diff --git a/net/sctp/probe.c b/net/sctp/probe.c index db3a42b8b34..289b1ba62ca 100644 --- a/net/sctp/probe.c +++ b/net/sctp/probe.c @@ -117,6 +117,7 @@ static const struct file_operations sctpprobe_fops = { .owner = THIS_MODULE, .open = sctpprobe_open, .read = sctpprobe_read, + .llseek = noop_llseek, }; sctp_disposition_t jsctp_sf_eat_sack(const struct sctp_endpoint *ep, diff --git a/net/socket.c b/net/socket.c index 2270b941bcc..9eac5c39413 100644 --- a/net/socket.c +++ b/net/socket.c @@ -502,6 +502,7 @@ static int sock_no_open(struct inode *irrelevant, struct file *dontcare) const struct file_operations bad_sock_fops = { .owner = THIS_MODULE, .open = sock_no_open, + .llseek = noop_llseek, }; /** diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 2b06410e584..6bc692582de 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c @@ -1441,6 +1441,7 @@ static const struct file_operations cache_flush_operations_procfs = { .read = read_flush_procfs, .write = write_flush_procfs, .release = release_flush_procfs, + .llseek = no_llseek, }; static void remove_cache_proc_entries(struct cache_detail *cd) @@ -1646,6 +1647,7 @@ const struct file_operations cache_flush_operations_pipefs = { .read = read_flush_pipefs, .write = write_flush_pipefs, .release = release_flush_pipefs, + .llseek = no_llseek, }; int sunrpc_cache_register_pipefs(struct dentry *parent, diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c index 3f9a57e9650..39765bcfb47 100644 --- a/net/wireless/debugfs.c +++ b/net/wireless/debugfs.c @@ -103,6 +103,7 @@ static ssize_t ht40allow_map_read(struct file *file, static const struct file_operations ht40allow_map_ops = { .read = ht40allow_map_read, .open = cfg80211_open_file_generic, + .llseek = default_llseek, }; #define DEBUGFS_ADD(name) \ diff --git a/samples/kfifo/bytestream-example.c b/samples/kfifo/bytestream-example.c index 178061e87ff..cfe40addda7 100644 --- a/samples/kfifo/bytestream-example.c +++ b/samples/kfifo/bytestream-example.c @@ -148,6 +148,7 @@ static const struct file_operations fifo_fops = { .owner = THIS_MODULE, .read = fifo_read, .write = fifo_write, + .llseek = noop_llseek, }; static int __init example_init(void) diff --git a/samples/kfifo/inttype-example.c b/samples/kfifo/inttype-example.c index 71b2aabca96..6f8e79e76c9 100644 --- a/samples/kfifo/inttype-example.c +++ b/samples/kfifo/inttype-example.c @@ -141,6 +141,7 @@ static const struct file_operations fifo_fops = { .owner = THIS_MODULE, .read = fifo_read, .write = fifo_write, + .llseek = noop_llseek, }; static int __init example_init(void) diff --git a/samples/kfifo/record-example.c b/samples/kfifo/record-example.c index e68bd16a5da..2d7529eeb29 100644 --- a/samples/kfifo/record-example.c +++ b/samples/kfifo/record-example.c @@ -155,6 +155,7 @@ static const struct file_operations fifo_fops = { .owner = THIS_MODULE, .read = fifo_read, .write = fifo_write, + .llseek = noop_llseek, }; static int __init example_init(void) diff --git a/samples/tracepoints/tracepoint-sample.c b/samples/tracepoints/tracepoint-sample.c index 26fab33ffa8..f4d89e008c3 100644 --- a/samples/tracepoints/tracepoint-sample.c +++ b/samples/tracepoints/tracepoint-sample.c @@ -30,6 +30,7 @@ static int my_open(struct inode *inode, struct file *file) static const struct file_operations 
mark_ops = { .open = my_open, + .llseek = noop_llseek, }; static int __init sample_init(void) diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c index 7320331b44a..a27086d16f0 100644 --- a/security/apparmor/apparmorfs.c +++ b/security/apparmor/apparmorfs.c @@ -86,7 +86,8 @@ static ssize_t profile_load(struct file *f, const char __user *buf, size_t size, } static const struct file_operations aa_fs_profile_load = { - .write = profile_load + .write = profile_load, + .llseek = default_llseek, }; /* .replace file hook fn to load and/or replace policy */ @@ -107,7 +108,8 @@ static ssize_t profile_replace(struct file *f, const char __user *buf, } static const struct file_operations aa_fs_profile_replace = { - .write = profile_replace + .write = profile_replace, + .llseek = default_llseek, }; /* .remove file hook fn to remove loaded policy */ @@ -134,7 +136,8 @@ static ssize_t profile_remove(struct file *f, const char __user *buf, } static const struct file_operations aa_fs_profile_remove = { - .write = profile_remove + .write = profile_remove, + .llseek = default_llseek, }; /** Base file system setup **/ diff --git a/security/inode.c b/security/inode.c index 8c777f022ad..88839866cbc 100644 --- a/security/inode.c +++ b/security/inode.c @@ -53,6 +53,7 @@ static const struct file_operations default_file_ops = { .read = default_read_file, .write = default_write_file, .open = default_open, + .llseek = noop_llseek, }; static struct inode *get_inode(struct super_block *sb, int mode, dev_t dev) diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c index a2b72d77f92..7512502d016 100644 --- a/security/smack/smackfs.c +++ b/security/smack/smackfs.c @@ -968,6 +968,7 @@ static ssize_t smk_write_doi(struct file *file, const char __user *buf, static const struct file_operations smk_doi_ops = { .read = smk_read_doi, .write = smk_write_doi, + .llseek = default_llseek, }; /** @@ -1031,6 +1032,7 @@ static ssize_t smk_write_direct(struct file *file, const char __user *buf, static const struct file_operations smk_direct_ops = { .read = smk_read_direct, .write = smk_write_direct, + .llseek = default_llseek, }; /** @@ -1112,6 +1114,7 @@ static ssize_t smk_write_ambient(struct file *file, const char __user *buf, static const struct file_operations smk_ambient_ops = { .read = smk_read_ambient, .write = smk_write_ambient, + .llseek = default_llseek, }; /** @@ -1191,6 +1194,7 @@ static ssize_t smk_write_onlycap(struct file *file, const char __user *buf, static const struct file_operations smk_onlycap_ops = { .read = smk_read_onlycap, .write = smk_write_onlycap, + .llseek = default_llseek, }; /** @@ -1255,6 +1259,7 @@ static ssize_t smk_write_logging(struct file *file, const char __user *buf, static const struct file_operations smk_logging_ops = { .read = smk_read_logging, .write = smk_write_logging, + .llseek = default_llseek, }; /** * smk_fill_super - fill the /smackfs superblock diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c index f25e3cc7ddf..a1f1a2f00cc 100644 --- a/sound/core/seq/oss/seq_oss.c +++ b/sound/core/seq/oss/seq_oss.c @@ -220,6 +220,7 @@ static const struct file_operations seq_oss_f_ops = .poll = odev_poll, .unlocked_ioctl = odev_ioctl, .compat_ioctl = odev_ioctl_compat, + .llseek = noop_llseek, }; static int __init diff --git a/sound/core/sound.c b/sound/core/sound.c index ac42af42b78..62a093efb45 100644 --- a/sound/core/sound.c +++ b/sound/core/sound.c @@ -184,7 +184,8 @@ static int snd_open(struct inode *inode, struct file *file) static const 
struct file_operations snd_fops = { .owner = THIS_MODULE, - .open = snd_open + .open = snd_open, + .llseek = noop_llseek, }; #ifdef CONFIG_SND_DYNAMIC_MINORS diff --git a/sound/oss/msnd_pinnacle.c b/sound/oss/msnd_pinnacle.c index 2e48b17667d..ca942f7cd23 100644 --- a/sound/oss/msnd_pinnacle.c +++ b/sound/oss/msnd_pinnacle.c @@ -1117,6 +1117,7 @@ static const struct file_operations dev_fileops = { .unlocked_ioctl = dev_ioctl, .open = dev_open, .release = dev_release, + .llseek = noop_llseek, }; static int reset_dsp(void) diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index acc91daa1c5..4057d35343b 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -223,6 +223,7 @@ static const struct file_operations codec_reg_fops = { .open = codec_reg_open_file, .read = codec_reg_read_file, .write = codec_reg_write_file, + .llseek = default_llseek, }; static void soc_init_codec_debugfs(struct snd_soc_codec *codec) diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 03cb7c05ebe..72a53d0a41e 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -1089,6 +1089,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file, static const struct file_operations dapm_widget_power_fops = { .open = dapm_widget_power_open_file, .read = dapm_widget_power_read_file, + .llseek = default_llseek, }; void snd_soc_dapm_debugfs_init(struct snd_soc_codec *codec) diff --git a/sound/sound_core.c b/sound/sound_core.c index cb61317df50..c03bbaefdbc 100644 --- a/sound/sound_core.c +++ b/sound/sound_core.c @@ -165,6 +165,7 @@ static const struct file_operations soundcore_fops = /* We must have an owner or the module locking fails */ .owner = THIS_MODULE, .open = soundcore_open, + .llseek = noop_llseek, }; /* diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index d4853a54771..e039f641d66 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1305,6 +1305,7 @@ static struct file_operations kvm_vcpu_fops = { .unlocked_ioctl = kvm_vcpu_ioctl, .compat_ioctl = kvm_vcpu_ioctl, .mmap = kvm_vcpu_mmap, + .llseek = noop_llseek, }; /* @@ -1774,6 +1775,7 @@ static struct file_operations kvm_vm_fops = { .compat_ioctl = kvm_vm_compat_ioctl, #endif .mmap = kvm_vm_mmap, + .llseek = noop_llseek, }; static int kvm_dev_ioctl_create_vm(void) @@ -1867,6 +1869,7 @@ out: static struct file_operations kvm_chardev_ops = { .unlocked_ioctl = kvm_dev_ioctl, .compat_ioctl = kvm_dev_ioctl, + .llseek = noop_llseek, }; static struct miscdevice kvm_dev = { -- cgit v1.2.3-70-g09d2 From f996fc9671d088bd5f52a70f18c64bfe3d0e418f Mon Sep 17 00:00:00 2001 From: Bojan Smojver Date: Thu, 9 Sep 2010 23:06:23 +0200 Subject: PM / Hibernate: Compress hibernation image with LZO Compress hibernation image with LZO in order to save on I/O and therefore time to hibernate/thaw. [rjw: Added hibernate=nocompress command line option instead of just nocompress which would be confusing, fixed a couple of compiler warnings, fixed kerneldoc comments, minor cleanups.] Signed-off-by: Bojan Smojver Signed-off-by: Rafael J. 
Wysocki --- Documentation/kernel-parameters.txt | 5 + Documentation/power/swsusp.txt | 3 +- kernel/power/Kconfig | 2 + kernel/power/hibernate.c | 13 ++ kernel/power/power.h | 1 + kernel/power/swap.c | 290 +++++++++++++++++++++++++++++++++++- 6 files changed, 306 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 8dd7248508a..2c98b18864c 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -2165,6 +2165,11 @@ and is between 256 and 4096 characters. It is defined in the file in <PAGE_SIZE> units (needed only for swap files). See Documentation/power/swsusp-and-swap-files.txt + hibernate= [HIBERNATION] + noresume Don't check if there's a hibernation image + present during boot. + nocompress Don't compress/decompress hibernation images. + retain_initrd [RAM] Keep initrd memory after extraction rhash_entries= [KNL,NET] diff --git a/Documentation/power/swsusp.txt b/Documentation/power/swsusp.txt index 9d60ab717a7..ea718891a66 100644 --- a/Documentation/power/swsusp.txt +++ b/Documentation/power/swsusp.txt @@ -66,7 +66,8 @@ swsusp saves the state of the machine into active swaps and then reboots or powerdowns. You must explicitly specify the swap partition to resume from with ``resume='' kernel option. If signature is found it loads and restores saved state. If the option ``noresume'' is specified as a boot parameter, it skips -the resuming. +the resuming. If the option ``hibernate=nocompress'' is specified as a boot +parameter, it saves the hibernation image without compression. In the meantime while the system is suspended you should not add/remove any of the hardware, write to the filesystems, etc. diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index ca6066a6952..cb57eb99215 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -137,6 +137,8 @@ config SUSPEND_FREEZER config HIBERNATION bool "Hibernation (aka 'suspend to disk')" depends on PM && SWAP && ARCH_HIBERNATION_POSSIBLE + select LZO_COMPRESS + select LZO_DECOMPRESS select SUSPEND_NVS if HAS_IOMEM ---help--- Enable the suspend to disk (STD) functionality, which is usually diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 8dc31e02ae1..6c9c9dc48c7 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -29,6 +29,7 @@ #include "power.h" +static int nocompress = 0; static int noresume = 0; static char resume_file[256] = CONFIG_PM_STD_PARTITION; dev_t swsusp_resume_device; @@ -638,6 +639,8 @@ int hibernate(void) if (hibernation_mode == HIBERNATION_PLATFORM) flags |= SF_PLATFORM_MODE; + if (nocompress) + flags |= SF_NOCOMPRESS_MODE; pr_debug("PM: writing image.\n"); error = swsusp_write(flags); swsusp_free(); @@ -1004,6 +1007,15 @@ static int __init resume_offset_setup(char *str) return 1; } +static int __init hibernate_setup(char *str) +{ + if (!strncmp(str, "noresume", 8)) + noresume = 1; + else if (!strncmp(str, "nocompress", 10)) + nocompress = 1; + return 1; +} + static int __init noresume_setup(char *str) { noresume = 1; @@ -1013,3 +1025,4 @@ static int __init noresume_setup(char *str) __setup("noresume", noresume_setup); __setup("resume_offset=", resume_offset_setup); __setup("resume=", resume_setup); +__setup("hibernate=", hibernate_setup); diff --git a/kernel/power/power.h b/kernel/power/power.h index 006270fe382..c7e42e47eb0 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -134,6 +134,7 @@ extern int swsusp_swap_in_use(void); * the image header.
*/ #define SF_PLATFORM_MODE 1 +#define SF_NOCOMPRESS_MODE 2 /* kernel/power/hibernate.c */ extern int swsusp_check(void); diff --git a/kernel/power/swap.c b/kernel/power/swap.c index e6a5bdf61a3..3dc0552cddf 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -24,6 +24,8 @@ #include #include #include +#include <linux/lzo.h> +#include <linux/vmalloc.h> #include "power.h" @@ -357,6 +359,18 @@ static int swap_writer_finish(struct swap_map_handle *handle, return error; } +/* We need to remember how much compressed data we need to read. */ +#define LZO_HEADER sizeof(size_t) + +/* Number of pages/bytes we'll compress at one time. */ +#define LZO_UNC_PAGES 32 +#define LZO_UNC_SIZE (LZO_UNC_PAGES * PAGE_SIZE) + +/* Number of pages/bytes we need for compressed data (worst case). */ +#define LZO_CMP_PAGES DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \ + LZO_HEADER, PAGE_SIZE) +#define LZO_CMP_SIZE (LZO_CMP_PAGES * PAGE_SIZE) + /** * save_image - save the suspend image data */ @@ -404,6 +418,137 @@ static int save_image(struct swap_map_handle *handle, return ret; } + +/** + * save_image_lzo - Save the suspend image data compressed with LZO. + * @handle: Swap map handle to use for saving the image. + * @snapshot: Image to read data from. + * @nr_to_write: Number of pages to save. + */ +static int save_image_lzo(struct swap_map_handle *handle, + struct snapshot_handle *snapshot, + unsigned int nr_to_write) +{ + unsigned int m; + int ret = 0; + int nr_pages; + int err2; + struct bio *bio; + struct timeval start; + struct timeval stop; + size_t off, unc_len, cmp_len; + unsigned char *unc, *cmp, *wrk, *page; + + page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); + if (!page) { + printk(KERN_ERR "PM: Failed to allocate LZO page\n"); + return -ENOMEM; + } + + wrk = vmalloc(LZO1X_1_MEM_COMPRESS); + if (!wrk) { + printk(KERN_ERR "PM: Failed to allocate LZO workspace\n"); + free_page((unsigned long)page); + return -ENOMEM; + } + + unc = vmalloc(LZO_UNC_SIZE); + if (!unc) { + printk(KERN_ERR "PM: Failed to allocate LZO uncompressed\n"); + vfree(wrk); + free_page((unsigned long)page); + return -ENOMEM; + } + + cmp = vmalloc(LZO_CMP_SIZE); + if (!cmp) { + printk(KERN_ERR "PM: Failed to allocate LZO compressed\n"); + vfree(unc); + vfree(wrk); + free_page((unsigned long)page); + return -ENOMEM; + } + + printk(KERN_INFO + "PM: Compressing and saving image data (%u pages) ... ", + nr_to_write); + m = nr_to_write / 100; + if (!m) + m = 1; + nr_pages = 0; + bio = NULL; + do_gettimeofday(&start); + for (;;) { + for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) { + ret = snapshot_read_next(snapshot); + if (ret < 0) + goto out_finish; + + if (!ret) + break; + + memcpy(unc + off, data_of(*snapshot), PAGE_SIZE); + + if (!(nr_pages % m)) + printk(KERN_CONT "\b\b\b\b%3d%%", nr_pages / m); + nr_pages++; + } + + if (!off) + break; + + unc_len = off; + ret = lzo1x_1_compress(unc, unc_len, + cmp + LZO_HEADER, &cmp_len, wrk); + if (ret < 0) { + printk(KERN_ERR "PM: LZO compression failed\n"); + break; + } + + if (unlikely(!cmp_len || + cmp_len > lzo1x_worst_compress(unc_len))) { + printk(KERN_ERR "PM: Invalid LZO compressed length\n"); + ret = -1; + break; + } + + *(size_t *)cmp = cmp_len; + + /* + * Given we are writing one page at a time to disk, we copy + * that much from the buffer, although the last bit will likely + * be smaller than a full page. This is OK - we saved the length + * of the compressed data, so any garbage at the end will be + * discarded when we read it.
+ */ + for (off = 0; off < LZO_HEADER + cmp_len; off += PAGE_SIZE) { + memcpy(page, cmp + off, PAGE_SIZE); + + ret = swap_write_page(handle, page, &bio); + if (ret) + goto out_finish; + } + } + +out_finish: + err2 = hib_wait_on_bio_chain(&bio); + do_gettimeofday(&stop); + if (!ret) + ret = err2; + if (!ret) + printk(KERN_CONT "\b\b\b\bdone\n"); + else + printk(KERN_CONT "\n"); + swsusp_show_speed(&start, &stop, nr_to_write, "Wrote"); + + vfree(cmp); + vfree(unc); + vfree(wrk); + free_page((unsigned long)page); + + return ret; +} + /** * enough_swap - Make sure we have enough swap to save the image. * @@ -411,12 +556,16 @@ static int save_image(struct swap_map_handle *handle, * space available from the resume partition. */ -static int enough_swap(unsigned int nr_pages) +static int enough_swap(unsigned int nr_pages, unsigned int flags) { unsigned int free_swap = count_swap_pages(root_swap, 1); + unsigned int required; pr_debug("PM: Free swap pages: %u\n", free_swap); - return free_swap > nr_pages + PAGES_FOR_IO; + + required = PAGES_FOR_IO + ((flags & SF_NOCOMPRESS_MODE) ? + nr_pages : (nr_pages * LZO_CMP_PAGES) / LZO_UNC_PAGES + 1); + return free_swap > required; } /** @@ -443,7 +592,7 @@ int swsusp_write(unsigned int flags) printk(KERN_ERR "PM: Cannot get swap writer\n"); return error; } - if (!enough_swap(pages)) { + if (!enough_swap(pages, flags)) { printk(KERN_ERR "PM: Not enough free swap\n"); error = -ENOSPC; goto out_finish; @@ -458,8 +607,11 @@ int swsusp_write(unsigned int flags) } header = (struct swsusp_info *)data_of(snapshot); error = swap_write_page(&handle, header, NULL); - if (!error) - error = save_image(&handle, &snapshot, pages - 1); + if (!error) { + error = (flags & SF_NOCOMPRESS_MODE) ? + save_image(&handle, &snapshot, pages - 1) : + save_image_lzo(&handle, &snapshot, pages - 1); + } out_finish: error = swap_writer_finish(&handle, flags, error); return error; @@ -589,6 +741,127 @@ static int load_image(struct swap_map_handle *handle, return error; } +/** + * load_image_lzo - Load compressed image data and decompress them with LZO. + * @handle: Swap map handle to use for loading data. + * @snapshot: Image to copy uncompressed data into. + * @nr_to_read: Number of pages to load. + */ +static int load_image_lzo(struct swap_map_handle *handle, + struct snapshot_handle *snapshot, + unsigned int nr_to_read) +{ + unsigned int m; + int error = 0; + struct timeval start; + struct timeval stop; + unsigned nr_pages; + size_t off, unc_len, cmp_len; + unsigned char *unc, *cmp, *page; + + page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); + if (!page) { + printk(KERN_ERR "PM: Failed to allocate LZO page\n"); + return -ENOMEM; + } + + unc = vmalloc(LZO_UNC_SIZE); + if (!unc) { + printk(KERN_ERR "PM: Failed to allocate LZO uncompressed\n"); + free_page((unsigned long)page); + return -ENOMEM; + } + + cmp = vmalloc(LZO_CMP_SIZE); + if (!cmp) { + printk(KERN_ERR "PM: Failed to allocate LZO compressed\n"); + vfree(unc); + free_page((unsigned long)page); + return -ENOMEM; + } + + printk(KERN_INFO + "PM: Loading and decompressing image data (%u pages) ... 
", + nr_to_read); + m = nr_to_read / 100; + if (!m) + m = 1; + nr_pages = 0; + do_gettimeofday(&start); + + error = snapshot_write_next(snapshot); + if (error <= 0) + goto out_finish; + + for (;;) { + error = swap_read_page(handle, page, NULL); /* sync */ + if (error) + break; + + cmp_len = *(size_t *)page; + if (unlikely(!cmp_len || + cmp_len > lzo1x_worst_compress(LZO_UNC_SIZE))) { + printk(KERN_ERR "PM: Invalid LZO compressed length\n"); + error = -1; + break; + } + + memcpy(cmp, page, PAGE_SIZE); + for (off = PAGE_SIZE; off < LZO_HEADER + cmp_len; off += PAGE_SIZE) { + error = swap_read_page(handle, page, NULL); /* sync */ + if (error) + goto out_finish; + + memcpy(cmp + off, page, PAGE_SIZE); + } + + unc_len = LZO_UNC_SIZE; + error = lzo1x_decompress_safe(cmp + LZO_HEADER, cmp_len, + unc, &unc_len); + if (error < 0) { + printk(KERN_ERR "PM: LZO decompression failed\n"); + break; + } + + if (unlikely(!unc_len || + unc_len > LZO_UNC_SIZE || + unc_len & (PAGE_SIZE - 1))) { + printk(KERN_ERR "PM: Invalid LZO uncompressed length\n"); + error = -1; + break; + } + + for (off = 0; off < unc_len; off += PAGE_SIZE) { + memcpy(data_of(*snapshot), unc + off, PAGE_SIZE); + + if (!(nr_pages % m)) + printk("\b\b\b\b%3d%%", nr_pages / m); + nr_pages++; + + error = snapshot_write_next(snapshot); + if (error <= 0) + goto out_finish; + } + } + +out_finish: + do_gettimeofday(&stop); + if (!error) { + printk("\b\b\b\bdone\n"); + snapshot_write_finalize(snapshot); + if (!snapshot_image_loaded(snapshot)) + error = -ENODATA; + } else + printk("\n"); + swsusp_show_speed(&start, &stop, nr_to_read, "Read"); + + vfree(cmp); + vfree(unc); + free_page((unsigned long)page); + + return error; +} + /** * swsusp_read - read the hibernation image. * @flags_p: flags passed by the "frozen" kernel in the image header should @@ -612,8 +885,11 @@ int swsusp_read(unsigned int *flags_p) goto end; if (!error) error = swap_read_page(&handle, header, NULL); - if (!error) - error = load_image(&handle, &snapshot, header->pages - 1); + if (!error) { + error = (*flags_p & SF_NOCOMPRESS_MODE) ? + load_image(&handle, &snapshot, header->pages - 1) : + load_image_lzo(&handle, &snapshot, header->pages - 1); + } swap_reader_finish(&handle); end: if (!error) -- cgit v1.2.3-70-g09d2 From ede890c2c069d611ece0e184103a6b9236ce416a Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Sun, 12 Sep 2010 21:40:01 +0200 Subject: PM: Fix unmet dependency warning from kconfig Fix the following build warning: warning: (PM_SLEEP_SMP && SMP && (ARCH_SUSPEND_POSSIBLE || \ ARCH_HIBERNATION_POSSIBLE) && PM_SLEEP) selects HOTPLUG_CPU which \ has unmet direct dependencies (SMP && HOTPLUG) by selecting HOTPLUG along with CPU_HOTPLUG. Signed-off-by: Rafael J. Wysocki Acked-by: Randy Dunlap --- kernel/power/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index cb57eb99215..e45894c696e 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -86,6 +86,7 @@ config PM_SLEEP_SMP depends on SMP depends on ARCH_SUSPEND_POSSIBLE || ARCH_HIBERNATION_POSSIBLE depends on PM_SLEEP + select HOTPLUG select HOTPLUG_CPU default y -- cgit v1.2.3-70-g09d2 From bcb5ba8b4e8a5ae14b27351bdf499dd4c3bcc944 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 20 Sep 2010 19:44:17 +0200 Subject: PM / Runtime: Use alloc_workqueue() for creating the PM workqueue Although we need the PM workqueue to be freezable, we don't need it to be singlethread. 
Also, the number of concurrent work items running on a single CPU need not be constrained. For these reasons use alloc_workqueue() directly, with suitable arguments, instead of create_freezeable_workqueue(), to create the runtime PM workqueue. Signed-off-by: Rafael J. Wysocki Acked-by: Tejun Heo --- kernel/power/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/power/main.c b/kernel/power/main.c index 62b0bc6e498..0a28d4db359 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -308,7 +308,7 @@ EXPORT_SYMBOL_GPL(pm_wq); static int __init pm_start_workqueue(void) { - pm_wq = create_freezeable_workqueue("pm"); + pm_wq = alloc_workqueue("pm", WQ_FREEZEABLE, 0); return pm_wq ? 0 : -ENOMEM; } -- cgit v1.2.3-70-g09d2 From 266f1a25eff5ff98c498d7754a419aacfd88f71c Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 20 Sep 2010 19:44:38 +0200 Subject: PM / Hibernate: Improve comments in hibernate_preallocate_memory() One comment in hibernate_preallocate_memory() is wrong, so fix it and add one more comment to clarify the meaning of the fixed one. Signed-off-by: Rafael J. Wysocki --- kernel/power/snapshot.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index d3f795f01bb..d9191b40cf6 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -1318,12 +1318,14 @@ int hibernate_preallocate_memory(void) /* Compute the maximum number of saveable pages to leave in memory. */ max_size = (count - (size + PAGES_FOR_IO)) / 2 - 2 * SPARE_PAGES; + /* Compute the desired number of image pages specified by image_size. */ size = DIV_ROUND_UP(image_size, PAGE_SIZE); if (size > max_size) size = max_size; /* - * If the maximum is not less than the current number of saveable pages - * in memory, allocate page frames for the image and we're done. + * If the desired number of image pages is at least as large as the + * current number of saveable pages in memory, allocate page frames for + * the image and we're done. */ if (size >= saveable) { pages = preallocate_image_highmem(save_highmem); -- cgit v1.2.3-70-g09d2 From ac5c24ec1e983313ef0015258fba6f630e54e7cf Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 20 Sep 2010 19:44:56 +0200 Subject: PM / Hibernate: Make default image size depend on total RAM size The default hibernation image size is currently hard-coded and equal to 500 MB, which is not a reasonable default on many contemporary systems. Make it equal to 2/5 of the total RAM size (this is slightly below the maximum, i.e. 1/2 of the total RAM size, and seems to be generally suitable). Signed-off-by: Rafael J. Wysocki Tested-by: M. Vefa Bicakci --- Documentation/power/interface.txt | 2 +- kernel/power/main.c | 1 + kernel/power/power.h | 9 ++++++++- kernel/power/snapshot.c | 7 ++++++- 4 files changed, 16 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/Documentation/power/interface.txt b/Documentation/power/interface.txt index e67211fe0ee..c537834af00 100644 --- a/Documentation/power/interface.txt +++ b/Documentation/power/interface.txt @@ -57,7 +57,7 @@ smallest image possible. In particular, if "0" is written to this file, the suspend image will be as small as possible. Reading from this file will display the current image size limit, which -is set to 500 MB by default. +is set to 2/5 of available RAM by default.
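[Back-of-the-envelope check of the new default, assuming 4 KiB pages; the figures are illustrative, not from the patch:

	/* e.g. 2 GiB of RAM: totalram_pages == 524288 */
	image_size = ((524288 * 2) / 5) * 4096;	/* 858992640 bytes, ~819 MiB */

i.e. roughly 2/5 of total RAM, versus the previous fixed 500 MB.]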
/sys/power/pm_trace controls the code which saves the last PM event point in the RTC across reboots, so that you can debug a machine that just hangs diff --git a/kernel/power/main.c b/kernel/power/main.c index 0a28d4db359..f06ad6eff37 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -321,6 +321,7 @@ static int __init pm_init(void) int error = pm_start_workqueue(); if (error) return error; + hibernate_image_size_init(); power_kobj = kobject_create_and_add("power", NULL); if (!power_kobj) return -ENOMEM; diff --git a/kernel/power/power.h b/kernel/power/power.h index c7e42e47eb0..03634be55f6 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -14,6 +14,9 @@ struct swsusp_info { } __attribute__((aligned(PAGE_SIZE))); #ifdef CONFIG_HIBERNATION +/* kernel/power/snapshot.c */ +extern void __init hibernate_image_size_init(void); + #ifdef CONFIG_ARCH_HIBERNATION_HEADER /* Maximum size of architecture specific data in a hibernation header */ #define MAX_ARCH_HEADER_SIZE (sizeof(struct new_utsname) + 4) @@ -49,7 +52,11 @@ static inline char *check_image_kernel(struct swsusp_info *info) extern int hibernation_snapshot(int platform_mode); extern int hibernation_restore(int platform_mode); extern int hibernation_platform_enter(void); -#endif + +#else /* !CONFIG_HIBERNATION */ + +static inline void hibernate_image_size_init(void) {} +#endif /* !CONFIG_HIBERNATION */ extern int pfn_is_nosave(unsigned long); diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index d9191b40cf6..ac7eb109f19 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -46,7 +46,12 @@ static void swsusp_unset_page_forbidden(struct page *); * size will not exceed N bytes, but if that is impossible, it will * try to create the smallest image possible. */ -unsigned long image_size = 500 * 1024 * 1024; +unsigned long image_size; + +void __init hibernate_image_size_init(void) +{ + image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE; +} /* List of PBEs needed for restoring the pages that were allocated before * the suspend and included in the suspend image, but have also been -- cgit v1.2.3-70-g09d2 From 074037ec79bea73edf1b1ec72fef1010e83e3cc5 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 22 Sep 2010 22:09:10 +0200 Subject: PM / Wakeup: Introduce wakeup source objects and event statistics (v3) Introduce struct wakeup_source for representing system wakeup sources within the kernel and for collecting statistics related to them. Make the recently introduced helper functions pm_wakeup_event(), pm_stay_awake() and pm_relax() use struct wakeup_source objects internally, so that wakeup statistics associated with wakeup devices can be collected and reported in a consistent way (the definition of pm_relax() is changed, which is harmless, because this function is not called directly by anyone yet). Introduce new wakeup-related sysfs device attributes in /sys/devices/.../power for reporting the device wakeup statistics. Change the global wakeup events counters event_count and events_in_progress into atomic variables, so that it is not necessary to acquire a global spinlock in pm_wakeup_event(), pm_stay_awake() and pm_relax(), which should allow us to avoid lock contention in these functions on SMP systems with many wakeup devices. Signed-off-by: Rafael J. 
Wysocki Acked-by: Greg Kroah-Hartman --- Documentation/ABI/testing/sysfs-devices-power | 70 ++++ drivers/base/power/main.c | 4 +- drivers/base/power/power.h | 1 + drivers/base/power/runtime.c | 2 - drivers/base/power/sysfs.c | 121 +++++- drivers/base/power/wakeup.c | 528 ++++++++++++++++++++++---- include/linux/pm.h | 16 +- include/linux/pm_wakeup.h | 127 +++++-- include/linux/suspend.h | 4 +- kernel/power/main.c | 8 +- 10 files changed, 756 insertions(+), 125 deletions(-) (limited to 'kernel') diff --git a/Documentation/ABI/testing/sysfs-devices-power b/Documentation/ABI/testing/sysfs-devices-power index 6123c523bfd..6bb2dd3c3a7 100644 --- a/Documentation/ABI/testing/sysfs-devices-power +++ b/Documentation/ABI/testing/sysfs-devices-power @@ -77,3 +77,73 @@ Description: devices this attribute is set to "enabled" by bus type code or device drivers and in those cases it should be safe to leave the default value. + +What: /sys/devices/.../power/wakeup_count +Date: September 2010 +Contact: Rafael J. Wysocki +Description: + The /sys/devices/.../wakeup_count attribute contains the number + of signaled wakeup events associated with the device. This + attribute is read-only. If the device is not enabled to wake up + the system from sleep states, this attribute is empty. + +What: /sys/devices/.../power/wakeup_active_count +Date: September 2010 +Contact: Rafael J. Wysocki +Description: + The /sys/devices/.../wakeup_active_count attribute contains the + number of times the processing of wakeup events associated with + the device was completed (at the kernel level). This attribute + is read-only. If the device is not enabled to wake up the + system from sleep states, this attribute is empty. + +What: /sys/devices/.../power/wakeup_hit_count +Date: September 2010 +Contact: Rafael J. Wysocki +Description: + The /sys/devices/.../wakeup_hit_count attribute contains the + number of times the processing of a wakeup event associated with + the device might prevent the system from entering a sleep state. + This attribute is read-only. If the device is not enabled to + wake up the system from sleep states, this attribute is empty. + +What: /sys/devices/.../power/wakeup_active +Date: September 2010 +Contact: Rafael J. Wysocki +Description: + The /sys/devices/.../wakeup_active attribute contains either 1, + or 0, depending on whether or not a wakeup event associated with + the device is being processed (1). This attribute is read-only. + If the device is not enabled to wake up the system from sleep + states, this attribute is empty. + +What: /sys/devices/.../power/wakeup_total_time_ms +Date: September 2010 +Contact: Rafael J. Wysocki +Description: + The /sys/devices/.../wakeup_total_time_ms attribute contains + the total time of processing wakeup events associated with the + device, in milliseconds. This attribute is read-only. If the + device is not enabled to wake up the system from sleep states, + this attribute is empty. + +What: /sys/devices/.../power/wakeup_max_time_ms +Date: September 2010 +Contact: Rafael J. Wysocki +Description: + The /sys/devices/.../wakeup_max_time_ms attribute contains + the maximum time of processing a single wakeup event associated + with the device, in milliseconds. This attribute is read-only. + If the device is not enabled to wake up the system from sleep + states, this attribute is empty. + +What: /sys/devices/.../power/wakeup_last_time_ms +Date: September 2010 +Contact: Rafael J.
Wysocki +Description: + The /sys/devices/.../wakeup_last_time_ms attribute contains + the value of the monotonic clock corresponding to the time of + signaling the last wakeup event associated with the device, in + milliseconds. This attribute is read-only. If the device is + not enabled to wake up the system from sleep states, this + attribute is empty. diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index db677199403..7ae6fe414c3 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -60,7 +60,8 @@ void device_pm_init(struct device *dev) dev->power.status = DPM_ON; init_completion(&dev->power.completion); complete_all(&dev->power.completion); - dev->power.wakeup_count = 0; + dev->power.wakeup = NULL; + spin_lock_init(&dev->power.lock); pm_runtime_init(dev); } @@ -120,6 +121,7 @@ void device_pm_remove(struct device *dev) mutex_lock(&dpm_list_mtx); list_del_init(&dev->power.entry); mutex_unlock(&dpm_list_mtx); + device_wakeup_disable(dev); pm_runtime_remove(dev); } diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index c0bd03c83b9..8b2745c69e2 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h @@ -34,6 +34,7 @@ extern void device_pm_move_last(struct device *); static inline void device_pm_init(struct device *dev) { + spin_lock_init(&dev->power.lock); pm_runtime_init(dev); } diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index b78c401ffa7..e9520ad2d71 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -1099,8 +1099,6 @@ EXPORT_SYMBOL_GPL(pm_runtime_allow); */ void pm_runtime_init(struct device *dev) { - spin_lock_init(&dev->power.lock); - dev->power.runtime_status = RPM_SUSPENDED; dev->power.idle_notification = false; diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index e56b4388fe6..8859780817e 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -210,11 +210,122 @@ static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store); static ssize_t wakeup_count_show(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "%lu\n", dev->power.wakeup_count); + unsigned long count = 0; + bool enabled = false; + + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + count = dev->power.wakeup->event_count; + enabled = true; + } + spin_unlock_irq(&dev->power.lock); + return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); } static DEVICE_ATTR(wakeup_count, 0444, wakeup_count_show, NULL); -#endif + +static ssize_t wakeup_active_count_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long count = 0; + bool enabled = false; + + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + count = dev->power.wakeup->active_count; + enabled = true; + } + spin_unlock_irq(&dev->power.lock); + return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); +} + +static DEVICE_ATTR(wakeup_active_count, 0444, wakeup_active_count_show, NULL); + +static ssize_t wakeup_hit_count_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long count = 0; + bool enabled = false; + + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + count = dev->power.wakeup->hit_count; + enabled = true; + } + spin_unlock_irq(&dev->power.lock); + return enabled ? 
sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); +} + +static DEVICE_ATTR(wakeup_hit_count, 0444, wakeup_hit_count_show, NULL); + +static ssize_t wakeup_active_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned int active = 0; + bool enabled = false; + + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + active = dev->power.wakeup->active; + enabled = true; + } + spin_unlock_irq(&dev->power.lock); + return enabled ? sprintf(buf, "%u\n", active) : sprintf(buf, "\n"); +} + +static DEVICE_ATTR(wakeup_active, 0444, wakeup_active_show, NULL); + +static ssize_t wakeup_total_time_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + s64 msec = 0; + bool enabled = false; + + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + msec = ktime_to_ms(dev->power.wakeup->total_time); + enabled = true; + } + spin_unlock_irq(&dev->power.lock); + return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); +} + +static DEVICE_ATTR(wakeup_total_time_ms, 0444, wakeup_total_time_show, NULL); + +static ssize_t wakeup_max_time_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + s64 msec = 0; + bool enabled = false; + + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + msec = ktime_to_ms(dev->power.wakeup->max_time); + enabled = true; + } + spin_unlock_irq(&dev->power.lock); + return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); +} + +static DEVICE_ATTR(wakeup_max_time_ms, 0444, wakeup_max_time_show, NULL); + +static ssize_t wakeup_last_time_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + s64 msec = 0; + bool enabled = false; + + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + msec = ktime_to_ms(dev->power.wakeup->last_time); + enabled = true; + } + spin_unlock_irq(&dev->power.lock); + return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); +} + +static DEVICE_ATTR(wakeup_last_time_ms, 0444, wakeup_last_time_show, NULL); +#endif /* CONFIG_PM_SLEEP */ #ifdef CONFIG_PM_ADVANCED_DEBUG #ifdef CONFIG_PM_RUNTIME @@ -288,6 +399,12 @@ static struct attribute * power_attrs[] = { &dev_attr_wakeup.attr, #ifdef CONFIG_PM_SLEEP &dev_attr_wakeup_count.attr, + &dev_attr_wakeup_active_count.attr, + &dev_attr_wakeup_hit_count.attr, + &dev_attr_wakeup_active.attr, + &dev_attr_wakeup_total_time_ms.attr, + &dev_attr_wakeup_max_time_ms.attr, + &dev_attr_wakeup_last_time_ms.attr, #endif #ifdef CONFIG_PM_ADVANCED_DEBUG &dev_attr_async.attr, diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index eb594facfc3..03751a01dba 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -11,7 +11,10 @@ #include #include #include -#include + +#include "power.h" + +#define TIMEOUT 100 /* * If set, the suspend/hibernate code will abort transitions to a sleep state @@ -20,18 +23,244 @@ bool events_check_enabled; /* The counter of registered wakeup events. */ -static unsigned long event_count; +static atomic_t event_count = ATOMIC_INIT(0); /* A preserved old value of event_count. */ -static unsigned long saved_event_count; +static unsigned int saved_count; /* The counter of wakeup events being processed. 
*/ -static unsigned long events_in_progress; +static atomic_t events_in_progress = ATOMIC_INIT(0); static DEFINE_SPINLOCK(events_lock); static void pm_wakeup_timer_fn(unsigned long data); -static DEFINE_TIMER(events_timer, pm_wakeup_timer_fn, 0, 0); -static unsigned long events_timer_expires; +static LIST_HEAD(wakeup_sources); + +/** + * wakeup_source_create - Create a struct wakeup_source object. + * @name: Name of the new wakeup source. + */ +struct wakeup_source *wakeup_source_create(const char *name) +{ + struct wakeup_source *ws; + + ws = kzalloc(sizeof(*ws), GFP_KERNEL); + if (!ws) + return NULL; + + spin_lock_init(&ws->lock); + if (name) + ws->name = kstrdup(name, GFP_KERNEL); + + return ws; +} +EXPORT_SYMBOL_GPL(wakeup_source_create); + +/** + * wakeup_source_destroy - Destroy a struct wakeup_source object. + * @ws: Wakeup source to destroy. + */ +void wakeup_source_destroy(struct wakeup_source *ws) +{ + if (!ws) + return; + + spin_lock_irq(&ws->lock); + while (ws->active) { + spin_unlock_irq(&ws->lock); + + schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT)); + + spin_lock_irq(&ws->lock); + } + spin_unlock_irq(&ws->lock); + + kfree(ws->name); + kfree(ws); +} +EXPORT_SYMBOL_GPL(wakeup_source_destroy); + +/** + * wakeup_source_add - Add given object to the list of wakeup sources. + * @ws: Wakeup source object to add to the list. + */ +void wakeup_source_add(struct wakeup_source *ws) +{ + if (WARN_ON(!ws)) + return; + + setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws); + ws->active = false; + + spin_lock_irq(&events_lock); + list_add_rcu(&ws->entry, &wakeup_sources); + spin_unlock_irq(&events_lock); + synchronize_rcu(); +} +EXPORT_SYMBOL_GPL(wakeup_source_add); + +/** + * wakeup_source_remove - Remove given object from the wakeup sources list. + * @ws: Wakeup source object to remove from the list. + */ +void wakeup_source_remove(struct wakeup_source *ws) +{ + if (WARN_ON(!ws)) + return; + + spin_lock_irq(&events_lock); + list_del_rcu(&ws->entry); + spin_unlock_irq(&events_lock); + synchronize_rcu(); +} +EXPORT_SYMBOL_GPL(wakeup_source_remove); + +/** + * wakeup_source_register - Create wakeup source and add it to the list. + * @name: Name of the wakeup source to register. + */ +struct wakeup_source *wakeup_source_register(const char *name) +{ + struct wakeup_source *ws; + + ws = wakeup_source_create(name); + if (ws) + wakeup_source_add(ws); + + return ws; +} +EXPORT_SYMBOL_GPL(wakeup_source_register); + +/** + * wakeup_source_unregister - Remove wakeup source from the list and destroy it. + * @ws: Wakeup source object to unregister. + */ +void wakeup_source_unregister(struct wakeup_source *ws) +{ + wakeup_source_remove(ws); + wakeup_source_destroy(ws); +} +EXPORT_SYMBOL_GPL(wakeup_source_unregister); + +/** + * device_wakeup_attach - Attach a wakeup source object to a device object. + * @dev: Device to handle. + * @ws: Wakeup source object to attach to @dev. + * + * This causes @dev to be treated as a wakeup device. + */ +static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws) +{ + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + spin_unlock_irq(&dev->power.lock); + return -EEXIST; + } + dev->power.wakeup = ws; + spin_unlock_irq(&dev->power.lock); + return 0; +} + +/** + * device_wakeup_enable - Enable given device to be a wakeup source. + * @dev: Device to handle. + * + * Create a wakeup source object, register it and attach it to @dev.
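+ *
+ * Illustrative call site from a hypothetical driver probe routine (not part
+ * of this patch; most drivers reach this path through device_init_wakeup()
+ * below rather than calling it directly):
+ *
+ *	device_set_wakeup_capable(&pdev->dev, true);
+ *	if (device_wakeup_enable(&pdev->dev))
+ *		dev_warn(&pdev->dev, "could not register wakeup source\n");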
+ */ +int device_wakeup_enable(struct device *dev) +{ + struct wakeup_source *ws; + int ret; + + if (!dev || !dev->power.can_wakeup) + return -EINVAL; + + ws = wakeup_source_register(dev_name(dev)); + if (!ws) + return -ENOMEM; + + ret = device_wakeup_attach(dev, ws); + if (ret) + wakeup_source_unregister(ws); + + return ret; +} +EXPORT_SYMBOL_GPL(device_wakeup_enable); + +/** + * device_wakeup_detach - Detach a device's wakeup source object from it. + * @dev: Device to detach the wakeup source object from. + * + * After it returns, @dev will not be treated as a wakeup device any more. + */ +static struct wakeup_source *device_wakeup_detach(struct device *dev) +{ + struct wakeup_source *ws; + + spin_lock_irq(&dev->power.lock); + ws = dev->power.wakeup; + dev->power.wakeup = NULL; + spin_unlock_irq(&dev->power.lock); + return ws; +} + +/** + * device_wakeup_disable - Do not regard a device as a wakeup source any more. + * @dev: Device to handle. + * + * Detach the @dev's wakeup source object from it, unregister this wakeup source + * object and destroy it. + */ +int device_wakeup_disable(struct device *dev) +{ + struct wakeup_source *ws; + + if (!dev || !dev->power.can_wakeup) + return -EINVAL; + + ws = device_wakeup_detach(dev); + if (ws) + wakeup_source_unregister(ws); + + return 0; +} +EXPORT_SYMBOL_GPL(device_wakeup_disable); + +/** + * device_init_wakeup - Device wakeup initialization. + * @dev: Device to handle. + * @enable: Whether or not to enable @dev as a wakeup device. + * + * By default, most devices should leave wakeup disabled. The exceptions are + * devices that everyone expects to be wakeup sources: keyboards, power buttons, + * possibly network interfaces, etc. + */ +int device_init_wakeup(struct device *dev, bool enable) +{ + int ret = 0; + + if (enable) { + device_set_wakeup_capable(dev, true); + ret = device_wakeup_enable(dev); + } else { + device_set_wakeup_capable(dev, false); + } + + return ret; +} +EXPORT_SYMBOL_GPL(device_init_wakeup); + +/** + * device_set_wakeup_enable - Enable or disable a device to wake up the system. + * @dev: Device to handle. + */ +int device_set_wakeup_enable(struct device *dev, bool enable) +{ + if (!dev || !dev->power.can_wakeup) + return -EINVAL; + + return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev); +} +EXPORT_SYMBOL_GPL(device_set_wakeup_enable); /* * The functions below use the observation that each wakeup event starts a @@ -55,118 +284,259 @@ static unsigned long events_timer_expires; * knowledge, however, may not be available to it, so it can simply specify time * to wait before the system can be suspended and pass it as the second * argument of pm_wakeup_event(). + * + * It is valid to call pm_relax() after pm_wakeup_event(), in which case the + * "no suspend" period will be ended either by the pm_relax(), or by the timer + * function executed when the timer expires, whichever comes first. */ +/** + * wakeup_source_activate - Mark given wakeup source as active. + * @ws: Wakeup source to handle. + * + * Update the @ws' statistics and, if @ws has just been activated, notify the PM + * core of the event by incrementing the counter of wakeup events being + * processed. + */ +static void wakeup_source_activate(struct wakeup_source *ws) +{ + ws->active = true; + ws->active_count++; + ws->timer_expires = jiffies; + ws->last_time = ktime_get(); + + atomic_inc(&events_in_progress); +} + +/** + * __pm_stay_awake - Notify the PM core of a wakeup event.
+ * @ws: Wakeup source object associated with the source of the event. + * + * It is safe to call this function from interrupt context. + */ +void __pm_stay_awake(struct wakeup_source *ws) +{ + unsigned long flags; + + if (!ws) + return; + + spin_lock_irqsave(&ws->lock, flags); + ws->event_count++; + if (!ws->active) + wakeup_source_activate(ws); + spin_unlock_irqrestore(&ws->lock, flags); +} +EXPORT_SYMBOL_GPL(__pm_stay_awake); + /** * pm_stay_awake - Notify the PM core that a wakeup event is being processed. * @dev: Device the wakeup event is related to. * - * Notify the PM core of a wakeup event (signaled by @dev) by incrementing the - * counter of wakeup events being processed. If @dev is not NULL, the counter - * of wakeup events related to @dev is incremented too. + * Notify the PM core of a wakeup event (signaled by @dev) by calling + * __pm_stay_awake for the @dev's wakeup source object. * * Call this function after detecting a wakeup event if pm_relax() is going * to be called directly after processing the event (and possibly passing it to * user space for further processing). - * - * It is safe to call this function from interrupt context. */ void pm_stay_awake(struct device *dev) { unsigned long flags; - spin_lock_irqsave(&events_lock, flags); - if (dev) - dev->power.wakeup_count++; + if (!dev) + return; - events_in_progress++; - spin_unlock_irqrestore(&events_lock, flags); + spin_lock_irqsave(&dev->power.lock, flags); + __pm_stay_awake(dev->power.wakeup); + spin_unlock_irqrestore(&dev->power.lock, flags); } +EXPORT_SYMBOL_GPL(pm_stay_awake); /** - * pm_relax - Notify the PM core that processing of a wakeup event has ended. + * wakeup_source_deactivate - Mark given wakeup source as inactive. + * @ws: Wakeup source to handle. * - * Notify the PM core that a wakeup event has been processed by decrementing - * the counter of wakeup events being processed and incrementing the counter - * of registered wakeup events. + * Update the @ws' statistics and notify the PM core that the wakeup source has + * become inactive by decrementing the counter of wakeup events being processed + * and incrementing the counter of registered wakeup events. + */ +static void wakeup_source_deactivate(struct wakeup_source *ws) +{ + ktime_t duration; + ktime_t now; + + ws->relax_count++; + /* + * __pm_relax() may be called directly or from a timer function. + * If it is called directly right after the timer function has been + * started, but before the timer function calls __pm_relax(), it is + * possible that __pm_stay_awake() will be called in the meantime and + * will set ws->active. Then, ws->active may be cleared immediately + * by the __pm_relax() called from the timer function, but in such a + * case ws->relax_count will be different from ws->active_count. + */ + if (ws->relax_count != ws->active_count) { + ws->relax_count--; + return; + } + + ws->active = false; + + now = ktime_get(); + duration = ktime_sub(now, ws->last_time); + ws->total_time = ktime_add(ws->total_time, duration); + if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time)) + ws->max_time = duration; + + del_timer(&ws->timer); + + /* + * event_count has to be incremented before events_in_progress is + * modified, so that the callers of pm_check_wakeup_events() and + * pm_save_wakeup_count() don't see the old value of event_count and + * events_in_progress equal to zero at the same time.
+ */ + atomic_inc(&event_count); + smp_mb__before_atomic_dec(); + atomic_dec(&events_in_progress); +} + +/** + * __pm_relax - Notify the PM core that processing of a wakeup event has ended. + * @ws: Wakeup source object associated with the source of the event. * * Call this function for wakeup events whose processing started with calling - * pm_stay_awake(). + * __pm_stay_awake(). * * It is safe to call it from interrupt context. */ -void pm_relax(void) +void __pm_relax(struct wakeup_source *ws) { unsigned long flags; - spin_lock_irqsave(&events_lock, flags); - if (events_in_progress) { - events_in_progress--; - event_count++; - } - spin_unlock_irqrestore(&events_lock, flags); + if (!ws) + return; + + spin_lock_irqsave(&ws->lock, flags); + if (ws->active) + wakeup_source_deactivate(ws); + spin_unlock_irqrestore(&ws->lock, flags); +} +EXPORT_SYMBOL_GPL(__pm_relax); + +/** + * pm_relax - Notify the PM core that processing of a wakeup event has ended. + * @dev: Device that signaled the event. + * + * Execute __pm_relax() for the @dev's wakeup source object. + */ +void pm_relax(struct device *dev) +{ + unsigned long flags; + + if (!dev) + return; + + spin_lock_irqsave(&dev->power.lock, flags); + __pm_relax(dev->power.wakeup); + spin_unlock_irqrestore(&dev->power.lock, flags); } +EXPORT_SYMBOL_GPL(pm_relax); /** * pm_wakeup_timer_fn - Delayed finalization of a wakeup event. + * @data: Address of the wakeup source object associated with the event source. * - * Decrease the counter of wakeup events being processed after it was increased - * by pm_wakeup_event(). + * Call __pm_relax() for the wakeup source whose address is stored in @data. */ static void pm_wakeup_timer_fn(unsigned long data) +{ + __pm_relax((struct wakeup_source *)data); +} + +/** + * __pm_wakeup_event - Notify the PM core of a wakeup event. + * @ws: Wakeup source object associated with the event source. + * @msec: Anticipated event processing time (in milliseconds). + * + * Notify the PM core of a wakeup event whose source is @ws that will take + * approximately @msec milliseconds to be processed by the kernel. If @ws is + * not active, activate it. If @msec is nonzero, set up the @ws' timer to + * execute pm_wakeup_timer_fn() in future. + * + * It is safe to call this function from interrupt context. + */ +void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) { unsigned long flags; + unsigned long expires; - spin_lock_irqsave(&events_lock, flags); - if (events_timer_expires - && time_before_eq(events_timer_expires, jiffies)) { - events_in_progress--; - events_timer_expires = 0; + if (!ws) + return; + + spin_lock_irqsave(&ws->lock, flags); + + ws->event_count++; + if (!ws->active) + wakeup_source_activate(ws); + + if (!msec) { + wakeup_source_deactivate(ws); + goto unlock; } - spin_unlock_irqrestore(&events_lock, flags); + + expires = jiffies + msecs_to_jiffies(msec); + if (!expires) + expires = 1; + + if (time_after(expires, ws->timer_expires)) { + mod_timer(&ws->timer, expires); + ws->timer_expires = expires; + } + + unlock: + spin_unlock_irqrestore(&ws->lock, flags); } +EXPORT_SYMBOL_GPL(__pm_wakeup_event); + /** * pm_wakeup_event - Notify the PM core of a wakeup event. * @dev: Device the wakeup event is related to. * @msec: Anticipated event processing time (in milliseconds). * - * Notify the PM core of a wakeup event (signaled by @dev) that will take - * approximately @msec milliseconds to be processed by the kernel. 
Increment - * the counter of registered wakeup events and (if @msec is nonzero) set up - * the wakeup events timer to execute pm_wakeup_timer_fn() in future (if the - * timer has not been set up already, increment the counter of wakeup events - * being processed). If @dev is not NULL, the counter of wakeup events related - * to @dev is incremented too. - * - * It is safe to call this function from interrupt context. + * Call __pm_wakeup_event() for the @dev's wakeup source object. */ void pm_wakeup_event(struct device *dev, unsigned int msec) { unsigned long flags; - spin_lock_irqsave(&events_lock, flags); - event_count++; - if (dev) - dev->power.wakeup_count++; - - if (msec) { - unsigned long expires; + if (!dev) + return; - expires = jiffies + msecs_to_jiffies(msec); - if (!expires) - expires = 1; + spin_lock_irqsave(&dev->power.lock, flags); + __pm_wakeup_event(dev->power.wakeup, msec); + spin_unlock_irqrestore(&dev->power.lock, flags); +} +EXPORT_SYMBOL_GPL(pm_wakeup_event); - if (!events_timer_expires - || time_after(expires, events_timer_expires)) { - if (!events_timer_expires) - events_in_progress++; +/** + * pm_wakeup_update_hit_counts - Update hit counts of all active wakeup sources. + */ +static void pm_wakeup_update_hit_counts(void) +{ + unsigned long flags; + struct wakeup_source *ws; - mod_timer(&events_timer, expires); - events_timer_expires = expires; - } + rcu_read_lock(); + list_for_each_entry_rcu(ws, &wakeup_sources, entry) { + spin_lock_irqsave(&ws->lock, flags); + if (ws->active) + ws->hit_count++; + spin_unlock_irqrestore(&ws->lock, flags); } - spin_unlock_irqrestore(&events_lock, flags); + rcu_read_unlock(); } /** @@ -184,10 +554,13 @@ bool pm_check_wakeup_events(void) spin_lock_irqsave(&events_lock, flags); if (events_check_enabled) { - ret = (event_count == saved_event_count) && !events_in_progress; + ret = ((unsigned int)atomic_read(&event_count) == saved_count) + && !atomic_read(&events_in_progress); events_check_enabled = ret; } spin_unlock_irqrestore(&events_lock, flags); + if (!ret) + pm_wakeup_update_hit_counts(); return ret; } @@ -202,24 +575,20 @@ bool pm_check_wakeup_events(void) * drop down to zero has been interrupted by a signal (and the current number * of wakeup events being processed is still nonzero). Otherwise return true. */ -bool pm_get_wakeup_count(unsigned long *count) +bool pm_get_wakeup_count(unsigned int *count) { bool ret; - spin_lock_irq(&events_lock); if (capable(CAP_SYS_ADMIN)) events_check_enabled = false; - while (events_in_progress && !signal_pending(current)) { - spin_unlock_irq(&events_lock); - - schedule_timeout_interruptible(msecs_to_jiffies(100)); - - spin_lock_irq(&events_lock); + while (atomic_read(&events_in_progress) && !signal_pending(current)) { + pm_wakeup_update_hit_counts(); + schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT)); } - *count = event_count; - ret = !events_in_progress; - spin_unlock_irq(&events_lock); + + ret = !atomic_read(&events_in_progress); + *count = atomic_read(&event_count); return ret; } @@ -232,16 +601,19 @@ bool pm_get_wakeup_count(unsigned long *count) * old number of registered wakeup events to be used by pm_check_wakeup_events() * and return true. Otherwise return false. 
*/ -bool pm_save_wakeup_count(unsigned long count) +bool pm_save_wakeup_count(unsigned int count) { bool ret = false; spin_lock_irq(&events_lock); - if (count == event_count && !events_in_progress) { - saved_event_count = count; + if (count == (unsigned int)atomic_read(&event_count) + && !atomic_read(&events_in_progress)) { + saved_count = count; events_check_enabled = true; ret = true; } spin_unlock_irq(&events_lock); + if (!ret) + pm_wakeup_update_hit_counts(); return ret; } diff --git a/include/linux/pm.h b/include/linux/pm.h index 52e8c55ff31..a84118911ce 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -448,23 +448,24 @@ enum rpm_request { RPM_REQ_RESUME, }; +struct wakeup_source; + struct dev_pm_info { pm_message_t power_state; unsigned int can_wakeup:1; - unsigned int should_wakeup:1; unsigned async_suspend:1; enum dpm_state status; /* Owned by the PM core */ + spinlock_t lock; #ifdef CONFIG_PM_SLEEP struct list_head entry; struct completion completion; - unsigned long wakeup_count; + struct wakeup_source *wakeup; #endif #ifdef CONFIG_PM_RUNTIME struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; - spinlock_t lock; atomic_t usage_count; atomic_t child_count; unsigned int disable_depth:3; @@ -559,11 +560,6 @@ extern void __suspend_report_result(const char *function, void *fn, int ret); } while (0) extern void device_pm_wait_for_dev(struct device *sub, struct device *dev); - -/* drivers/base/power/wakeup.c */ -extern void pm_wakeup_event(struct device *dev, unsigned int msec); -extern void pm_stay_awake(struct device *dev); -extern void pm_relax(void); #else /* !CONFIG_PM_SLEEP */ #define device_pm_lock() do {} while (0) @@ -577,10 +573,6 @@ static inline int dpm_suspend_start(pm_message_t state) #define suspend_report_result(fn, ret) do {} while (0) static inline void device_pm_wait_for_dev(struct device *a, struct device *b) {} - -static inline void pm_wakeup_event(struct device *dev, unsigned int msec) {} -static inline void pm_stay_awake(struct device *dev) {} -static inline void pm_relax(void) {} #endif /* !CONFIG_PM_SLEEP */ /* How to reorder dpm_list after device_move() */ diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index 76aca48722a..9cff00dd6b6 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h @@ -2,6 +2,7 @@ * pm_wakeup.h - Power management wakeup interface * * Copyright (C) 2008 Alan Stern + * Copyright (C) 2010 Rafael J. Wysocki, Novell Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -27,19 +28,77 @@ #include -#ifdef CONFIG_PM - -/* Changes to device_may_wakeup take effect on the next pm state change. +/** + * struct wakeup_source - Representation of wakeup sources * - * By default, most devices should leave wakeup disabled. The exceptions - * are devices that everyone expects to be wakeup sources: keyboards, - * power buttons, possibly network interfaces, etc. + * @total_time: Total time this wakeup source has been active. + * @max_time: Maximum time this wakeup source has been continuously active. + * @last_time: Monotonic clock when the wakeup source was activated last time. + * @event_count: Number of signaled wakeup events. + * @active_count: Number of times the wakeup source was activated. + * @relax_count: Number of times the wakeup source was deactivated. + * @hit_count: Number of times the wakeup source might abort system suspend.
+ * @active: Status of the wakeup source. */ -static inline void device_init_wakeup(struct device *dev, bool val) +struct wakeup_source { + char *name; + struct list_head entry; + spinlock_t lock; + struct timer_list timer; + unsigned long timer_expires; + ktime_t total_time; + ktime_t max_time; + ktime_t last_time; + unsigned long event_count; + unsigned long active_count; + unsigned long relax_count; + unsigned long hit_count; + unsigned int active:1; +}; + +#ifdef CONFIG_PM_SLEEP + +/* + * Changes to device_may_wakeup take effect on the next pm state change. + */ + +static inline void device_set_wakeup_capable(struct device *dev, bool capable) +{ + dev->power.can_wakeup = capable; +} + +static inline bool device_can_wakeup(struct device *dev) +{ + return dev->power.can_wakeup; +} + + + +static inline bool device_may_wakeup(struct device *dev) { - dev->power.can_wakeup = dev->power.should_wakeup = val; + return dev->power.can_wakeup && !!dev->power.wakeup; } +/* drivers/base/power/wakeup.c */ +extern struct wakeup_source *wakeup_source_create(const char *name); +extern void wakeup_source_destroy(struct wakeup_source *ws); +extern void wakeup_source_add(struct wakeup_source *ws); +extern void wakeup_source_remove(struct wakeup_source *ws); +extern struct wakeup_source *wakeup_source_register(const char *name); +extern void wakeup_source_unregister(struct wakeup_source *ws); +extern int device_wakeup_enable(struct device *dev); +extern int device_wakeup_disable(struct device *dev); +extern int device_init_wakeup(struct device *dev, bool val); +extern int device_set_wakeup_enable(struct device *dev, bool enable); +extern void __pm_stay_awake(struct wakeup_source *ws); +extern void pm_stay_awake(struct device *dev); +extern void __pm_relax(struct wakeup_source *ws); +extern void pm_relax(struct device *dev); +extern void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec); +extern void pm_wakeup_event(struct device *dev, unsigned int msec); + +#else /* !CONFIG_PM_SLEEP */ + static inline void device_set_wakeup_capable(struct device *dev, bool capable) { dev->power.can_wakeup = capable; @@ -50,43 +109,63 @@ static inline bool device_can_wakeup(struct device *dev) return dev->power.can_wakeup; } -static inline void device_set_wakeup_enable(struct device *dev, bool enable) +static inline bool device_may_wakeup(struct device *dev) { - dev->power.should_wakeup = enable; + return false; } -static inline bool device_may_wakeup(struct device *dev) +static inline struct wakeup_source *wakeup_source_create(const char *name) { - return dev->power.can_wakeup && dev->power.should_wakeup; + return NULL; } -#else /* !CONFIG_PM */ +static inline void wakeup_source_destroy(struct wakeup_source *ws) {} + +static inline void wakeup_source_add(struct wakeup_source *ws) {} -/* For some reason the following routines work even without CONFIG_PM */ -static inline void device_init_wakeup(struct device *dev, bool val) +static inline void wakeup_source_remove(struct wakeup_source *ws) {} + +static inline struct wakeup_source *wakeup_source_register(const char *name) { - dev->power.can_wakeup = val; + return NULL; } -static inline void device_set_wakeup_capable(struct device *dev, bool capable) +static inline void wakeup_source_unregister(struct wakeup_source *ws) {} + +static inline int device_wakeup_enable(struct device *dev) { - dev->power.can_wakeup = capable; + return -EINVAL; } -static inline bool device_can_wakeup(struct device *dev) +static inline int device_wakeup_disable(struct device *dev) { - 
return dev->power.can_wakeup; + return 0; } -static inline void device_set_wakeup_enable(struct device *dev, bool enable) +static inline int device_init_wakeup(struct device *dev, bool val) { + dev->power.can_wakeup = val; + return val ? -EINVAL : 0; } -static inline bool device_may_wakeup(struct device *dev) + +static inline int device_set_wakeup_enable(struct device *dev, bool enable) { - return false; + return -EINVAL; } -#endif /* !CONFIG_PM */ +static inline void __pm_stay_awake(struct wakeup_source *ws) {} + +static inline void pm_stay_awake(struct device *dev) {} + +static inline void __pm_relax(struct wakeup_source *ws) {} + +static inline void pm_relax(struct device *dev) {} + +static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) {} + +static inline void pm_wakeup_event(struct device *dev, unsigned int msec) {} + +#endif /* !CONFIG_PM_SLEEP */ #endif /* _LINUX_PM_WAKEUP_H */ diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 4af270ec220..6b1712c5110 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -293,8 +293,8 @@ extern int unregister_pm_notifier(struct notifier_block *nb); extern bool events_check_enabled; extern bool pm_check_wakeup_events(void); -extern bool pm_get_wakeup_count(unsigned long *count); -extern bool pm_save_wakeup_count(unsigned long count); +extern bool pm_get_wakeup_count(unsigned int *count); +extern bool pm_save_wakeup_count(unsigned int count); #else /* !CONFIG_PM_SLEEP */ static inline int register_pm_notifier(struct notifier_block *nb) diff --git a/kernel/power/main.c b/kernel/power/main.c index f06ad6eff37..6b12a0cf4d9 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -237,18 +237,18 @@ static ssize_t wakeup_count_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - unsigned long val; + unsigned int val; - return pm_get_wakeup_count(&val) ? sprintf(buf, "%lu\n", val) : -EINTR; + return pm_get_wakeup_count(&val) ? sprintf(buf, "%u\n", val) : -EINTR; } static ssize_t wakeup_count_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t n) { - unsigned long val; + unsigned int val; - if (sscanf(buf, "%lu", &val) == 1) { + if (sscanf(buf, "%u", &val) == 1) { if (pm_save_wakeup_count(val)) return n; } -- cgit v1.2.3-70-g09d2 From d0941ead3fdd31aafff992d211bcefdbff1eaedb Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 28 Sep 2010 23:31:22 +0200 Subject: PM / Hibernate: Make some boot messages look less scary The hibernate resume code checks if there is an image to resume from on every boot and, if the kernel is built with CONFIG_PM_DEBUG set and the image is not present, it prints some scary messages suggesting there was a boot error of some sort. Apparently, some users are confused by them, so make them look less scary and adjust the other hibernate resume debug messages to match them. Signed-off-by: Rafael J. 
Wysocki --- kernel/power/hibernate.c | 12 ++++++------ kernel/power/swap.c | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 6c9c9dc48c7..657272e91d0 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -708,7 +708,7 @@ static int software_resume(void) goto Unlock; } - pr_debug("PM: Checking image partition %s\n", resume_file); + pr_debug("PM: Checking hibernation image partition %s\n", resume_file); /* Check if the device is there */ swsusp_resume_device = name_to_dev_t(resume_file); @@ -733,10 +733,10 @@ static int software_resume(void) } Check_image: - pr_debug("PM: Resume from partition %d:%d\n", + pr_debug("PM: Hibernation image partition %d:%d present\n", MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device)); - pr_debug("PM: Checking hibernation image.\n"); + pr_debug("PM: Looking for hibernation image.\n"); error = swsusp_check(); if (error) goto Unlock; @@ -768,14 +768,14 @@ static int software_resume(void) goto Done; } - pr_debug("PM: Reading hibernation image.\n"); + pr_debug("PM: Loading hibernation image.\n"); error = swsusp_read(&flags); swsusp_close(FMODE_READ); if (!error) hibernation_restore(flags & SF_PLATFORM_MODE); - printk(KERN_ERR "PM: Restore failed, recovering.\n"); + printk(KERN_ERR "PM: Failed to load hibernation image, recovering.\n"); swsusp_free(); thaw_processes(); Done: @@ -788,7 +788,7 @@ static int software_resume(void) /* For success case, the suspend path will release the lock */ Unlock: mutex_unlock(&pm_mutex); - pr_debug("PM: Resume from disk failed.\n"); + pr_debug("PM: Hibernation image not present or could not be loaded.\n"); return error; close_finish: swsusp_close(FMODE_READ); diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 3dc0552cddf..2dfa87869b4 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -929,13 +929,13 @@ put: if (error) blkdev_put(hib_resume_bdev, FMODE_READ); else - pr_debug("PM: Signature found, resuming\n"); + pr_debug("PM: Image signature found, resuming\n"); } else { error = PTR_ERR(hib_resume_bdev); } if (error) - pr_debug("PM: Error %d checking image file\n", error); + pr_debug("PM: Image not found (code %d)\n", error); return error; } -- cgit v1.2.3-70-g09d2 From dbeeec5fe868f2e2e92fe94daa2c5a047240fdc4 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 4 Oct 2010 22:07:32 +0200 Subject: PM: Allow wakeup events to abort freezing of tasks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If there is a wakeup event during the freezing of tasks, suspend or hibernation will fail anyway. Since try_to_freeze_tasks() can take up to 20 seconds to complete or fail, aborting it as soon as a wakeup event is detected improves the worst case wakeup latency. Based on a patch from Arve Hjønnevåg. Signed-off-by: Rafael J.
Wysocki Acked-by: Pavel Machek --- include/linux/suspend.h | 2 ++ kernel/power/process.c | 11 +++++++++-- 2 files changed, 11 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 6b1712c5110..26697514c5e 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -308,6 +308,8 @@ static inline int unregister_pm_notifier(struct notifier_block *nb) } #define pm_notifier(fn, pri) do { (void)(fn); } while (0) + +static inline bool pm_check_wakeup_events(void) { return true; } #endif /* !CONFIG_PM_SLEEP */ extern struct mutex pm_mutex; diff --git a/kernel/power/process.c b/kernel/power/process.c index 028a99598f4..e50b4c1b2a0 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -40,6 +40,7 @@ static int try_to_freeze_tasks(bool sig_only) struct timeval start, end; u64 elapsed_csecs64; unsigned int elapsed_csecs; + bool wakeup = false; do_gettimeofday(&start); @@ -78,6 +79,11 @@ static int try_to_freeze_tasks(bool sig_only) if (!todo || time_after(jiffies, end_time)) break; + if (!pm_check_wakeup_events()) { + wakeup = true; + break; + } + /* * We need to retry, but first give the freezing tasks some * time to enter the refrigerator. @@ -97,8 +103,9 @@ static int try_to_freeze_tasks(bool sig_only) * but it cleans up leftover PF_FREEZE requests. */ printk("\n"); - printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds " + printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds " "(%d tasks refusing to freeze, wq_busy=%d):\n", + wakeup ? "aborted" : "failed", elapsed_csecs / 100, elapsed_csecs % 100, todo - wq_busy, wq_busy); @@ -107,7 +114,7 @@ static int try_to_freeze_tasks(bool sig_only) read_lock(&tasklist_lock); do_each_thread(g, p) { task_lock(p); - if (freezing(p) && !freezer_should_skip(p)) + if (!wakeup && freezing(p) && !freezer_should_skip(p)) sched_show_task(p); cancel_freezing(p); task_unlock(p); -- cgit v1.2.3-70-g09d2 From 3624eb04c24861ab296842414f9752a393e68372 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 4 Oct 2010 22:08:12 +0200 Subject: PM / Hibernate: Modify signature used to mark swap Since we are adding compression to the kernel's hibernate code, change the signature used by it to mark swap spaces, so that earlier kernels don't attempt to restore compressed images they cannot handle. Signed-off-by: Rafael J.
Wysocki Acked-by: Pavel Machek --- kernel/power/swap.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 2dfa87869b4..916eaa79039 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -29,7 +29,7 @@ #include "power.h" -#define SWSUSP_SIG "S1SUSPEND" +#define HIBERNATE_SIG "LINHIB0001" /* * The swap map is a data structure used for keeping track of each page @@ -195,7 +195,7 @@ static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags) if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) || !memcmp("SWAPSPACE2",swsusp_header->sig, 10)) { memcpy(swsusp_header->orig_sig,swsusp_header->sig, 10); - memcpy(swsusp_header->sig,SWSUSP_SIG, 10); + memcpy(swsusp_header->sig, HIBERNATE_SIG, 10); swsusp_header->image = handle->first_sector; swsusp_header->flags = flags; error = hib_bio_write_page(swsusp_resume_block, @@ -916,7 +916,7 @@ int swsusp_check(void) if (error) goto put; - if (!memcmp(SWSUSP_SIG, swsusp_header->sig, 10)) { + if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) { memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10); /* Reset swap signature now */ error = hib_bio_write_page(swsusp_resume_block, -- cgit v1.2.3-70-g09d2 From d33ac60beaf2c7dee5cd90aba7c1eb385dd70937 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Tue, 12 Oct 2010 00:00:25 +0200 Subject: PM: Add sysfs attr for rechecking dev hash from PM trace If the device which fails to resume is part of a loadable kernel module it won't be checked at startup against the magic number stored in the RTC. Add a read-only sysfs attribute /sys/power/pm_trace_dev_match which contains a list of newline separated devices (usually just the one) which currently match the last magic number. This allows the device which is failing to resume to be found after the modules are loaded again. Signed-off-by: James Hogan Signed-off-by: Rafael J. Wysocki --- Documentation/ABI/testing/sysfs-power | 29 +++++++++++++++++++++++++++++ Documentation/power/s2ram.txt | 7 +++++++ drivers/base/power/trace.c | 31 +++++++++++++++++++++++++++++++ include/linux/resume-trace.h | 2 ++ kernel/power/main.c | 18 ++++++++++++++++++ 5 files changed, 87 insertions(+) (limited to 'kernel') diff --git a/Documentation/ABI/testing/sysfs-power b/Documentation/ABI/testing/sysfs-power index 2875f1f74a0..194ca446ac2 100644 --- a/Documentation/ABI/testing/sysfs-power +++ b/Documentation/ABI/testing/sysfs-power @@ -99,9 +99,38 @@ Description: dmesg -s 1000000 | grep 'hash matches' + If you do not get any matches (or they appear to be false + positives), it is possible that the last PM event point + referred to a device created by a loadable kernel module. In + this case cat /sys/power/pm_trace_dev_match (see below) after + your system is started up and the kernel modules are loaded. + CAUTION: Using it will cause your machine's real-time (CMOS) clock to be set to a random invalid time after a resume. +What: /sys/power/pm_trace_dev_match +Date: October 2010 +Contact: James Hogan +Description: + The /sys/power/pm_trace_dev_match file contains the name of the + device associated with the last PM event point saved in the RTC + across reboots when pm_trace has been used. More precisely it + contains the list of current devices (including those + registered by loadable kernel modules since boot) which match + the device hash in the RTC at boot, with a newline after each + one.
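+
+	For example (illustrative output only, not from the original patch;
+	the reported name depends on which device matches the hash):
+
+		# cat /sys/power/pm_trace_dev_match
+		usb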
+ + The advantage of this file over the hash matches printed to the + kernel log (see /sys/power/pm_trace) is that it includes + devices created after boot by loadable kernel modules. + + Due to the small hash size necessary to fit in the RTC, it is + possible that more than one device matches the hash, in which + case further investigation is required to determine which + device is causing the problem. Note that genuine RTC clock + values (such as when pm_trace has not been used) can still + match a device and output its name here. + What: /sys/power/pm_async Date: January 2009 Contact: Rafael J. Wysocki diff --git a/Documentation/power/s2ram.txt b/Documentation/power/s2ram.txt index 514b94fc931..1bdfa044377 100644 --- a/Documentation/power/s2ram.txt +++ b/Documentation/power/s2ram.txt @@ -49,6 +49,13 @@ machine that doesn't boot) is: device (lspci and /sys/devices/pci* is your friend), and see if you can fix it, disable it, or trace into its resume function. + If no device matches the hash (or any matches appear to be false positives), + the culprit may be a device from a loadable kernel module that is not loaded + until after the hash is checked. You can check the hash against the current + devices again after more modules are loaded using sysfs: + + cat /sys/power/pm_trace_dev_match + For example, the above happens to be the VGA device on my EVO, which I used to run with "radeonfb" (it's an ATI Radeon mobility). It turns out that "radeonfb" simply cannot resume that device - it tries to set the diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c index 17e24e3f442..9f4258df4cf 100644 --- a/drivers/base/power/trace.c +++ b/drivers/base/power/trace.c @@ -207,6 +207,37 @@ static int show_dev_hash(unsigned int value) static unsigned int hash_value_early_read; +int show_trace_dev_match(char *buf, size_t size) +{ + unsigned int value = hash_value_early_read / (USERHASH * FILEHASH); + int ret = 0; + struct list_head *entry; + + /* + * It's possible that multiple devices will match the hash and we can't + * tell which is the culprit, so it's best to output them all.
+ */ + device_pm_lock(); + entry = dpm_list.prev; + while (size && entry != &dpm_list) { + struct device *dev = to_device(entry); + unsigned int hash = hash_string(DEVSEED, dev_name(dev), + DEVHASH); + if (hash == value) { + int len = snprintf(buf, size, "%s\n", + dev_driver_string(dev)); + if (len > size) + len = size; + buf += len; + ret += len; + size -= len; + } + entry = entry->prev; + } + device_pm_unlock(); + return ret; +} + static int early_resume_init(void) { hash_value_early_read = read_magic_time(); diff --git a/include/linux/resume-trace.h b/include/linux/resume-trace.h index bc8c3881c72..f31db236878 100644 --- a/include/linux/resume-trace.h +++ b/include/linux/resume-trace.h @@ -3,6 +3,7 @@ #ifdef CONFIG_PM_TRACE #include +#include extern int pm_trace_enabled; @@ -14,6 +15,7 @@ static inline int pm_trace_is_enabled(void) struct device; extern void set_trace_device(struct device *); extern void generate_resume_trace(const void *tracedata, unsigned int user); +extern int show_trace_dev_match(char *buf, size_t size); #define TRACE_DEVICE(dev) do { \ if (pm_trace_enabled) \ diff --git a/kernel/power/main.c b/kernel/power/main.c index 6b12a0cf4d9..7b5db6a8561 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -281,12 +281,30 @@ pm_trace_store(struct kobject *kobj, struct kobj_attribute *attr, } power_attr(pm_trace); + +static ssize_t pm_trace_dev_match_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + return show_trace_dev_match(buf, PAGE_SIZE); +} + +static ssize_t +pm_trace_dev_match_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t n) +{ + return -EINVAL; +} + +power_attr(pm_trace_dev_match); + #endif /* CONFIG_PM_TRACE */ static struct attribute * g[] = { &state_attr.attr, #ifdef CONFIG_PM_TRACE &pm_trace_attr.attr, + &pm_trace_dev_match_attr.attr, #endif #ifdef CONFIG_PM_SLEEP &pm_async_attr.attr, -- cgit v1.2.3-70-g09d2 From e1f60b292ffd61151403327aa19ff7a1871820bd Mon Sep 17 00:00:00 2001 From: Nishanth Menon Date: Wed, 13 Oct 2010 00:13:10 +0200 Subject: PM: Introduce library for device-specific OPPs (v7) SoCs have a standard set of tuples consisting of frequency and voltage pairs that the device will support per voltage domain. These are called Operating Performance Points or OPPs. The actual definitions of OPP vary over silicon versions. For a specific domain, we can have a set of {frequency, voltage} pairs. As the kernel boots and more information is available, a default set of these are activated based on the precise nature of the device. Further on operation, based on conditions prevailing in the system (such as temperature), some OPP availability may be temporarily controlled by the SoC frameworks. To implement an OPP, some sort of power management support is necessary, hence this library depends on CONFIG_PM. Contributions include: Sanjeev Premi for the initial concept: http://patchwork.kernel.org/patch/50998/ Kevin Hilman for converting original design to device-based. Kevin Hilman and Paul Walmsley for cleaning up many of the function abstractions, improvements and data structure handling. Romit Dasgupta for using enums instead of opp pointers. Thara Gopinath, Eduardo Valentin and Vishwanath BS for fixes and cleanups. Linus Walleij for recommending this layer be made generic for usage in other architectures beyond OMAP and ARM. Mark Brown, Andrew Morton, Rafael J. Wysocki, Paul E. McKenney for valuable improvements.
Discussions and comments from: http://marc.info/?l=linux-omap&m=126033945313269&w=2 http://marc.info/?l=linux-omap&m=125482970102327&w=2 http://marc.info/?t=125809247500002&r=1&w=2 http://marc.info/?l=linux-omap&m=126025973426007&w=2 http://marc.info/?t=128152609200064&r=1&w=2 http://marc.info/?t=128468723000002&r=1&w=2 incorporated. v1: http://marc.info/?t=128468723000002&r=1&w=2 Signed-off-by: Nishanth Menon Signed-off-by: Kevin Hilman Signed-off-by: Rafael J. Wysocki --- Documentation/power/00-INDEX | 2 + Documentation/power/opp.txt | 375 ++++++++++++++++++++++++++ drivers/base/power/Makefile | 1 + drivers/base/power/opp.c | 628 +++++++++++++++++++++++++++++++++++++++++++ include/linux/opp.h | 105 ++++++++ kernel/power/Kconfig | 14 + 6 files changed, 1125 insertions(+) create mode 100644 Documentation/power/opp.txt create mode 100644 drivers/base/power/opp.c create mode 100644 include/linux/opp.h (limited to 'kernel') diff --git a/Documentation/power/00-INDEX b/Documentation/power/00-INDEX index fb742c213c9..45e9d4a9128 100644 --- a/Documentation/power/00-INDEX +++ b/Documentation/power/00-INDEX @@ -14,6 +14,8 @@ interface.txt - Power management user interface in /sys/power notifiers.txt - Registering suspend notifiers in device drivers +opp.txt + - Operating Performance Point library pci.txt - How the PCI Subsystem Does Power Management pm_qos_interface.txt diff --git a/Documentation/power/opp.txt b/Documentation/power/opp.txt new file mode 100644 index 00000000000..44d87ad3cea --- /dev/null +++ b/Documentation/power/opp.txt @@ -0,0 +1,375 @@ +*=============* +* OPP Library * +*=============* + +(C) 2009-2010 Nishanth Menon , Texas Instruments Incorporated + +Contents +-------- +1. Introduction +2. Initial OPP List Registration +3. OPP Search Functions +4. OPP Availability Control Functions +5. OPP Data Retrieval Functions +6. Cpufreq Table Generation +7. Data Structures + +1. Introduction +=============== +Complex SoCs of today consist of multiple sub-modules working in conjunction. +In an operational system executing varied use cases, not all modules in the SoC +need to function at their highest performing frequency all the time. To +facilitate this, sub-modules in a SoC are grouped into domains, allowing some +domains to run at lower voltage and frequency while other domains are loaded +more. The set of discrete tuples consisting of frequency and voltage pairs that +the device will support per domain are called Operating Performance Points or +OPPs. + +OPP library provides a set of helper functions to organize and query the OPP +information. The library is located in drivers/base/power/opp.c and the header +is located in include/linux/opp.h. OPP library can be enabled by enabling +CONFIG_PM_OPP from power management menuconfig menu. OPP library depends on +CONFIG_PM as certain SoCs such as Texas Instruments' OMAP framework allow +optionally booting at a certain OPP without needing cpufreq. + +Typical usage of the OPP library is as follows: +(users) -> registers a set of default OPPs -> (library) +SoC framework -> modifies certain OPPs when required -> OPP layer + -> queries to search/retrieve information -> + +OPP layer expects each domain to be represented by a unique device pointer. SoC +framework registers a set of initial OPPs per device with the OPP layer. This +list is expected to be an optimally small number, typically around 5 per device. +This initial list contains a set of OPPs that the framework expects to be safely +enabled by default in the system.
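+
+A minimal sketch of this flow (a hypothetical "dev" pointer and example
+frequency/voltage values only, not taken from any real SoC framework):
+
+	/* boot: the SoC framework registers a small set of default OPPs */
+	opp_add(dev, 600000000, 1050000);
+	opp_add(dev, 800000000, 1200000);
+
+	/* runtime: users query the list under an RCU read lock */
+	rcu_read_lock();
+	opp = opp_find_freq_ceil(dev, &freq);
+	if (!IS_ERR(opp))
+		v = opp_get_voltage(opp);
+	rcu_read_unlock();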
+ +Note on OPP Availability: +------------------------ +As the system proceeds to operate, SoC framework may choose to make certain +OPPs available or not available on each device based on various external +factors. Example usage: Thermal management or other exceptional situations where +SoC framework might choose to disable a higher frequency OPP to safely continue +operations until that OPP could be re-enabled if possible. + +OPP library facilitates this concept in its implementation. The following +operational functions operate only on available opps: +opp_find_freq_{ceil, floor}, opp_get_voltage, opp_get_freq, opp_get_opp_count +and opp_init_cpufreq_table + +opp_find_freq_exact is meant to be used to find the opp pointer which can then +be used with the opp_enable/disable functions to make an opp available as required. + +WARNING: Users of OPP library should refresh their availability count using +get_opp_count if opp_enable/disable functions are invoked for a device; the +exact mechanism to trigger these, or the notification mechanism to other +dependent subsystems such as cpufreq, is left to the discretion of the SoC- +specific framework which uses the OPP library. Similar care needs to be taken +to refresh the cpufreq table after these operations. + +WARNING on OPP List locking mechanism: +------------------------------------------------- +OPP library uses RCU for exclusivity. RCU allows the query functions to operate +in multiple contexts and this synchronization mechanism is optimal for the read- +intensive operations on the data structure that the OPP library caters to. + +To ensure that the data retrieved are sane, users such as the SoC framework +should ensure that the sections of code operating on OPP queries are locked +using RCU read locks. The opp_find_freq_{exact,ceil,floor}, +opp_get_{voltage, freq, opp_count} fall into this category. + +opp_{add,enable,disable} are updaters which use a mutex and implement their own +RCU locking mechanisms. opp_init_cpufreq_table acts as an updater and uses a +mutex to implement the RCU updater strategy. These functions should *NOT* be called +under RCU locks or in other contexts that prevent blocking functions in RCU or +mutex operations from working. + +2. Initial OPP List Registration +================================ +The SoC implementation calls opp_add function iteratively to add OPPs per +device. It is expected that the SoC framework will register the OPP entries +optimally; typical numbers are expected to be less than 5. The list generated by +registering the OPPs is maintained by OPP library throughout the device +operation. The SoC framework can subsequently control the availability of the +OPPs dynamically using the opp_enable / disable functions. + +opp_add - Add a new OPP for a specific domain represented by the device pointer. + The OPP is defined using the frequency and voltage. Once added, the OPP + is assumed to be available and control of its availability can be done + with the opp_enable/disable functions. OPP library internally stores + and manages this information in the opp struct. This function may be + used by the SoC framework to define an optimal list as per the demands of + the SoC usage environment. + + WARNING: Do not use this function in interrupt context. + + Example: + soc_pm_init() + { + /* Do things */ + r = opp_add(mpu_dev, 1000000, 900000); + if (r) { + pr_err("%s: unable to register mpu opp(%d)\n", __func__, r); + goto no_cpufreq; + } + /* Do cpufreq things */ + no_cpufreq: + /* Do remaining things */ + } + +3.
+3. OPP Search Functions
+=======================
+High level frameworks such as cpufreq operate on frequencies. To map the
+frequency back to the corresponding OPP, OPP library provides handy functions
+to search the OPP list that OPP library internally manages. These search
+functions return the matching pointer representing the opp if a match is
+found, else return an error. These errors are expected to be handled by
+standard error checks such as IS_ERR() and appropriate actions taken by the
+caller.
+
+opp_find_freq_exact - Search for an OPP based on an *exact* frequency and
+	availability. This function is especially useful to enable an OPP
+	which is not available by default.
+	Example: In a case when SoC framework detects a situation where a
+	higher frequency could be made available, it can use this function to
+	find the OPP prior to calling opp_enable to actually make it
+	available.
+	 rcu_read_lock();
+	 opp = opp_find_freq_exact(dev, 1000000000, false);
+	 rcu_read_unlock();
+	 /* don't operate on the pointer.. just do a sanity check.. */
+	 if (IS_ERR(opp)) {
+		pr_err("frequency not disabled!\n");
+		/* trigger appropriate actions.. */
+	 } else {
+		opp_enable(dev, 1000000000);
+	 }
+
+	NOTE: This is the only search function that operates on OPPs which are
+	not available.
+
+opp_find_freq_floor - Search for an available OPP which is *at most* the
+	provided frequency. This function is useful while searching for a
+	lesser match OR operating on OPP information in the order of
+	decreasing frequency.
+	Example: To find the highest opp for a device:
+	 freq = ULONG_MAX;
+	 rcu_read_lock();
+	 opp_find_freq_floor(dev, &freq);
+	 rcu_read_unlock();
+
+opp_find_freq_ceil - Search for an available OPP which is *at least* the
+	provided frequency. This function is useful while searching for a
+	higher match OR operating on OPP information in the order of
+	increasing frequency.
+	Example 1: To find the lowest opp for a device:
+	 freq = 0;
+	 rcu_read_lock();
+	 opp_find_freq_ceil(dev, &freq);
+	 rcu_read_unlock();
+	Example 2: A simplified implementation of a SoC cpufreq_driver->target:
+	 soc_cpufreq_target(..)
+	 {
+		/* Do stuff like policy checks etc. */
+		/* Find the best frequency match for the req */
+		rcu_read_lock();
+		opp = opp_find_freq_ceil(dev, &freq);
+		rcu_read_unlock();
+		if (!IS_ERR(opp))
+			soc_switch_to_freq_voltage(freq);
+		else
+			/* do something when we can't satisfy the req */
+		/* do other stuff */
+	 }
+
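Beyond the single-lookup examples above, a short illustrative sketch of
walking all available OPPs in decreasing order with opp_find_freq_floor
(dev is assumed to be the domain's device pointer):

	 unsigned long freq = ULONG_MAX;
	 struct opp *opp;

	 rcu_read_lock();
	 while (!IS_ERR(opp = opp_find_freq_floor(dev, &freq))) {
		/* freq was refreshed to the matched rate */
		pr_info("available opp: %lu Hz\n", freq);
		if (!freq)
			break;
		freq--;	/* continue strictly below the last match */
	 }
	 rcu_read_unlock();
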
+4. OPP Availability Control Functions
+=====================================
+A default OPP list registered with the OPP library may not cater to all
+possible situations. The OPP library provides a set of functions to modify the
+availability of an OPP within the OPP list. This allows SoC frameworks to have
+fine grained dynamic control of which sets of OPPs are operationally available.
+These functions are intended to *temporarily* remove an OPP in conditions such
+as thermal considerations (e.g. don't use OPPx until the temperature drops).
+
+WARNING: Do not use these functions in interrupt context.
+
+opp_enable - Make an OPP available for operation.
+	Example: Let's say that the 1GHz OPP is to be made available only if
+	the SoC temperature is lower than a certain threshold. The SoC
+	framework implementation might choose to do something as follows:
+	 if (cur_temp < temp_low_thresh) {
+		/* Enable 1GHz if it was disabled */
+		rcu_read_lock();
+		opp = opp_find_freq_exact(dev, 1000000000, false);
+		rcu_read_unlock();
+		/* just error check */
+		if (!IS_ERR(opp))
+			ret = opp_enable(dev, 1000000000);
+		else
+			goto try_something_else;
+	 }
+
+opp_disable - Make an OPP unavailable for operation.
+	Example: Let's say that the 1GHz OPP is to be disabled if the
+	temperature exceeds a threshold value. The SoC framework
+	implementation might choose to do something as follows:
+	 if (cur_temp > temp_high_thresh) {
+		/* Disable 1GHz if it was enabled */
+		rcu_read_lock();
+		opp = opp_find_freq_exact(dev, 1000000000, true);
+		rcu_read_unlock();
+		/* just error check */
+		if (!IS_ERR(opp))
+			ret = opp_disable(dev, 1000000000);
+		else
+			goto try_something_else;
+	 }
+
+5. OPP Data Retrieval Functions
+===============================
+Since OPP library abstracts away the OPP information, a set of functions to
+pull information from the OPP structure is necessary. Once an OPP pointer is
+retrieved using the search functions, the following functions can be used by
+SoC framework to retrieve the information represented inside the OPP layer.
+
+opp_get_voltage - Retrieve the voltage represented by the opp pointer.
+	Example: At a cpufreq transition to a different frequency, SoC
+	framework needs to set the voltage represented by the OPP, using
+	the regulator framework, on the Power Management chip providing the
+	voltage.
+	 soc_switch_to_freq_voltage(freq)
+	 {
+		/* do things */
+		rcu_read_lock();
+		opp = opp_find_freq_ceil(dev, &freq);
+		v = opp_get_voltage(opp);
+		rcu_read_unlock();
+		if (v)
+			regulator_set_voltage(.., v);
+		/* do other things */
+	 }
+
+opp_get_freq - Retrieve the freq represented by the opp pointer.
+	Example: Let's say the SoC framework uses a couple of helper functions
+	through which we could pass opp pointers instead of additional
+	parameters, to handle quite a bit of data.
+	 soc_cpufreq_target(..)
+	 {
+		/* do things.. */
+		max_freq = ULONG_MAX;
+		rcu_read_lock();
+		max_opp = opp_find_freq_floor(dev, &max_freq);
+		requested_opp = opp_find_freq_ceil(dev, &freq);
+		if (!IS_ERR(max_opp) && !IS_ERR(requested_opp))
+			r = soc_test_validity(max_opp, requested_opp);
+		rcu_read_unlock();
+		/* do other things */
+	 }
+	 soc_test_validity(..)
+	 {
+		if (opp_get_voltage(max_opp) < opp_get_voltage(requested_opp))
+			return -EINVAL;
+		if (opp_get_freq(max_opp) < opp_get_freq(requested_opp))
+			return -EINVAL;
+		/* do things.. */
+	 }
+
+opp_get_opp_count - Retrieve the number of available opps for a device.
+	Example: Let's say a co-processor in the SoC needs to know the
+	available frequencies in a table; the main processor can notify it as
+	follows:
+	 soc_notify_coproc_available_frequencies()
+	 {
+		/* Do things */
+		rcu_read_lock();
+		num_available = opp_get_opp_count(dev);
+		speeds = kzalloc(sizeof(u32) * num_available, GFP_KERNEL);
+		/* populate the table in increasing order */
+		freq = 0;
+		while (!IS_ERR(opp = opp_find_freq_ceil(dev, &freq))) {
+			speeds[i] = freq;
+			freq++;
+			i++;
+		}
+		rcu_read_unlock();
+
+		soc_notify_coproc(AVAILABLE_FREQs, speeds, num_available);
+		/* Do other things */
+	 }
+
+6. Cpufreq Table Generation
+===========================
+opp_init_cpufreq_table - The cpufreq framework is typically initialized with
+	cpufreq_frequency_table_cpuinfo, which is provided with the list of
+	frequencies that are available for operation.
+	This function provides a ready-to-use conversion routine to translate
+	the OPP layer's internal information about the available frequencies
+	into a format readily providable to cpufreq.
+
+	WARNING: Do not use this function in interrupt context.
+
+	Example:
+	 soc_pm_init()
+	 {
+		/* Do things */
+		r = opp_init_cpufreq_table(dev, &freq_table);
+		if (!r)
+			cpufreq_frequency_table_cpuinfo(policy, freq_table);
+		/* Do other things */
+	 }
+
+	NOTE: This function is available only if CONFIG_CPU_FREQ is enabled in
+	addition to CONFIG_PM, as the power management feature is required to
+	dynamically scale voltage and frequency in a system.
+
+7. Data Structures
+==================
+Typically an SoC contains multiple variable voltage domains. Each domain is
+represented by a device pointer. The relationship to OPP can be represented as
+follows:
+SoC
+ |- device 1
+ |	|- opp 1 (availability, freq, voltage)
+ |	|- opp 2 ..
+ ...	...
+ |	`- opp n ..
+ |- device 2
+ ...
+ `- device m
+
+OPP library maintains an internal list that the SoC framework populates and
+that is accessed by various functions as described above. However, the
+structures representing the actual OPPs and domains are internal to the OPP
+library itself to allow for suitable abstraction reusable across systems.
+
+struct opp - The internal data structure of OPP library which is used to
+	represent an OPP. In addition to the freq, voltage, availability
+	information, it also contains internal book keeping information
+	required for the OPP library to operate on. A pointer to this
+	structure is provided back to users such as SoC framework to be used
+	as an identifier for an OPP in interactions with the OPP layer.
+
+	WARNING: The struct opp pointer should not be parsed or modified by
+	the users. The defaults for an instance are populated by opp_add, but
+	the availability of the OPP can be modified by the opp_enable/disable
+	functions.
+
+struct device - This is used to identify a domain to the OPP layer. The
+	nature of the device and its implementation is left to the user of
+	OPP library such as the SoC framework.
+
+Overall, in a simplistic view, the data structure operations are represented
+as follows:
+
+Initialization / modification:
+            +-----+        /- opp_enable
+opp_add --> | opp | <-------
+  |         +-----+        \- opp_disable
+  \-------> domain_info(device)
+
+Search functions:
+             /-- opp_find_freq_ceil  ---\   +-----+
+domain_info<---- opp_find_freq_exact -----> | opp |
+             \-- opp_find_freq_floor ---/   +-----+
+
+Retrieval functions:
++-----+     /- opp_get_voltage
+| opp | <---
++-----+     \- opp_get_freq
+
+domain_info <- opp_get_opp_count
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index cbccf9a3cee..abe46edfe5b 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_PM_SLEEP)	+= main.o wakeup.o
 obj-$(CONFIG_PM_RUNTIME)	+= runtime.o
 obj-$(CONFIG_PM_OPS)	+= generic_ops.o
 obj-$(CONFIG_PM_TRACE_RTC)	+= trace.o
+obj-$(CONFIG_PM_OPP)	+= opp.o
 
 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
 ccflags-$(CONFIG_PM_VERBOSE)   += -DDEBUG
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
new file mode 100644
index 00000000000..2bb9b4cf59d
--- /dev/null
+++ b/drivers/base/power/opp.c
@@ -0,0 +1,628 @@
+/*
+ * Generic OPP Interface
+ *
+ * Copyright (C) 2009-2010 Texas Instruments Incorporated.
+ *	Nishanth Menon
+ *	Romit Dasgupta
+ *	Kevin Hilman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/*
+ * Internal data structure organization with the OPP layer library is as
+ * follows:
+ * dev_opp_list (root)
+ *	|- device 1 (represents voltage domain 1)
+ *	|	|- opp 1 (availability, freq, voltage)
+ *	|	|- opp 2 ..
+ *	...	...
+ *	|	`- opp n ..
+ *	|- device 2 (represents the next voltage domain)
+ *	...
+ *	`- device m (represents mth voltage domain)
+ * device 1, 2.. are represented by dev_opp structure while each opp
+ * is represented by the opp structure.
+ */
+
+/**
+ * struct opp - Generic OPP description structure
+ * @node:	opp list node. The nodes are maintained throughout the lifetime
+ *		of boot. It is expected that only an optimal set of OPPs is
+ *		added to the library by the SoC framework.
+ *		RCU usage: the opp list is traversed with RCU locks. node
+ *		modification is possible at runtime, hence the modifications
+ *		are protected by the dev_opp_list_lock for integrity.
+ *		IMPORTANT: the opp nodes should be maintained in increasing
+ *		order.
+ * @available:	true/false - marks if this OPP is available or not
+ * @rate:	Frequency in hertz
+ * @u_volt:	Nominal voltage in microvolts corresponding to this OPP
+ * @dev_opp:	points back to the device_opp struct this opp belongs to
+ *
+ * This structure stores the OPP information for a given device.
+ */
+struct opp {
+	struct list_head node;
+
+	bool available;
+	unsigned long rate;
+	unsigned long u_volt;
+
+	struct device_opp *dev_opp;
+};
+
+/**
+ * struct device_opp - Device opp structure
+ * @node:	list node - contains the devices with OPPs that
+ *		have been registered. Nodes once added are not modified in this
+ *		list.
+ *		RCU usage: nodes are not modified in the list of device_opp,
+ *		however addition is possible and is secured by
+ *		dev_opp_list_lock
+ * @dev:	device pointer
+ * @opp_list:	list of opps
+ *
+ * This is an internal data structure maintaining the link to opps attached to
+ * a device. This structure is not meant to be shared with users as it is
+ * meant for book keeping and private to the OPP library.
+ */
+struct device_opp {
+	struct list_head node;
+
+	struct device *dev;
+	struct list_head opp_list;
+};
+
+/*
+ * The root of the list of all devices. All device_opp structures branch off
+ * from here, with each device_opp containing the list of opps it supports in
+ * various states of availability.
+ */
+static LIST_HEAD(dev_opp_list);
+/* Lock to allow exclusive modification to the device and opp lists */
+static DEFINE_MUTEX(dev_opp_list_lock);
+
+/**
+ * find_device_opp() - find device_opp struct using device pointer
+ * @dev:	device pointer used to lookup device OPPs
+ *
+ * Search list of device OPPs for one containing matching device. Does an RCU
+ * reader operation to grab the pointer needed.
+ *
+ * Returns pointer to 'struct device_opp' if found, otherwise -ENODEV or
+ * -EINVAL based on type of error.
+ *
+ * Locking: This function must be called under rcu_read_lock(). device_opp
+ * is an RCU protected pointer. This means that device_opp is valid as long
+ * as we are under RCU lock.
+ */
+static struct device_opp *find_device_opp(struct device *dev)
+{
+	struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
+
+	if (unlikely(IS_ERR_OR_NULL(dev))) {
+		pr_err("%s: Invalid parameters\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) {
+		if (tmp_dev_opp->dev == dev) {
+			dev_opp = tmp_dev_opp;
+			break;
+		}
+	}
+
+	return dev_opp;
+}
+
+/**
+ * opp_get_voltage() - Gets the voltage corresponding to an available opp
+ * @opp:	opp for which the voltage has to be returned
+ *
+ * Return voltage in microvolts corresponding to the opp, else
+ * return 0
+ *
+ * Locking: This function must be called under rcu_read_lock(). opp is an rcu
+ * protected pointer. This means that the opp which could have been fetched by
+ * the opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
+ * under RCU lock. The pointer returned by the opp_find_freq family must be
+ * used in the same section as the usage of this function with the pointer
+ * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
+ * pointer.
+ */
+unsigned long opp_get_voltage(struct opp *opp)
+{
+	struct opp *tmp_opp;
+	unsigned long v = 0;
+
+	tmp_opp = rcu_dereference(opp);
+	if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
+		pr_err("%s: Invalid parameters\n", __func__);
+	else
+		v = tmp_opp->u_volt;
+
+	return v;
+}
+
+/**
+ * opp_get_freq() - Gets the frequency corresponding to an available opp
+ * @opp:	opp for which the frequency has to be returned
+ *
+ * Return frequency in hertz corresponding to the opp, else
+ * return 0
+ *
+ * Locking: This function must be called under rcu_read_lock(). opp is an rcu
+ * protected pointer. This means that the opp which could have been fetched by
+ * the opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
+ * under RCU lock. The pointer returned by the opp_find_freq family must be
+ * used in the same section as the usage of this function with the pointer
+ * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
+ * pointer.
+ */
+unsigned long opp_get_freq(struct opp *opp)
+{
+	struct opp *tmp_opp;
+	unsigned long f = 0;
+
+	tmp_opp = rcu_dereference(opp);
+	if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
+		pr_err("%s: Invalid parameters\n", __func__);
+	else
+		f = tmp_opp->rate;
+
+	return f;
+}
+
+/**
+ * opp_get_opp_count() - Get number of opps available in the opp list
+ * @dev:	device for which we do this operation
+ *
+ * This function returns the number of available opps if there are any,
+ * else returns 0 if none or the corresponding error value.
+ *
+ * Locking: This function must be called under rcu_read_lock(). This function
+ * internally references two RCU protected structures: device_opp and opp which
+ * are safe as long as we are under a common RCU locked section.
+ */
+int opp_get_opp_count(struct device *dev)
+{
+	struct device_opp *dev_opp;
+	struct opp *temp_opp;
+	int count = 0;
+
+	dev_opp = find_device_opp(dev);
+	if (IS_ERR(dev_opp)) {
+		int r = PTR_ERR(dev_opp);
+		dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
+		return r;
+	}
+
+	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+		if (temp_opp->available)
+			count++;
+	}
+
+	return count;
+}
+
+/**
+ * opp_find_freq_exact() - search for an exact frequency
+ * @dev:		device for which we do this operation
+ * @freq:		frequency to search for
+ * @is_available:	true/false - match for available opp
+ *
+ * Searches for exact match in the opp list and returns pointer to the matching
+ * opp if found, else returns ERR_PTR in case of error and should be handled
+ * using IS_ERR.
+ *
+ * Note: available is a modifier for the search. If available=true, then the
+ * match is for an exact matching frequency which is available in the stored
+ * OPP table. If false, the match is for an exact frequency which is not
+ * available.
+ *
+ * This provides a mechanism to enable an opp which is not available currently
+ * or the opposite as well.
+ *
+ * Locking: This function must be called under rcu_read_lock(). opp is an rcu
+ * protected pointer. The reason for the same is that the opp pointer which is
+ * returned will remain valid for use with opp_get_{voltage, freq} only while
+ * under the locked area. The pointer returned must be used prior to unlocking
+ * with rcu_read_unlock() to maintain the integrity of the pointer.
+ */
+struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
+				bool available)
+{
+	struct device_opp *dev_opp;
+	struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
+
+	dev_opp = find_device_opp(dev);
+	if (IS_ERR(dev_opp)) {
+		int r = PTR_ERR(dev_opp);
+		dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
+		return ERR_PTR(r);
+	}
+
+	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+		if (temp_opp->available == available &&
+				temp_opp->rate == freq) {
+			opp = temp_opp;
+			break;
+		}
+	}
+
+	return opp;
+}
+
+/**
+ * opp_find_freq_ceil() - Search for a rounded ceil freq
+ * @dev:	device for which we do this operation
+ * @freq:	Start frequency
+ *
+ * Search for the matching ceil *available* OPP from a starting freq
+ * for a device.
+ *
+ * Returns matching *opp and refreshes *freq accordingly, else returns
+ * ERR_PTR in case of error and should be handled using IS_ERR.
+ *
+ * Locking: This function must be called under rcu_read_lock(). opp is an rcu
+ * protected pointer. The reason for the same is that the opp pointer which is
+ * returned will remain valid for use with opp_get_{voltage, freq} only while
+ * under the locked area. The pointer returned must be used prior to unlocking
+ * with rcu_read_unlock() to maintain the integrity of the pointer.
+ */
+struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
+{
+	struct device_opp *dev_opp;
+	struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
+
+	if (!dev || !freq) {
+		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
+		return ERR_PTR(-EINVAL);
+	}
+
+	dev_opp = find_device_opp(dev);
+	if (IS_ERR(dev_opp))
+		return opp;
+
+	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+		if (temp_opp->available && temp_opp->rate >= *freq) {
+			opp = temp_opp;
+			*freq = opp->rate;
+			break;
+		}
+	}
+
+	return opp;
+}
+
+/**
+ * opp_find_freq_floor() - Search for a rounded floor freq
+ * @dev:	device for which we do this operation
+ * @freq:	Start frequency
+ *
+ * Search for the matching floor *available* OPP from a starting freq
+ * for a device.
+ *
+ * Returns matching *opp and refreshes *freq accordingly, else returns
+ * ERR_PTR in case of error and should be handled using IS_ERR.
+ *
+ * Locking: This function must be called under rcu_read_lock(). opp is an rcu
+ * protected pointer. The reason for the same is that the opp pointer which is
+ * returned will remain valid for use with opp_get_{voltage, freq} only while
+ * under the locked area. The pointer returned must be used prior to unlocking
+ * with rcu_read_unlock() to maintain the integrity of the pointer.
+ */
+struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
+{
+	struct device_opp *dev_opp;
+	struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
+
+	if (!dev || !freq) {
+		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
+		return ERR_PTR(-EINVAL);
+	}
+
+	dev_opp = find_device_opp(dev);
+	if (IS_ERR(dev_opp))
+		return opp;
+
+	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+		if (temp_opp->available) {
+			/* go to the next node, before choosing prev */
+			if (temp_opp->rate > *freq)
+				break;
+			else
+				opp = temp_opp;
+		}
+	}
+	if (!IS_ERR(opp))
+		*freq = opp->rate;
+
+	return opp;
+}
+
+/**
+ * opp_add() - Add a new OPP entry for a device
+ * @dev:	device for which we do this operation
+ * @freq:	Frequency in Hz for this OPP
+ * @u_volt:	Voltage in uVolts for this OPP
+ *
+ * This function adds an opp definition to the opp list and returns status.
+ * The opp is made available by default and its availability can be controlled
+ * using the opp_enable/disable functions.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses the RCU updater strategy with mutex
+ * locks to keep the integrity of the internal data structures. Callers should
+ * ensure that this function is *NOT* called under RCU protection or in
+ * contexts where the mutex cannot be locked.
+ */
+int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
+{
+	struct device_opp *dev_opp = NULL;
+	struct opp *opp, *new_opp;
+	struct list_head *head;
+
+	/* allocate new OPP node */
+	new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL);
+	if (!new_opp) {
+		dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* Hold our list modification lock here */
+	mutex_lock(&dev_opp_list_lock);
+
+	/* Check for existing list for 'dev' */
+	dev_opp = find_device_opp(dev);
+	if (IS_ERR(dev_opp)) {
+		/*
+		 * Allocate a new device OPP table. In the infrequent case
+		 * where a new device is needed to be added, we pay this
+		 * penalty.
+		 */
+		dev_opp = kzalloc(sizeof(struct device_opp), GFP_KERNEL);
+		if (!dev_opp) {
+			mutex_unlock(&dev_opp_list_lock);
+			kfree(new_opp);
+			dev_warn(dev,
+				"%s: Unable to create device OPP structure\n",
+				__func__);
+			return -ENOMEM;
+		}
+
+		dev_opp->dev = dev;
+		INIT_LIST_HEAD(&dev_opp->opp_list);
+
+		/* Secure the device list modification */
+		list_add_rcu(&dev_opp->node, &dev_opp_list);
+	}
+
+	/* populate the opp table */
+	new_opp->dev_opp = dev_opp;
+	new_opp->rate = freq;
+	new_opp->u_volt = u_volt;
+	new_opp->available = true;
+
+	/* Insert new OPP in order of increasing frequency */
+	head = &dev_opp->opp_list;
+	list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
+		if (new_opp->rate < opp->rate)
+			break;
+		else
+			head = &opp->node;
+	}
+
+	list_add_rcu(&new_opp->node, head);
+	mutex_unlock(&dev_opp_list_lock);
+
+	return 0;
+}
+
+/**
+ * opp_set_availability() - helper to set the availability of an opp
+ * @dev:		device for which we do this operation
+ * @freq:		OPP frequency to modify availability
+ * @availability_req:	availability status requested for this opp
+ *
+ * Set the availability of an OPP with an RCU operation; opp_{enable,disable}
+ * share a common logic which is isolated here.
+ *
+ * Returns -EINVAL for bad pointers, -ENOMEM if no memory is available for the
+ * copy operation, and 0 if no modification was needed OR the modification was
+ * successful.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses the RCU updater strategy with mutex
+ * locks to keep the integrity of the internal data structures. Callers should
+ * ensure that this function is *NOT* called under RCU protection or in
+ * contexts where mutex locking or synchronize_rcu() blocking calls cannot be
+ * used.
+ */
+static int opp_set_availability(struct device *dev, unsigned long freq,
+		bool availability_req)
+{
+	struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
+	struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
+	int r = 0;
+
+	/* keep the node allocated */
+	new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL);
+	if (!new_opp) {
+		dev_warn(dev, "%s: Unable to create OPP\n", __func__);
+		return -ENOMEM;
+	}
+
+	mutex_lock(&dev_opp_list_lock);
+
+	/* Find the device_opp */
+	list_for_each_entry(tmp_dev_opp, &dev_opp_list, node) {
+		if (dev == tmp_dev_opp->dev) {
+			dev_opp = tmp_dev_opp;
+			break;
+		}
+	}
+	if (IS_ERR(dev_opp)) {
+		r = PTR_ERR(dev_opp);
+		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
+		goto unlock;
+	}
+
+	/* Do we have the frequency? */
+	list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
+		if (tmp_opp->rate == freq) {
+			opp = tmp_opp;
+			break;
+		}
+	}
+	if (IS_ERR(opp)) {
+		r = PTR_ERR(opp);
+		goto unlock;
+	}
+
+	/* Is update really needed? */
+	if (opp->available == availability_req)
+		goto unlock;
+	/* copy the old data over */
+	*new_opp = *opp;
+
+	/* plug in new node */
+	new_opp->available = availability_req;
+
+	list_replace_rcu(&opp->node, &new_opp->node);
+	mutex_unlock(&dev_opp_list_lock);
+	synchronize_rcu();
+
+	/* clean up old opp */
+	new_opp = opp;
+	goto out;
+
+unlock:
+	mutex_unlock(&dev_opp_list_lock);
+out:
+	kfree(new_opp);
+	return r;
+}
+
+/**
+ * opp_enable() - Enable a specific OPP
+ * @dev:	device for which we do this operation
+ * @freq:	OPP frequency to enable
+ *
+ * Enables a provided opp. If the operation is valid, this returns 0, else the
It is meant to be used for users an OPP available + * after being temporarily made unavailable with opp_disable. + * + * Locking: The internal device_opp and opp structures are RCU protected. + * Hence this function indirectly uses RCU and mutex locks to keep the + * integrity of the internal data structures. Callers should ensure that + * this function is *NOT* called under RCU protection or in contexts where + * mutex locking or synchronize_rcu() blocking calls cannot be used. + */ +int opp_enable(struct device *dev, unsigned long freq) +{ + return opp_set_availability(dev, freq, true); +} + +/** + * opp_disable() - Disable a specific OPP + * @dev: device for which we do this operation + * @freq: OPP frequency to disable + * + * Disables a provided opp. If the operation is valid, this returns + * 0, else the corresponding error value. It is meant to be a temporary + * control by users to make this OPP not available until the circumstances are + * right to make it available again (with a call to opp_enable). + * + * Locking: The internal device_opp and opp structures are RCU protected. + * Hence this function indirectly uses RCU and mutex locks to keep the + * integrity of the internal data structures. Callers should ensure that + * this function is *NOT* called under RCU protection or in contexts where + * mutex locking or synchronize_rcu() blocking calls cannot be used. + */ +int opp_disable(struct device *dev, unsigned long freq) +{ + return opp_set_availability(dev, freq, false); +} + +#ifdef CONFIG_CPU_FREQ +/** + * opp_init_cpufreq_table() - create a cpufreq table for a device + * @dev: device for which we do this operation + * @table: Cpufreq table returned back to caller + * + * Generate a cpufreq table for a provided device- this assumes that the + * opp list is already initialized and ready for usage. + * + * This function allocates required memory for the cpufreq table. It is + * expected that the caller does the required maintenance such as freeing + * the table as required. + * + * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM + * if no memory available for the operation (table is not populated), returns 0 + * if successful and table is populated. + * + * WARNING: It is important for the callers to ensure refreshing their copy of + * the table if any of the mentioned functions have been invoked in the interim. + * + * Locking: The internal device_opp and opp structures are RCU protected. + * To simplify the logic, we pretend we are updater and hold relevant mutex here + * Callers should ensure that this function is *NOT* called under RCU protection + * or in contexts where mutex locking cannot be used. 
+ */
+int opp_init_cpufreq_table(struct device *dev,
+			    struct cpufreq_frequency_table **table)
+{
+	struct device_opp *dev_opp;
+	struct opp *opp;
+	struct cpufreq_frequency_table *freq_table;
+	int i = 0;
+
+	/* Pretend as if I am an updater */
+	mutex_lock(&dev_opp_list_lock);
+
+	dev_opp = find_device_opp(dev);
+	if (IS_ERR(dev_opp)) {
+		int r = PTR_ERR(dev_opp);
+		mutex_unlock(&dev_opp_list_lock);
+		dev_err(dev, "%s: Device OPP not found (%d)\n", __func__, r);
+		return r;
+	}
+
+	freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
+			     (opp_get_opp_count(dev) + 1), GFP_KERNEL);
+	if (!freq_table) {
+		mutex_unlock(&dev_opp_list_lock);
+		dev_warn(dev, "%s: Unable to allocate frequency table\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	list_for_each_entry(opp, &dev_opp->opp_list, node) {
+		if (opp->available) {
+			freq_table[i].index = i;
+			freq_table[i].frequency = opp->rate / 1000;
+			i++;
+		}
+	}
+	mutex_unlock(&dev_opp_list_lock);
+
+	freq_table[i].index = i;
+	freq_table[i].frequency = CPUFREQ_TABLE_END;
+
+	*table = &freq_table[0];
+
+	return 0;
+}
+#endif		/* CONFIG_CPU_FREQ */
diff --git a/include/linux/opp.h b/include/linux/opp.h
new file mode 100644
index 00000000000..5449945d589
--- /dev/null
+++ b/include/linux/opp.h
@@ -0,0 +1,105 @@
+/*
+ * Generic OPP Interface
+ *
+ * Copyright (C) 2009-2010 Texas Instruments Incorporated.
+ *	Nishanth Menon
+ *	Romit Dasgupta
+ *	Kevin Hilman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_OPP_H__
+#define __LINUX_OPP_H__
+
+#include
+#include
+
+struct opp;
+
+#if defined(CONFIG_PM_OPP)
+
+unsigned long opp_get_voltage(struct opp *opp);
+
+unsigned long opp_get_freq(struct opp *opp);
+
+int opp_get_opp_count(struct device *dev);
+
+struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
+				bool available);
+
+struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq);
+
+struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq);
+
+int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt);
+
+int opp_enable(struct device *dev, unsigned long freq);
+
+int opp_disable(struct device *dev, unsigned long freq);
+
+#else
+static inline unsigned long opp_get_voltage(struct opp *opp)
+{
+	return 0;
+}
+
+static inline unsigned long opp_get_freq(struct opp *opp)
+{
+	return 0;
+}
+
+static inline int opp_get_opp_count(struct device *dev)
+{
+	return 0;
+}
+
+static inline struct opp *opp_find_freq_exact(struct device *dev,
+					unsigned long freq, bool available)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+static inline struct opp *opp_find_freq_floor(struct device *dev,
+					unsigned long *freq)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+static inline struct opp *opp_find_freq_ceil(struct device *dev,
+					unsigned long *freq)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+static inline int opp_add(struct device *dev, unsigned long freq,
+					unsigned long u_volt)
+{
+	return -EINVAL;
+}
+
+static inline int opp_enable(struct device *dev, unsigned long freq)
+{
+	return 0;
+}
+
+static inline int opp_disable(struct device *dev, unsigned long freq)
+{
+	return 0;
+}
+#endif		/* CONFIG_PM_OPP */
+
+#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
+int opp_init_cpufreq_table(struct device *dev,
+			    struct cpufreq_frequency_table **table);
+#else
+static inline int opp_init_cpufreq_table(struct device *dev,
+			    struct cpufreq_frequency_table **table)
+{
+	return -EINVAL;
+}
+#endif		/* CONFIG_CPU_FREQ */
+
+#endif		/* __LINUX_OPP_H__ */
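
As an illustration of how the static inline stubs above keep callers
buildable, a hypothetical snippet follows (the device pointer and values are
made up; dev_dbg is used only as an example sink):

	static int soc_try_register_opp(struct device *dev)
	{
		int r;

		/* compiles whether or not CONFIG_PM_OPP is set */
		r = opp_add(dev, 600000000, 1200000);
		if (r)	/* stub returns -EINVAL when the library is off */
			dev_dbg(dev, "OPP layer unavailable (%d)\n", r);

		return r;
	}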
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index e45894c696e..29bff6117ab 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -245,3 +245,17 @@ config PM_OPS
 	bool
 	depends on PM_SLEEP || PM_RUNTIME
 	default y
+
+config PM_OPP
+	bool "Operating Performance Point (OPP) Layer library"
+	depends on PM
+	---help---
+	  SOCs have a standard set of tuples consisting of frequency and
+	  voltage pairs that the device will support per voltage domain. This
+	  is called Operating Performance Point or OPP. The actual definitions
+	  of OPP vary over silicon within the same family of devices.
+
+	  OPP layer organizes the data internally using device pointers
+	  representing individual voltage domains and provides SOC
+	  implementations a ready-to-use framework to manage OPPs.
+	  For more information, read <file:Documentation/power/opp.txt>
-- 
cgit v1.2.3-70-g09d2


From 907f27840985fe6a0c62e43cd4702c6e04b4bcc7 Mon Sep 17 00:00:00 2001
From: matt mooney
Date: Mon, 27 Sep 2010 19:04:53 -0700
Subject: tracing/trivial: Remove cast from void*

Unnecessary cast from void* in assignment.

Signed-off-by: matt mooney
Signed-off-by: Steven Rostedt
---
 kernel/trace/ftrace.c | 4 ++--
 kernel/trace/trace.c  | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

(limited to 'kernel')

diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 65fb077ea79..ebd80d50c47 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1638,8 +1638,8 @@ ftrace_failures_open(struct inode *inode, struct file *file)
 
 	ret = ftrace_avail_open(inode, file);
 	if (!ret) {
-		m = (struct seq_file *)file->private_data;
-		iter = (struct ftrace_iterator *)m->private;
+		m = file->private_data;
+		iter = m->private;
 		iter->flags = FTRACE_ITER_FAILURES;
 	}
 
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9ec59f54115..001bcd2ccf4 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2196,7 +2196,7 @@ int tracing_open_generic(struct inode *inode, struct file *filp)
 
 static int tracing_release(struct inode *inode, struct file *file)
 {
-	struct seq_file *m = (struct seq_file *)file->private_data;
+	struct seq_file *m = file->private_data;
 	struct trace_iterator *iter;
 	int cpu;
 
-- 
cgit v1.2.3-70-g09d2


From a9d61173dc1cb63e660ae89e874e51ba4fd2f991 Mon Sep 17 00:00:00 2001
From: Jiri Olsa
Date: Fri, 24 Sep 2010 17:41:02 +0200
Subject: tracing: Add proper check for irq_depth routines

The check_irq_entry and check_irq_return functions could be called from
graph event context. In such a case there's no graph private data
allocated. Add checks to handle this case.
Signed-off-by: Jiri Olsa
LKML-Reference: <20100924154102.GB1818@jolsa.brq.redhat.com>

[ Fixed some grammar in the comments ]

Signed-off-by: Steven Rostedt
---
 kernel/trace/trace_functions_graph.c | 24 ++++++++++++++++++++----
 1 file changed, 20 insertions(+), 4 deletions(-)

(limited to 'kernel')

diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index ef49e9370b2..4c58ccc6427 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -888,12 +888,20 @@ check_irq_entry(struct trace_iterator *iter, u32 flags,
 		unsigned long addr, int depth)
 {
 	int cpu = iter->cpu;
+	int *depth_irq;
 	struct fgraph_data *data = iter->private;
-	int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
 
-	if (flags & TRACE_GRAPH_PRINT_IRQS)
+	/*
+	 * If we are either displaying irqs, or we got called as
+	 * a graph event and private data does not exist,
+	 * then we bypass the irq check.
+	 */
+	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
+	    (!data))
 		return 0;
 
+	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
+
 	/*
 	 * We are inside the irq code
 	 */
@@ -926,12 +934,20 @@ static int
 check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
 {
 	int cpu = iter->cpu;
+	int *depth_irq;
 	struct fgraph_data *data = iter->private;
-	int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
 
-	if (flags & TRACE_GRAPH_PRINT_IRQS)
+	/*
+	 * If we are either displaying irqs, or we got called as
+	 * a graph event and private data does not exist,
+	 * then we bypass the irq check.
+	 */
+	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
+	    (!data))
 		return 0;
 
+	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
+
 	/*
	 * We are not inside the irq code.
 	 */
-- 
cgit v1.2.3-70-g09d2


From 0a772620a2e21fb55a02f70fe38d4b5c3a5fbbbf Mon Sep 17 00:00:00 2001
From: Jiri Olsa
Date: Thu, 23 Sep 2010 14:00:52 +0200
Subject: tracing: Make graph related irqs/preemptsoff functions global

Move the trace_graph_function() and print_graph_headers_flags() functions
to trace_functions_graph.c to be globally available.
Signed-off-by: Jiri Olsa LKML-Reference: <1285243253-7372-3-git-send-email-jolsa@redhat.com> Signed-off-by: Steven Rostedt --- kernel/trace/trace.h | 4 +++ kernel/trace/trace_functions_graph.c | 63 ++++++++++++++++++++++++++++++++++-- kernel/trace/trace_irqsoff.c | 56 ++++---------------------------- 3 files changed, 71 insertions(+), 52 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index d39b3c5454a..9021f8c0c0c 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -343,6 +343,10 @@ void trace_function(struct trace_array *tr, unsigned long ip, unsigned long parent_ip, unsigned long flags, int pc); +void trace_graph_function(struct trace_array *tr, + unsigned long ip, + unsigned long parent_ip, + unsigned long flags, int pc); void trace_default_header(struct seq_file *m); void print_trace_header(struct seq_file *m, struct trace_iterator *iter); int trace_empty(struct trace_iterator *iter); diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 4c58ccc6427..6f8fe28acba 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -262,6 +262,35 @@ int trace_graph_thresh_entry(struct ftrace_graph_ent *trace) return trace_graph_entry(trace); } +static void +__trace_graph_function(struct trace_array *tr, + unsigned long ip, unsigned long flags, int pc) +{ + u64 time = trace_clock_local(); + struct ftrace_graph_ent ent = { + .func = ip, + .depth = 0, + }; + struct ftrace_graph_ret ret = { + .func = ip, + .depth = 0, + .calltime = time, + .rettime = time, + }; + + __trace_graph_entry(tr, &ent, flags, pc); + __trace_graph_return(tr, &ret, flags, pc); +} + +void +trace_graph_function(struct trace_array *tr, + unsigned long ip, unsigned long parent_ip, + unsigned long flags, int pc) +{ + __trace_graph_function(tr, parent_ip, flags, pc); + __trace_graph_function(tr, ip, flags, pc); +} + void __trace_graph_return(struct trace_array *tr, struct ftrace_graph_ret *trace, unsigned long flags, @@ -1179,7 +1208,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent, enum print_line_t -print_graph_function_flags(struct trace_iterator *iter, u32 flags) +__print_graph_function_flags(struct trace_iterator *iter, u32 flags) { struct ftrace_graph_ent_entry *field; struct fgraph_data *data = iter->private; @@ -1242,7 +1271,18 @@ print_graph_function_flags(struct trace_iterator *iter, u32 flags) static enum print_line_t print_graph_function(struct trace_iterator *iter) { - return print_graph_function_flags(iter, tracer_flags.val); + return __print_graph_function_flags(iter, tracer_flags.val); +} + +enum print_line_t print_graph_function_flags(struct trace_iterator *iter, + u32 flags) +{ + if (trace_flags & TRACE_ITER_LATENCY_FMT) + flags |= TRACE_GRAPH_PRINT_DURATION; + else + flags |= TRACE_GRAPH_PRINT_ABS_TIME; + + return __print_graph_function_flags(iter, flags); } static enum print_line_t @@ -1274,7 +1314,7 @@ static void print_lat_header(struct seq_file *s, u32 flags) seq_printf(s, "#%.*s|||| / \n", size, spaces); } -void print_graph_headers_flags(struct seq_file *s, u32 flags) +static void __print_graph_headers_flags(struct seq_file *s, u32 flags) { int lat = trace_flags & TRACE_ITER_LATENCY_FMT; @@ -1315,6 +1355,23 @@ void print_graph_headers(struct seq_file *s) print_graph_headers_flags(s, tracer_flags.val); } +void print_graph_headers_flags(struct seq_file *s, u32 flags) +{ + struct trace_iterator *iter = s->private; + + if (trace_flags & TRACE_ITER_LATENCY_FMT) 
{ + /* print nothing if the buffers are empty */ + if (trace_empty(iter)) + return; + + print_trace_header(s, iter); + flags |= TRACE_GRAPH_PRINT_DURATION; + } else + flags |= TRACE_GRAPH_PRINT_ABS_TIME; + + __print_graph_headers_flags(s, flags); +} + void graph_trace_open(struct trace_iterator *iter) { /* pid and depth on the last trace processed */ diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 73a6b0601f2..4047e98afcb 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c @@ -229,75 +229,33 @@ static void irqsoff_trace_close(struct trace_iterator *iter) static enum print_line_t irqsoff_print_line(struct trace_iterator *iter) { - u32 flags = GRAPH_TRACER_FLAGS; - - if (trace_flags & TRACE_ITER_LATENCY_FMT) - flags |= TRACE_GRAPH_PRINT_DURATION; - else - flags |= TRACE_GRAPH_PRINT_ABS_TIME; - /* * In graph mode call the graph tracer output function, * otherwise go with the TRACE_FN event handler */ if (is_graph()) - return print_graph_function_flags(iter, flags); + return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS); return TRACE_TYPE_UNHANDLED; } static void irqsoff_print_header(struct seq_file *s) { - if (is_graph()) { - struct trace_iterator *iter = s->private; - u32 flags = GRAPH_TRACER_FLAGS; - - if (trace_flags & TRACE_ITER_LATENCY_FMT) { - /* print nothing if the buffers are empty */ - if (trace_empty(iter)) - return; - - print_trace_header(s, iter); - flags |= TRACE_GRAPH_PRINT_DURATION; - } else - flags |= TRACE_GRAPH_PRINT_ABS_TIME; - - print_graph_headers_flags(s, flags); - } else + if (is_graph()) + print_graph_headers_flags(s, GRAPH_TRACER_FLAGS); + else trace_default_header(s); } -static void -trace_graph_function(struct trace_array *tr, - unsigned long ip, unsigned long flags, int pc) -{ - u64 time = trace_clock_local(); - struct ftrace_graph_ent ent = { - .func = ip, - .depth = 0, - }; - struct ftrace_graph_ret ret = { - .func = ip, - .depth = 0, - .calltime = time, - .rettime = time, - }; - - __trace_graph_entry(tr, &ent, flags, pc); - __trace_graph_return(tr, &ret, flags, pc); -} - static void __trace_function(struct trace_array *tr, unsigned long ip, unsigned long parent_ip, unsigned long flags, int pc) { - if (!is_graph()) + if (is_graph()) + trace_graph_function(tr, ip, parent_ip, flags, pc); + else trace_function(tr, ip, parent_ip, flags, pc); - else { - trace_graph_function(tr, parent_ip, flags, pc); - trace_graph_function(tr, ip, flags, pc); - } } #else -- cgit v1.2.3-70-g09d2 From 7495a5beaa22f190f4888aa8cbe4827c16575d0a Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Thu, 23 Sep 2010 14:00:53 +0200 Subject: tracing: Graph support for wakeup tracer Add function graph support for wakeup latency tracer. The graph output is enabled by setting the 'display-graph' trace option. 
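
As an illustration only (not part of the patch), the new option could be
exercised from user space roughly as follows; the paths assume debugfs is
mounted at /sys/kernel/debug and that the per-tracer option file appears once
the tracer is selected:

	#include <stdio.h>

	static void write_str(const char *path, const char *val)
	{
		FILE *f = fopen(path, "w");

		if (f) {
			fputs(val, f);
			fclose(f);
		}
	}

	int main(void)
	{
		write_str("/sys/kernel/debug/tracing/current_tracer",
			  "wakeup");
		write_str("/sys/kernel/debug/tracing/options/display-graph",
			  "1");
		return 0;
	}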
Signed-off-by: Jiri Olsa LKML-Reference: <1285243253-7372-4-git-send-email-jolsa@redhat.com> Signed-off-by: Steven Rostedt --- kernel/trace/trace_sched_wakeup.c | 231 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 221 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 4086eae6e81..033510dbb32 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -31,13 +31,33 @@ static int wakeup_rt; static arch_spinlock_t wakeup_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; +static void wakeup_reset(struct trace_array *tr); static void __wakeup_reset(struct trace_array *tr); +static int wakeup_graph_entry(struct ftrace_graph_ent *trace); +static void wakeup_graph_return(struct ftrace_graph_ret *trace); static int save_lat_flag; +#define TRACE_DISPLAY_GRAPH 1 + +static struct tracer_opt trace_opts[] = { +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + /* display latency trace as call graph */ + { TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) }, +#endif + { } /* Empty entry */ +}; + +static struct tracer_flags tracer_flags = { + .val = 0, + .opts = trace_opts, +}; + +#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH) + #ifdef CONFIG_FUNCTION_TRACER /* - * irqsoff uses its own tracer function to keep the overhead down: + * wakeup uses its own tracer function to keep the overhead down: */ static void wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) @@ -80,8 +100,191 @@ static struct ftrace_ops trace_ops __read_mostly = { .func = wakeup_tracer_call, }; + +static int start_func_tracer(int graph) +{ + int ret; + + if (!graph) + ret = register_ftrace_function(&trace_ops); + else + ret = register_ftrace_graph(&wakeup_graph_return, + &wakeup_graph_entry); + + if (!ret && tracing_is_enabled()) + tracer_enabled = 1; + else + tracer_enabled = 0; + + return ret; +} + +static void stop_func_tracer(int graph) +{ + tracer_enabled = 0; + + if (!graph) + unregister_ftrace_function(&trace_ops); + else + unregister_ftrace_graph(); +} + #endif /* CONFIG_FUNCTION_TRACER */ +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +static int wakeup_set_flag(u32 old_flags, u32 bit, int set) +{ + + if (!(bit & TRACE_DISPLAY_GRAPH)) + return -EINVAL; + + if (!(is_graph() ^ set)) + return 0; + + stop_func_tracer(!set); + + wakeup_reset(wakeup_trace); + tracing_max_latency = 0; + + return start_func_tracer(set); +} + +static int wakeup_graph_entry(struct ftrace_graph_ent *trace) +{ + struct trace_array *tr = wakeup_trace; + struct trace_array_cpu *data; + unsigned long flags; + long disabled; + int cpu, pc, ret = 0; + + if (likely(!wakeup_task)) + return 0; + + pc = preempt_count(); + preempt_disable_notrace(); + + cpu = raw_smp_processor_id(); + if (cpu != wakeup_current_cpu) + goto out_enable; + + data = tr->data[cpu]; + disabled = atomic_inc_return(&data->disabled); + if (unlikely(disabled != 1)) + goto out; + + local_save_flags(flags); + ret = __trace_graph_entry(tr, trace, flags, pc); + +out: + atomic_dec(&data->disabled); + +out_enable: + preempt_enable_notrace(); + return ret; +} + +static void wakeup_graph_return(struct ftrace_graph_ret *trace) +{ + struct trace_array *tr = wakeup_trace; + struct trace_array_cpu *data; + unsigned long flags; + long disabled; + int cpu, pc; + + if (likely(!wakeup_task)) + return; + + pc = preempt_count(); + preempt_disable_notrace(); + + cpu = raw_smp_processor_id(); + if (cpu != wakeup_current_cpu) + goto out_enable; + + data = tr->data[cpu]; + disabled = 
atomic_inc_return(&data->disabled); + if (unlikely(disabled != 1)) + goto out; + + local_save_flags(flags); + __trace_graph_return(tr, trace, flags, pc); + +out: + atomic_dec(&data->disabled); + +out_enable: + preempt_enable_notrace(); + return; +} + +static void wakeup_trace_open(struct trace_iterator *iter) +{ + if (is_graph()) + graph_trace_open(iter); +} + +static void wakeup_trace_close(struct trace_iterator *iter) +{ + if (iter->private) + graph_trace_close(iter); +} + +#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC) + +static enum print_line_t wakeup_print_line(struct trace_iterator *iter) +{ + /* + * In graph mode call the graph tracer output function, + * otherwise go with the TRACE_FN event handler + */ + if (is_graph()) + return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS); + + return TRACE_TYPE_UNHANDLED; +} + +static void wakeup_print_header(struct seq_file *s) +{ + if (is_graph()) + print_graph_headers_flags(s, GRAPH_TRACER_FLAGS); + else + trace_default_header(s); +} + +static void +__trace_function(struct trace_array *tr, + unsigned long ip, unsigned long parent_ip, + unsigned long flags, int pc) +{ + if (is_graph()) + trace_graph_function(tr, ip, parent_ip, flags, pc); + else + trace_function(tr, ip, parent_ip, flags, pc); +} +#else +#define __trace_function trace_function + +static int wakeup_set_flag(u32 old_flags, u32 bit, int set) +{ + return -EINVAL; +} + +static int wakeup_graph_entry(struct ftrace_graph_ent *trace) +{ + return -1; +} + +static enum print_line_t wakeup_print_line(struct trace_iterator *iter) +{ + return TRACE_TYPE_UNHANDLED; +} + +static void wakeup_graph_return(struct ftrace_graph_ret *trace) { } +static void wakeup_print_header(struct seq_file *s) { } +static void wakeup_trace_open(struct trace_iterator *iter) { } +static void wakeup_trace_close(struct trace_iterator *iter) { } +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ + /* * Should this new latency be reported/recorded? */ @@ -152,7 +355,7 @@ probe_wakeup_sched_switch(void *ignore, /* The task we are waiting for is waking up */ data = wakeup_trace->data[wakeup_cpu]; - trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc); + __trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc); tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc); T0 = data->preempt_timestamp; @@ -252,7 +455,7 @@ probe_wakeup(void *ignore, struct task_struct *p, int success) * is not called by an assembly function (where as schedule is) * it should be safe to use it here. 
 */
-	trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
+	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
 
 out_locked:
 	arch_spin_unlock(&wakeup_lock);
@@ -303,12 +506,8 @@ static void start_wakeup_tracer(struct trace_array *tr)
 	 */
 	smp_wmb();
 
-	register_ftrace_function(&trace_ops);
-
-	if (tracing_is_enabled())
-		tracer_enabled = 1;
-	else
-		tracer_enabled = 0;
+	if (start_func_tracer(is_graph()))
+		printk(KERN_ERR "failed to start wakeup tracer\n");
 
 	return;
 fail_deprobe_wake_new:
@@ -320,7 +519,7 @@ fail_deprobe:
 static void stop_wakeup_tracer(struct trace_array *tr)
 {
 	tracer_enabled = 0;
-	unregister_ftrace_function(&trace_ops);
+	stop_func_tracer(is_graph());
 	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
 	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
 	unregister_trace_sched_wakeup(probe_wakeup, NULL);
@@ -379,9 +578,15 @@ static struct tracer wakeup_tracer __read_mostly =
 	.start		= wakeup_tracer_start,
 	.stop		= wakeup_tracer_stop,
 	.print_max	= 1,
+	.print_header	= wakeup_print_header,
+	.print_line	= wakeup_print_line,
+	.flags		= &tracer_flags,
+	.set_flag	= wakeup_set_flag,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest    = trace_selftest_startup_wakeup,
 #endif
+	.open		= wakeup_trace_open,
+	.close		= wakeup_trace_close,
 	.use_max_tr	= 1,
 };
 
@@ -394,9 +599,15 @@ static struct tracer wakeup_rt_tracer __read_mostly =
 	.stop		= wakeup_tracer_stop,
 	.wait_pipe	= poll_wait_pipe,
 	.print_max	= 1,
+	.print_header	= wakeup_print_header,
+	.print_line	= wakeup_print_line,
+	.flags		= &tracer_flags,
+	.set_flag	= wakeup_set_flag,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest    = trace_selftest_startup_wakeup,
 #endif
+	.open		= wakeup_trace_open,
+	.close		= wakeup_trace_close,
 	.use_max_tr	= 1,
 };
-- 
cgit v1.2.3-70-g09d2


From 542181d3769d001c59cd17573dd4381e87d215f2 Mon Sep 17 00:00:00 2001
From: Steven Rostedt
Date: Tue, 5 Oct 2010 16:38:49 -0400
Subject: tracing: Use one prologue for the wakeup tracer function tracers

The wakeup tracer has three types of function tracers: the normal
function tracer, function graph entry, and function graph return. Each
of these uses a complex dance to prevent recursion and to decide whether
to trace the data or not (depending on the wakeup_task variable).

This patch moves the duplicate code into a single routine, to prevent
future mistakes with modifying duplicate complex code.

Cc: Jiri Olsa
Signed-off-by: Steven Rostedt
---
 kernel/trace/trace_sched_wakeup.c | 102 +++++++++++++++++++-------------------
 1 file changed, 50 insertions(+), 52 deletions(-)

(limited to 'kernel')

diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 033510dbb32..31689d2df7f 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -56,43 +56,73 @@ static struct tracer_flags tracer_flags = {
 #define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)
 
 #ifdef CONFIG_FUNCTION_TRACER
+
 /*
- * wakeup uses its own tracer function to keep the overhead down:
+ * Prologue for the wakeup function tracers.
+ *
+ * Returns 1 if it is OK to continue, and preemption
+ * is disabled and data->disabled is incremented.
+ * 0 if the trace is to be ignored, and preemption
+ * is not disabled and data->disabled is
+ * kept the same.
+ *
+ * Note, this function is also used outside this ifdef but
+ * inside the #ifdef of the function graph tracer below.
+ * This is OK, since the function graph tracer is
+ * dependent on the function tracer.
+ */
-static void
-wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
+static int
+func_prolog_preempt_disable(struct trace_array *tr,
+			    struct trace_array_cpu **data,
+			    int *pc)
 {
-	struct trace_array *tr = wakeup_trace;
-	struct trace_array_cpu *data;
-	unsigned long flags;
 	long disabled;
 	int cpu;
-	int pc;
 
 	if (likely(!wakeup_task))
-		return;
+		return 0;
 
-	pc = preempt_count();
+	*pc = preempt_count();
 	preempt_disable_notrace();
 
 	cpu = raw_smp_processor_id();
 	if (cpu != wakeup_current_cpu)
 		goto out_enable;
 
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
+	*data = tr->data[cpu];
+	disabled = atomic_inc_return(&(*data)->disabled);
 	if (unlikely(disabled != 1))
 		goto out;
 
-	local_irq_save(flags);
+	return 1;
 
-	trace_function(tr, ip, parent_ip, flags, pc);
+out:
+	atomic_dec(&(*data)->disabled);
+
+out_enable:
+	preempt_enable_notrace();
+	return 0;
+}
+
+/*
+ * wakeup uses its own tracer function to keep the overhead down:
+ */
+static void
+wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
+{
+	struct trace_array *tr = wakeup_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	int pc;
+
+	if (!func_prolog_preempt_disable(tr, &data, &pc))
+		return;
 
+	local_irq_save(flags);
+	trace_function(tr, ip, parent_ip, flags, pc);
 	local_irq_restore(flags);
 
- out:
 	atomic_dec(&data->disabled);
- out_enable:
 	preempt_enable_notrace();
 }
 
@@ -154,32 +184,16 @@ static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
 	struct trace_array *tr = wakeup_trace;
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	long disabled;
-	int cpu, pc, ret = 0;
+	int pc, ret = 0;
 
-	if (likely(!wakeup_task))
+	if (!func_prolog_preempt_disable(tr, &data, &pc))
 		return 0;
 
-	pc = preempt_count();
-	preempt_disable_notrace();
-
-	cpu = raw_smp_processor_id();
-	if (cpu != wakeup_current_cpu)
-		goto out_enable;
-
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
-	if (unlikely(disabled != 1))
-		goto out;
-
 	local_save_flags(flags);
 	ret = __trace_graph_entry(tr, trace, flags, pc);
-
-out:
 	atomic_dec(&data->disabled);
-
-out_enable:
 	preempt_enable_notrace();
+
 	return ret;
 }
 
@@ -188,31 +202,15 @@ static void wakeup_graph_return(struct ftrace_graph_ret *trace)
 	struct trace_array *tr = wakeup_trace;
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	long disabled;
-	int cpu, pc;
+	int pc;
 
-	if (likely(!wakeup_task))
+	if (!func_prolog_preempt_disable(tr, &data, &pc))
 		return;
 
-	pc = preempt_count();
-	preempt_disable_notrace();
-
-	cpu = raw_smp_processor_id();
-	if (cpu != wakeup_current_cpu)
-		goto out_enable;
-
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
-	if (unlikely(disabled != 1))
-		goto out;
-
 	local_save_flags(flags);
 	__trace_graph_return(tr, trace, flags, pc);
-
-out:
 	atomic_dec(&data->disabled);
 
-out_enable:
 	preempt_enable_notrace();
 	return;
 }
-- 
cgit v1.2.3-70-g09d2


From 5e6d2b9cfa3a6e7fe62fc0135bc1bd778f5db564 Mon Sep 17 00:00:00 2001
From: Steven Rostedt
Date: Tue, 5 Oct 2010 19:41:43 -0400
Subject: tracing: Use one prologue for the preempt irqs off tracer function tracers

The preempt and irqsoff tracers have three types of function tracers:
the normal function tracer, function graph entry, and function graph
return. Each of these uses a complex dance to prevent recursion and to
decide whether to trace the data or not (depending on whether interrupts
are enabled or not).

This patch moves the duplicate code into a single routine, to prevent
future mistakes with modifying duplicate complex code.
Cc: Jiri Olsa Signed-off-by: Steven Rostedt --- kernel/trace/trace_irqsoff.c | 96 ++++++++++++++++++++++---------------------- 1 file changed, 48 insertions(+), 48 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 4047e98afcb..5cf8c602b88 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c @@ -87,14 +87,22 @@ static __cacheline_aligned_in_smp unsigned long max_sequence; #ifdef CONFIG_FUNCTION_TRACER /* - * irqsoff uses its own tracer function to keep the overhead down: + * Prologue for the preempt and irqs off function tracers. + * + * Returns 1 if it is OK to continue, and data->disabled is + * incremented. + * 0 if the trace is to be ignored, and data->disabled + * is kept the same. + * + * Note, this function is also used outside this ifdef but + * inside the #ifdef of the function graph tracer below. + * This is OK, since the function graph tracer is + * dependent on the function tracer. */ -static void -irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip) +static int func_prolog_dec(struct trace_array *tr, + struct trace_array_cpu **data, + unsigned long *flags) { - struct trace_array *tr = irqsoff_trace; - struct trace_array_cpu *data; - unsigned long flags; long disabled; int cpu; @@ -106,18 +114,38 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip) */ cpu = raw_smp_processor_id(); if (likely(!per_cpu(tracing_cpu, cpu))) - return; + return 0; - local_save_flags(flags); + local_save_flags(*flags); /* slight chance to get a false positive on tracing_cpu */ - if (!irqs_disabled_flags(flags)) - return; + if (!irqs_disabled_flags(*flags)) + return 0; - data = tr->data[cpu]; - disabled = atomic_inc_return(&data->disabled); + *data = tr->data[cpu]; + disabled = atomic_inc_return(&(*data)->disabled); if (likely(disabled == 1)) - trace_function(tr, ip, parent_ip, flags, preempt_count()); + return 1; + + atomic_dec(&(*data)->disabled); + + return 0; +} + +/* + * irqsoff uses its own tracer function to keep the overhead down: + */ +static void +irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip) +{ + struct trace_array *tr = irqsoff_trace; + struct trace_array_cpu *data; + unsigned long flags; + + if (!func_prolog_dec(tr, &data, &flags)) + return; + + trace_function(tr, ip, parent_ip, flags, preempt_count()); atomic_dec(&data->disabled); } @@ -155,30 +183,16 @@ static int irqsoff_graph_entry(struct ftrace_graph_ent *trace) struct trace_array *tr = irqsoff_trace; struct trace_array_cpu *data; unsigned long flags; - long disabled; int ret; - int cpu; int pc; - cpu = raw_smp_processor_id(); - if (likely(!per_cpu(tracing_cpu, cpu))) - return 0; - - local_save_flags(flags); - /* slight chance to get a false positive on tracing_cpu */ - if (!irqs_disabled_flags(flags)) + if (!func_prolog_dec(tr, &data, &flags)) return 0; - data = tr->data[cpu]; - disabled = atomic_inc_return(&data->disabled); - - if (likely(disabled == 1)) { - pc = preempt_count(); - ret = __trace_graph_entry(tr, trace, flags, pc); - } else - ret = 0; - + pc = preempt_count(); + ret = __trace_graph_entry(tr, trace, flags, pc); atomic_dec(&data->disabled); + return ret; } @@ -187,27 +201,13 @@ static void irqsoff_graph_return(struct ftrace_graph_ret *trace) struct trace_array *tr = irqsoff_trace; struct trace_array_cpu *data; unsigned long flags; - long disabled; - int cpu; int pc; - cpu = raw_smp_processor_id(); - if (likely(!per_cpu(tracing_cpu, cpu))) - return; - - local_save_flags(flags); - /* slight chance 
to get a false positive on tracing_cpu */ - if (!irqs_disabled_flags(flags)) + if (!func_prolog_dec(tr, &data, &flags)) return; - data = tr->data[cpu]; - disabled = atomic_inc_return(&data->disabled); - - if (likely(disabled == 1)) { - pc = preempt_count(); - __trace_graph_return(tr, trace, flags, pc); - } - + pc = preempt_count(); + __trace_graph_return(tr, trace, flags, pc); atomic_dec(&data->disabled); } -- cgit v1.2.3-70-g09d2 From 78c89ba121221d9224a5747803d7fffe51cd6e44 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 5 Oct 2010 23:22:19 -0400 Subject: tracing: Remove parent recording in latency tracer graph options Even though the parent is recorded with the normal function tracing of the latency tracers (irqsoff and wakeup), the function graph recording is bogus. This is due to the function graph tracer messing with the return stack. The latency tracers pass in CALLER_ADDR0 as the parent, which works fine for plain function tracing. But this causes bogus output with the graph tracer: 3) -0 | d.s3. 0.000 us | return_to_handler(); 3) -0 | d.s3. 0.000 us | _raw_spin_unlock_irqrestore(); 3) -0 | d.s3. 0.000 us | return_to_handler(); 3) -0 | d.s3. 0.000 us | trace_hardirqs_on(); The "return_to_handler()" call is the trampoline of the function graph tracer, and is meaningless in this context. Cc: Jiri Olsa Signed-off-by: Steven Rostedt --- kernel/trace/trace_functions_graph.c | 1 - 1 file changed, 1 deletion(-) (limited to 'kernel') diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 6f8fe28acba..76b05980225 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -287,7 +287,6 @@ trace_graph_function(struct trace_array *tr, unsigned long ip, unsigned long parent_ip, unsigned long flags, int pc) { - __trace_graph_function(tr, parent_ip, flags, pc); __trace_graph_function(tr, ip, flags, pc); } -- cgit v1.2.3-70-g09d2 From 4924627423d5e286136ad2520f5be536345ae590 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sun, 17 Oct 2010 21:46:10 +0200 Subject: sched: Unindent labels Labels should be on column 0.
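For illustration (this mirrors the hunks below and adds nothing new), the preferred form keeps the label flush against column 0 while the code stays indented:

	/* ... */
	goto again;
	/* ... */
again:
	retval = set_cpus_allowed_ptr(p, new_mask);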
Signed-off-by: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/sched.c | 12 ++++++------ kernel/sched_rt.c | 6 +++--- 2 files changed, 9 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 2111491f642..7f522832250 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4891,7 +4891,7 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) cpuset_cpus_allowed(p, cpus_allowed); cpumask_and(new_mask, in_mask, cpus_allowed); - again: +again: retval = set_cpus_allowed_ptr(p, new_mask); if (!retval) { @@ -8141,9 +8141,9 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) return 1; - err_free_rq: +err_free_rq: kfree(cfs_rq); - err: +err: return 0; } @@ -8231,9 +8231,9 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) return 1; - err_free_rq: +err_free_rq: kfree(rt_rq); - err: +err: return 0; } @@ -8591,7 +8591,7 @@ static int tg_set_bandwidth(struct task_group *tg, raw_spin_unlock(&rt_rq->rt_runtime_lock); } raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); - unlock: +unlock: read_unlock(&tasklist_lock); mutex_unlock(&rt_constraints_mutex); diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index baef30f0840..ab77aa00b7b 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -1140,7 +1140,7 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu) for_each_leaf_rt_rq(rt_rq, rq) { array = &rt_rq->active; idx = sched_find_first_bit(array->bitmap); - next_idx: +next_idx: if (idx >= MAX_RT_PRIO) continue; if (next && next->prio < idx) @@ -1316,7 +1316,7 @@ static int push_rt_task(struct rq *rq) if (!next_task) return 0; - retry: +retry: if (unlikely(next_task == rq->curr)) { WARN_ON(1); return 0; @@ -1464,7 +1464,7 @@ static int pull_rt_task(struct rq *this_rq) * but possible) */ } - skip: +skip: double_unlock_balance(this_rq, src_rq); } -- cgit v1.2.3-70-g09d2 From 34f971f6f7988be4d014eec3e3526bee6d007ffa Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 22 Sep 2010 13:53:15 +0200 Subject: sched: Create special class for stop/migrate work In order to separate the stop/migrate work thread from the SCHED_FIFO implementation, create a special class for it that is of higher priority than SCHED_FIFO itself. This currently solves a problem where cpu-hotplug consumes so much cpu-time that the SCHED_FIFO class gets throttled, but has the bandwidth replenishment timer pending on the now-dead cpu. It is also required for when we add the planned deadline scheduling class above SCHED_FIFO, as the stop/migrate thread still needs to transcend those tasks.
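A condensed sketch of the mechanism (pieced together from the hunks below; illustrative only, not a new interface): scheduler classes form a singly linked list that pick_next_task() walks from sched_class_highest, so placing the stop class at the head lets it outrank SCHED_FIFO without involving RT bandwidth control:

/* Classes chain via ->next, highest priority first. */
static const struct sched_class stop_sched_class = {
	.next		= &rt_sched_class,	/* stop outranks SCHED_FIFO/RR */
	.pick_next_task	= pick_next_task_stop,
	/* ... the remaining methods are mostly no-ops ... */
};

#define sched_class_highest	(&stop_sched_class)

#define for_each_class(class) \
	for (class = sched_class_highest; class; class = class->next)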
Tested-by: Heiko Carstens Signed-off-by: Peter Zijlstra LKML-Reference: <1285165776.2275.1022.camel@laptop> Signed-off-by: Ingo Molnar --- kernel/sched.c | 54 ++++++++++++++++++++---- kernel/sched_stoptask.c | 108 ++++++++++++++++++++++++++++++++++++++++++++++++ kernel/stop_machine.c | 8 ++-- 3 files changed, 158 insertions(+), 12 deletions(-) create mode 100644 kernel/sched_stoptask.c (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 7f522832250..5f64fed56a4 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -486,7 +486,7 @@ struct rq { */ unsigned long nr_uninterruptible; - struct task_struct *curr, *idle; + struct task_struct *curr, *idle, *stop; unsigned long next_balance; struct mm_struct *prev_mm; @@ -1837,7 +1837,7 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) static const struct sched_class rt_sched_class; -#define sched_class_highest (&rt_sched_class) +#define sched_class_highest (&stop_sched_class) #define for_each_class(class) \ for (class = sched_class_highest; class; class = class->next) @@ -1917,10 +1917,41 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int flags) #include "sched_idletask.c" #include "sched_fair.c" #include "sched_rt.c" +#include "sched_stoptask.c" #ifdef CONFIG_SCHED_DEBUG # include "sched_debug.c" #endif +void sched_set_stop_task(int cpu, struct task_struct *stop) +{ + struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; + struct task_struct *old_stop = cpu_rq(cpu)->stop; + + if (stop) { + /* + * Make it appear like a SCHED_FIFO task, its something + * userspace knows about and won't get confused about. + * + * Also, it will make PI more or less work without too + * much confusion -- but then, stop work should not + * rely on PI working anyway. + */ + sched_setscheduler_nocheck(stop, SCHED_FIFO, ¶m); + + stop->sched_class = &stop_sched_class; + } + + cpu_rq(cpu)->stop = stop; + + if (old_stop) { + /* + * Reset it back to a normal scheduling class so that + * it can die in pieces. + */ + old_stop->sched_class = &rt_sched_class; + } +} + /* * __normal_prio - return the priority that is based on the static prio */ @@ -3720,17 +3751,13 @@ pick_next_task(struct rq *rq) return p; } - class = sched_class_highest; - for ( ; ; ) { + for_each_class(class) { p = class->pick_next_task(rq); if (p) return p; - /* - * Will never be NULL as the idle class always - * returns a non-NULL p: - */ - class = class->next; } + + BUG(); /* the idle class will always have a runnable task */ } /* @@ -4659,6 +4686,15 @@ recheck: */ rq = __task_rq_lock(p); + /* + * Changing the policy of the stop threads its a very bad idea + */ + if (p == rq->stop) { + __task_rq_unlock(rq); + raw_spin_unlock_irqrestore(&p->pi_lock, flags); + return -EINVAL; + } + #ifdef CONFIG_RT_GROUP_SCHED if (user) { /* diff --git a/kernel/sched_stoptask.c b/kernel/sched_stoptask.c new file mode 100644 index 00000000000..45bddc0c104 --- /dev/null +++ b/kernel/sched_stoptask.c @@ -0,0 +1,108 @@ +/* + * stop-task scheduling class. + * + * The stop task is the highest priority task in the system, it preempts + * everything and will be preempted by nothing. 
+ * + * See kernel/stop_machine.c + */ + +#ifdef CONFIG_SMP +static int +select_task_rq_stop(struct rq *rq, struct task_struct *p, + int sd_flag, int flags) +{ + return task_cpu(p); /* stop tasks as never migrate */ +} +#endif /* CONFIG_SMP */ + +static void +check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags) +{ + resched_task(rq->curr); /* we preempt everything */ +} + +static struct task_struct *pick_next_task_stop(struct rq *rq) +{ + struct task_struct *stop = rq->stop; + + if (stop && stop->state == TASK_RUNNING) + return stop; + + return NULL; +} + +static void +enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags) +{ +} + +static void +dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags) +{ +} + +static void yield_task_stop(struct rq *rq) +{ + BUG(); /* the stop task should never yield, its pointless. */ +} + +static void put_prev_task_stop(struct rq *rq, struct task_struct *prev) +{ +} + +static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued) +{ +} + +static void set_curr_task_stop(struct rq *rq) +{ +} + +static void switched_to_stop(struct rq *rq, struct task_struct *p, + int running) +{ + BUG(); /* its impossible to change to this class */ +} + +static void prio_changed_stop(struct rq *rq, struct task_struct *p, + int oldprio, int running) +{ + BUG(); /* how!?, what priority? */ +} + +static unsigned int +get_rr_interval_stop(struct rq *rq, struct task_struct *task) +{ + return 0; +} + +/* + * Simple, special scheduling class for the per-CPU stop tasks: + */ +static const struct sched_class stop_sched_class = { + .next = &rt_sched_class, + + .enqueue_task = enqueue_task_stop, + .dequeue_task = dequeue_task_stop, + .yield_task = yield_task_stop, + + .check_preempt_curr = check_preempt_curr_stop, + + .pick_next_task = pick_next_task_stop, + .put_prev_task = put_prev_task_stop, + +#ifdef CONFIG_SMP + .select_task_rq = select_task_rq_stop, +#endif + + .set_curr_task = set_curr_task_stop, + .task_tick = task_tick_stop, + + .get_rr_interval = get_rr_interval_stop, + + .prio_changed = prio_changed_stop, + .switched_to = switched_to_stop, + + /* no .task_new for stop tasks */ +}; diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index 4372ccb2512..090c28812ce 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -287,11 +287,12 @@ repeat: goto repeat; } +extern void sched_set_stop_task(int cpu, struct task_struct *stop); + /* manage stopper for a cpu, mostly lifted from sched migration thread mgmt */ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { - struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; unsigned int cpu = (unsigned long)hcpu; struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); struct task_struct *p; @@ -304,13 +305,13 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb, cpu); if (IS_ERR(p)) return NOTIFY_BAD; - sched_setscheduler_nocheck(p, SCHED_FIFO, ¶m); get_task_struct(p); + kthread_bind(p, cpu); + sched_set_stop_task(cpu, p); stopper->thread = p; break; case CPU_ONLINE: - kthread_bind(stopper->thread, cpu); /* strictly unnecessary, as first user will wake it */ wake_up_process(stopper->thread); /* mark enabled */ @@ -325,6 +326,7 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb, { struct cpu_stop_work *work; + sched_set_stop_task(cpu, NULL); /* kill the stopper */ kthread_stop(stopper->thread); /* drain remaining works */ -- cgit v1.2.3-70-g09d2 From 
17bdcf949d03306b308c5fb694849cd35f119807 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 11 Oct 2010 16:36:51 +0200 Subject: sched: Drop all load weight manipulation for RT tasks Load weights are for CFS; they do not belong in RT tasks. This makes all RT scheduling classes leave the CFS weights alone. This fixes a real bug as well: I noticed the following phenomenon: a process elevated to SCHED_RR forks with SCHED_RESET_ON_FORK set, and the child is indeed SCHED_OTHER, and the nice value is indeed reset to 0. However, the weight inserted by set_load_weight() remains at 0, giving the task insignificant priority. With this fix, the weight is reset to what the task had before being elevated to SCHED_RR/SCHED_FIFO. Cc: Lennart Poettering Cc: stable@kernel.org Signed-off-by: Linus Walleij Signed-off-by: Peter Zijlstra LKML-Reference: <1286807811-10568-1-git-send-email-linus.walleij@stericsson.com> Signed-off-by: Ingo Molnar --- kernel/sched.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 5f64fed56a4..728081a7ef1 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -1855,12 +1855,6 @@ static void dec_nr_running(struct rq *rq) static void set_load_weight(struct task_struct *p) { - if (task_has_rt_policy(p)) { - p->se.load.weight = 0; - p->se.load.inv_weight = WMULT_CONST; - return; - } - /* * SCHED_IDLE tasks get minimal weight: */ -- cgit v1.2.3-70-g09d2 From 620162505e5d46bc4494b1761743e4b0b3bf8e16 Mon Sep 17 00:00:00 2001 From: Hitoshi Mitake Date: Tue, 5 Oct 2010 18:01:51 +0900 Subject: lockdep: Add improved subclass caching Currently, lockdep_map caches only one class, the one with subclass == 0, and looks up the hash table of classes whenever subclass != 0. This is usually not a problem, because the subclass != 0 case is rare. But the locks of struct rq are acquired with subclass == 1 when task migration is executed, and task migration is a high-frequency event. So I modified lockdep to cache subclasses. I measured the score of perf bench sched messaging. This patch has a slight but consistent effect (on the order of milliseconds to tens of milliseconds) when lots of tasks are running. I show the results at the tail of this description. NR_LOCKDEP_CACHING_CLASSES specifies how many classes can be cached in an instance of lockdep_map. I discussed this approach with Peter Zijlstra at LinuxCon Japan, and he pointed out that caching all eight subclasses is clearly a waste of memory. So the number of cached classes should be configurable.
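A condensed sketch of the caching scheme (drawn from the hunks below; illustrative, not a new interface): each lockdep_map grows a small per-subclass cache that the acquire fast path consults before falling back to the class hash table via register_lock_class():

/* Cache subclass 0 and the single-depth subclass (e.g. rq->lock). */
#define NR_LOCKDEP_CACHING_CLASSES	2

struct lockdep_map {
	struct lock_class_key	*key;
	struct lock_class	*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	/* ... */
};

/* Fast path in __lock_acquire(), as in the diff below: */
if (subclass < NR_LOCKDEP_CACHING_CLASSES)
	class = lock->class_cache[subclass];
if (unlikely(!class))
	class = register_lock_class(lock, subclass, 0);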
=== Score comparison of benchmarks === # "min" means best score, and "max" means worst score for i in `seq 1 10`; do ./perf bench -f simple sched messaging; done before: min: 0.565000, max: 0.583000, avg: 0.572500 after: min: 0.559000, max: 0.568000, avg: 0.563300 # with more processes for i in `seq 1 10`; do ./perf bench -f simple sched messaging -g 40; done before: min: 2.274000, max: 2.298000, avg: 2.286300 after: min: 2.242000, max: 2.270000, avg: 2.259700 Signed-off-by: Hitoshi Mitake Cc: Frederic Weisbecker Signed-off-by: Peter Zijlstra LKML-Reference: <1286269311-28336-2-git-send-email-mitake@dcl.info.waseda.ac.jp> Signed-off-by: Ingo Molnar --- include/linux/lockdep.h | 13 ++++++++++++- kernel/lockdep.c | 25 ++++++++++++++++++------- 2 files changed, 30 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 06aed8305bf..2186a64ee4b 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -31,6 +31,17 @@ extern int lock_stat; #define MAX_LOCKDEP_SUBCLASSES 8UL +/* + * NR_LOCKDEP_CACHING_CLASSES ... Number of classes + * cached in the instance of lockdep_map + * + * Currently main class (subclass == 0) and signle depth subclass + * are cached in lockdep_map. This optimization is mainly targeting + * on rq->lock. double_rq_lock() acquires this highly competitive with + * single depth. + */ +#define NR_LOCKDEP_CACHING_CLASSES 2 + /* * Lock-classes are keyed via unique addresses, by embedding the * lockclass-key into the kernel (or module) .data section. (For @@ -138,7 +149,7 @@ void clear_lock_stats(struct lock_class *class); */ struct lockdep_map { struct lock_class_key *key; - struct lock_class *class_cache; + struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES]; const char *name; #ifdef CONFIG_LOCK_STAT int cpu; diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 84baa71cfda..bc4d32871f9 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -774,7 +774,9 @@ out_unlock_set: raw_local_irq_restore(flags); if (!subclass || force) - lock->class_cache = class; + lock->class_cache[0] = class; + else if (subclass < NR_LOCKDEP_CACHING_CLASSES) + lock->class_cache[subclass] = class; if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass)) return NULL; @@ -2679,7 +2681,11 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this, void lockdep_init_map(struct lockdep_map *lock, const char *name, struct lock_class_key *key, int subclass) { - lock->class_cache = NULL; + int i; + + for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++) + lock->class_cache[i] = NULL; + #ifdef CONFIG_LOCK_STAT lock->cpu = raw_smp_processor_id(); #endif @@ -2750,10 +2756,10 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, if (lock->key == &__lockdep_no_validate__) check = 1; - if (!subclass) - class = lock->class_cache; + if (subclass < NR_LOCKDEP_CACHING_CLASSES) + class = lock->class_cache[subclass]; /* - * Not cached yet or subclass? + * Not cached? 
*/ if (unlikely(!class)) { class = register_lock_class(lock, subclass, 0); @@ -2918,7 +2924,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock) return 1; if (hlock->references) { - struct lock_class *class = lock->class_cache; + struct lock_class *class = lock->class_cache[0]; if (!class) class = look_up_lock_class(lock, 0); @@ -3559,7 +3565,12 @@ void lockdep_reset_lock(struct lockdep_map *lock) if (list_empty(head)) continue; list_for_each_entry_safe(class, next, head, hash_entry) { - if (unlikely(class == lock->class_cache)) { + int match = 0; + + for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++) + match |= class == lock->class_cache[j]; + + if (unlikely(match)) { if (debug_locks_off_graph_unlock()) WARN_ON(1); goto out_restore; -- cgit v1.2.3-70-g09d2 From 4ba053c04aece1f4734056f21b751eee47ea3fb1 Mon Sep 17 00:00:00 2001 From: Hitoshi Mitake Date: Wed, 13 Oct 2010 17:30:26 +0900 Subject: lockdep: Check the depth of subclass Currently, look_up_lock_class() doesn't check its "subclass" parameter. This rarely causes problems, because the main caller of this function, register_lock_class(), checks it. But register_lock_class() is not the only function which calls look_up_lock_class(): lock_set_class() and its callees also call it, and lock_set_class() doesn't check this parameter. This will cause problems when the value of subclass is larger than MAX_LOCKDEP_SUBCLASSES, because the address (used as the key of the class) calculated with a too-large subclass may point to another key in a different lock_class_key. Of course this problem depends on the memory layout and occurs only with really low probability. Signed-off-by: Hitoshi Mitake Cc: Dmitry Torokhov Cc: Vojtech Pavlik Cc: Frederic Weisbecker Signed-off-by: Peter Zijlstra LKML-Reference: <1286958626-986-1-git-send-email-mitake@dcl.info.waseda.ac.jp> Signed-off-by: Ingo Molnar --- kernel/lockdep.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/lockdep.c b/kernel/lockdep.c index bc4d32871f9..42ba65dff7d 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -639,6 +639,16 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass) } #endif + if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) { + debug_locks_off(); + printk(KERN_ERR + "BUG: looking up invalid subclass: %u\n", subclass); + printk(KERN_ERR + "turning off the locking correctness validator.\n"); + dump_stack(); + return NULL; + } + /* * Static locks do not have their class-keys yet - for them the key * is the lock object itself: @@ -2745,14 +2755,6 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) return 0; - if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) { - debug_locks_off(); - printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n"); - printk("turning off the locking correctness validator.\n"); - dump_stack(); - return 0; - } - if (lock->key == &__lockdep_no_validate__) check = 1; -- cgit v1.2.3-70-g09d2 From c530ccd9a1864a44a7ff35826681229ce9f2357a Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Fri, 15 Oct 2010 15:26:01 +0200 Subject: perf_events: Fix bogus context time tracking You can only call update_context_time() when the context is active, i.e., the thread it is attached to is still running. However, perf_event_read() can be called even when the context is inactive, e.g., when user space read()s the counters.
The call to update_context_time() must be conditioned on the status of the context; otherwise, bogus time_enabled and time_running values may be returned. Here is an example on AMD64. The task program is an example from libpfm4. The -p option prints deltas every 1s. $ task -p -e cpu_clk_unhalted sleep 5 2,266,610 cpu_clk_unhalted (0.00% scaling, ena=2,158,982, run=2,158,982) 0 cpu_clk_unhalted (0.00% scaling, ena=2,158,982, run=2,158,982) 0 cpu_clk_unhalted (0.00% scaling, ena=2,158,982, run=2,158,982) 0 cpu_clk_unhalted (0.00% scaling, ena=2,158,982, run=2,158,982) 0 cpu_clk_unhalted (0.00% scaling, ena=2,158,982, run=2,158,982) 5,242,358,071 cpu_clk_unhalted (99.95% scaling, ena=5,000,359,984, run=2,319,270) Whereas if you don't read deltas, e.g., with no call to perf_event_read() until the process terminates: $ task -e cpu_clk_unhalted sleep 5 2,497,783 cpu_clk_unhalted (0.00% scaling, ena=2,376,899, run=2,376,899) Notice that time_enabled and time_running are bogus in the first example, causing bogus scaling. This patch fixes the problem by conditionally calling update_context_time() in perf_event_read(). Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra Cc: stable@kernel.org LKML-Reference: <4cb856dc.51edd80a.5ae0.38fb@mx.google.com> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 1ec3916ffef..e7eeba1794f 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1780,7 +1780,13 @@ static u64 perf_event_read(struct perf_event *event) unsigned long flags; raw_spin_lock_irqsave(&ctx->lock, flags); - update_context_time(ctx); + /* + * may read while context is not active + * (e.g., thread is blocked), in that case + * we cannot update context time + */ + if (ctx->is_active) + update_context_time(ctx); update_event_times(event); raw_spin_unlock_irqrestore(&ctx->lock, flags); } -- cgit v1.2.3-70-g09d2 From 8e5fc1a7320baf6076391607515dceb61319b36a Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Fri, 15 Oct 2010 16:54:01 +0200 Subject: perf_events: Fix transaction recovery in group_sched_in() The group_sched_in() function uses a transactional approach to schedule a group of events. In a group, either all events are scheduled or none are. To schedule each event in, the function calls event_sched_in(). In case of error, event_sched_out() is called on each event in the group. The problem is that event_sched_out() does not completely cancel the effects of event_sched_in(). Furthermore, event_sched_out() changes the state of the event as if it had run, which is not true in this particular case. Those inconsistencies impact time tracking fields and may lead to events in a group not all reporting the same time_enabled and time_running values. This is demonstrated with the example below: $ task -eunhalted_core_cycles,baclears,baclears -e unhalted_core_cycles,baclears,baclears sleep 5 1946101 unhalted_core_cycles (32.85% scaling, ena=829181, run=556827) 11423 baclears (32.85% scaling, ena=829181, run=556827) 7671 baclears (0.00% scaling, ena=556827, run=556827) 2250443 unhalted_core_cycles (57.83% scaling, ena=962822, run=405995) 11705 baclears (57.83% scaling, ena=962822, run=405995) 11705 baclears (57.83% scaling, ena=962822, run=405995) Notice that in the first group, the last baclears event does not report the same timings as its siblings. This issue comes from the fact that tstamp_stopped is updated by event_sched_out() as if the event had actually run.
To solve the issue, we must ensure that, in case of error, there is no change in the event state whatsoever. That means timings must remain as they were when entering group_sched_in(). To do this we defer updating tstamp_running until we know the transaction succeeded. Therefore, we have split event_sched_in() in two parts separating the update to tstamp_running. Similarly, in case of error, we do not want to update tstamp_stopped. Therefore, we have split event_sched_out() in two parts separating the update to tstamp_stopped. With this patch, we now get the following output: $ task -eunhalted_core_cycles,baclears,baclears -e unhalted_core_cycles,baclears,baclears sleep 5 2492050 unhalted_core_cycles (71.75% scaling, ena=1093330, run=308841) 11243 baclears (71.75% scaling, ena=1093330, run=308841) 11243 baclears (71.75% scaling, ena=1093330, run=308841) 1852746 unhalted_core_cycles (0.00% scaling, ena=784489, run=784489) 9253 baclears (0.00% scaling, ena=784489, run=784489) 9253 baclears (0.00% scaling, ena=784489, run=784489) Note that the uneven timing between groups is a side effect of the process spending most of its time sleeping, i.e., not enough event rotations (but that's a separate issue). Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra LKML-Reference: <4cb86b4c.41e9d80a.44e9.3e19@mx.google.com> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 76 ++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 63 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index e7eeba1794f..634f86a4b2f 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -412,8 +412,8 @@ event_filter_match(struct perf_event *event) return event->cpu == -1 || event->cpu == smp_processor_id(); } -static void -event_sched_out(struct perf_event *event, +static int +__event_sched_out(struct perf_event *event, struct perf_cpu_context *cpuctx, struct perf_event_context *ctx) { @@ -432,14 +432,13 @@ event_sched_out(struct perf_event *event, } if (event->state != PERF_EVENT_STATE_ACTIVE) - return; + return 0; event->state = PERF_EVENT_STATE_INACTIVE; if (event->pending_disable) { event->pending_disable = 0; event->state = PERF_EVENT_STATE_OFF; } - event->tstamp_stopped = ctx->time; event->pmu->del(event, 0); event->oncpu = -1; @@ -448,6 +447,19 @@ event_sched_out(struct perf_event *event, ctx->nr_active--; if (event->attr.exclusive || !cpuctx->active_oncpu) cpuctx->exclusive = 0; + return 1; +} + +static void +event_sched_out(struct perf_event *event, + struct perf_cpu_context *cpuctx, + struct perf_event_context *ctx) +{ + int ret; + + ret = __event_sched_out(event, cpuctx, ctx); + if (ret) + event->tstamp_stopped = ctx->time; } static void @@ -647,7 +659,7 @@ retry: } static int -event_sched_in(struct perf_event *event, +__event_sched_in(struct perf_event *event, struct perf_cpu_context *cpuctx, struct perf_event_context *ctx) { @@ -667,8 +679,6 @@ event_sched_in(struct perf_event *event, return -EAGAIN; } - event->tstamp_running += ctx->time - event->tstamp_stopped; - if (!is_software_event(event)) cpuctx->active_oncpu++; ctx->nr_active++; @@ -679,6 +689,35 @@ event_sched_in(struct perf_event *event, return 0; } +static inline int +event_sched_in(struct perf_event *event, + struct perf_cpu_context *cpuctx, + struct perf_event_context *ctx) +{ + int ret = __event_sched_in(event, cpuctx, ctx); + if (ret) + return ret; + event->tstamp_running += ctx->time - event->tstamp_stopped; + return 0; +} + +static void 
+group_commit_event_sched_in(struct perf_event *group_event, + struct perf_cpu_context *cpuctx, + struct perf_event_context *ctx) +{ + struct perf_event *event; + u64 now = ctx->time; + + group_event->tstamp_running += now - group_event->tstamp_stopped; + /* + * Schedule in siblings as one group (if any): + */ + list_for_each_entry(event, &group_event->sibling_list, group_entry) { + event->tstamp_running += now - event->tstamp_stopped; + } +} + static int group_sched_in(struct perf_event *group_event, struct perf_cpu_context *cpuctx, @@ -692,7 +731,13 @@ group_sched_in(struct perf_event *group_event, pmu->start_txn(pmu); - if (event_sched_in(group_event, cpuctx, ctx)) { + /* + * use __event_sched_in() to delay updating tstamp_running + * until the transaction is committed. In case of failure + * we will keep an unmodified tstamp_running which is a + * requirement to get correct timing information + */ + if (__event_sched_in(group_event, cpuctx, ctx)) { pmu->cancel_txn(pmu); return -EAGAIN; } @@ -701,26 +746,31 @@ * Schedule in siblings as one group (if any): */ list_for_each_entry(event, &group_event->sibling_list, group_entry) { - if (event_sched_in(event, cpuctx, ctx)) { + if (__event_sched_in(event, cpuctx, ctx)) { partial_group = event; goto group_error; } } - if (!pmu->commit_txn(pmu)) + if (!pmu->commit_txn(pmu)) { + /* commit tstamp_running */ + group_commit_event_sched_in(group_event, cpuctx, ctx); return 0; - + } group_error: /* * Groups can be scheduled in as one unit only, so undo any * partial group before returning: + * + * use __event_sched_out() to avoid updating tstamp_stopped + * because the event never actually ran */ list_for_each_entry(event, &group_event->sibling_list, group_entry) { if (event == partial_group) break; - event_sched_out(event, cpuctx, ctx); + __event_sched_out(event, cpuctx, ctx); } - event_sched_out(group_event, cpuctx, ctx); + __event_sched_out(group_event, cpuctx, ctx); pmu->cancel_txn(pmu); -- cgit v1.2.3-70-g09d2 From e360adbe29241a0194e10e20595360dd7b98a2b3 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 14 Oct 2010 14:01:34 +0800 Subject: irq_work: Add generic hardirq context callbacks Provide a mechanism that allows running code in IRQ context. It is most useful for NMI code that needs to interact with the rest of the system -- such as waking up a task to drain buffers. Perf currently has such a mechanism, so extract that and provide it as a generic feature, independent of perf so that others may also benefit. The IRQ context callback is generated through self-IPIs where possible, or on architectures like powerpc the decrementer (the built-in timer facility) is set to generate an interrupt immediately. Architectures that don't have anything like this have to make do with a callback from the timer tick. These architectures can call irq_work_run() at the tail of any IRQ handlers that might enqueue such work (like the perf IRQ handler) to avoid undue latencies in processing the work.
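To make the new interface concrete, here is a minimal usage sketch. This is hypothetical driver code, not part of the patch: only struct irq_work, init_irq_work(), irq_work_queue() and irq_work_sync() come from the patch; the my_* names and the wait queue are invented for illustration.

#include <linux/init.h>
#include <linux/irq_work.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_waitq);	/* hypothetical consumer */
static struct irq_work my_work;

static void my_work_cb(struct irq_work *work)
{
	/* Runs in hardirq context shortly after being raised,
	 * where waking tasks is allowed (unlike in NMI context). */
	wake_up(&my_waitq);
}

static int __init my_setup(void)
{
	init_irq_work(&my_work, my_work_cb);	/* set the callback once */
	return 0;
}

/* Hypothetical NMI path: no locks may be taken here, but the
 * enqueue is a lock-free cmpxchg() loop, so this is NMI-safe.
 * irq_work_queue() returns false if the entry is already pending. */
void my_nmi_handler(void)
{
	irq_work_queue(&my_work);
}

On teardown, irq_work_sync(&my_work) waits until the callback is no longer in progress before the work item may be freed.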
Signed-off-by: Peter Zijlstra Acked-by: Kyle McMartin Acked-by: Martin Schwidefsky [ various fixes ] Signed-off-by: Huang Ying LKML-Reference: <1287036094.7768.291.camel@yhuang-dev> Signed-off-by: Ingo Molnar --- arch/alpha/Kconfig | 1 + arch/alpha/include/asm/perf_event.h | 5 -- arch/alpha/kernel/time.c | 30 +++---- arch/arm/Kconfig | 1 + arch/arm/include/asm/perf_event.h | 12 --- arch/arm/kernel/perf_event.c | 8 +- arch/frv/Kconfig | 1 + arch/frv/lib/Makefile | 2 +- arch/frv/lib/perf_event.c | 19 ---- arch/parisc/Kconfig | 1 + arch/parisc/include/asm/perf_event.h | 3 +- arch/powerpc/Kconfig | 1 + arch/powerpc/include/asm/paca.h | 2 +- arch/powerpc/kernel/time.c | 42 ++++----- arch/s390/Kconfig | 1 + arch/s390/include/asm/perf_event.h | 3 +- arch/sh/Kconfig | 1 + arch/sh/include/asm/perf_event.h | 7 -- arch/sparc/Kconfig | 2 + arch/sparc/include/asm/perf_event.h | 4 - arch/sparc/kernel/pcr.c | 8 +- arch/x86/Kconfig | 1 + arch/x86/include/asm/entry_arch.h | 4 +- arch/x86/include/asm/hardirq.h | 2 +- arch/x86/include/asm/hw_irq.h | 2 +- arch/x86/include/asm/irq_vectors.h | 4 +- arch/x86/kernel/Makefile | 1 + arch/x86/kernel/cpu/perf_event.c | 19 ---- arch/x86/kernel/entry_64.S | 6 +- arch/x86/kernel/irq.c | 8 +- arch/x86/kernel/irq_work.c | 30 +++++++ arch/x86/kernel/irqinit.c | 6 +- include/linux/irq_work.h | 20 +++++ include/linux/perf_event.h | 11 +-- init/Kconfig | 8 ++ kernel/Makefile | 2 + kernel/irq_work.c | 164 +++++++++++++++++++++++++++++++++++ kernel/perf_event.c | 104 ++-------------------- kernel/timer.c | 7 +- 39 files changed, 311 insertions(+), 242 deletions(-) delete mode 100644 arch/frv/lib/perf_event.c create mode 100644 arch/x86/kernel/irq_work.c create mode 100644 include/linux/irq_work.h create mode 100644 kernel/irq_work.c (limited to 'kernel') diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index b9647bb66d1..d04ccd73af4 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -9,6 +9,7 @@ config ALPHA select HAVE_IDE select HAVE_OPROFILE select HAVE_SYSCALL_WRAPPERS + select HAVE_IRQ_WORK select HAVE_PERF_EVENTS select HAVE_DMA_ATTRS help diff --git a/arch/alpha/include/asm/perf_event.h b/arch/alpha/include/asm/perf_event.h index 4157cd3c44a..fe792ca818f 100644 --- a/arch/alpha/include/asm/perf_event.h +++ b/arch/alpha/include/asm/perf_event.h @@ -1,11 +1,6 @@ #ifndef __ASM_ALPHA_PERF_EVENT_H #define __ASM_ALPHA_PERF_EVENT_H -/* Alpha only supports software events through this interface. 
*/ -extern void set_perf_event_pending(void); - -#define PERF_EVENT_INDEX_OFFSET 0 - #ifdef CONFIG_PERF_EVENTS extern void init_hw_perf_events(void); #else diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c index 396af1799ea..0f1d8493cfc 100644 --- a/arch/alpha/kernel/time.c +++ b/arch/alpha/kernel/time.c @@ -41,7 +41,7 @@ #include #include #include -#include +#include #include #include @@ -83,25 +83,25 @@ static struct { unsigned long est_cycle_freq; -#ifdef CONFIG_PERF_EVENTS +#ifdef CONFIG_IRQ_WORK -DEFINE_PER_CPU(u8, perf_event_pending); +DEFINE_PER_CPU(u8, irq_work_pending); -#define set_perf_event_pending_flag() __get_cpu_var(perf_event_pending) = 1 -#define test_perf_event_pending() __get_cpu_var(perf_event_pending) -#define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0 +#define set_irq_work_pending_flag() __get_cpu_var(irq_work_pending) = 1 +#define test_irq_work_pending() __get_cpu_var(irq_work_pending) +#define clear_irq_work_pending() __get_cpu_var(irq_work_pending) = 0 -void set_perf_event_pending(void) +void set_irq_work_pending(void) { - set_perf_event_pending_flag(); + set_irq_work_pending_flag(); } -#else /* CONFIG_PERF_EVENTS */ +#else /* CONFIG_IRQ_WORK */ -#define test_perf_event_pending() 0 -#define clear_perf_event_pending() +#define test_irq_work_pending() 0 +#define clear_irq_work_pending() -#endif /* CONFIG_PERF_EVENTS */ +#endif /* CONFIG_IRQ_WORK */ static inline __u32 rpcc(void) @@ -191,9 +191,9 @@ irqreturn_t timer_interrupt(int irq, void *dev) write_sequnlock(&xtime_lock); - if (test_perf_event_pending()) { - clear_perf_event_pending(); - perf_event_do_pending(); + if (test_irq_work_pending()) { + clear_irq_work_pending(); + irq_work_run(); } #ifndef CONFIG_SMP diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 88c97bc7a6f..7c0dfccd05b 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -23,6 +23,7 @@ config ARM select HAVE_KERNEL_GZIP select HAVE_KERNEL_LZO select HAVE_KERNEL_LZMA + select HAVE_IRQ_WORK select HAVE_PERF_EVENTS select PERF_USE_VMALLOC select HAVE_REGS_AND_STACK_ACCESS_API diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h index b5799a3b711..c4aa4e8c6af 100644 --- a/arch/arm/include/asm/perf_event.h +++ b/arch/arm/include/asm/perf_event.h @@ -12,18 +12,6 @@ #ifndef __ARM_PERF_EVENT_H__ #define __ARM_PERF_EVENT_H__ -/* - * NOP: on *most* (read: all supported) ARM platforms, the performance - * counter interrupts are regular interrupts and not an NMI. This - * means that when we receive the interrupt we can call - * perf_event_do_pending() that handles all of the work with - * interrupts disabled. - */ -static inline void -set_perf_event_pending(void) -{ -} - /* ARM performance counters start from 1 (in the cp15 accesses) so use the * same indexes here for consistency. */ #define PERF_EVENT_INDEX_OFFSET 1 diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 6cc6521881a..49643b1467e 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -1092,7 +1092,7 @@ armv6pmu_handle_irq(int irq_num, * platforms that can have the PMU interrupts raised as an NMI, this * will not work. */ - perf_event_do_pending(); + irq_work_run(); return IRQ_HANDLED; } @@ -2068,7 +2068,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) * platforms that can have the PMU interrupts raised as an NMI, this * will not work. 
*/ - perf_event_do_pending(); + irq_work_run(); return IRQ_HANDLED; } @@ -2436,7 +2436,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev) armpmu->disable(hwc, idx); } - perf_event_do_pending(); + irq_work_run(); /* * Re-enable the PMU. @@ -2763,7 +2763,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev) armpmu->disable(hwc, idx); } - perf_event_do_pending(); + irq_work_run(); /* * Re-enable the PMU. diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig index 16399bd2499..0f2417df632 100644 --- a/arch/frv/Kconfig +++ b/arch/frv/Kconfig @@ -7,6 +7,7 @@ config FRV default y select HAVE_IDE select HAVE_ARCH_TRACEHOOK + select HAVE_IRQ_WORK select HAVE_PERF_EVENTS config ZONE_DMA diff --git a/arch/frv/lib/Makefile b/arch/frv/lib/Makefile index f4709756d0d..4ff2fb1e6b1 100644 --- a/arch/frv/lib/Makefile +++ b/arch/frv/lib/Makefile @@ -5,4 +5,4 @@ lib-y := \ __ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \ checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \ - outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o perf_event.o + outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o diff --git a/arch/frv/lib/perf_event.c b/arch/frv/lib/perf_event.c deleted file mode 100644 index 9ac5acfd2e9..00000000000 --- a/arch/frv/lib/perf_event.c +++ /dev/null @@ -1,19 +0,0 @@ -/* Performance event handling - * - * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. - */ - -#include - -/* - * mark the performance event as pending - */ -void set_perf_event_pending(void) -{ -} diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 907417d187e..79a04a9394d 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -16,6 +16,7 @@ config PARISC select RTC_DRV_GENERIC select INIT_ALL_POSSIBLE select BUG + select HAVE_IRQ_WORK select HAVE_PERF_EVENTS select GENERIC_ATOMIC64 if !64BIT help diff --git a/arch/parisc/include/asm/perf_event.h b/arch/parisc/include/asm/perf_event.h index cc146427d8f..1e0fd8ba6c0 100644 --- a/arch/parisc/include/asm/perf_event.h +++ b/arch/parisc/include/asm/perf_event.h @@ -1,7 +1,6 @@ #ifndef __ASM_PARISC_PERF_EVENT_H #define __ASM_PARISC_PERF_EVENT_H -/* parisc only supports software events through this interface. 
*/ -static inline void set_perf_event_pending(void) { } +/* Empty, just to avoid compiling error */ #endif /* __ASM_PARISC_PERF_EVENT_H */ diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 631e5a0fb6a..4b1e521d966 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -138,6 +138,7 @@ config PPC select HAVE_OPROFILE select HAVE_SYSCALL_WRAPPERS if PPC64 select GENERIC_ATOMIC64 if PPC32 + select HAVE_IRQ_WORK select HAVE_PERF_EVENTS select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64 diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 1ff6662f7fa..9b287fdd8ea 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -129,7 +129,7 @@ struct paca_struct { u8 soft_enabled; /* irq soft-enable flag */ u8 hard_enabled; /* set if irqs are enabled in MSR */ u8 io_sync; /* writel() needs spin_unlock sync */ - u8 perf_event_pending; /* PM interrupt while soft-disabled */ + u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */ /* Stuff for accurate time accounting */ u64 user_time; /* accumulated usermode TB ticks */ diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 8533b3b83f5..54888eb10c3 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -53,7 +53,7 @@ #include #include #include -#include +#include #include #include @@ -493,60 +493,60 @@ void __init iSeries_time_init_early(void) } #endif /* CONFIG_PPC_ISERIES */ -#ifdef CONFIG_PERF_EVENTS +#ifdef CONFIG_IRQ_WORK /* * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable... */ #ifdef CONFIG_PPC64 -static inline unsigned long test_perf_event_pending(void) +static inline unsigned long test_irq_work_pending(void) { unsigned long x; asm volatile("lbz %0,%1(13)" : "=r" (x) - : "i" (offsetof(struct paca_struct, perf_event_pending))); + : "i" (offsetof(struct paca_struct, irq_work_pending))); return x; } -static inline void set_perf_event_pending_flag(void) +static inline void set_irq_work_pending_flag(void) { asm volatile("stb %0,%1(13)" : : "r" (1), - "i" (offsetof(struct paca_struct, perf_event_pending))); + "i" (offsetof(struct paca_struct, irq_work_pending))); } -static inline void clear_perf_event_pending(void) +static inline void clear_irq_work_pending(void) { asm volatile("stb %0,%1(13)" : : "r" (0), - "i" (offsetof(struct paca_struct, perf_event_pending))); + "i" (offsetof(struct paca_struct, irq_work_pending))); } #else /* 32-bit */ -DEFINE_PER_CPU(u8, perf_event_pending); +DEFINE_PER_CPU(u8, irq_work_pending); -#define set_perf_event_pending_flag() __get_cpu_var(perf_event_pending) = 1 -#define test_perf_event_pending() __get_cpu_var(perf_event_pending) -#define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0 +#define set_irq_work_pending_flag() __get_cpu_var(irq_work_pending) = 1 +#define test_irq_work_pending() __get_cpu_var(irq_work_pending) +#define clear_irq_work_pending() __get_cpu_var(irq_work_pending) = 0 #endif /* 32 vs 64 bit */ -void set_perf_event_pending(void) +void set_irq_work_pending(void) { preempt_disable(); - set_perf_event_pending_flag(); + set_irq_work_pending_flag(); set_dec(1); preempt_enable(); } -#else /* CONFIG_PERF_EVENTS */ +#else /* CONFIG_IRQ_WORK */ -#define test_perf_event_pending() 0 -#define clear_perf_event_pending() +#define test_irq_work_pending() 0 +#define clear_irq_work_pending() -#endif /* CONFIG_PERF_EVENTS */ +#endif /* CONFIG_IRQ_WORK */ /* * For iSeries shared processors, we have to let the 
hypervisor @@ -587,9 +587,9 @@ void timer_interrupt(struct pt_regs * regs) calculate_steal_time(); - if (test_perf_event_pending()) { - clear_perf_event_pending(); - perf_event_do_pending(); + if (test_irq_work_pending()) { + clear_irq_work_pending(); + irq_work_run(); } #ifdef CONFIG_PPC_ISERIES diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index f0777a47e3a..958f0dadead 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -95,6 +95,7 @@ config S390 select HAVE_KVM if 64BIT select HAVE_ARCH_TRACEHOOK select INIT_ALL_POSSIBLE + select HAVE_IRQ_WORK select HAVE_PERF_EVENTS select HAVE_KERNEL_GZIP select HAVE_KERNEL_BZIP2 diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h index 3840cbe7763..a75f168d271 100644 --- a/arch/s390/include/asm/perf_event.h +++ b/arch/s390/include/asm/perf_event.h @@ -4,7 +4,6 @@ * Copyright 2009 Martin Schwidefsky, IBM Corporation. */ -static inline void set_perf_event_pending(void) {} -static inline void clear_perf_event_pending(void) {} +/* Empty, just to avoid compiling error */ #define PERF_EVENT_INDEX_OFFSET 0 diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 35b6c3f8517..35b6879628a 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -16,6 +16,7 @@ config SUPERH select HAVE_ARCH_TRACEHOOK select HAVE_DMA_API_DEBUG select HAVE_DMA_ATTRS + select HAVE_IRQ_WORK select HAVE_PERF_EVENTS select PERF_USE_VMALLOC select HAVE_KERNEL_GZIP diff --git a/arch/sh/include/asm/perf_event.h b/arch/sh/include/asm/perf_event.h index 3d0c9f36d15..14308bed7ea 100644 --- a/arch/sh/include/asm/perf_event.h +++ b/arch/sh/include/asm/perf_event.h @@ -26,11 +26,4 @@ extern int register_sh_pmu(struct sh_pmu *); extern int reserve_pmc_hardware(void); extern void release_pmc_hardware(void); -static inline void set_perf_event_pending(void) -{ - /* Nothing to see here, move along. 
*/ -} - -#define PERF_EVENT_INDEX_OFFSET 0 - #endif /* __ASM_SH_PERF_EVENT_H */ diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 9212cd42a83..3e9d31401fb 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -26,6 +26,7 @@ config SPARC select ARCH_WANT_OPTIONAL_GPIOLIB select RTC_CLASS select RTC_DRV_M48T59 + select HAVE_IRQ_WORK select HAVE_PERF_EVENTS select PERF_USE_VMALLOC select HAVE_DMA_ATTRS @@ -54,6 +55,7 @@ config SPARC64 select RTC_DRV_BQ4802 select RTC_DRV_SUN4V select RTC_DRV_STARFIRE + select HAVE_IRQ_WORK select HAVE_PERF_EVENTS select PERF_USE_VMALLOC diff --git a/arch/sparc/include/asm/perf_event.h b/arch/sparc/include/asm/perf_event.h index 727af70646c..6e8bfa1786d 100644 --- a/arch/sparc/include/asm/perf_event.h +++ b/arch/sparc/include/asm/perf_event.h @@ -1,10 +1,6 @@ #ifndef __ASM_SPARC_PERF_EVENT_H #define __ASM_SPARC_PERF_EVENT_H -extern void set_perf_event_pending(void); - -#define PERF_EVENT_INDEX_OFFSET 0 - #ifdef CONFIG_PERF_EVENTS #include diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c index c4a6a50b484..b87873c0e8e 100644 --- a/arch/sparc/kernel/pcr.c +++ b/arch/sparc/kernel/pcr.c @@ -7,7 +7,7 @@ #include #include -#include +#include #include #include @@ -43,14 +43,14 @@ void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs) old_regs = set_irq_regs(regs); irq_enter(); -#ifdef CONFIG_PERF_EVENTS - perf_event_do_pending(); +#ifdef CONFIG_IRQ_WORK + irq_work_run(); #endif irq_exit(); set_irq_regs(old_regs); } -void set_perf_event_pending(void) +void arch_irq_work_raise(void) { set_softint(1 << PIL_DEFERRED_PCR_WORK); } diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 9815221976a..fd227d6b8d9 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -25,6 +25,7 @@ config X86 select HAVE_IDE select HAVE_OPROFILE select HAVE_PERF_EVENTS if (!M386 && !M486) + select HAVE_IRQ_WORK select HAVE_IOREMAP_PROT select HAVE_KPROBES select ARCH_WANT_OPTIONAL_GPIOLIB diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h index 8e8ec663a98..b8e96a18676 100644 --- a/arch/x86/include/asm/entry_arch.h +++ b/arch/x86/include/asm/entry_arch.h @@ -49,8 +49,8 @@ BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR) BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR) BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR) -#ifdef CONFIG_PERF_EVENTS -BUILD_INTERRUPT(perf_pending_interrupt, LOCAL_PENDING_VECTOR) +#ifdef CONFIG_IRQ_WORK +BUILD_INTERRUPT(irq_work_interrupt, IRQ_WORK_VECTOR) #endif #ifdef CONFIG_X86_THERMAL_VECTOR diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h index aeab29aee61..55e4de613f0 100644 --- a/arch/x86/include/asm/hardirq.h +++ b/arch/x86/include/asm/hardirq.h @@ -14,7 +14,7 @@ typedef struct { #endif unsigned int x86_platform_ipis; /* arch dependent */ unsigned int apic_perf_irqs; - unsigned int apic_pending_irqs; + unsigned int apic_irq_work_irqs; #ifdef CONFIG_SMP unsigned int irq_resched_count; unsigned int irq_call_count; diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 46c0fe05f23..3a54a1ca1a0 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -29,7 +29,7 @@ extern void apic_timer_interrupt(void); extern void x86_platform_ipi(void); extern void error_interrupt(void); -extern void perf_pending_interrupt(void); +extern void irq_work_interrupt(void); extern void spurious_interrupt(void); extern void thermal_interrupt(void); diff --git a/arch/x86/include/asm/irq_vectors.h 
b/arch/x86/include/asm/irq_vectors.h index e2ca3009255..6af0894dafb 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -114,9 +114,9 @@ #define X86_PLATFORM_IPI_VECTOR 0xed /* - * Performance monitoring pending work vector: + * IRQ work vector: */ -#define LOCAL_PENDING_VECTOR 0xec +#define IRQ_WORK_VECTOR 0xec #define UV_BAU_MESSAGE 0xea diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 9d3f485e5dd..7490bf8d145 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -35,6 +35,7 @@ obj-y := process_$(BITS).o signal.o entry_$(BITS).o obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o obj-y += time.o ioport.o ldt.o dumpstack.o obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o +obj-$(CONFIG_IRQ_WORK) += irq_work.o obj-$(CONFIG_X86_VISWS) += visws_quirks.o obj-$(CONFIG_X86_32) += probe_roms_32.o obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index e2513f26ba8..fe73c1844a9 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1196,25 +1196,6 @@ static int x86_pmu_handle_irq(struct pt_regs *regs) return handled; } -void smp_perf_pending_interrupt(struct pt_regs *regs) -{ - irq_enter(); - ack_APIC_irq(); - inc_irq_stat(apic_pending_irqs); - perf_event_do_pending(); - irq_exit(); -} - -void set_perf_event_pending(void) -{ -#ifdef CONFIG_X86_LOCAL_APIC - if (!x86_pmu.apic || !x86_pmu_initialized()) - return; - - apic->send_IPI_self(LOCAL_PENDING_VECTOR); -#endif -} - void perf_events_lapic_init(void) { if (!x86_pmu.apic || !x86_pmu_initialized()) diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 17be5ec7cbb..c375c79065f 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -1023,9 +1023,9 @@ apicinterrupt ERROR_APIC_VECTOR \ apicinterrupt SPURIOUS_APIC_VECTOR \ spurious_interrupt smp_spurious_interrupt -#ifdef CONFIG_PERF_EVENTS -apicinterrupt LOCAL_PENDING_VECTOR \ - perf_pending_interrupt smp_perf_pending_interrupt +#ifdef CONFIG_IRQ_WORK +apicinterrupt IRQ_WORK_VECTOR \ + irq_work_interrupt smp_irq_work_interrupt #endif /* diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 91fd0c70a18..44edb03fc9e 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -67,10 +67,10 @@ static int show_other_interrupts(struct seq_file *p, int prec) for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs); seq_printf(p, " Performance monitoring interrupts\n"); - seq_printf(p, "%*s: ", prec, "PND"); + seq_printf(p, "%*s: ", prec, "IWI"); for_each_online_cpu(j) - seq_printf(p, "%10u ", irq_stats(j)->apic_pending_irqs); - seq_printf(p, " Performance pending work\n"); + seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs); + seq_printf(p, " IRQ work interrupts\n"); #endif if (x86_platform_ipi_callback) { seq_printf(p, "%*s: ", prec, "PLT"); @@ -185,7 +185,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu) sum += irq_stats(cpu)->apic_timer_irqs; sum += irq_stats(cpu)->irq_spurious_count; sum += irq_stats(cpu)->apic_perf_irqs; - sum += irq_stats(cpu)->apic_pending_irqs; + sum += irq_stats(cpu)->apic_irq_work_irqs; #endif if (x86_platform_ipi_callback) sum += irq_stats(cpu)->x86_platform_ipis; diff --git a/arch/x86/kernel/irq_work.c b/arch/x86/kernel/irq_work.c new file mode 100644 index 00000000000..ca8f703a1e7 --- /dev/null +++ b/arch/x86/kernel/irq_work.c @@ -0,0 +1,30 @@ +/* + * x86 specific code for irq_work + * + * 
Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra + */ + +#include +#include +#include +#include + +void smp_irq_work_interrupt(struct pt_regs *regs) +{ + irq_enter(); + ack_APIC_irq(); + inc_irq_stat(apic_irq_work_irqs); + irq_work_run(); + irq_exit(); +} + +void arch_irq_work_raise(void) +{ +#ifdef CONFIG_X86_LOCAL_APIC + if (!cpu_has_apic) + return; + + apic->send_IPI_self(IRQ_WORK_VECTOR); + apic_wait_icr_idle(); +#endif +} diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index 990ae7cfc57..713969b9266 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c @@ -224,9 +224,9 @@ static void __init apic_intr_init(void) alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt); - /* Performance monitoring interrupts: */ -# ifdef CONFIG_PERF_EVENTS - alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt); + /* IRQ work interrupts: */ +# ifdef CONFIG_IRQ_WORK + alloc_intr_gate(IRQ_WORK_VECTOR, irq_work_interrupt); # endif #endif diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h new file mode 100644 index 00000000000..4fa09d4d0b7 --- /dev/null +++ b/include/linux/irq_work.h @@ -0,0 +1,20 @@ +#ifndef _LINUX_IRQ_WORK_H +#define _LINUX_IRQ_WORK_H + +struct irq_work { + struct irq_work *next; + void (*func)(struct irq_work *); +}; + +static inline +void init_irq_work(struct irq_work *entry, void (*func)(struct irq_work *)) +{ + entry->next = NULL; + entry->func = func; +} + +bool irq_work_queue(struct irq_work *entry); +void irq_work_run(void); +void irq_work_sync(struct irq_work *entry); + +#endif /* _LINUX_IRQ_WORK_H */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index a9227e98520..2ebfc9ae475 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -486,6 +486,7 @@ struct perf_guest_info_callbacks { #include #include #include +#include #include #include @@ -672,11 +673,6 @@ struct perf_buffer { void *data_pages[0]; }; -struct perf_pending_entry { - struct perf_pending_entry *next; - void (*func)(struct perf_pending_entry *); -}; - struct perf_sample_data; typedef void (*perf_overflow_handler_t)(struct perf_event *, int, @@ -784,7 +780,7 @@ struct perf_event { int pending_wakeup; int pending_kill; int pending_disable; - struct perf_pending_entry pending; + struct irq_work pending; atomic_t event_limit; @@ -898,8 +894,6 @@ extern int perf_event_init_task(struct task_struct *child); extern void perf_event_exit_task(struct task_struct *child); extern void perf_event_free_task(struct task_struct *task); extern void perf_event_delayed_put(struct task_struct *task); -extern void set_perf_event_pending(void); -extern void perf_event_do_pending(void); extern void perf_event_print_debug(void); extern void perf_pmu_disable(struct pmu *pmu); extern void perf_pmu_enable(struct pmu *pmu); @@ -1078,7 +1072,6 @@ static inline int perf_event_init_task(struct task_struct *child) { return 0; } static inline void perf_event_exit_task(struct task_struct *child) { } static inline void perf_event_free_task(struct task_struct *task) { } static inline void perf_event_delayed_put(struct task_struct *task) { } -static inline void perf_event_do_pending(void) { } static inline void perf_event_print_debug(void) { } static inline int perf_event_task_disable(void) { return -EINVAL; } static inline int perf_event_task_enable(void) { return -EINVAL; } diff --git a/init/Kconfig b/init/Kconfig index 2de5b1cbadd..1ef0b439908 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -21,6 
+21,13 @@ config CONSTRUCTORS depends on !UML default y +config HAVE_IRQ_WORK + bool + +config IRQ_WORK + bool + depends on HAVE_IRQ_WORK + menu "General setup" config EXPERIMENTAL @@ -987,6 +994,7 @@ config PERF_EVENTS default y if (PROFILING || PERF_COUNTERS) depends on HAVE_PERF_EVENTS select ANON_INODES + select IRQ_WORK help Enable kernel support for various performance events provided by software and hardware. diff --git a/kernel/Makefile b/kernel/Makefile index d52b473c99a..4d9bf5f8531 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -23,6 +23,7 @@ CFLAGS_REMOVE_rtmutex-debug.o = -pg CFLAGS_REMOVE_cgroup-debug.o = -pg CFLAGS_REMOVE_sched_clock.o = -pg CFLAGS_REMOVE_perf_event.o = -pg +CFLAGS_REMOVE_irq_work.o = -pg endif obj-$(CONFIG_FREEZER) += freezer.o @@ -100,6 +101,7 @@ obj-$(CONFIG_TRACING) += trace/ obj-$(CONFIG_X86_DS) += trace/ obj-$(CONFIG_RING_BUFFER) += trace/ obj-$(CONFIG_SMP) += sched_cpupri.o +obj-$(CONFIG_IRQ_WORK) += irq_work.o obj-$(CONFIG_PERF_EVENTS) += perf_event.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o diff --git a/kernel/irq_work.c b/kernel/irq_work.c new file mode 100644 index 00000000000..f16763ff848 --- /dev/null +++ b/kernel/irq_work.c @@ -0,0 +1,164 @@ +/* + * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra + * + * Provides a framework for enqueueing and running callbacks from hardirq + * context. The enqueueing is NMI-safe. + */ + +#include +#include +#include +#include + +/* + * An entry can be in one of four states: + * + * free NULL, 0 -> {claimed} : free to be used + * claimed NULL, 3 -> {pending} : claimed to be enqueued + * pending next, 3 -> {busy} : queued, pending callback + * busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed + * + * We use the lower two bits of the next pointer to keep PENDING and BUSY + * flags. + */ + +#define IRQ_WORK_PENDING 1UL +#define IRQ_WORK_BUSY 2UL +#define IRQ_WORK_FLAGS 3UL + +static inline bool irq_work_is_set(struct irq_work *entry, int flags) +{ + return (unsigned long)entry->next & flags; +} + +static inline struct irq_work *irq_work_next(struct irq_work *entry) +{ + unsigned long next = (unsigned long)entry->next; + next &= ~IRQ_WORK_FLAGS; + return (struct irq_work *)next; +} + +static inline struct irq_work *next_flags(struct irq_work *entry, int flags) +{ + unsigned long next = (unsigned long)entry; + next |= flags; + return (struct irq_work *)next; +} + +static DEFINE_PER_CPU(struct irq_work *, irq_work_list); + +/* + * Claim the entry so that no one else will poke at it. + */ +static bool irq_work_claim(struct irq_work *entry) +{ + struct irq_work *next, *nflags; + + do { + next = entry->next; + if ((unsigned long)next & IRQ_WORK_PENDING) + return false; + nflags = next_flags(next, IRQ_WORK_FLAGS); + } while (cmpxchg(&entry->next, next, nflags) != next); + + return true; +} + + +void __weak arch_irq_work_raise(void) +{ + /* + * Lame architectures will get the timer tick callback + */ +} + +/* + * Queue the entry and raise the IPI if needed. + */ +static void __irq_work_queue(struct irq_work *entry) +{ + struct irq_work **head, *next; + + head = &get_cpu_var(irq_work_list); + + do { + next = *head; + /* Can assign non-atomic because we keep the flags set. */ + entry->next = next_flags(next, IRQ_WORK_FLAGS); + } while (cmpxchg(head, next, entry) != next); + + /* The list was empty, raise self-interrupt to start processing. 
*/ + if (!irq_work_next(entry)) + arch_irq_work_raise(); + + put_cpu_var(irq_work_list); +} + +/* + * Enqueue the irq_work @entry, returns true on success, failure when the + * @entry was already enqueued by someone else. + * + * Can be re-enqueued while the callback is still in progress. + */ +bool irq_work_queue(struct irq_work *entry) +{ + if (!irq_work_claim(entry)) { + /* + * Already enqueued, can't do! + */ + return false; + } + + __irq_work_queue(entry); + return true; +} +EXPORT_SYMBOL_GPL(irq_work_queue); + +/* + * Run the irq_work entries on this cpu. Requires to be ran from hardirq + * context with local IRQs disabled. + */ +void irq_work_run(void) +{ + struct irq_work *list, **head; + + head = &__get_cpu_var(irq_work_list); + if (*head == NULL) + return; + + BUG_ON(!in_irq()); + BUG_ON(!irqs_disabled()); + + list = xchg(head, NULL); + while (list != NULL) { + struct irq_work *entry = list; + + list = irq_work_next(list); + + /* + * Clear the PENDING bit, after this point the @entry + * can be re-used. + */ + entry->next = next_flags(NULL, IRQ_WORK_BUSY); + entry->func(entry); + /* + * Clear the BUSY bit and return to the free state if + * no-one else claimed it meanwhile. + */ + cmpxchg(&entry->next, next_flags(NULL, IRQ_WORK_BUSY), NULL); + } +} +EXPORT_SYMBOL_GPL(irq_work_run); + +/* + * Synchronize against the irq_work @entry, ensures the entry is not + * currently in use. + */ +void irq_work_sync(struct irq_work *entry) +{ + WARN_ON_ONCE(irqs_disabled()); + + while (irq_work_is_set(entry, IRQ_WORK_BUSY)) + cpu_relax(); +} +EXPORT_SYMBOL_GPL(irq_work_sync); diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 634f86a4b2f..99b9700e74d 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -2206,12 +2206,11 @@ static void free_event_rcu(struct rcu_head *head) kfree(event); } -static void perf_pending_sync(struct perf_event *event); static void perf_buffer_put(struct perf_buffer *buffer); static void free_event(struct perf_event *event) { - perf_pending_sync(event); + irq_work_sync(&event->pending); if (!event->parent) { atomic_dec(&nr_events); @@ -3162,16 +3161,7 @@ void perf_event_wakeup(struct perf_event *event) } } -/* - * Pending wakeups - * - * Handle the case where we need to wakeup up from NMI (or rq->lock) context. - * - * The NMI bit means we cannot possibly take locks. Therefore, maintain a - * single linked list and use cmpxchg() to add entries lockless. 
- */ - -static void perf_pending_event(struct perf_pending_entry *entry) +static void perf_pending_event(struct irq_work *entry) { struct perf_event *event = container_of(entry, struct perf_event, pending); @@ -3187,89 +3177,6 @@ static void perf_pending_event(struct perf_pending_entry *entry) } } -#define PENDING_TAIL ((struct perf_pending_entry *)-1UL) - -static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = { - PENDING_TAIL, -}; - -static void perf_pending_queue(struct perf_pending_entry *entry, - void (*func)(struct perf_pending_entry *)) -{ - struct perf_pending_entry **head; - - if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL) - return; - - entry->func = func; - - head = &get_cpu_var(perf_pending_head); - - do { - entry->next = *head; - } while (cmpxchg(head, entry->next, entry) != entry->next); - - set_perf_event_pending(); - - put_cpu_var(perf_pending_head); -} - -static int __perf_pending_run(void) -{ - struct perf_pending_entry *list; - int nr = 0; - - list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL); - while (list != PENDING_TAIL) { - void (*func)(struct perf_pending_entry *); - struct perf_pending_entry *entry = list; - - list = list->next; - - func = entry->func; - entry->next = NULL; - /* - * Ensure we observe the unqueue before we issue the wakeup, - * so that we won't be waiting forever. - * -- see perf_not_pending(). - */ - smp_wmb(); - - func(entry); - nr++; - } - - return nr; -} - -static inline int perf_not_pending(struct perf_event *event) -{ - /* - * If we flush on whatever cpu we run, there is a chance we don't - * need to wait. - */ - get_cpu(); - __perf_pending_run(); - put_cpu(); - - /* - * Ensure we see the proper queue state before going to sleep - * so that we do not miss the wakeup. -- see perf_pending_handle() - */ - smp_rmb(); - return event->pending.next == NULL; -} - -static void perf_pending_sync(struct perf_event *event) -{ - wait_event(event->waitq, perf_not_pending(event)); -} - -void perf_event_do_pending(void) -{ - __perf_pending_run(); -} - /* * We assume there is only KVM supporting the callbacks. 
* Later on, we might change it to a list if there is @@ -3319,8 +3226,7 @@ static void perf_output_wakeup(struct perf_output_handle *handle) if (handle->nmi) { handle->event->pending_wakeup = 1; - perf_pending_queue(&handle->event->pending, - perf_pending_event); + irq_work_queue(&handle->event->pending); } else perf_event_wakeup(handle->event); } @@ -4356,8 +4262,7 @@ static int __perf_event_overflow(struct perf_event *event, int nmi, event->pending_kill = POLL_HUP; if (nmi) { event->pending_disable = 1; - perf_pending_queue(&event->pending, - perf_pending_event); + irq_work_queue(&event->pending); } else perf_event_disable(event); } @@ -5374,6 +5279,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, INIT_LIST_HEAD(&event->event_entry); INIT_LIST_HEAD(&event->sibling_list); init_waitqueue_head(&event->waitq); + init_irq_work(&event->pending, perf_pending_event); mutex_init(&event->mmap_mutex); diff --git a/kernel/timer.c b/kernel/timer.c index 97bf05baade..68a9ae7679b 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -37,7 +37,7 @@ #include #include #include -#include +#include #include #include @@ -1279,7 +1279,10 @@ void update_process_times(int user_tick) run_local_timers(); rcu_check_callbacks(cpu, user_tick); printk_tick(); - perf_event_do_pending(); +#ifdef CONFIG_IRQ_WORK + if (in_irq()) + irq_work_run(); +#endif scheduler_tick(); run_posix_cpu_timers(p); } -- cgit v1.2.3-70-g09d2 From 74c3337c2fc6389d3a57a622a936036b6db6b2e8 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 15 Oct 2010 11:40:29 +0200 Subject: perf: Fix group moving Matt found we trigger the WARN_ON_ONCE() in perf_group_attach() when we take the move_group path in perf_event_open(). Since we cannot de-construct the group (we rely on it to move the events), we have to simply ignore the double attach. The group state is context invariant and doesn't need changing. Reported-by: Matt Fleming Signed-off-by: Peter Zijlstra LKML-Reference: <1287135757.29097.1368.camel@twins> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 99b9700e74d..346dc0e35a0 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -315,7 +315,12 @@ static void perf_group_attach(struct perf_event *event) { struct perf_event *group_leader = event->group_leader; - WARN_ON_ONCE(event->attach_state & PERF_ATTACH_GROUP); + /* + * We can have double attach due to group movement in perf_event_open. + */ + if (event->attach_state & PERF_ATTACH_GROUP) + return; + event->attach_state |= PERF_ATTACH_GROUP; if (group_leader == event) -- cgit v1.2.3-70-g09d2 From e7d0bc047548d76feee6b23f7d3d9da927189a50 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 14 Oct 2010 16:54:51 +0200 Subject: perf: Fix task refcount bugs Currently it looks like find_lively_task_by_vpid() takes a task ref and relies on find_get_context() to drop it. The problem is that perf_event_create_kernel_counter() shouldn't be dropping task refs. 
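For illustration, the reference ownership rule this patch establishes looks roughly like this (a simplified sketch of the perf_event_open() path, not the literal code; see the hunks below):

	/* Sketch: whoever takes the task reference also drops it. */
	task = find_lively_task_by_vpid(pid);	/* takes a task ref */
	ctx = find_get_context(pmu, task, cpu);	/* borrows the task, never drops it */
	if (IS_ERR(ctx))
		goto err_task;
	...
	err_task:
		if (task)
			put_task_struct(task);	/* dropped on every exit path */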
Signed-off-by: Peter Zijlstra Acked-by: Frederic Weisbecker Acked-by: Matt Helsley LKML-Reference: <20101014203625.278436085@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 346dc0e35a0..f928878a1c1 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -2190,11 +2190,9 @@ retry: } } - put_task_struct(task); return ctx; errout: - put_task_struct(task); return ERR_PTR(err); } @@ -5602,7 +5600,7 @@ SYSCALL_DEFINE5(perf_event_open, ctx = find_get_context(pmu, task, cpu); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); - goto err_group_fd; + goto err_task; } /* @@ -5698,6 +5696,9 @@ SYSCALL_DEFINE5(perf_event_open, err_context: put_ctx(ctx); +err_task: + if (task) + put_task_struct(task); err_group_fd: fput_light(group_file, fput_needed); free_event(event); -- cgit v1.2.3-70-g09d2 From c6be5a5cb62592d9d661899a2aa78236eb00ffa5 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 14 Oct 2010 16:59:46 +0200 Subject: perf: Find task before event alloc So that we can pass the task pointer to the event allocation, so that we can use task associated data during event initialization. Signed-off-by: Peter Zijlstra LKML-Reference: <20101014203625.340789919@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index f928878a1c1..b21d06aaef6 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -5551,10 +5551,18 @@ SYSCALL_DEFINE5(perf_event_open, group_leader = NULL; } + if (pid != -1) { + task = find_lively_task_by_vpid(pid); + if (IS_ERR(task)) { + err = PTR_ERR(task); + goto err_group_fd; + } + } + event = perf_event_alloc(&attr, cpu, group_leader, NULL, NULL); if (IS_ERR(event)) { err = PTR_ERR(event); - goto err_fd; + goto err_task; } /* @@ -5586,21 +5594,13 @@ SYSCALL_DEFINE5(perf_event_open, } } - if (pid != -1) { - task = find_lively_task_by_vpid(pid); - if (IS_ERR(task)) { - err = PTR_ERR(task); - goto err_group_fd; - } - } - /* * Get the target context (task or percpu): */ ctx = find_get_context(pmu, task, cpu); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); - goto err_task; + goto err_alloc; } /* @@ -5696,12 +5696,13 @@ SYSCALL_DEFINE5(perf_event_open, err_context: put_ctx(ctx); +err_alloc: + free_event(event); err_task: if (task) put_task_struct(task); err_group_fd: fput_light(group_file, fput_needed); - free_event(event); err_fd: put_unused_fd(event_fd); return err; -- cgit v1.2.3-70-g09d2 From d580ff8699e8811a9af37e9de4dea375401bdeec Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 14 Oct 2010 17:43:23 +0200 Subject: perf, hw_breakpoint: Fix crash in hw_breakpoint creation hw_breakpoint creation needs to account stuff per-task to ensure there are always sufficient hardware resources to back these things due to ptrace. With the perf per-pmu context changes the event initialization no longer has access to the event context, for the simple reason that we need to first find the pmu (result of initialization) before we can find the context. This makes hw_breakpoints unhappy, because they can no longer do per-task accounting. Cure this by frobbing a task pointer in the event::hw bits for now...
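For illustration, the frobbing amounts to roughly the following at allocation time (a simplified sketch of the hunks below):

	/*
	 * Sketch: stash the target task in the hardware-event union so the
	 * hw_breakpoint constraint code can account per task before the
	 * event context exists.
	 */
	if (task) {
		event->attach_state = PERF_ATTACH_TASK;
		if (attr->type == PERF_TYPE_BREAKPOINT)
			event->hw.bp_target = task;
	}

	/* ...and the accounting side reads it back instead of bp->ctx->task: */
	struct task_struct *tsk = bp->hw.bp_target;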
Signed-off-by: Peter Zijlstra Cc: Frederic Weisbecker LKML-Reference: <20101014203625.391543667@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 7 +++++++ kernel/hw_breakpoint.c | 8 ++++---- kernel/perf_event.c | 23 ++++++++++++++++++----- 3 files changed, 29 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 2ebfc9ae475..97965fac55f 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -536,6 +536,12 @@ struct hw_perf_event { struct { /* breakpoint */ struct arch_hw_breakpoint info; struct list_head bp_list; + /* + * Crufty hack to avoid the chicken and egg + * problem hw_breakpoint has with context + * creation and event initalization. + */ + struct task_struct *bp_target; }; #endif }; @@ -693,6 +699,7 @@ struct swevent_hlist { #define PERF_ATTACH_CONTEXT 0x01 #define PERF_ATTACH_GROUP 0x02 +#define PERF_ATTACH_TASK 0x04 /** * struct perf_event - performance event kernel representation: diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c index 3b714e839c1..2c9120f0afc 100644 --- a/kernel/hw_breakpoint.c +++ b/kernel/hw_breakpoint.c @@ -113,12 +113,12 @@ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type) */ static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type) { - struct perf_event_context *ctx = bp->ctx; + struct task_struct *tsk = bp->hw.bp_target; struct perf_event *iter; int count = 0; list_for_each_entry(iter, &bp_task_head, hw.bp_list) { - if (iter->ctx == ctx && find_slot_idx(iter) == type) + if (iter->hw.bp_target == tsk && find_slot_idx(iter) == type) count += hw_breakpoint_weight(iter); } @@ -134,7 +134,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp, enum bp_type_idx type) { int cpu = bp->cpu; - struct task_struct *tsk = bp->ctx->task; + struct task_struct *tsk = bp->hw.bp_target; if (cpu >= 0) { slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu); @@ -213,7 +213,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type, int weight) { int cpu = bp->cpu; - struct task_struct *tsk = bp->ctx->task; + struct task_struct *tsk = bp->hw.bp_target; /* Pinned counter cpu profiling */ if (!tsk) { diff --git a/kernel/perf_event.c b/kernel/perf_event.c index b21d06aaef6..856e20baf13 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -5255,9 +5255,10 @@ unlock: */ static struct perf_event * perf_event_alloc(struct perf_event_attr *attr, int cpu, - struct perf_event *group_leader, - struct perf_event *parent_event, - perf_overflow_handler_t overflow_handler) + struct task_struct *task, + struct perf_event *group_leader, + struct perf_event *parent_event, + perf_overflow_handler_t overflow_handler) { struct pmu *pmu; struct perf_event *event; @@ -5299,6 +5300,17 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, event->state = PERF_EVENT_STATE_INACTIVE; + if (task) { + event->attach_state = PERF_ATTACH_TASK; +#ifdef CONFIG_HAVE_HW_BREAKPOINT + /* + * hw_breakpoint is a bit difficult here.. 
+ */ + if (attr->type == PERF_TYPE_BREAKPOINT) + event->hw.bp_target = task; +#endif + } + if (!overflow_handler && parent_event) overflow_handler = parent_event->overflow_handler; @@ -5559,7 +5571,7 @@ SYSCALL_DEFINE5(perf_event_open, } } - event = perf_event_alloc(&attr, cpu, group_leader, NULL, NULL); + event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, NULL); if (IS_ERR(event)) { err = PTR_ERR(event); goto err_task; @@ -5728,7 +5740,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, * Get the target context (task or percpu): */ - event = perf_event_alloc(attr, cpu, NULL, NULL, overflow_handler); + event = perf_event_alloc(attr, cpu, task, NULL, NULL, overflow_handler); if (IS_ERR(event)) { err = PTR_ERR(event); goto err; @@ -5996,6 +6008,7 @@ inherit_event(struct perf_event *parent_event, child_event = perf_event_alloc(&parent_event->attr, parent_event->cpu, + child, group_leader, parent_event, NULL); if (IS_ERR(child_event)) -- cgit v1.2.3-70-g09d2 From 3b6e901f839f42afb40f614418df82c08b01320a Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 14 Oct 2010 21:10:38 +0200 Subject: jump_label: Use more consistent naming Now that there's still only a few users around, rename things to make them more consistent. Signed-off-by: Peter Zijlstra LKML-Reference: <20101014203625.448565169@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/jump_label.h | 8 ++++---- kernel/tracepoint.c | 6 +++--- lib/dynamic_debug.c | 4 ++-- 3 files changed, 9 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index b72cd9f92c2..81be4962b7a 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -25,10 +25,10 @@ extern void jump_label_update(unsigned long key, enum jump_label_type type); extern void jump_label_apply_nops(struct module *mod); extern int jump_label_text_reserved(void *start, void *end); -#define enable_jump_label(key) \ +#define jump_label_enable(key) \ jump_label_update((unsigned long)key, JUMP_LABEL_ENABLE); -#define disable_jump_label(key) \ +#define jump_label_disable(key) \ jump_label_update((unsigned long)key, JUMP_LABEL_DISABLE); #else @@ -39,12 +39,12 @@ do { \ goto label; \ } while (0) -#define enable_jump_label(cond_var) \ +#define jump_label_enable(cond_var) \ do { \ *(cond_var) = 1; \ } while (0) -#define disable_jump_label(cond_var) \ +#define jump_label_disable(cond_var) \ do { \ *(cond_var) = 0; \ } while (0) diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index d6073a50a6c..e95ee7f31d4 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c @@ -265,10 +265,10 @@ static void set_tracepoint(struct tracepoint_entry **entry, */ rcu_assign_pointer(elem->funcs, (*entry)->funcs); if (!elem->state && active) { - enable_jump_label(&elem->state); + jump_label_enable(&elem->state); elem->state = active; } else if (elem->state && !active) { - disable_jump_label(&elem->state); + jump_label_disable(&elem->state); elem->state = active; } } @@ -285,7 +285,7 @@ static void disable_tracepoint(struct tracepoint *elem) elem->unregfunc(); if (elem->state) { - disable_jump_label(&elem->state); + jump_label_disable(&elem->state); elem->state = 0; } rcu_assign_pointer(elem->funcs, NULL); diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index e925c7b960f..7bd6df781ce 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -142,9 +142,9 @@ static void ddebug_change(const struct ddebug_query *query, dt->num_enabled++; dp->flags = newflags; if 
(newflags) { - enable_jump_label(&dp->enabled); + jump_label_enable(&dp->enabled); } else { - disable_jump_label(&dp->enabled); + jump_label_disable(&dp->enabled); } if (verbose) printk(KERN_INFO -- cgit v1.2.3-70-g09d2 From 82cd6def9806dcb6a325fb6abbc1d61388a15f6a Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 14 Oct 2010 17:57:23 +0200 Subject: perf: Use jump_labels to optimize the scheduler hooks Trades a call + conditional + ret for an unconditional jmp. Acked-by: Frederic Weisbecker Signed-off-by: Peter Zijlstra LKML-Reference: <20101014203625.501657727@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 27 +++++++++++++++++++++++++-- kernel/perf_event.c | 24 +++++++++--------------- 2 files changed, 34 insertions(+), 17 deletions(-) (limited to 'kernel') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 97965fac55f..7f0e7f52af8 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -487,6 +487,7 @@ struct perf_guest_info_callbacks { #include #include #include +#include #include #include @@ -895,8 +896,30 @@ extern void perf_pmu_unregister(struct pmu *pmu); extern int perf_num_counters(void); extern const char *perf_pmu_name(void); -extern void perf_event_task_sched_in(struct task_struct *task); -extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next); +extern void __perf_event_task_sched_in(struct task_struct *task); +extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next); + +extern atomic_t perf_task_events; + +static inline void perf_event_task_sched_in(struct task_struct *task) +{ + JUMP_LABEL(&perf_task_events, have_events); + return; + +have_events: + __perf_event_task_sched_in(task); +} + +static inline +void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next) +{ + JUMP_LABEL(&perf_task_events, have_events); + return; + +have_events: + __perf_event_task_sched_out(task, next); +} + extern int perf_event_init_task(struct task_struct *child); extern void perf_event_exit_task(struct task_struct *child); extern void perf_event_free_task(struct task_struct *task); diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 856e20baf13..f7febb02ab9 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -34,7 +34,7 @@ #include -static atomic_t nr_events __read_mostly; +atomic_t perf_task_events __read_mostly; static atomic_t nr_mmap_events __read_mostly; static atomic_t nr_comm_events __read_mostly; static atomic_t nr_task_events __read_mostly; @@ -1311,8 +1311,8 @@ void perf_event_context_sched_out(struct task_struct *task, int ctxn, * accessing the event control register. If a NMI hits, then it will * not restart the event. */ -void perf_event_task_sched_out(struct task_struct *task, - struct task_struct *next) +void __perf_event_task_sched_out(struct task_struct *task, + struct task_struct *next) { int ctxn; @@ -1337,14 +1337,6 @@ static void task_ctx_sched_out(struct perf_event_context *ctx, cpuctx->task_ctx = NULL; } -/* - * Called with IRQs disabled - */ -static void __perf_event_task_sched_out(struct perf_event_context *ctx) -{ - task_ctx_sched_out(ctx, EVENT_ALL); -} - /* * Called with IRQs disabled */ @@ -1494,7 +1486,7 @@ void perf_event_context_sched_in(struct perf_event_context *ctx) * accessing the event control register. If a NMI hits, then it will * keep the event running. 
*/ -void perf_event_task_sched_in(struct task_struct *task) +void __perf_event_task_sched_in(struct task_struct *task) { struct perf_event_context *ctx; int ctxn; @@ -2216,7 +2208,8 @@ static void free_event(struct perf_event *event) irq_work_sync(&event->pending); if (!event->parent) { - atomic_dec(&nr_events); + if (event->attach_state & PERF_ATTACH_TASK) + jump_label_dec(&perf_task_events); if (event->attr.mmap || event->attr.mmap_data) atomic_dec(&nr_mmap_events); if (event->attr.comm) @@ -5354,7 +5347,8 @@ done: event->pmu = pmu; if (!event->parent) { - atomic_inc(&nr_events); + if (event->attach_state & PERF_ATTACH_TASK) + jump_label_inc(&perf_task_events); if (event->attr.mmap || event->attr.mmap_data) atomic_inc(&nr_mmap_events); if (event->attr.comm) @@ -5849,7 +5843,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn) * our context. */ child_ctx = child->perf_event_ctxp[ctxn]; - __perf_event_task_sched_out(child_ctx); + task_ctx_sched_out(child_ctx, EVENT_ALL); /* * Take the context lock here so that if find_get_context is -- cgit v1.2.3-70-g09d2 From 7e54a5a0b655734326dc78c2b5efc1eb35497bb6 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 14 Oct 2010 22:32:45 +0200 Subject: perf: Optimize sw events Acked-by: Frederic Weisbecker Signed-off-by: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 20 +++++++++++--------- kernel/perf_event.c | 4 ++-- 2 files changed, 13 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 7f0e7f52af8..3b80cbf509e 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1012,18 +1012,20 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs) perf_arch_fetch_caller_regs(regs, CALLER_ADDR0); } -static inline void +static __always_inline void perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) { - if (atomic_read(&perf_swevent_enabled[event_id])) { - struct pt_regs hot_regs; - - if (!regs) { - perf_fetch_caller_regs(&hot_regs); - regs = &hot_regs; - } - __perf_sw_event(event_id, nr, nmi, regs, addr); + struct pt_regs hot_regs; + + JUMP_LABEL(&perf_swevent_enabled[event_id], have_event); + return; + +have_event: + if (!regs) { + perf_fetch_caller_regs(&hot_regs); + regs = &hot_regs; } + __perf_sw_event(event_id, nr, nmi, regs, addr); } extern void perf_event_mmap(struct vm_area_struct *vma); diff --git a/kernel/perf_event.c b/kernel/perf_event.c index f7febb02ab9..05ecf6f7c67 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -4669,7 +4669,7 @@ static void sw_perf_event_destroy(struct perf_event *event) WARN_ON(event->parent); - atomic_dec(&perf_swevent_enabled[event_id]); + jump_label_dec(&perf_swevent_enabled[event_id]); swevent_hlist_put(event); } @@ -4699,7 +4699,7 @@ static int perf_swevent_init(struct perf_event *event) if (err) return err; - atomic_inc(&perf_swevent_enabled[event_id]); + jump_label_inc(&perf_swevent_enabled[event_id]); event->destroy = sw_perf_event_destroy; } -- cgit v1.2.3-70-g09d2 From ef8002f6848236de5adc613063ebeabddea8a6fb Mon Sep 17 00:00:00 2001 From: Nikhil Rao Date: Wed, 13 Oct 2010 12:09:35 -0700 Subject: sched: Do not consider SCHED_IDLE tasks to be cache hot This patch adds a check in task_hot to return if the task has SCHED_IDLE policy. SCHED_IDLE tasks have very low weight, and when run with regular workloads, are typically scheduled many milliseconds apart. 
There is no need to consider these tasks hot for load balancing. Signed-off-by: Nikhil Rao Signed-off-by: Peter Zijlstra LKML-Reference: <1287173550-30365-2-git-send-email-ncrao@google.com> Signed-off-by: Ingo Molnar --- kernel/sched.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 728081a7ef1..771b518e5f1 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -2025,6 +2025,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd) if (p->sched_class != &fair_sched_class) return 0; + if (unlikely(p->policy == SCHED_IDLE)) + return 0; + /* * Buddy candidates are cache hot: */ -- cgit v1.2.3-70-g09d2 From 2582f0eba54066b5e98ff2b27ef0cfa833b59f54 Mon Sep 17 00:00:00 2001 From: Nikhil Rao Date: Wed, 13 Oct 2010 12:09:36 -0700 Subject: sched: Set group_imb only if a task can be pulled from the busiest cpu When cycling through sched groups to determine the busiest group, set group_imb only if the busiest cpu has more than 1 runnable task. This patch fixes the case where two cpus in a group have one runnable task each, but there is a large weight differential between these two tasks. The load balancer is unable to migrate any task from this group, and hence does not consider this group to be imbalanced. Signed-off-by: Nikhil Rao Signed-off-by: Peter Zijlstra LKML-Reference: <1286996978-7007-3-git-send-email-ncrao@google.com> [ small code readability edits ] Signed-off-by: Ingo Molnar --- kernel/sched_fair.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index bf87192e97f..3656480e0f7 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -2378,7 +2378,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd, int local_group, const struct cpumask *cpus, int *balance, struct sg_lb_stats *sgs) { - unsigned long load, max_cpu_load, min_cpu_load; + unsigned long load, max_cpu_load, min_cpu_load, max_nr_running; int i; unsigned int balance_cpu = -1, first_idle_cpu = 0; unsigned long avg_load_per_task = 0; @@ -2389,6 +2389,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd, /* Tally up the load of all CPUs in the group */ max_cpu_load = 0; min_cpu_load = ~0UL; + max_nr_running = 0; for_each_cpu_and(i, sched_group_cpus(group), cpus) { struct rq *rq = cpu_rq(i); @@ -2406,8 +2407,10 @@ static inline void update_sg_lb_stats(struct sched_domain *sd, load = target_load(i, load_idx); } else { load = source_load(i, load_idx); - if (load > max_cpu_load) + if (load > max_cpu_load) { max_cpu_load = load; + max_nr_running = rq->nr_running; + } if (min_cpu_load > load) min_cpu_load = load; } @@ -2447,11 +2450,10 @@ static inline void update_sg_lb_stats(struct sched_domain *sd, if (sgs->sum_nr_running) avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running; - if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task) + if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task && max_nr_running > 1) sgs->group_imb = 1; - sgs->group_capacity = - DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE); + sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE); if (!sgs->group_capacity) sgs->group_capacity = fix_small_capacity(sd, group); } -- cgit v1.2.3-70-g09d2 From fab476228ba37907ad75216d0fd9732ada9c119e Mon Sep 17 00:00:00 2001 From: Nikhil Rao Date: Fri, 15 Oct 2010 13:12:29 -0700 Subject: sched: Force balancing on newidle balance if local group has capacity This patch forces a load balance on a
newly idle cpu when the local group has extra capacity and the busiest group does not have any. It improves system utilization when balancing tasks with a large weight differential. Under certain situations, such as a niced down task (i.e. nice = -15) in the presence of nr_cpus NICE0 tasks, the niced task lands on a sched group and kicks away other tasks because of its large weight. This leads to sub-optimal utilization of the machine. Even though the sched group has capacity, it does not pull tasks because sds.this_load >> sds.max_load, and f_b_g() returns NULL. With this patch, if the local group has extra capacity, we shortcut the checks in f_b_g() and try to pull a task over. A sched group has extra capacity if the group capacity is greater than the number of running tasks in that group. Thanks to Mike Galbraith for discussions leading to this patch and for the insight to reuse SD_NEWIDLE_BALANCE. Signed-off-by: Nikhil Rao Signed-off-by: Peter Zijlstra LKML-Reference: <1287173550-30365-4-git-send-email-ncrao@google.com> Signed-off-by: Ingo Molnar --- kernel/sched_fair.c | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 3656480e0f7..032b548be0f 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -1764,6 +1764,10 @@ static void pull_task(struct rq *src_rq, struct task_struct *p, set_task_cpu(p, this_cpu); activate_task(this_rq, p, 0); check_preempt_curr(this_rq, p, 0); + + /* re-arm NEWIDLE balancing when moving tasks */ + src_rq->avg_idle = this_rq->avg_idle = 2*sysctl_sched_migration_cost; + this_rq->idle_stamp = 0; } /* @@ -2030,12 +2034,14 @@ struct sd_lb_stats { unsigned long this_load; unsigned long this_load_per_task; unsigned long this_nr_running; + unsigned long this_has_capacity; /* Statistics of the busiest group */ unsigned long max_load; unsigned long busiest_load_per_task; unsigned long busiest_nr_running; unsigned long busiest_group_capacity; + unsigned long busiest_has_capacity; int group_imb; /* Is there imbalance in this sd */ #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) @@ -2058,6 +2064,7 @@ struct sg_lb_stats { unsigned long sum_weighted_load; /* Weighted load of group's tasks */ unsigned long group_capacity; int group_imb; /* Is there an imbalance in the group ? */ + int group_has_capacity; /* Is there extra capacity in the group? 
*/ }; /** @@ -2456,6 +2463,9 @@ static inline void update_sg_lb_stats(struct sched_domain *sd, sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE); if (!sgs->group_capacity) sgs->group_capacity = fix_small_capacity(sd, group); + + if (sgs->group_capacity > sgs->sum_nr_running) + sgs->group_has_capacity = 1; } /** @@ -2554,12 +2564,14 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu, sds->this = sg; sds->this_nr_running = sgs.sum_nr_running; sds->this_load_per_task = sgs.sum_weighted_load; + sds->this_has_capacity = sgs.group_has_capacity; } else if (update_sd_pick_busiest(sd, sds, sg, &sgs, this_cpu)) { sds->max_load = sgs.avg_load; sds->busiest = sg; sds->busiest_nr_running = sgs.sum_nr_running; sds->busiest_group_capacity = sgs.group_capacity; sds->busiest_load_per_task = sgs.sum_weighted_load; + sds->busiest_has_capacity = sgs.group_has_capacity; sds->group_imb = sgs.group_imb; } @@ -2756,6 +2768,7 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu, return fix_small_imbalance(sds, this_cpu, imbalance); } + /******* find_busiest_group() helpers end here *********************/ /** @@ -2807,6 +2820,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, * 4) This group is more busy than the avg busieness at this * sched_domain. * 5) The imbalance is within the specified limit. + * + * Note: when doing newidle balance, if the local group has excess + * capacity (i.e. nr_running < group_capacity) and the busiest group + * does not have any capacity, we force a load balance to pull tasks + * to the local group. In this case, we skip past checks 3, 4 and 5. */ if (!(*balance)) goto ret; @@ -2818,6 +2836,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, if (!sds.busiest || sds.busiest_nr_running == 0) goto out_balanced; + /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */ + if (idle == CPU_NEWLY_IDLE && sds.this_has_capacity && + !sds.busiest_has_capacity) + goto force_balance; + if (sds.this_load >= sds.max_load) goto out_balanced; @@ -2829,6 +2852,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load) goto out_balanced; +force_balance: /* Looks like there is an imbalance. Compute it */ calculate_imbalance(&sds, this_cpu, imbalance); return sds.busiest; @@ -3162,10 +3186,8 @@ static void idle_balance(int this_cpu, struct rq *this_rq) interval = msecs_to_jiffies(sd->balance_interval); if (time_after(next_balance, sd->last_balance + interval)) next_balance = sd->last_balance + interval; - if (pulled_task) { - this_rq->idle_stamp = 0; + if (pulled_task) break; - } } raw_spin_lock(&this_rq->lock); -- cgit v1.2.3-70-g09d2 From 75dd321d79d495a0ee579e6249ebc38ddbb2667f Mon Sep 17 00:00:00 2001 From: Nikhil Rao Date: Fri, 15 Oct 2010 13:12:30 -0700 Subject: sched: Drop group_capacity to 1 only if local group has extra capacity When SD_PREFER_SIBLING is set on a sched domain, drop group_capacity to 1 only if the local group has extra capacity. The extra check prevents the case where you always pull from the heaviest group when it is already under-utilized (possible when a large weight task outweighs the tasks on the system). For example, consider a 16-cpu quad-core quad-socket machine with MC and NUMA scheduling domains. Let's say we spawn 15 nice0 tasks and one nice-15 task, and each task is running on one core. 
In this case, we observe the following events when balancing at the NUMA domain: - find_busiest_group() will always pick the sched group containing the niced task to be the busiest group. - find_busiest_queue() will then always pick one of the cpus running the nice0 task (never picks the cpu with the nice -15 task since weighted_cpuload > imbalance). - The load balancer fails to migrate the task since it is the running task and increments sd->nr_balance_failed. - It repeats the above steps a few more times until sd->nr_balance_failed > 5, at which point it kicks off the active load balancer, wakes up the migration thread and kicks the nice 0 task off the cpu. The load balancer doesn't stop until we kick out all nice 0 tasks from the sched group, leaving you with 3 idle cpus and one cpu running the nice -15 task. When balancing at the NUMA domain, we drop sgs.group_capacity to 1 if the child domain (in this case MC) has SD_PREFER_SIBLING set. Subsequent load checks are not relevant because the niced task has a very large weight. In this patch, we add an extra condition to the "if(prefer_sibling)" check in update_sd_lb_stats(). We drop the capacity of a group only if the local group has extra capacity, ie. nr_running < group_capacity. This patch preserves the original intent of the prefer_siblings check (to spread tasks across the system in low utilization scenarios) and fixes the case above. It helps in the following ways: - In low utilization cases (where nr_tasks << nr_cpus), we still drop group_capacity down to 1 if we prefer siblings. - On very busy systems (where nr_tasks >> nr_cpus), sgs.nr_running will most likely be > sgs.group_capacity. - When balancing large weight tasks, if the local group does not have extra capacity, we do not pick the group with the niced task as the busiest group. This prevents failed balances, active migration and the under-utilization described above. Signed-off-by: Nikhil Rao Signed-off-by: Peter Zijlstra LKML-Reference: <1287173550-30365-5-git-send-email-ncrao@google.com> Signed-off-by: Ingo Molnar --- kernel/sched_fair.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 032b548be0f..f1c615ff39d 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -2554,9 +2554,14 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu, /* * In case the child domain prefers tasks go to siblings * first, lower the sg capacity to one so that we'll try - * and move all the excess tasks away. + * and move all the excess tasks away. We lower the capacity + * of a group only if the local group has the capacity to fit + * these excess tasks, i.e. nr_running < group_capacity. The + * extra check prevents the case where you always pull from the + * heaviest group when it is already under-utilized (possible + * with a large weight task outweighs the tasks on the system). 
*/ - if (prefer_sibling) + if (prefer_sibling && !local_group && sds->this_has_capacity) sgs.group_capacity = min(sgs.group_capacity, 1UL); if (local_group) { -- cgit v1.2.3-70-g09d2 From 75e1056f5c57050415b64cb761a3acc35d91f013 Mon Sep 17 00:00:00 2001 From: Venkatesh Pallipadi Date: Mon, 4 Oct 2010 17:03:16 -0700 Subject: sched: Fix softirq time accounting Peter Zijlstra found a bug in the way softirq time is accounted in VIRT_CPU_ACCOUNTING on this thread: http://lkml.indiana.edu/hypermail//linux/kernel/1009.2/01366.html The problem is, softirq processing uses local_bh_disable internally. There is no way, later in the flow, to differentiate between whether softirq is being processed or bh has merely been disabled. So, a hardirq when bh is disabled results in time being wrongly accounted as softirq. Looking at the code a bit more, the problem exists in !VIRT_CPU_ACCOUNTING as well: account_system_time() in normal tick-based accounting also uses softirq_count, which will be set even when not in softirq but with bh disabled. Peter also suggested a solution of using 2*SOFTIRQ_OFFSET as the irq count for local_bh_{disable,enable} and using just SOFTIRQ_OFFSET while softirq processing. The patch below does that and adds the API in_serving_softirq(), which returns whether we are currently processing softirq or not. Also changes one of the usages of softirq_count in net/sched/cls_cgroup.c to in_serving_softirq. Looks like many usages of in_softirq really want in_serving_softirq. Those changes can be made individually on a case by case basis. Signed-off-by: Venkatesh Pallipadi Signed-off-by: Peter Zijlstra LKML-Reference: <1286237003-12406-2-git-send-email-venki@google.com> Signed-off-by: Ingo Molnar --- include/linux/hardirq.h | 5 +++++ include/linux/sched.h | 6 +++--- kernel/sched.c | 2 +- kernel/softirq.c | 51 ++++++++++++++++++++++++++++++++----------------- net/sched/cls_cgroup.c | 2 +- 5 files changed, 44 insertions(+), 22 deletions(-) (limited to 'kernel') diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index d5b387669da..e37a77cbd58 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -64,6 +64,8 @@ #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) #define NMI_OFFSET (1UL << NMI_SHIFT) +#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) + #ifndef PREEMPT_ACTIVE #define PREEMPT_ACTIVE_BITS 1 #define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS) @@ -82,10 +84,13 @@ /* * Are we doing bottom half or hardware interrupt processing? * Are we in a softirq context? Interrupt context? + * in_softirq - Are we currently processing softirq or have bh disabled? + * in_serving_softirq - Are we currently processing softirq? */ #define in_irq() (hardirq_count()) #define in_softirq() (softirq_count()) #define in_interrupt() (irq_count()) +#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) /* * Are we in NMI context? 
diff --git a/include/linux/sched.h b/include/linux/sched.h index cdf56693ecb..8744e50cb08 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2366,9 +2366,9 @@ extern int __cond_resched_lock(spinlock_t *lock); extern int __cond_resched_softirq(void); -#define cond_resched_softirq() ({ \ - __might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET); \ - __cond_resched_softirq(); \ +#define cond_resched_softirq() ({ \ + __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ + __cond_resched_softirq(); \ }) /* diff --git a/kernel/sched.c b/kernel/sched.c index 771b518e5f1..089be8adb07 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -3422,7 +3422,7 @@ void account_system_time(struct task_struct *p, int hardirq_offset, tmp = cputime_to_cputime64(cputime); if (hardirq_count() - hardirq_offset) cpustat->irq = cputime64_add(cpustat->irq, tmp); - else if (softirq_count()) + else if (in_serving_softirq()) cpustat->softirq = cputime64_add(cpustat->softirq, tmp); else cpustat->system = cputime64_add(cpustat->system, tmp); diff --git a/kernel/softirq.c b/kernel/softirq.c index 07b4f1b1a73..988dfbe6bbe 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -76,12 +76,22 @@ void wakeup_softirqd(void) wake_up_process(tsk); } +/* + * preempt_count and SOFTIRQ_OFFSET usage: + * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving + * softirq processing. + * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET) + * on local_bh_disable or local_bh_enable. + * This lets us distinguish between whether we are currently processing + * softirq and whether we just have bh disabled. + */ + /* * This one is for softirq.c-internal use, * where hardirqs are disabled legitimately: */ #ifdef CONFIG_TRACE_IRQFLAGS -static void __local_bh_disable(unsigned long ip) +static void __local_bh_disable(unsigned long ip, unsigned int cnt) { unsigned long flags; @@ -95,32 +105,43 @@ static void __local_bh_disable(unsigned long ip) * We must manually increment preempt_count here and manually * call the trace_preempt_off later. 
*/ - preempt_count() += SOFTIRQ_OFFSET; + preempt_count() += cnt; /* * Were softirqs turned off above: */ - if (softirq_count() == SOFTIRQ_OFFSET) + if (softirq_count() == cnt) trace_softirqs_off(ip); raw_local_irq_restore(flags); - if (preempt_count() == SOFTIRQ_OFFSET) + if (preempt_count() == cnt) trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); } #else /* !CONFIG_TRACE_IRQFLAGS */ -static inline void __local_bh_disable(unsigned long ip) +static inline void __local_bh_disable(unsigned long ip, unsigned int cnt) { - add_preempt_count(SOFTIRQ_OFFSET); + add_preempt_count(cnt); barrier(); } #endif /* CONFIG_TRACE_IRQFLAGS */ void local_bh_disable(void) { - __local_bh_disable((unsigned long)__builtin_return_address(0)); + __local_bh_disable((unsigned long)__builtin_return_address(0), + SOFTIRQ_DISABLE_OFFSET); } EXPORT_SYMBOL(local_bh_disable); +static void __local_bh_enable(unsigned int cnt) +{ + WARN_ON_ONCE(in_irq()); + WARN_ON_ONCE(!irqs_disabled()); + + if (softirq_count() == cnt) + trace_softirqs_on((unsigned long)__builtin_return_address(0)); + sub_preempt_count(cnt); +} + /* * Special-case - softirqs can safely be enabled in * cond_resched_softirq(), or by __do_softirq(), @@ -128,12 +149,7 @@ EXPORT_SYMBOL(local_bh_disable); */ void _local_bh_enable(void) { - WARN_ON_ONCE(in_irq()); - WARN_ON_ONCE(!irqs_disabled()); - - if (softirq_count() == SOFTIRQ_OFFSET) - trace_softirqs_on((unsigned long)__builtin_return_address(0)); - sub_preempt_count(SOFTIRQ_OFFSET); + __local_bh_enable(SOFTIRQ_DISABLE_OFFSET); } EXPORT_SYMBOL(_local_bh_enable); @@ -147,13 +163,13 @@ static inline void _local_bh_enable_ip(unsigned long ip) /* * Are softirqs going to be turned on now: */ - if (softirq_count() == SOFTIRQ_OFFSET) + if (softirq_count() == SOFTIRQ_DISABLE_OFFSET) trace_softirqs_on(ip); /* * Keep preemption disabled until we are done with * softirq processing: */ - sub_preempt_count(SOFTIRQ_OFFSET - 1); + sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1); if (unlikely(!in_interrupt() && local_softirq_pending())) do_softirq(); @@ -198,7 +214,8 @@ asmlinkage void __do_softirq(void) pending = local_softirq_pending(); account_system_vtime(current); - __local_bh_disable((unsigned long)__builtin_return_address(0)); + __local_bh_disable((unsigned long)__builtin_return_address(0), + SOFTIRQ_OFFSET); lockdep_softirq_enter(); cpu = smp_processor_id(); @@ -245,7 +262,7 @@ restart: lockdep_softirq_exit(); account_system_vtime(current); - _local_bh_enable(); + __local_bh_enable(SOFTIRQ_OFFSET); } #ifndef __ARCH_HAS_DO_SOFTIRQ diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c index 78ef2c5e130..37dff78e9cb 100644 --- a/net/sched/cls_cgroup.c +++ b/net/sched/cls_cgroup.c @@ -123,7 +123,7 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp, * calls by looking at the number of nested bh disable calls because * softirqs always disables bh. */ - if (softirq_count() != SOFTIRQ_OFFSET) { + if (in_serving_softirq()) { /* If there is an sk_classid we'll use that. */ if (!skb->sk) return -1; -- cgit v1.2.3-70-g09d2 From 6cdd5199daf0cb7b0fcc8dca941af08492612887 Mon Sep 17 00:00:00 2001 From: Venkatesh Pallipadi Date: Mon, 4 Oct 2010 17:03:18 -0700 Subject: sched: Add a PF flag for ksoftirqd identification To account softirq time cleanly in scheduler, we need to identify whether softirq is invoked in ksoftirqd context or softirq at hardirq tail context. Add PF_KSOFTIRQD for that purpose. 
As all PF flag bits are currently taken, create space by moving one of the infrequently used bits (PF_THREAD_BOUND) down in task_struct to be along with some other state fields. Signed-off-by: Venkatesh Pallipadi Signed-off-by: Peter Zijlstra LKML-Reference: <1286237003-12406-4-git-send-email-venki@google.com> Signed-off-by: Ingo Molnar --- include/linux/sched.h | 1 + kernel/softirq.c | 1 + 2 files changed, 2 insertions(+) (limited to 'kernel') diff --git a/include/linux/sched.h b/include/linux/sched.h index 8744e50cb08..aca0ce67593 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1682,6 +1682,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * /* * Per process flags */ +#define PF_KSOFTIRQD 0x00000001 /* I am ksoftirqd */ #define PF_STARTING 0x00000002 /* being created */ #define PF_EXITING 0x00000004 /* getting shut down */ #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ diff --git a/kernel/softirq.c b/kernel/softirq.c index 988dfbe6bbe..267f7b763eb 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -713,6 +713,7 @@ static int run_ksoftirqd(void * __bind_cpu) { set_current_state(TASK_INTERRUPTIBLE); + current->flags |= PF_KSOFTIRQD; while (!kthread_should_stop()) { preempt_disable(); if (!local_softirq_pending()) { -- cgit v1.2.3-70-g09d2 From b52bfee445d315549d41eacf2fa7c156e7d153d5 Mon Sep 17 00:00:00 2001 From: Venkatesh Pallipadi Date: Mon, 4 Oct 2010 17:03:19 -0700 Subject: sched: Add IRQ_TIME_ACCOUNTING, finer accounting of irq time s390/powerpc/ia64 have support for CONFIG_VIRT_CPU_ACCOUNTING which does the fine granularity accounting of user, system, hardirq, softirq times. Adding that option on archs like x86 will be challenging however, given the state of TSC reliability on various platforms and also the overhead it will add in syscall entry exit. Instead, add a lighter variant that only does finer accounting of hardirq and softirq times, providing precise irq times (instead of timer tick based samples). This accounting is added with a new config option CONFIG_IRQ_TIME_ACCOUNTING so that there won't be any overhead for users not interested in paying the perf penalty. This accounting is based on sched_clock, with the code being generic. So, other archs may find it useful as well. This patch just adds the core logic and does not enable this logic yet. 
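For illustration, an architecture with a trustworthy sched_clock() would later opt in along these lines (a hypothetical sketch: enable_sched_clock_irqtime() is the interface added by this patch, while the stability check named here is invented):

	/* Hypothetical arch init code; my_sched_clock_is_stable() is made up. */
	static int __init arch_irqtime_init(void)
	{
		if (my_sched_clock_is_stable())
			enable_sched_clock_irqtime();
		return 0;
	}
	early_initcall(arch_irqtime_init);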
Signed-off-by: Venkatesh Pallipadi Signed-off-by: Peter Zijlstra LKML-Reference: <1286237003-12406-5-git-send-email-venki@google.com> Signed-off-by: Ingo Molnar --- include/linux/hardirq.h | 2 +- include/linux/sched.h | 13 +++++++++++++ kernel/sched.c | 49 +++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 63 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index 41367c5c3c6..ff43e926844 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -137,7 +137,7 @@ extern void synchronize_irq(unsigned int irq); struct task_struct; -#ifndef CONFIG_VIRT_CPU_ACCOUNTING +#if !defined(CONFIG_VIRT_CPU_ACCOUNTING) && !defined(CONFIG_IRQ_TIME_ACCOUNTING) static inline void account_system_vtime(struct task_struct *tsk) { } diff --git a/include/linux/sched.h b/include/linux/sched.h index aca0ce67593..2cca9a92f5e 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1826,6 +1826,19 @@ extern void sched_clock_idle_sleep_event(void); extern void sched_clock_idle_wakeup_event(u64 delta_ns); #endif +#ifdef CONFIG_IRQ_TIME_ACCOUNTING +/* + * An i/f to runtime opt-in for irq time accounting based off of sched_clock. + * The reason for this explicit opt-in is not to have perf penalty with + * slow sched_clocks. + */ +extern void enable_sched_clock_irqtime(void); +extern void disable_sched_clock_irqtime(void); +#else +static inline void enable_sched_clock_irqtime(void) {} +static inline void disable_sched_clock_irqtime(void) {} +#endif + extern unsigned long long task_sched_runtime(struct task_struct *task); extern unsigned long long thread_group_sched_runtime(struct task_struct *task); diff --git a/kernel/sched.c b/kernel/sched.c index 089be8adb07..9b302e35579 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -1908,6 +1908,55 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int flags) dec_nr_running(rq); } +#ifdef CONFIG_IRQ_TIME_ACCOUNTING + +static DEFINE_PER_CPU(u64, cpu_hardirq_time); +static DEFINE_PER_CPU(u64, cpu_softirq_time); + +static DEFINE_PER_CPU(u64, irq_start_time); +static int sched_clock_irqtime; + +void enable_sched_clock_irqtime(void) +{ + sched_clock_irqtime = 1; +} + +void disable_sched_clock_irqtime(void) +{ + sched_clock_irqtime = 0; +} + +void account_system_vtime(struct task_struct *curr) +{ + unsigned long flags; + int cpu; + u64 now, delta; + + if (!sched_clock_irqtime) + return; + + local_irq_save(flags); + + now = sched_clock(); + cpu = smp_processor_id(); + delta = now - per_cpu(irq_start_time, cpu); + per_cpu(irq_start_time, cpu) = now; + /* + * We do not account for softirq time from ksoftirqd here. + * We want to continue accounting softirq time to ksoftirqd thread + * in that case, so as not to confuse scheduler with a special task + * that do not consume any time, but still wants to run. + */ + if (hardirq_count()) + per_cpu(cpu_hardirq_time, cpu) += delta; + else if (in_serving_softirq() && !(curr->flags & PF_KSOFTIRQD)) + per_cpu(cpu_softirq_time, cpu) += delta; + + local_irq_restore(flags); +} + +#endif + #include "sched_idletask.c" #include "sched_fair.c" #include "sched_rt.c" -- cgit v1.2.3-70-g09d2 From 305e6835e05513406fa12820e40e4a8ecb63743c Mon Sep 17 00:00:00 2001 From: Venkatesh Pallipadi Date: Mon, 4 Oct 2010 17:03:21 -0700 Subject: sched: Do not account irq time to current task Scheduler accounts both softirq and interrupt processing times to the currently running task. 
This means, if the interrupt processing was for some other task in the system, then the current task ends up being penalized as it gets shorter runtime than otherwise. Change sched task accounting to account only actual task time from the currently running task. Now update_curr() modifies delta_exec to depend on rq->clock_task. Note that this change only handles the CONFIG_IRQ_TIME_ACCOUNTING case. We can extend this to CONFIG_VIRT_CPU_ACCOUNTING with minimal effort. But that's for later. This change will impact scheduling behavior in interrupt heavy conditions. Tested on a 4-way system with eth0 handled by CPU 2 and a network heavy task (nc) running on CPU 3 (and no RSS/RFS). With that I have CPU 2 spending 75%+ of its time in irq processing. CPU 3 spends around 35% of its time running the nc task. Now, if I run another CPU intensive task on CPU 2, without this change /proc/<pid>/schedstat shows 100% of time accounted to this task. With this change, it rightly shows less than 25% accounted to this task as the remaining time is actually spent on irq processing. Signed-off-by: Venkatesh Pallipadi Signed-off-by: Peter Zijlstra LKML-Reference: <1286237003-12406-7-git-send-email-venki@google.com> Signed-off-by: Ingo Molnar --- kernel/sched.c | 43 ++++++++++++++++++++++++++++++++++++++++--- kernel/sched_fair.c | 6 +++--- kernel/sched_rt.c | 8 ++++---- 3 files changed, 47 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 9b302e35579..9e01b7100ef 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -491,6 +491,7 @@ struct rq { struct mm_struct *prev_mm; u64 clock; + u64 clock_task; atomic_t nr_iowait; @@ -641,10 +642,19 @@ static inline struct task_group *task_group(struct task_struct *p) #endif /* CONFIG_CGROUP_SCHED */ +static u64 irq_time_cpu(int cpu); + inline void update_rq_clock(struct rq *rq) { - if (!rq->skip_clock_update) - rq->clock = sched_clock_cpu(cpu_of(rq)); + if (!rq->skip_clock_update) { + int cpu = cpu_of(rq); + u64 irq_time; + + rq->clock = sched_clock_cpu(cpu); + irq_time = irq_time_cpu(cpu); + if (rq->clock - irq_time > rq->clock_task) + rq->clock_task = rq->clock - irq_time; + } } /* @@ -1910,6 +1920,18 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int flags) #ifdef CONFIG_IRQ_TIME_ACCOUNTING +/* + * There are no locks covering percpu hardirq/softirq time. + * They are only modified in account_system_vtime, on corresponding CPU + * with interrupts disabled. So, writes are safe. + * They are read and saved off onto struct rq in update_rq_clock(). + * This may result in other CPU reading this CPU's irq time and can + * race with irq/account_system_vtime on this CPU. We would either get old + * or new value (or semi updated value on 32 bit) with a side effect of + * accounting a slice of irq time to wrong task when irq is in progress + * while we read rq->clock. That is a worthy compromise in place of having + * locks on each irq in account_system_time. 
+ */ static DEFINE_PER_CPU(u64, cpu_hardirq_time); static DEFINE_PER_CPU(u64, cpu_softirq_time); @@ -1926,6 +1948,14 @@ void disable_sched_clock_irqtime(void) sched_clock_irqtime = 0; } +static u64 irq_time_cpu(int cpu) +{ + if (!sched_clock_irqtime) + return 0; + + return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu); +} + void account_system_vtime(struct task_struct *curr) { unsigned long flags; @@ -1955,6 +1985,13 @@ void account_system_vtime(struct task_struct *curr) local_irq_restore(flags); } +#else + +static u64 irq_time_cpu(int cpu) +{ + return 0; +} + #endif #include "sched_idletask.c" @@ -3322,7 +3359,7 @@ static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq) if (task_current(rq, p)) { update_rq_clock(rq); - ns = rq->clock - p->se.exec_start; + ns = rq->clock_task - p->se.exec_start; if ((s64)ns < 0) ns = 0; } diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index f1c615ff39d..c358d4081b8 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -519,7 +519,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr, static void update_curr(struct cfs_rq *cfs_rq) { struct sched_entity *curr = cfs_rq->curr; - u64 now = rq_of(cfs_rq)->clock; + u64 now = rq_of(cfs_rq)->clock_task; unsigned long delta_exec; if (unlikely(!curr)) @@ -602,7 +602,7 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) /* * We are starting a new run period: */ - se->exec_start = rq_of(cfs_rq)->clock; + se->exec_start = rq_of(cfs_rq)->clock_task; } /************************************************** @@ -1802,7 +1802,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu, * 2) too many balance attempts have failed. */ - tsk_cache_hot = task_hot(p, rq->clock, sd); + tsk_cache_hot = task_hot(p, rq->clock_task, sd); if (!tsk_cache_hot || sd->nr_balance_failed > sd->cache_nice_tries) { #ifdef CONFIG_SCHEDSTATS diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index ab77aa00b7b..bea7d79f7e9 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -609,7 +609,7 @@ static void update_curr_rt(struct rq *rq) if (!task_has_rt_policy(curr)) return; - delta_exec = rq->clock - curr->se.exec_start; + delta_exec = rq->clock_task - curr->se.exec_start; if (unlikely((s64)delta_exec < 0)) delta_exec = 0; @@ -618,7 +618,7 @@ static void update_curr_rt(struct rq *rq) curr->se.sum_exec_runtime += delta_exec; account_group_exec_runtime(curr, delta_exec); - curr->se.exec_start = rq->clock; + curr->se.exec_start = rq->clock_task; cpuacct_charge(curr, delta_exec); sched_rt_avg_update(rq, delta_exec); @@ -1075,7 +1075,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq) } while (rt_rq); p = rt_task_of(rt_se); - p->se.exec_start = rq->clock; + p->se.exec_start = rq->clock_task; return p; } @@ -1713,7 +1713,7 @@ static void set_curr_task_rt(struct rq *rq) { struct task_struct *p = rq->curr; - p->se.exec_start = rq->clock; + p->se.exec_start = rq->clock_task; /* The running task is never eligible for pushing */ dequeue_pushable_task(rq, p); -- cgit v1.2.3-70-g09d2 From aa483808516ca5cacfa0e5849691f64fec25828e Mon Sep 17 00:00:00 2001 From: Venkatesh Pallipadi Date: Mon, 4 Oct 2010 17:03:22 -0700 Subject: sched: Remove irq time from available CPU power The idea was suggested by Peter Zijlstra here: http://marc.info/?l=linux-kernel&m=127476934517534&w=2 irq time is technically not available to the tasks running on the CPU. This patch removes irq time from CPU power piggybacking on sched_rt_avg_update(). 
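As a rough worked example (illustrative numbers, not from the changelog): if irq processing consumed 25% of the scale_rt_power() averaging window on a cpu, rt_avg grows by that irq time, available becomes total - rt_avg, roughly 0.75 * total, and that cpu's power drops to about three quarters of SCHED_LOAD_SCALE, so the load balancer targets proportionally less load at it.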
Tested this by keeping CPU X busy with a network intensive task having 75% of a single CPU irq processing (hard+soft) on a 4-way system. And started seven cycle soakers on the system. Without this change, there will be two tasks on each CPU. With this change, there is a single task on the irq busy CPU X and the remaining 7 tasks are spread among the other 3 CPUs. Signed-off-by: Venkatesh Pallipadi Signed-off-by: Peter Zijlstra LKML-Reference: <1286237003-12406-8-git-send-email-venki@google.com> Signed-off-by: Ingo Molnar --- kernel/sched.c | 18 ++++++++++++++++++ kernel/sched_fair.c | 8 +++++++- kernel/sched_features.h | 5 +++++ 3 files changed, 30 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 9e01b7100ef..bff9ef537df 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -519,6 +519,10 @@ struct rq { u64 avg_idle; #endif +#ifdef CONFIG_IRQ_TIME_ACCOUNTING + u64 prev_irq_time; +#endif + /* calc_load related fields */ unsigned long calc_load_update; long calc_load_active; @@ -643,6 +647,7 @@ static inline struct task_group *task_group(struct task_struct *p) #endif /* CONFIG_CGROUP_SCHED */ static u64 irq_time_cpu(int cpu); +static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); inline void update_rq_clock(struct rq *rq) { @@ -654,6 +659,8 @@ inline void update_rq_clock(struct rq *rq) irq_time = irq_time_cpu(cpu); if (rq->clock - irq_time > rq->clock_task) rq->clock_task = rq->clock - irq_time; + + sched_irq_time_avg_update(rq, irq_time); } } @@ -1985,6 +1992,15 @@ void account_system_vtime(struct task_struct *curr) local_irq_restore(flags); } +static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time) +{ + if (sched_clock_irqtime && sched_feat(NONIRQ_POWER)) { + u64 delta_irq = curr_irq_time - rq->prev_irq_time; + rq->prev_irq_time = curr_irq_time; + sched_rt_avg_update(rq, delta_irq); + } +} + #else static u64 irq_time_cpu(int cpu) @@ -1992,6 +2008,8 @@ static u64 irq_time_cpu(int cpu) return 0; } +static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time) { } + #endif #include "sched_idletask.c" diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index c358d4081b8..74cccfae87a 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -2275,7 +2275,13 @@ unsigned long scale_rt_power(int cpu) u64 total, available; total = sched_avg_period() + (rq->clock - rq->age_stamp); - available = total - rq->rt_avg; + + if (unlikely(total < rq->rt_avg)) { + /* Ensures that power won't end up being negative */ + available = 0; + } else { + available = total - rq->rt_avg; + } if (unlikely((s64)total < SCHED_LOAD_SCALE)) total = SCHED_LOAD_SCALE; diff --git a/kernel/sched_features.h b/kernel/sched_features.h index 83c66e8ad3e..185f920ec1a 100644 --- a/kernel/sched_features.h +++ b/kernel/sched_features.h @@ -61,3 +61,8 @@ SCHED_FEAT(ASYM_EFF_LOAD, 1) * release the lock. Decreases scheduling overhead. */ SCHED_FEAT(OWNER_SPIN, 1) + +/* + * Decrement CPU power based on irq activity + */ +SCHED_FEAT(NONIRQ_POWER, 1) -- cgit v1.2.3-70-g09d2 From d267f87fb8179c6dba03d08b91952e81bc3723c7 Mon Sep 17 00:00:00 2001 From: Venkatesh Pallipadi Date: Mon, 4 Oct 2010 17:03:23 -0700 Subject: sched: Call tick_check_idle before __irq_enter When the CPU is idle, on the first interrupt irq_enter calls tick_check_idle() to notify interruption from idle. But there is a problem if this call is done after __irq_enter, as all routines in __irq_enter may find stale time because tick_check_idle has not yet been done.
Specifically, this affects trace calls in __irq_enter when they use the global clock, and also the account_system_vtime change in this patch, as it wants to use sched_clock_cpu() to do proper irq timing. But tick_check_idle was intentionally moved after __irq_enter to prevent unneeded ksoftirqd wakeups, by commit ee5f80a: irq: call __irq_enter() before calling the tick_idle_check Impact: avoid spurious ksoftirqd wakeups Moving tick_check_idle() before __irq_enter and wrapping it with local_bh_enable/disable solves both problems. Fixed-by: Yong Zhang Signed-off-by: Venkatesh Pallipadi Signed-off-by: Peter Zijlstra LKML-Reference: <1286237003-12406-9-git-send-email-venki@google.com> Signed-off-by: Ingo Molnar --- kernel/sched.c | 2 +- kernel/softirq.c | 12 +++++++++--- 2 files changed, 10 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index bff9ef537df..567f5cb9808 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -1974,8 +1974,8 @@ void account_system_vtime(struct task_struct *curr) local_irq_save(flags); - now = sched_clock(); cpu = smp_processor_id(); + now = sched_clock_cpu(cpu); delta = now - per_cpu(irq_start_time, cpu); per_cpu(irq_start_time, cpu) = now; /* diff --git a/kernel/softirq.c b/kernel/softirq.c index 267f7b763eb..79ee8f1fc0e 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -296,10 +296,16 @@ void irq_enter(void) rcu_irq_enter(); if (idle_cpu(cpu) && !in_interrupt()) { - __irq_enter(); + /* + * Prevent raise_softirq from needlessly waking up ksoftirqd + * here, as softirq will be serviced on return from interrupt. + */ + local_bh_disable(); tick_check_idle(cpu); - } else - __irq_enter(); + _local_bh_enable(); + } + + __irq_enter(); } #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED -- cgit v1.2.3-70-g09d2 From b7dadc38797584f6203386da1947ed5edf516646 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 18 Oct 2010 20:00:37 +0200 Subject: sched: Export account_system_vtime() KVM uses it for example: ERROR: "account_system_vtime" [arch/x86/kvm/kvm.ko] undefined! Cc: Venkatesh Pallipadi Cc: Peter Zijlstra LKML-Reference: <1286237003-12406-3-git-send-email-venki@google.com> Signed-off-by: Ingo Molnar --- kernel/sched.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 567f5cb9808..5998222f901 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -1991,6 +1991,7 @@ void account_system_vtime(struct task_struct *curr) local_irq_restore(flags); } +EXPORT_SYMBOL_GPL(account_system_vtime); static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time) { -- cgit v1.2.3-70-g09d2 From 31ddd871fc3db73e2024cb3eb3ee5051edf5a80f Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 19 Oct 2010 11:14:49 +0200 Subject: workqueue: Clarify that schedule_on_each_cpu is synchronous The documentation for schedule_on_each_cpu() states that it calls a function on each online CPU from keventd. This can easily be interpreted as an asynchronous call because the description does not mention that flush_work is called. Clarify that it is synchronous.
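A short usage sketch of the now explicitly synchronous behavior (the per-cpu counter and its name are hypothetical, not from this patch; needs <linux/workqueue.h> and <linux/percpu.h>):

	static DEFINE_PER_CPU(long, my_counter);	/* hypothetical counter */

	static void bump_counter(struct work_struct *unused)
	{
		this_cpu_inc(my_counter);	/* runs once on every online CPU */
	}

	/* Returns only after bump_counter() has completed everywhere, so
	 * every CPU's counter is already incremented at this point. */
	int err = schedule_on_each_cpu(bump_counter);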
tj: rephrased a bit Signed-off-by: Mel Gorman Reviewed-by: KOSAKI Motohiro Signed-off-by: Tejun Heo --- kernel/workqueue.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 2c6871cbcbe..eb5c1972443 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2676,13 +2676,15 @@ int schedule_delayed_work_on(int cpu, EXPORT_SYMBOL(schedule_delayed_work_on); /** - * schedule_on_each_cpu - call a function on each online CPU from keventd + * schedule_on_each_cpu - execute a function synchronously on each online CPU * @func: the function to call * - * Returns zero on success. - * Returns -ve errno on failure. - * + * schedule_on_each_cpu() executes @func on each online CPU using the + * system workqueue and blocks until all CPUs have completed. * schedule_on_each_cpu() is very slow. + * + * RETURNS: + * 0 on success, -errno on failure. */ int schedule_on_each_cpu(work_func_t func) { -- cgit v1.2.3-70-g09d2 From daaae6b010ac0f60c9c35e481589966f9f1fcc22 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 19 Oct 2010 11:28:15 +0200 Subject: workqueue: remove in_workqueue_context() Commit a25909a4 (lockdep: Add an in_workqueue_context() lockdep-based test function) added in_workqueue_context() but there hasn't been any in-kernel user and the lockdep annotation in workqueue is scheduled to change. Remove the unused function. Signed-off-by: Tejun Heo Cc: Paul E. McKenney --- include/linux/workqueue.h | 4 ---- kernel/workqueue.c | 15 --------------- 2 files changed, 19 deletions(-) (limited to 'kernel') diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 03bbe903e5c..070bb7a8893 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -430,8 +430,4 @@ extern bool freeze_workqueues_busy(void); extern void thaw_workqueues(void); #endif /* CONFIG_FREEZER */ -#ifdef CONFIG_LOCKDEP -int in_workqueue_context(struct workqueue_struct *wq); -#endif - #endif diff --git a/kernel/workqueue.c b/kernel/workqueue.c index eb5c1972443..30acdb74cc2 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -310,21 +310,6 @@ static inline int __next_wq_cpu(int cpu, const struct cpumask *mask, (cpu) < WORK_CPU_NONE; \ (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq))) -#ifdef CONFIG_LOCKDEP -/** - * in_workqueue_context() - in context of specified workqueue? - * @wq: the workqueue of interest - * - * Checks lockdep state to see if the current task is executing from - * within a workqueue item. This function exists only if lockdep is - * enabled. - */ -int in_workqueue_context(struct workqueue_struct *wq) -{ - return lock_is_held(&wq->lockdep_map); -} -#endif - #ifdef CONFIG_DEBUG_OBJECTS_WORK static struct debug_obj_descr work_debug_descr; -- cgit v1.2.3-70-g09d2 From 0fc86c7bd924debd0bddee790ecc884604fdcc63 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Sat, 11 Sep 2010 20:11:08 +0200 Subject: rtmutex-tester: make it build without BKL The big kernel lock is going away, so make sure that if it is disabled by Kconfig, we do not try to validate it, which would result in compile errors. 
Signed-off-by: Arnd Bergmann Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Arjan van de Ven Cc: Andrew Morton --- kernel/rtmutex-tester.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'kernel') diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c index a56f629b057..66cb89bc5ef 100644 --- a/kernel/rtmutex-tester.c +++ b/kernel/rtmutex-tester.c @@ -76,7 +76,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup) } if (!lockwakeup && td->bkl == 4) { +#ifdef CONFIG_LOCK_KERNEL unlock_kernel(); +#endif td->bkl = 0; } return 0; @@ -133,14 +135,18 @@ static int handle_op(struct test_thread_data *td, int lockwakeup) if (td->bkl) return 0; td->bkl = 1; +#ifdef CONFIG_LOCK_KERNEL lock_kernel(); +#endif td->bkl = 4; return 0; case RTTEST_UNLOCKBKL: if (td->bkl != 4) break; +#ifdef CONFIG_LOCK_KERNEL unlock_kernel(); +#endif td->bkl = 0; return 0; -- cgit v1.2.3-70-g09d2 From 01b284f9b6d51cc3f3bcf3b49f16d2601d3ca22d Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 17 Sep 2010 20:39:22 +0200 Subject: blktrace: remove the big kernel lock According to Jens, this code does not need the BKL at all, it is sufficiently serialized by bd_mutex. Signed-off-by: Arnd Bergmann Cc: Jens Axboe Cc: Steven Rostedt --- kernel/trace/blktrace.c | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 959f8d6c8cc..5328e8779d4 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include @@ -639,7 +638,6 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) if (!q) return -ENXIO; - lock_kernel(); mutex_lock(&bdev->bd_mutex); switch (cmd) { @@ -667,7 +665,6 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) } mutex_unlock(&bdev->bd_mutex); - unlock_kernel(); return ret; } @@ -1652,10 +1649,9 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev, struct block_device *bdev; ssize_t ret = -ENXIO; - lock_kernel(); bdev = bdget(part_devt(p)); if (bdev == NULL) - goto out_unlock_kernel; + goto out; q = blk_trace_get_queue(bdev); if (q == NULL) @@ -1683,8 +1679,7 @@ out_unlock_bdev: mutex_unlock(&bdev->bd_mutex); out_bdput: bdput(bdev); -out_unlock_kernel: - unlock_kernel(); +out: return ret; } @@ -1714,11 +1709,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev, ret = -ENXIO; - lock_kernel(); p = dev_to_part(dev); bdev = bdget(part_devt(p)); if (bdev == NULL) - goto out_unlock_kernel; + goto out; q = blk_trace_get_queue(bdev); if (q == NULL) @@ -1753,8 +1747,6 @@ out_unlock_bdev: mutex_unlock(&bdev->bd_mutex); out_bdput: bdput(bdev); -out_unlock_kernel: - unlock_kernel(); out: return ret ? ret : count; } -- cgit v1.2.3-70-g09d2 From 7ada876a8703f23befbb20a7465a702ee39b1704 Mon Sep 17 00:00:00 2001 From: Darren Hart Date: Sun, 17 Oct 2010 08:35:04 -0700 Subject: futex: Fix errors in nested key ref-counting MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit futex_wait() is leaking key references due to futex_wait_setup() acquiring an additional reference via the queue_lock() routine. The nested key ref-counting has been masking bugs and complicating code analysis. queue_lock() is only called with a previously ref-counted key, so remove the additional ref-counting from the queue_(un)lock() functions. Also futex_wait_requeue_pi() drops one key reference too many in unqueue_me_pi(). 
Remove the key reference handling from unqueue_me_pi(). This was paired with a queue_lock() in futex_lock_pi(), so the count remains unchanged. Document remaining nested key ref-counting sites. Signed-off-by: Darren Hart Reported-and-tested-by: Matthieu Fertré Reported-by: Louis Rilling Cc: Peter Zijlstra Cc: Eric Dumazet Cc: John Kacur Cc: Rusty Russell LKML-Reference: <4CBB17A8.70401@linux.intel.com> Signed-off-by: Thomas Gleixner Cc: stable@kernel.org --- kernel/futex.c | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) (limited to 'kernel') diff --git a/kernel/futex.c b/kernel/futex.c index 6a3a5fa1526..e328f574c97 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -1363,7 +1363,6 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q) { struct futex_hash_bucket *hb; - get_futex_key_refs(&q->key); hb = hash_futex(&q->key); q->lock_ptr = &hb->lock; @@ -1375,7 +1374,6 @@ static inline void queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb) { spin_unlock(&hb->lock); - drop_futex_key_refs(&q->key); } /** @@ -1480,8 +1478,6 @@ static void unqueue_me_pi(struct futex_q *q) q->pi_state = NULL; spin_unlock(q->lock_ptr); - - drop_futex_key_refs(&q->key); } /* @@ -1812,7 +1808,10 @@ static int futex_wait(u32 __user *uaddr, int fshared, } retry: - /* Prepare to wait on uaddr. */ + /* + * Prepare to wait on uaddr. On success, holds hb lock and increments + * q.key refs. + */ ret = futex_wait_setup(uaddr, val, fshared, &q, &hb); if (ret) goto out; @@ -1822,24 +1821,23 @@ retry: /* If we were woken (and unqueued), we succeeded, whatever. */ ret = 0; + /* unqueue_me() drops q.key ref */ if (!unqueue_me(&q)) - goto out_put_key; + goto out; ret = -ETIMEDOUT; if (to && !to->task) - goto out_put_key; + goto out; /* * We expect signal_pending(current), but we might be the * victim of a spurious wakeup as well. */ - if (!signal_pending(current)) { - put_futex_key(fshared, &q.key); + if (!signal_pending(current)) goto retry; - } ret = -ERESTARTSYS; if (!abs_time) - goto out_put_key; + goto out; restart = ¤t_thread_info()->restart_block; restart->fn = futex_wait_restart; @@ -1856,8 +1854,6 @@ retry: ret = -ERESTART_RESTARTBLOCK; -out_put_key: - put_futex_key(fshared, &q.key); out: if (to) { hrtimer_cancel(&to->timer); @@ -2236,7 +2232,10 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared, q.rt_waiter = &rt_waiter; q.requeue_pi_key = &key2; - /* Prepare to wait on uaddr. */ + /* + * Prepare to wait on uaddr. On success, increments q.key (key1) ref + * count. + */ ret = futex_wait_setup(uaddr, val, fshared, &q, &hb); if (ret) goto out_key2; @@ -2254,7 +2253,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared, * In order for us to be here, we know our q.key == key2, and since * we took the hb->lock above, we also know that futex_requeue() has * completed and we no longer have to concern ourselves with a wakeup - * race with the atomic proxy lock acquition by the requeue code. + * race with the atomic proxy lock acquisition by the requeue code. The + * futex_requeue dropped our key1 reference and incremented our key2 + * reference count. */ /* Check if the requeue code acquired the second futex for us. 
*/ -- cgit v1.2.3-70-g09d2 From 7e40798f406fe73f9bac496a390daabd8768a8f7 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 19 Oct 2010 10:56:19 -0400 Subject: tracing: Fix compile issue for trace_sched_wakeup.c The function start_func_tracer() was incorrectly added in the #ifdef CONFIG_FUNCTION_TRACER condition, but is still used even when function tracing is not enabled. The calls to register_ftrace_function() and register_ftrace_graph() become nops (and their arguments are even ignored), thus there is no reason to hide start_func_tracer() when function tracing is not enabled. Reported-by: Ingo Molnar Signed-off-by: Steven Rostedt --- kernel/trace/trace_sched_wakeup.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 31689d2df7f..7319559ed59 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -130,6 +130,7 @@ static struct ftrace_ops trace_ops __read_mostly = { .func = wakeup_tracer_call, }; +#endif /* CONFIG_FUNCTION_TRACER */ static int start_func_tracer(int graph) { @@ -159,8 +160,6 @@ static void stop_func_tracer(int graph) unregister_ftrace_graph(); } -#endif /* CONFIG_FUNCTION_TRACER */ - #ifdef CONFIG_FUNCTION_GRAPH_TRACER static int wakeup_set_flag(u32 old_flags, u32 bit, int set) { -- cgit v1.2.3-70-g09d2 From 747e94ae3d1b4c9bf5380e569f614eb9040b79e7 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 8 Oct 2010 13:51:48 -0400 Subject: ring-buffer: Make write slow path out of line Gcc inlines the slow path of the ring buffer write which can hurt performance. This patch simply forces the slow path function rb_move_tail() to always be a function. The ring_buffer_benchmark module with reader_disabled=1 shows that this patch changes the time to record an event from 135 ns to 132 ns. (3 ns or 2.22% improvement) Signed-off-by: Steven Rostedt --- kernel/trace/ring_buffer.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index bca96377fd4..0b88df849a5 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -1823,7 +1823,10 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, local_sub(length, &tail_page->write); } -static struct ring_buffer_event * +/* + * This is the slow path, force gcc not to inline it. + */ +static noinline struct ring_buffer_event * rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, unsigned long length, unsigned long tail, struct buffer_page *tail_page, u64 *ts) @@ -1943,7 +1946,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, tail = write - length; /* See if we shot pass the end of this buffer page */ - if (write > BUF_PAGE_SIZE) + if (unlikely(write > BUF_PAGE_SIZE)) return rb_move_tail(cpu_buffer, length, tail, tail_page, ts); -- cgit v1.2.3-70-g09d2 From e8bc43e84fada397af1b677b07dbf26e6ac78fcc Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 20 Oct 2010 10:58:02 -0400 Subject: ring-buffer: Pass timestamp by value and not by reference The original code for the ring buffer had locations that modified the timestamp and that change was used by the callers. Now, the timestamp is not reused by the callers and there is no reason to pass it by reference. By changing the call to pass by value, lets gcc optimize the code a bit more where it can store the timestamp in a register and not worry about updating the reference. 
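The compiler-level difference can be seen in a toy example (hypothetical functions, unrelated to the ring buffer code itself):

	/* By reference: each use is a load and each update a store that
	 * must be emitted, because the caller may observe *ts. */
	static void advance_by_ref(u64 *ts) { *ts += 27; }

	/* By value: ts is a private copy and can live in a register. */
	static u64 advance_by_val(u64 ts) { return ts + 27; }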
Signed-off-by: Steven Rostedt --- kernel/trace/ring_buffer.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 0b88df849a5..c8ce6bde7fa 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -1829,7 +1829,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, static noinline struct ring_buffer_event * rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, unsigned long length, unsigned long tail, - struct buffer_page *tail_page, u64 *ts) + struct buffer_page *tail_page, u64 ts) { struct buffer_page *commit_page = cpu_buffer->commit_page; struct ring_buffer *buffer = cpu_buffer->buffer; @@ -1912,8 +1912,8 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, * Nested commits always have zero deltas, so * just reread the time stamp */ - *ts = rb_time_stamp(buffer); - next_page->page->time_stamp = *ts; + ts = rb_time_stamp(buffer); + next_page->page->time_stamp = ts; } out_again: @@ -1932,7 +1932,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, static struct ring_buffer_event * __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, - unsigned type, unsigned long length, u64 *ts) + unsigned type, unsigned long length, u64 ts) { struct buffer_page *tail_page; struct ring_buffer_event *event; @@ -1965,7 +1965,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, * its timestamp. */ if (!tail) - tail_page->page->time_stamp = *ts; + tail_page->page->time_stamp = ts; return event; } @@ -2008,7 +2008,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, static int rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, - u64 *ts, u64 *delta) + u64 ts, u64 *delta) { struct ring_buffer_event *event; int ret; @@ -2016,7 +2016,7 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, WARN_ONCE(*delta > (1ULL << 59), KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n", (unsigned long long)*delta, - (unsigned long long)*ts, + (unsigned long long)ts, (unsigned long long)cpu_buffer->write_stamp); /* @@ -2051,7 +2051,7 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, event->array[0] = 0; } } - cpu_buffer->write_stamp = *ts; + cpu_buffer->write_stamp = ts; /* let the caller know this was the commit */ ret = 1; } else { @@ -2175,7 +2175,7 @@ rb_reserve_next_event(struct ring_buffer *buffer, delta = diff; if (unlikely(test_time_stamp(delta))) { - commit = rb_add_time_stamp(cpu_buffer, &ts, &delta); + commit = rb_add_time_stamp(cpu_buffer, ts, &delta); if (commit == -EBUSY) goto out_fail; @@ -2187,7 +2187,7 @@ rb_reserve_next_event(struct ring_buffer *buffer, } get_event: - event = __rb_reserve_next(cpu_buffer, 0, length, &ts); + event = __rb_reserve_next(cpu_buffer, 0, length, ts); if (unlikely(PTR_ERR(event) == -EAGAIN)) goto again; -- cgit v1.2.3-70-g09d2 From f25106aeab7408394b9dd707e5ecf557e269c723 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 20 Oct 2010 12:40:12 -0400 Subject: ring-buffer: Pass delta by value and not by reference The delta between events is passed to the timestamp code by reference and the timestamp code will reset the value. But it can be reset from the caller. No need to pass it in by reference. By changing the call to pass by value, lets gcc optimize the code a bit more where it can store the delta in a register and not worry about updating the reference. 
Signed-off-by: Steven Rostedt --- kernel/trace/ring_buffer.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index c8ce6bde7fa..3af77cd47f2 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -2008,14 +2008,14 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, static int rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, - u64 ts, u64 *delta) + u64 ts, u64 delta) { struct ring_buffer_event *event; int ret; - WARN_ONCE(*delta > (1ULL << 59), + WARN_ONCE(delta > (1ULL << 59), KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n", - (unsigned long long)*delta, + (unsigned long long)delta, (unsigned long long)ts, (unsigned long long)cpu_buffer->write_stamp); @@ -2041,8 +2041,8 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, * and if we can't just make it zero. */ if (rb_event_index(event)) { - event->time_delta = *delta & TS_MASK; - event->array[0] = *delta >> TS_SHIFT; + event->time_delta = delta & TS_MASK; + event->array[0] = delta >> TS_SHIFT; } else { /* try to discard, since we do not need this */ if (!rb_try_to_discard(cpu_buffer, event)) { @@ -2064,8 +2064,6 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, ret = 0; } - *delta = 0; - return ret; } @@ -2175,7 +2173,9 @@ rb_reserve_next_event(struct ring_buffer *buffer, delta = diff; if (unlikely(test_time_stamp(delta))) { - commit = rb_add_time_stamp(cpu_buffer, ts, &delta); + commit = rb_add_time_stamp(cpu_buffer, ts, delta); + delta = 0; + if (commit == -EBUSY) goto out_fail; -- cgit v1.2.3-70-g09d2 From 69d1b839f7eee347e357b3f6cce7f630cc6ff93d Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 7 Oct 2010 18:18:05 -0400 Subject: ring-buffer: Bind time extend and data events together When the time between two timestamps is greater than 2^27 nanosecs (~134 ms) a time extend event is added that extends the time difference to 59 bits (~18 years). This is due to events only having a 27 bit field to store time. Currently this time extend is a separate event. We add it just before the event data that is being written to the buffer. But before the event data is committed, the event data can also be discarded (as with the case of filters). But because the time extend has already been committed, it will stay in the buffer. If lots of events are being filtered and no event is being written, then every 134ms a time extend can be added to the buffer without any data attached. To keep from filling the entire buffer with time extends, a time extend will never be the first event in a page because the page timestamp can be used. Time extends can only fill the rest of a page with some data at the beginning. This patch binds the time extend with the data. The difference here is that the time extend is not committed before the data is added. Instead, when a time extend is needed, the space reserved on the ring buffer is the time extend + the data event size. The time extend is added to the first part of the reserved block and the data is added to the second. The time extend event is passed back to the reserver, but since the reserver also uses a function to find the data portion of the reserved block, no changes to the ring buffer interface need to be made. When a commit is discarded, we now remove both the time extend and the event. With this approach no more than one time extend can be in the buffer in a row. Data must always follow a time extend. 
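The resulting encoding can be sketched from the reader's point of view (using TS_SHIFT and skip_time_extend() as they appear in the diff below; this mirrors the decode in rb_update_write_stamp()):

	/* A time extend stores the low 27 bits of the delta in time_delta
	 * and the high bits in array[0]; the data event it is bound to
	 * starts RB_LEN_TIME_EXTEND bytes later. */
	u64 delta = event->array[0];
	delta <<= TS_SHIFT;
	delta += event->time_delta;
	event = skip_time_extend(event);	/* on to the attached data event */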
Thanks to Mathieu Desnoyers for suggesting this idea. Suggested-by: Mathieu Desnoyers Cc: Thomas Gleixner Signed-off-by: Steven Rostedt --- kernel/trace/ring_buffer.c | 266 ++++++++++++++++++++++++--------------------- 1 file changed, 142 insertions(+), 124 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 3af77cd47f2..f50f43107e9 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -224,6 +224,9 @@ enum { RB_LEN_TIME_STAMP = 16, }; +#define skip_time_extend(event) \ + ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND)) + static inline int rb_null_event(struct ring_buffer_event *event) { return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta; @@ -248,8 +251,12 @@ rb_event_data_length(struct ring_buffer_event *event) return length + RB_EVNT_HDR_SIZE; } -/* inline for ring buffer fast paths */ -static unsigned +/* + * Return the length of the given event. Will return + * the length of the time extend if the event is a + * time extend. + */ +static inline unsigned rb_event_length(struct ring_buffer_event *event) { switch (event->type_len) { @@ -274,13 +281,41 @@ rb_event_length(struct ring_buffer_event *event) return 0; } +/* + * Return total length of time extend and data, + * or just the event length for all other events. + */ +static inline unsigned +rb_event_ts_length(struct ring_buffer_event *event) +{ + unsigned len = 0; + + if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) { + /* time extends include the data event after it */ + len = RB_LEN_TIME_EXTEND; + event = skip_time_extend(event); + } + return len + rb_event_length(event); +} + /** * ring_buffer_event_length - return the length of the event * @event: the event to get the length of + * + * Returns the size of the data load of a data event. + * If the event is something other than a data event, it + * returns the size of the event itself. With the exception + * of a TIME EXTEND, where it still returns the size of the + * data load of the data event after it. */ unsigned ring_buffer_event_length(struct ring_buffer_event *event) { - unsigned length = rb_event_length(event); + unsigned length; + + if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) + event = skip_time_extend(event); + + length = rb_event_length(event); if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX) return length; length -= RB_EVNT_HDR_SIZE; @@ -294,6 +329,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_length); static void * rb_event_data(struct ring_buffer_event *event) { + if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) + event = skip_time_extend(event); BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX); /* If length is in len field, then array[0] has the data */ if (event->type_len) @@ -1546,6 +1583,25 @@ static void rb_inc_iter(struct ring_buffer_iter *iter) iter->head = 0; } +/* Slow path, do not inline */ +static noinline struct ring_buffer_event * +rb_add_time_stamp(struct ring_buffer_event *event, u64 delta) +{ + event->type_len = RINGBUF_TYPE_TIME_EXTEND; + + /* Not the first event on the page? */ + if (rb_event_index(event)) { + event->time_delta = delta & TS_MASK; + event->array[0] = delta >> TS_SHIFT; + } else { + /* nope, just zero it */ + event->time_delta = 0; + event->array[0] = 0; + } + + return skip_time_extend(event); +} + /** * ring_buffer_update_event - update event type and data * @event: the even to update @@ -1558,28 +1614,31 @@ static void rb_inc_iter(struct ring_buffer_iter *iter) * data field. 
*/ static void -rb_update_event(struct ring_buffer_event *event, - unsigned type, unsigned length) +rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, + struct ring_buffer_event *event, unsigned length, + int add_timestamp, u64 delta) { - event->type_len = type; - - switch (type) { - - case RINGBUF_TYPE_PADDING: - case RINGBUF_TYPE_TIME_EXTEND: - case RINGBUF_TYPE_TIME_STAMP: - break; + /* Only a commit updates the timestamp */ + if (unlikely(!rb_event_is_commit(cpu_buffer, event))) + delta = 0; - case 0: - length -= RB_EVNT_HDR_SIZE; - if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) - event->array[0] = length; - else - event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); - break; - default: - BUG(); + /* + * If we need to add a timestamp, then we + * add it to the start of the resevered space. + */ + if (unlikely(add_timestamp)) { + event = rb_add_time_stamp(event, delta); + length -= RB_LEN_TIME_EXTEND; + delta = 0; } + + event->time_delta = delta; + length -= RB_EVNT_HDR_SIZE; + if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) { + event->type_len = 0; + event->array[0] = length; + } else + event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); } /* @@ -1932,12 +1991,21 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, static struct ring_buffer_event * __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, - unsigned type, unsigned long length, u64 ts) + unsigned long length, u64 ts, + u64 delta, int add_timestamp) { struct buffer_page *tail_page; struct ring_buffer_event *event; unsigned long tail, write; + /* + * If the time delta since the last event is too big to + * hold in the time field of the event, then we append a + * TIME EXTEND event ahead of the data event. + */ + if (unlikely(add_timestamp)) + length += RB_LEN_TIME_EXTEND; + tail_page = cpu_buffer->tail_page; write = local_add_return(length, &tail_page->write); @@ -1954,11 +2022,9 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, event = __rb_page_index(tail_page, tail); kmemcheck_annotate_bitfield(event, bitfield); - rb_update_event(event, type, length); + rb_update_event(cpu_buffer, event, length, add_timestamp, delta); - /* The passed in type is zero for DATA */ - if (likely(!type)) - local_inc(&tail_page->entries); + local_inc(&tail_page->entries); /* * If this is the first commit on the page, then update @@ -1980,7 +2046,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, unsigned long addr; new_index = rb_event_index(event); - old_index = new_index + rb_event_length(event); + old_index = new_index + rb_event_ts_length(event); addr = (unsigned long)event; addr &= PAGE_MASK; @@ -2006,67 +2072,6 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, return 0; } -static int -rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, - u64 ts, u64 delta) -{ - struct ring_buffer_event *event; - int ret; - - WARN_ONCE(delta > (1ULL << 59), - KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n", - (unsigned long long)delta, - (unsigned long long)ts, - (unsigned long long)cpu_buffer->write_stamp); - - /* - * The delta is too big, we to add a - * new timestamp. - */ - event = __rb_reserve_next(cpu_buffer, - RINGBUF_TYPE_TIME_EXTEND, - RB_LEN_TIME_EXTEND, - ts); - if (!event) - return -EBUSY; - - if (PTR_ERR(event) == -EAGAIN) - return -EAGAIN; - - /* Only a commited time event can update the write stamp */ - if (rb_event_is_commit(cpu_buffer, event)) { - /* - * If this is the first on the page, then it was - * updated with the page itself. 
Try to discard it - * and if we can't just make it zero. - */ - if (rb_event_index(event)) { - event->time_delta = delta & TS_MASK; - event->array[0] = delta >> TS_SHIFT; - } else { - /* try to discard, since we do not need this */ - if (!rb_try_to_discard(cpu_buffer, event)) { - /* nope, just zero it */ - event->time_delta = 0; - event->array[0] = 0; - } - } - cpu_buffer->write_stamp = ts; - /* let the caller know this was the commit */ - ret = 1; - } else { - /* Try to discard the event */ - if (!rb_try_to_discard(cpu_buffer, event)) { - /* Darn, this is just wasted space */ - event->time_delta = 0; - event->array[0] = 0; - } - ret = 0; - } - - return ret; -} - static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) { local_inc(&cpu_buffer->committing); @@ -2111,9 +2116,9 @@ rb_reserve_next_event(struct ring_buffer *buffer, unsigned long length) { struct ring_buffer_event *event; - u64 ts, delta = 0; - int commit = 0; + u64 ts, delta; int nr_loops = 0; + int add_timestamp; rb_start_commit(cpu_buffer); @@ -2134,6 +2139,9 @@ rb_reserve_next_event(struct ring_buffer *buffer, length = rb_calculate_event_length(length); again: + add_timestamp = 0; + delta = 0; + /* * We allow for interrupts to reenter here and do a trace. * If one does, it will cause this original code to loop @@ -2172,33 +2180,24 @@ rb_reserve_next_event(struct ring_buffer *buffer, delta = diff; if (unlikely(test_time_stamp(delta))) { - - commit = rb_add_time_stamp(cpu_buffer, ts, delta); - delta = 0; - - if (commit == -EBUSY) - goto out_fail; - - if (commit == -EAGAIN) - goto again; - - RB_WARN_ON(cpu_buffer, commit < 0); + WARN_ONCE(delta > (1ULL << 59), + KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n", + (unsigned long long)delta, + (unsigned long long)ts, + (unsigned long long)cpu_buffer->write_stamp); + add_timestamp = 1; } } get_event: - event = __rb_reserve_next(cpu_buffer, 0, length, ts); + event = __rb_reserve_next(cpu_buffer, length, ts, + delta, add_timestamp); if (unlikely(PTR_ERR(event) == -EAGAIN)) goto again; if (!event) goto out_fail; - if (!rb_event_is_commit(cpu_buffer, event)) - delta = 0; - - event->time_delta = delta; - return event; out_fail: @@ -2311,12 +2310,28 @@ static void rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer_event *event) { + u64 delta; + /* * The event first in the commit queue updates the * time stamp. 
*/ - if (rb_event_is_commit(cpu_buffer, event)) - cpu_buffer->write_stamp += event->time_delta; + if (rb_event_is_commit(cpu_buffer, event)) { + /* + * A commit event that is first on a page + * updates the write timestamp with the page stamp + */ + if (!rb_event_index(event)) + cpu_buffer->write_stamp = + cpu_buffer->commit_page->page->time_stamp; + else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) { + delta = event->array[0]; + delta <<= TS_SHIFT; + delta += event->time_delta; + cpu_buffer->write_stamp += delta; + } else + cpu_buffer->write_stamp += event->time_delta; + } } static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, @@ -2356,6 +2371,9 @@ EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit); static inline void rb_event_discard(struct ring_buffer_event *event) { + if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) + event = skip_time_extend(event); + /* array[0] holds the actual length for the discarded event */ event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE; event->type_len = RINGBUF_TYPE_PADDING; @@ -3043,12 +3061,12 @@ rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, again: /* - * We repeat when a timestamp is encountered. It is possible - * to get multiple timestamps from an interrupt entering just - * as one timestamp is about to be written, or from discarded - * commits. The most that we can have is the number on a single page. + * We repeat when a time extend is encountered. + * Since the time extend is always attached to a data event, + * we should never loop more than once. + * (We never hit the following condition more than twice). */ - if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE)) + if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) return NULL; reader = rb_get_reader_page(cpu_buffer); @@ -3124,14 +3142,12 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) return NULL; /* - * We repeat when a timestamp is encountered. - * We can get multiple timestamps by nested interrupts or also - * if filtering is on (discarding commits). Since discarding - * commits can be frequent we can get a lot of timestamps. - * But we limit them by not adding timestamps if they begin - * at the start of a page. + * We repeat when a time extend is encountered. + * Since the time extend is always attached to a data event, + * we should never loop more than once. + * (We never hit the following condition more than twice). */ - if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE)) + if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) return NULL; if (rb_per_cpu_empty(cpu_buffer)) @@ -3829,7 +3845,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer, if (len > (commit - read)) len = (commit - read); - size = rb_event_length(event); + /* Always keep the time extend and data together */ + size = rb_event_ts_length(event); if (len < size) goto out_unlock; @@ -3851,7 +3868,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer, break; event = rb_reader_event(cpu_buffer); - size = rb_event_length(event); + /* Always keep the time extend and data together */ + size = rb_event_ts_length(event); } while (len > size); /* update bpage */ -- cgit v1.2.3-70-g09d2 From 140ff89127c74b1b1c1b0152a36ea3720ccf6bc3 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 8 Oct 2010 10:50:30 -0400 Subject: ring-buffer: Remove condition to add timestamp in fast path There's a condition to check if we should add a time extend or not in the fast path. 
But this condition is racy (in the sense that we could add an unnecessary time extend, but nothing that can break anything). We later check whether the time or event time delta should be zero or have real data in it (not racy), making this first check redundant. This check may help save space once in a while, but it is really not worth the hassle of trying to save some space for something that happens at most once every 134 ms. Signed-off-by: Steven Rostedt --- kernel/trace/ring_buffer.c | 28 ++++++---------------------- 1 file changed, 6 insertions(+), 22 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index f50f43107e9..d9f3e7a8213 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -2119,6 +2119,7 @@ rb_reserve_next_event(struct ring_buffer *buffer, u64 ts, delta; int nr_loops = 0; int add_timestamp; + u64 diff; rb_start_commit(cpu_buffer); @@ -2155,29 +2156,13 @@ rb_reserve_next_event(struct ring_buffer *buffer, goto out_fail; ts = rb_time_stamp(cpu_buffer->buffer); + diff = ts - cpu_buffer->write_stamp; - /* - * Only the first commit can update the timestamp. - * Yes there is a race here. If an interrupt comes in - * just after the conditional and it traces too, then it - * will also check the deltas. More than one timestamp may - * also be made. But only the entry that did the actual - * commit will be something other than zero. - */ - if (likely(cpu_buffer->tail_page == cpu_buffer->commit_page && - rb_page_write(cpu_buffer->tail_page) == - rb_commit_index(cpu_buffer))) { - u64 diff; - - diff = ts - cpu_buffer->write_stamp; - - /* make sure this diff is calculated here */ - barrier(); - - /* Did the write stamp get updated already? */ - if (unlikely(ts < cpu_buffer->write_stamp)) - goto get_event; + /* make sure this diff is calculated here */ + barrier(); + /* Did the write stamp get updated already? */ + if (likely(ts >= cpu_buffer->write_stamp)) { delta = diff; if (unlikely(test_time_stamp(delta))) { WARN_ONCE(delta > (1ULL << 59), @@ -2189,7 +2174,6 @@ rb_reserve_next_event(struct ring_buffer *buffer, } } - get_event: event = __rb_reserve_next(cpu_buffer, length, ts, delta, add_timestamp); if (unlikely(PTR_ERR(event) == -EAGAIN)) goto again; -- cgit v1.2.3-70-g09d2 From d9abde2138e0a00a0d7e44676928efa0ef629d48 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 19 Oct 2010 13:17:08 -0400 Subject: ring-buffer: Micro-optimize with some strategic inlining By using inline and noinline, we are able to make the fast path of recording an event 4% faster.
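The pattern in isolation (names and the depth limit are hypothetical, for illustration only):

	/* Keep the rare, heavyweight branch out of line so the hot
	 * caller stays small and icache friendly. */
	static noinline void handle_recursion_failure(void)
	{
		/* rare: report the problem and shut tracing down */
	}

	static inline int recursion_check(int depth)
	{
		if (likely(depth < 16))
			return 0;
		handle_recursion_failure();
		return -1;
	}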
Signed-off-by: Steven Rostedt --- kernel/trace/ring_buffer.c | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index d9f3e7a8213..f5007d0d932 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -2078,7 +2078,7 @@ static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) local_inc(&cpu_buffer->commits); } -static void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) +static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) { unsigned long commits; @@ -2193,13 +2193,9 @@ rb_reserve_next_event(struct ring_buffer *buffer, #define TRACE_RECURSIVE_DEPTH 16 -static int trace_recursive_lock(void) +/* Keep this code out of the fast path cache */ +static noinline void trace_recursive_fail(void) { - current->trace_recursion++; - - if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH)) - return 0; - /* Disable all tracing before we do anything else */ tracing_off_permanent(); @@ -2211,10 +2207,21 @@ static int trace_recursive_lock(void) in_nmi()); WARN_ON_ONCE(1); +} + +static inline int trace_recursive_lock(void) +{ + current->trace_recursion++; + + if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH)) + return 0; + + trace_recursive_fail(); + return -1; } -static void trace_recursive_unlock(void) +static inline void trace_recursive_unlock(void) { WARN_ON_ONCE(!current->trace_recursion); -- cgit v1.2.3-70-g09d2 From b8b2663bd7c9da04ac804659b9f617c199d0252c Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 19 Oct 2010 13:23:25 -0400 Subject: ring-buffer: Remove unused macro RB_TIMESTAMPS_PER_PAGE With the binding of time extends to events we no longer need to use the macro RB_TIMESTAMPS_PER_PAGE. Remove it. Signed-off-by: Steven Rostedt --- kernel/trace/ring_buffer.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index f5007d0d932..ad25490f8b4 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -441,9 +441,6 @@ static inline int test_time_stamp(u64 delta) /* Max payload is BUF_PAGE_SIZE - header (8bytes) */ #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2)) -/* Max number of timestamps that can fit on a page */ -#define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_EXTEND) - int ring_buffer_print_page_header(struct trace_seq *s) { struct buffer_data_page field; -- cgit v1.2.3-70-g09d2 From b0ae19811375031ae3b3fecc65b702a9c6e5cc28 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Fri, 15 Oct 2010 04:21:18 +0900 Subject: security: remove unused parameter from security_task_setscheduler() Security modules shouldn't change the sched_param parameter of security_task_setscheduler(). Doing so is not only meaningless, it can have harmful results if the caller passes a static variable. This patch removes the policy and sched_param parameters from security_task_setscheduler(), because no security module uses them.
Cc: James Morris Signed-off-by: KOSAKI Motohiro Signed-off-by: James Morris --- arch/mips/kernel/mips-mt-fpaff.c | 2 +- include/linux/security.h | 14 +++++--------- kernel/cpuset.c | 4 ++-- kernel/sched.c | 4 ++-- security/commoncap.c | 5 +---- security/security.c | 5 ++--- security/selinux/hooks.c | 4 ++-- security/smack/smack_lsm.c | 5 ++--- 8 files changed, 17 insertions(+), 26 deletions(-) (limited to 'kernel') diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c index 2340f11dc29..9a526ba6f25 100644 --- a/arch/mips/kernel/mips-mt-fpaff.c +++ b/arch/mips/kernel/mips-mt-fpaff.c @@ -103,7 +103,7 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len, if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) goto out_unlock; - retval = security_task_setscheduler(p, 0, NULL); + retval = security_task_setscheduler(p); if (retval) goto out_unlock; diff --git a/include/linux/security.h b/include/linux/security.h index a22219afff0..294a0b22812 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -74,7 +74,7 @@ extern int cap_file_mmap(struct file *file, unsigned long reqprot, extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags); extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5); -extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp); +extern int cap_task_setscheduler(struct task_struct *p); extern int cap_task_setioprio(struct task_struct *p, int ioprio); extern int cap_task_setnice(struct task_struct *p, int nice); extern int cap_syslog(int type, bool from_file); @@ -1501,8 +1501,7 @@ struct security_operations { int (*task_getioprio) (struct task_struct *p); int (*task_setrlimit) (struct task_struct *p, unsigned int resource, struct rlimit *new_rlim); - int (*task_setscheduler) (struct task_struct *p, int policy, - struct sched_param *lp); + int (*task_setscheduler) (struct task_struct *p); int (*task_getscheduler) (struct task_struct *p); int (*task_movememory) (struct task_struct *p); int (*task_kill) (struct task_struct *p, @@ -1752,8 +1751,7 @@ int security_task_setioprio(struct task_struct *p, int ioprio); int security_task_getioprio(struct task_struct *p); int security_task_setrlimit(struct task_struct *p, unsigned int resource, struct rlimit *new_rlim); -int security_task_setscheduler(struct task_struct *p, - int policy, struct sched_param *lp); +int security_task_setscheduler(struct task_struct *p); int security_task_getscheduler(struct task_struct *p); int security_task_movememory(struct task_struct *p); int security_task_kill(struct task_struct *p, struct siginfo *info, @@ -2320,11 +2318,9 @@ static inline int security_task_setrlimit(struct task_struct *p, return 0; } -static inline int security_task_setscheduler(struct task_struct *p, - int policy, - struct sched_param *lp) +static inline int security_task_setscheduler(struct task_struct *p) { - return cap_task_setscheduler(p, policy, lp); + return cap_task_setscheduler(p); } static inline int security_task_getscheduler(struct task_struct *p) diff --git a/kernel/cpuset.c b/kernel/cpuset.c index b23c0979bbe..51b143e2a07 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -1397,7 +1397,7 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont, if (tsk->flags & PF_THREAD_BOUND) return -EINVAL; - ret = security_task_setscheduler(tsk, 0, NULL); + ret = security_task_setscheduler(tsk); if (ret) return ret; if
(threadgroup) { @@ -1405,7 +1405,7 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont, rcu_read_lock(); list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) { - ret = security_task_setscheduler(c, 0, NULL); + ret = security_task_setscheduler(c); if (ret) { rcu_read_unlock(); return ret; diff --git a/kernel/sched.c b/kernel/sched.c index dc85ceb9083..df6579d9b4d 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4645,7 +4645,7 @@ recheck: } if (user) { - retval = security_task_setscheduler(p, policy, param); + retval = security_task_setscheduler(p); if (retval) return retval; } @@ -4887,7 +4887,7 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) goto out_unlock; - retval = security_task_setscheduler(p, 0, NULL); + retval = security_task_setscheduler(p); if (retval) goto out_unlock; diff --git a/security/commoncap.c b/security/commoncap.c index 9d172e6e330..5e632b4857e 100644 --- a/security/commoncap.c +++ b/security/commoncap.c @@ -719,14 +719,11 @@ static int cap_safe_nice(struct task_struct *p) /** * cap_task_setscheduler - Detemine if scheduler policy change is permitted * @p: The task to affect - * @policy: The policy to effect - * @lp: The parameters to the scheduling policy * * Detemine if the requested scheduler policy change is permitted for the * specified task, returning 0 if permission is granted, -ve if denied. */ -int cap_task_setscheduler(struct task_struct *p, int policy, - struct sched_param *lp) +int cap_task_setscheduler(struct task_struct *p) { return cap_safe_nice(p); } diff --git a/security/security.c b/security/security.c index 43b6463ebbf..1cbcdfa4b01 100644 --- a/security/security.c +++ b/security/security.c @@ -778,10 +778,9 @@ int security_task_setrlimit(struct task_struct *p, unsigned int resource, return security_ops->task_setrlimit(p, resource, new_rlim); } -int security_task_setscheduler(struct task_struct *p, - int policy, struct sched_param *lp) +int security_task_setscheduler(struct task_struct *p) { - return security_ops->task_setscheduler(p, policy, lp); + return security_ops->task_setscheduler(p); } int security_task_getscheduler(struct task_struct *p) diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 4796ddd4e72..db2b331de89 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -3354,11 +3354,11 @@ static int selinux_task_setrlimit(struct task_struct *p, unsigned int resource, return 0; } -static int selinux_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp) +static int selinux_task_setscheduler(struct task_struct *p) { int rc; - rc = cap_task_setscheduler(p, policy, lp); + rc = cap_task_setscheduler(p); if (rc) return rc; diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index c448d57ae2b..174aec44bfa 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -1281,12 +1281,11 @@ static int smack_task_getioprio(struct task_struct *p) * * Return 0 if read access is permitted */ -static int smack_task_setscheduler(struct task_struct *p, int policy, - struct sched_param *lp) +static int smack_task_setscheduler(struct task_struct *p) { int rc; - rc = cap_task_setscheduler(p, policy, lp); + rc = cap_task_setscheduler(p); if (rc == 0) rc = smk_curacc_on_task(p, MAY_WRITE); return rc; -- cgit v1.2.3-70-g09d2 From dd49a38cf30944be27892c10b1c0e5b3fa73bcb2 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 20 Oct 2010 21:51:26 -0400 Subject: tracing: Do 
not limit the size of the number of CPU buffers The tracing per_cpu buffers were limited to 999 CPUs for a mere savings in stack space of a char array. Up the array to 30 characters, which is more than enough to hold a 64 bit number. Reported-by: Robin Holt Suggested-by: Ingo Molnar Signed-off-by: Steven Rostedt --- kernel/trace/trace.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 001bcd2ccf4..82d9b8106cd 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3996,13 +3996,9 @@ static void tracing_init_debugfs_percpu(long cpu) { struct dentry *d_percpu = tracing_dentry_percpu(); struct dentry *d_cpu; - /* strlen(cpu) + MAX(log10(cpu)) + '\0' */ - char cpu_dir[7]; + char cpu_dir[30]; /* 30 characters should be more than enough */ - if (cpu > 999 || cpu < 0) - return; - - sprintf(cpu_dir, "cpu%ld", cpu); + snprintf(cpu_dir, 30, "cpu%ld", cpu); d_cpu = debugfs_create_dir(cpu_dir, d_percpu); if (!d_cpu) { pr_warning("Could not create debugfs '%s' entry\n", cpu_dir); -- cgit v1.2.3-70-g09d2 From f4bc6bb2d562703eafc895c37e7be20906de139d Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 19 Oct 2010 15:00:13 +0200 Subject: tracing: Cleanup the convoluted softirq tracepoints With the addition of trace_softirq_raise() the softirq tracepoint got even more convoluted. Why the tracepoints take two pointers to assign an integer is beyond my comprehension. But adding an extra case which treats the first pointer as an unsigned long when the second pointer is NULL, including the back and forth type casting, is just horrible. Convert the softirq tracepoints to take a single unsigned int argument for the softirq vector number and fix the call sites. Signed-off-by: Thomas Gleixner LKML-Reference: Acked-by: Peter Zijlstra Acked-by: mathieu.desnoyers@efficios.com Cc: Frederic Weisbecker Cc: Steven Rostedt --- include/linux/interrupt.h | 2 +- include/trace/events/irq.h | 54 +++++++++++++++++----------------------------- kernel/softirq.c | 16 ++++++++------ 3 files changed, 30 insertions(+), 42 deletions(-) (limited to 'kernel') diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 531495db170..0ac194946fe 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -410,7 +410,7 @@ extern void open_softirq(int nr, void (*action)(struct softirq_action *)); extern void softirq_init(void); static inline void __raise_softirq_irqoff(unsigned int nr) { - trace_softirq_raise((struct softirq_action *)(unsigned long)nr, NULL); + trace_softirq_raise(nr); or_softirq_pending(1UL << nr); } diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h index 6fa7cbab7d9..1c09820df58 100644 --- a/include/trace/events/irq.h +++ b/include/trace/events/irq.h @@ -86,76 +86,62 @@ TRACE_EVENT(irq_handler_exit, DECLARE_EVENT_CLASS(softirq, - TP_PROTO(struct softirq_action *h, struct softirq_action *vec), + TP_PROTO(unsigned int vec_nr), - TP_ARGS(h, vec), + TP_ARGS(vec_nr), TP_STRUCT__entry( - __field( int, vec ) + __field( unsigned int, vec ) ), TP_fast_assign( - if (vec) - __entry->vec = (int)(h - vec); - else - __entry->vec = (int)(long)h; + __entry->vec = vec_nr; ), - TP_printk("vec=%d [action=%s]", __entry->vec, + TP_printk("vec=%u [action=%s]", __entry->vec, show_softirq_name(__entry->vec)) ); /** * softirq_entry - called immediately before the softirq handler - * @h: pointer to struct softirq_action - * @vec: pointer to first struct softirq_action in softirq_vec array + *
@vec_nr: softirq vector number * - * The @h parameter, contains a pointer to the struct softirq_action - * which has a pointer to the action handler that is called. By subtracting - * the @vec pointer from the @h pointer, we can determine the softirq - * number. Also, when used in combination with the softirq_exit tracepoint - * we can determine the softirq latency. + * When used in combination with the softirq_exit tracepoint + * we can determine the softirq handler runtime. */ DEFINE_EVENT(softirq, softirq_entry, - TP_PROTO(struct softirq_action *h, struct softirq_action *vec), + TP_PROTO(unsigned int vec_nr), - TP_ARGS(h, vec) + TP_ARGS(vec_nr) ); /** * softirq_exit - called immediately after the softirq handler returns - * @h: pointer to struct softirq_action - * @vec: pointer to first struct softirq_action in softirq_vec array + * @vec_nr: softirq vector number * - * The @h parameter contains a pointer to the struct softirq_action - * that has handled the softirq. By subtracting the @vec pointer from - * the @h pointer, we can determine the softirq number. Also, when used in - * combination with the softirq_entry tracepoint we can determine the softirq - * latency. + * When used in combination with the softirq_entry tracepoint + * we can determine the softirq handler runtime. */ DEFINE_EVENT(softirq, softirq_exit, - TP_PROTO(struct softirq_action *h, struct softirq_action *vec), + TP_PROTO(unsigned int vec_nr), - TP_ARGS(h, vec) + TP_ARGS(vec_nr) ); /** * softirq_raise - called immediately when a softirq is raised - * @h: pointer to struct softirq_action - * @vec: pointer to first struct softirq_action in softirq_vec array + * @vec_nr: softirq vector number * - * The @h parameter contains a pointer to the softirq vector number which is - * raised. @vec is NULL and it means @h includes vector number not - * softirq_action. When used in combination with the softirq_entry tracepoint - * we can determine the softirq raise latency. + * When used in combination with the softirq_entry tracepoint + * we can determine the softirq raise to run latency. */ DEFINE_EVENT(softirq, softirq_raise, - TP_PROTO(struct softirq_action *h, struct softirq_action *vec), + TP_PROTO(unsigned int vec_nr), - TP_ARGS(h, vec) + TP_ARGS(vec_nr) ); #endif /* _TRACE_IRQ_H */ diff --git a/kernel/softirq.c b/kernel/softirq.c index 07b4f1b1a73..b3cb1dc1579 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -212,18 +212,20 @@ restart: do { if (pending & 1) { + unsigned int vec_nr = h - softirq_vec; int prev_count = preempt_count(); - kstat_incr_softirqs_this_cpu(h - softirq_vec); - trace_softirq_entry(h, softirq_vec); + kstat_incr_softirqs_this_cpu(vec_nr); + + trace_softirq_entry(vec_nr); h->action(h); - trace_softirq_exit(h, softirq_vec); + trace_softirq_exit(vec_nr); if (unlikely(prev_count != preempt_count())) { - printk(KERN_ERR "huh, entered softirq %td %s %p" + printk(KERN_ERR "huh, entered softirq %u %s %p" "with preempt_count %08x," - " exited with %08x?\n", h - softirq_vec, - softirq_to_name[h - softirq_vec], - h->action, prev_count, preempt_count()); + " exited with %08x?\n", vec_nr, + softirq_to_name[vec_nr], h->action, + prev_count, preempt_count()); preempt_count() = prev_count; } -- cgit v1.2.3-70-g09d2 From b2b5ce022acf5e9f52f7b78c5579994fdde191d4 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 15 Oct 2010 15:24:15 +0200 Subject: sched, cgroup: Fixup broken cgroup movement Dima noticed that we fail to correct the ->vruntime of sleeping tasks when we move them between cgroups.
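The fix (in the sched_fair.c hunk below) renormalizes a sleeping task's vruntime across the move; stripped to its core, it is:

	p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime; /* relative to old group */
	set_task_rq(p, task_cpu(p));                       /* switch to new cfs_rq  */
	p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime; /* absolute on new group */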
Reported-by: Dima Zavin Signed-off-by: Peter Zijlstra Tested-by: Mike Galbraith LKML-Reference: <1287150604.29097.1513.camel@twins> Signed-off-by: Ingo Molnar --- include/linux/sched.h | 2 +- kernel/sched.c | 8 ++++---- kernel/sched_fair.c | 25 +++++++++++++++++++------ 3 files changed, 24 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/include/linux/sched.h b/include/linux/sched.h index 2cca9a92f5e..be312c12978 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1073,7 +1073,7 @@ struct sched_class { struct task_struct *task); #ifdef CONFIG_FAIR_GROUP_SCHED - void (*moved_group) (struct task_struct *p, int on_rq); + void (*task_move_group) (struct task_struct *p, int on_rq); #endif }; diff --git a/kernel/sched.c b/kernel/sched.c index 5998222f901..3fe253e6a6e 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -8498,12 +8498,12 @@ void sched_move_task(struct task_struct *tsk) if (unlikely(running)) tsk->sched_class->put_prev_task(rq, tsk); - set_task_rq(tsk, task_cpu(tsk)); - #ifdef CONFIG_FAIR_GROUP_SCHED - if (tsk->sched_class->moved_group) - tsk->sched_class->moved_group(tsk, on_rq); + if (tsk->sched_class->task_move_group) + tsk->sched_class->task_move_group(tsk, on_rq); + else #endif + set_task_rq(tsk, task_cpu(tsk)); if (unlikely(running)) tsk->sched_class->set_curr_task(rq); diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 74cccfae87a..3acc2a487c1 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -3866,13 +3866,26 @@ static void set_curr_task_fair(struct rq *rq) } #ifdef CONFIG_FAIR_GROUP_SCHED -static void moved_group_fair(struct task_struct *p, int on_rq) +static void task_move_group_fair(struct task_struct *p, int on_rq) { - struct cfs_rq *cfs_rq = task_cfs_rq(p); - - update_curr(cfs_rq); + /* + * If the task was not on the rq at the time of this cgroup movement + * it must have been asleep, sleeping tasks keep their ->vruntime + * absolute on their old rq until wakeup (needed for the fair sleeper + * bonus in place_entity()). + * + * If it was on the rq, we've just 'preempted' it, which does convert + * ->vruntime to a relative base. + * + * Make sure both cases convert their relative position when migrating + * to another cgroup's rq. This does somewhat interfere with the + * fair sleeper stuff for the first placement, but who cares. + */ + if (!on_rq) + p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime; + set_task_rq(p, task_cpu(p)); if (!on_rq) - place_entity(cfs_rq, &p->se, 1); + p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime; } #endif @@ -3924,7 +3937,7 @@ static const struct sched_class fair_sched_class = { .get_rr_interval = get_rr_interval_fair, #ifdef CONFIG_FAIR_GROUP_SCHED - .moved_group = moved_group_fair, + .task_move_group = task_move_group_fair, #endif }; -- cgit v1.2.3-70-g09d2 From 9ffcfa6f1f63eeac15555b745c292eb9f59130f6 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Wed, 20 Oct 2010 15:25:01 +0200 Subject: perf_events: Revert: Fix transaction recovery in group_sched_in() This patch reverts commit 8e5fc1a (perf_events: Fix transaction recovery in group_sched_in()) because it had one flaw in case the group could never be scheduled. It would cause time_enabled to get negative. 
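Roughly, the bookkeeping at stake (a simplified model of the perf_event clocks, not the exact kernel code):

	/*
	 * time_enabled = ctx->time      - event->tstamp_enabled
	 * time_running = tstamp_stopped - tstamp_running (summed per run)
	 *
	 * The reverted commit deferred some tstamp updates until a group
	 * transaction committed; for a group that never managed to
	 * schedule, the two clocks drifted apart and the subtraction
	 * could yield a negative time_enabled.
	 */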
Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra LKML-Reference: <4cbeeeb7.0aefd80a.6e40.0e2f@mx.google.com> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 76 +++++++++-------------------------------------------- 1 file changed, 13 insertions(+), 63 deletions(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index f309e8014c7..39afdb07d75 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -417,8 +417,8 @@ event_filter_match(struct perf_event *event) return event->cpu == -1 || event->cpu == smp_processor_id(); } -static int -__event_sched_out(struct perf_event *event, +static void +event_sched_out(struct perf_event *event, struct perf_cpu_context *cpuctx, struct perf_event_context *ctx) { @@ -437,13 +437,14 @@ __event_sched_out(struct perf_event *event, } if (event->state != PERF_EVENT_STATE_ACTIVE) - return 0; + return; event->state = PERF_EVENT_STATE_INACTIVE; if (event->pending_disable) { event->pending_disable = 0; event->state = PERF_EVENT_STATE_OFF; } + event->tstamp_stopped = ctx->time; event->pmu->del(event, 0); event->oncpu = -1; @@ -452,19 +453,6 @@ __event_sched_out(struct perf_event *event, ctx->nr_active--; if (event->attr.exclusive || !cpuctx->active_oncpu) cpuctx->exclusive = 0; - return 1; -} - -static void -event_sched_out(struct perf_event *event, - struct perf_cpu_context *cpuctx, - struct perf_event_context *ctx) -{ - int ret; - - ret = __event_sched_out(event, cpuctx, ctx); - if (ret) - event->tstamp_stopped = ctx->time; } static void @@ -664,7 +652,7 @@ retry: } static int -__event_sched_in(struct perf_event *event, +event_sched_in(struct perf_event *event, struct perf_cpu_context *cpuctx, struct perf_event_context *ctx) { @@ -684,6 +672,8 @@ __event_sched_in(struct perf_event *event, return -EAGAIN; } + event->tstamp_running += ctx->time - event->tstamp_stopped; + if (!is_software_event(event)) cpuctx->active_oncpu++; ctx->nr_active++; @@ -694,35 +684,6 @@ __event_sched_in(struct perf_event *event, return 0; } -static inline int -event_sched_in(struct perf_event *event, - struct perf_cpu_context *cpuctx, - struct perf_event_context *ctx) -{ - int ret = __event_sched_in(event, cpuctx, ctx); - if (ret) - return ret; - event->tstamp_running += ctx->time - event->tstamp_stopped; - return 0; -} - -static void -group_commit_event_sched_in(struct perf_event *group_event, - struct perf_cpu_context *cpuctx, - struct perf_event_context *ctx) -{ - struct perf_event *event; - u64 now = ctx->time; - - group_event->tstamp_running += now - group_event->tstamp_stopped; - /* - * Schedule in siblings as one group (if any): - */ - list_for_each_entry(event, &group_event->sibling_list, group_entry) { - event->tstamp_running += now - event->tstamp_stopped; - } -} - static int group_sched_in(struct perf_event *group_event, struct perf_cpu_context *cpuctx, @@ -736,13 +697,7 @@ group_sched_in(struct perf_event *group_event, pmu->start_txn(pmu); - /* - * use __event_sched_in() to delay updating tstamp_running - * until the transaction is committed. 
In case of failure - * we will keep an unmodified tstamp_running which is a - * requirement to get correct timing information - */ - if (__event_sched_in(group_event, cpuctx, ctx)) { + if (event_sched_in(group_event, cpuctx, ctx)) { pmu->cancel_txn(pmu); return -EAGAIN; } @@ -751,31 +706,26 @@ group_sched_in(struct perf_event *group_event, * Schedule in siblings as one group (if any): */ list_for_each_entry(event, &group_event->sibling_list, group_entry) { - if (__event_sched_in(event, cpuctx, ctx)) { + if (event_sched_in(event, cpuctx, ctx)) { partial_group = event; goto group_error; } } - if (!pmu->commit_txn(pmu)) { - /* commit tstamp_running */ - group_commit_event_sched_in(group_event, cpuctx, ctx); + if (!pmu->commit_txn(pmu)) return 0; - } + group_error: /* * Groups can be scheduled in as one unit only, so undo any * partial group before returning: - * - * use __event_sched_out() to avoid updating tstamp_stopped - * because the event never actually ran */ list_for_each_entry(event, &group_event->sibling_list, group_entry) { if (event == partial_group) break; - __event_sched_out(event, cpuctx, ctx); + event_sched_out(event, cpuctx, ctx); } - __event_sched_out(group_event, cpuctx, ctx); + event_sched_out(group_event, cpuctx, ctx); pmu->cancel_txn(pmu); -- cgit v1.2.3-70-g09d2 From d7842da470f244d258f21c5f72cd8388b3541d04 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Wed, 20 Oct 2010 15:25:01 +0200 Subject: perf_events: Fix for transaction recovery in group_sched_in() This new version (see commit 8e5fc1a) is much simpler and ensures that in case of error in group_sched_in() during event_sched_in(), the events up to the failed event go through regular event_sched_out(). But the failed event and the remaining events in the group have their timings adjusted as if they had also gone through event_sched_in() and event_sched_out(). This ensures timing uniformity across all events in a group. This also takes care of the tstamp_stopped problem in case the group could never be scheduled. The tstamp_stopped is updated as if the event had actually run. 
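A worked illustration with made-up numbers: suppose ctx->time advances from 0 to 100 while the group never manages to schedule. Simulating the sched_in/sched_out pair for the failed siblings gives:

	/*
	 * tstamp_stopped = 100	(as if the event had just stopped)
	 * time_enabled  = 100	(ctx->time - tstamp_enabled)
	 * time_running  = 0	(the event never really ran)
	 *
	 * which is exactly the "ena=..., run=0" pattern in the test
	 * output below.
	 */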
With this patch, the following now reports correct time_enabled, in case the NMI watchdog is active: $ task -e unhalted_core_cycles,instructions_retired,baclears,baclears noploop 1 noploop for 1 seconds 0 unhalted_core_cycles (100.00% scaling, ena=997,552,872, run=0) 0 instructions_retired (100.00% scaling, ena=997,552,872, run=0) 0 baclears (100.00% scaling, ena=997,552,872, run=0) 0 baclears (100.00% scaling, ena=997,552,872, run=0) And the older test case also works: $ task -einstructions_retired,baclears,baclears -e unhalted_core_cycles,baclears,baclears sleep 5 1680885 instructions_retired (69.39% scaling, ena=950756, run=291006) 10735 baclears (69.39% scaling, ena=950756, run=291006) 10735 baclears (69.39% scaling, ena=950756, run=291006) 0 unhalted_core_cycles (100.00% scaling, ena=817932, run=0) 0 baclears (100.00% scaling, ena=817932, run=0) 0 baclears (100.00% scaling, ena=817932, run=0) Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra LKML-Reference: <4cbeeebc.8ee7d80a.5a28.0d5f@mx.google.com> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 39afdb07d75..517d827f498 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -691,6 +691,8 @@ group_sched_in(struct perf_event *group_event, { struct perf_event *event, *partial_group = NULL; struct pmu *pmu = group_event->pmu; + u64 now = ctx->time; + bool simulate = false; if (group_event->state == PERF_EVENT_STATE_OFF) return 0; @@ -719,11 +721,27 @@ group_error: /* * Groups can be scheduled in as one unit only, so undo any * partial group before returning: + * The events up to the failed event are scheduled out normally, + * tstamp_stopped will be updated. + * + * The failed events and the remaining siblings need to have + * their timings updated as if they had gone thru event_sched_in() + * and event_sched_out(). This is required to get consistent timings + * across the group. This also takes care of the case where the group + * could never be scheduled by ensuring tstamp_stopped is set to mark + * the time the event was actually stopped, such that time delta + * calculation in update_event_times() is correct. */ list_for_each_entry(event, &group_event->sibling_list, group_entry) { if (event == partial_group) - break; - event_sched_out(event, cpuctx, ctx); + simulate = true; + + if (simulate) { + event->tstamp_running += now - event->tstamp_stopped; + event->tstamp_stopped = now; + } else { + event_sched_out(event, cpuctx, ctx); + } } event_sched_out(group_event, cpuctx, ctx); -- cgit v1.2.3-70-g09d2 From 2656c36699677238edc9ec1fea79039f1fddbcb6 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 22 Oct 2010 14:47:57 +0200 Subject: genirq: Warn if enable_irq is called before irq is set up The recent changes in the genirq core unearthed a bug in arch/um which called enable_irq() before the interrupt was set up. Warn and return instead of crashing the machine with a NULL pointer dereference. 
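The misuse, reduced to a sketch (my_handler and the "mydev" name are illustrative, not the arch/um code); with this patch the premature call warns and returns instead of oopsing on the NULL chip:

	enable_irq(irq);	/* irq_data.chip is still NULL here: now WARNs */
	ret = request_irq(irq, my_handler, 0, "mydev", NULL);	/* set up too late */
	enable_irq(irq);	/* only valid from this point on */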
Signed-off-by: Thomas Gleixner Cc: Richard Weinberger --- kernel/irq/manage.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'kernel') diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 644e8d5fa36..5f92acc5f95 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -324,6 +324,10 @@ void enable_irq(unsigned int irq) if (!desc) return; + if (WARN(!desc->irq_data.chip || !desc->irq_data.chip->irq_enable, + KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq)) + return; + chip_bus_lock(desc); raw_spin_lock_irqsave(&desc->lock, flags); __enable_irq(desc, irq, false); -- cgit v1.2.3-70-g09d2 From 16cdc628c3aed47d02205135b7e2f01e0064f566 Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Fri, 6 Aug 2010 11:47:14 -0500 Subject: debug_core: move all watchdog syncs to a single function Move the various clock and watchdog syncs to a single function in advance of adding another sync for the rcu stall detector. Signed-off-by: Jason Wessel --- kernel/debug/debug_core.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index de407c78178..c812857d0b8 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -470,6 +470,12 @@ static void dbg_cpu_switch(int cpu, int next_cpu) kgdb_info[next_cpu].exception_state |= DCPU_NEXT_MASTER; } +static void dbg_touch_watchdogs(void) +{ + touch_softlockup_watchdog_sync(); + clocksource_touch_watchdog(); +} + static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs) { unsigned long flags; @@ -523,8 +529,7 @@ return_normal: if (trace_on) tracing_on(); atomic_dec(&cpu_in_kgdb[cpu]); - touch_softlockup_watchdog_sync(); - clocksource_touch_watchdog(); + dbg_touch_watchdogs(); local_irq_restore(flags); return 0; } @@ -541,8 +546,7 @@ return_normal: (kgdb_info[cpu].task && kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) { atomic_set(&kgdb_active, -1); - touch_softlockup_watchdog_sync(); - clocksource_touch_watchdog(); + dbg_touch_watchdogs(); local_irq_restore(flags); goto acquirelock; @@ -659,8 +663,7 @@ kgdb_restore: tracing_on(); /* Free kgdb_active */ atomic_set(&kgdb_active, -1); - touch_softlockup_watchdog_sync(); - clocksource_touch_watchdog(); + dbg_touch_watchdogs(); local_irq_restore(flags); return kgdb_info[cpu].ret_state; -- cgit v1.2.3-70-g09d2 From fb70b5888b70b0b50f738fbfc019445493112eb1 Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Fri, 13 Aug 2010 12:44:04 -0500 Subject: debug_core: stop rcu warnings on kernel resume When returning from the kernel debugger reset the rcu jiffies_stall value to prevent the rcu stall detector from sending NMI events which invoke a stack dump for each cpu in the system.
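Taken together with the previous patch, the resume path ends up funneling through one helper (assembled here for illustration; the hunks follow):

	static void dbg_touch_watchdogs(void)
	{
		touch_softlockup_watchdog_sync();
		clocksource_touch_watchdog();
		rcu_cpu_stall_reset();	/* this patch: forget the stall window */
	}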
Signed-off-by: Jason Wessel --- kernel/debug/debug_core.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index c812857d0b8..5a3b04d2049 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -47,6 +47,7 @@ #include #include #include +#include #include #include @@ -474,6 +475,7 @@ static void dbg_touch_watchdogs(void) { touch_softlockup_watchdog_sync(); clocksource_touch_watchdog(); + rcu_cpu_stall_reset(); } static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs) -- cgit v1.2.3-70-g09d2 From f7030bbc446430ecd12c9ad02cf0ea94934e5f91 Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Mon, 11 Oct 2010 10:20:14 -0500 Subject: kdb: Allow kernel loadable modules to add kdb shell functions In order to allow kernel modules to dynamically add a command to the kdb shell the kdb_register, kdb_register_repeat, kdb_unregister, and kdb_printf need to be exported as GPL symbols. Any kernel module that adds a dynamic kdb shell function should only need to include linux/kdb.h. Signed-off-by: Jason Wessel --- include/linux/kdb.h | 43 ++++++++++++++++++++++++++++++++++++++++++ kernel/debug/kdb/kdb_io.c | 2 +- kernel/debug/kdb/kdb_main.c | 4 ++++ kernel/debug/kdb/kdb_private.h | 39 -------------------------------------- 4 files changed, 48 insertions(+), 40 deletions(-) (limited to 'kernel') diff --git a/include/linux/kdb.h b/include/linux/kdb.h index ea6e5244ed3..deda197ced6 100644 --- a/include/linux/kdb.h +++ b/include/linux/kdb.h @@ -28,6 +28,41 @@ extern int kdb_poll_idx; extern int kdb_initial_cpu; extern atomic_t kdb_event; +/* Types and messages used for dynamically added kdb shell commands */ + +#define KDB_MAXARGS 16 /* Maximum number of arguments to a function */ + +typedef enum { + KDB_REPEAT_NONE = 0, /* Do not repeat this command */ + KDB_REPEAT_NO_ARGS, /* Repeat the command without arguments */ + KDB_REPEAT_WITH_ARGS, /* Repeat the command including its arguments */ +} kdb_repeat_t; + +typedef int (*kdb_func_t)(int, const char **); + +/* KDB return codes from a command or internal kdb function */ +#define KDB_NOTFOUND (-1) +#define KDB_ARGCOUNT (-2) +#define KDB_BADWIDTH (-3) +#define KDB_BADRADIX (-4) +#define KDB_NOTENV (-5) +#define KDB_NOENVVALUE (-6) +#define KDB_NOTIMP (-7) +#define KDB_ENVFULL (-8) +#define KDB_ENVBUFFULL (-9) +#define KDB_TOOMANYBPT (-10) +#define KDB_TOOMANYDBREGS (-11) +#define KDB_DUPBPT (-12) +#define KDB_BPTNOTFOUND (-13) +#define KDB_BADMODE (-14) +#define KDB_BADINT (-15) +#define KDB_INVADDRFMT (-16) +#define KDB_BADREG (-17) +#define KDB_BADCPUNUM (-18) +#define KDB_BADLENGTH (-19) +#define KDB_NOBP (-20) +#define KDB_BADADDR (-21) + /* * kdb_diemsg * @@ -105,9 +140,17 @@ int kdb_process_cpu(const struct task_struct *p) /* kdb access to register set for stack dumping */ extern struct pt_regs *kdb_current_regs; +/* Dynamic kdb shell command registration */ +extern int kdb_register(char *, kdb_func_t, char *, char *, short); +extern int kdb_register_repeat(char *, kdb_func_t, char *, char *, + short, kdb_repeat_t); +extern int kdb_unregister(char *); #else /* ! CONFIG_KGDB_KDB */ #define kdb_printf(...) #define kdb_init(x) +#define kdb_register(...) +#define kdb_register_repeat(...) 
+#define kdb_unregister(x) #endif /* CONFIG_KGDB_KDB */ enum { KDB_NOT_INITIALIZED, diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c index c9b7f4f90bb..96fdaac46a8 100644 --- a/kernel/debug/kdb/kdb_io.c +++ b/kernel/debug/kdb/kdb_io.c @@ -823,4 +823,4 @@ int kdb_printf(const char *fmt, ...) return r; } - +EXPORT_SYMBOL_GPL(kdb_printf); diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index caf057a3de0..5448990a299 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -2783,6 +2783,8 @@ int kdb_register_repeat(char *cmd, return 0; } +EXPORT_SYMBOL_GPL(kdb_register_repeat); + /* * kdb_register - Compatibility register function for commands that do @@ -2805,6 +2807,7 @@ int kdb_register(char *cmd, return kdb_register_repeat(cmd, func, usage, help, minlen, KDB_REPEAT_NONE); } +EXPORT_SYMBOL_GPL(kdb_register); /* * kdb_unregister - This function is used to unregister a kernel @@ -2833,6 +2836,7 @@ int kdb_unregister(char *cmd) /* Couldn't find it. */ return 1; } +EXPORT_SYMBOL_GPL(kdb_unregister); /* Initialize the kdb command table. */ static void __init kdb_inittab(void) diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h index be775f7e81e..1921e6e4c0b 100644 --- a/kernel/debug/kdb/kdb_private.h +++ b/kernel/debug/kdb/kdb_private.h @@ -15,29 +15,6 @@ #include #include "../debug_core.h" -/* Kernel Debugger Error codes. Must not overlap with command codes. */ -#define KDB_NOTFOUND (-1) -#define KDB_ARGCOUNT (-2) -#define KDB_BADWIDTH (-3) -#define KDB_BADRADIX (-4) -#define KDB_NOTENV (-5) -#define KDB_NOENVVALUE (-6) -#define KDB_NOTIMP (-7) -#define KDB_ENVFULL (-8) -#define KDB_ENVBUFFULL (-9) -#define KDB_TOOMANYBPT (-10) -#define KDB_TOOMANYDBREGS (-11) -#define KDB_DUPBPT (-12) -#define KDB_BPTNOTFOUND (-13) -#define KDB_BADMODE (-14) -#define KDB_BADINT (-15) -#define KDB_INVADDRFMT (-16) -#define KDB_BADREG (-17) -#define KDB_BADCPUNUM (-18) -#define KDB_BADLENGTH (-19) -#define KDB_NOBP (-20) -#define KDB_BADADDR (-21) - /* Kernel Debugger Command codes. Must not overlap with error codes. */ #define KDB_CMD_GO (-1001) #define KDB_CMD_CPU (-1002) @@ -93,17 +70,6 @@ */ #define KDB_MAXBPT 16 -/* Maximum number of arguments to a function */ -#define KDB_MAXARGS 16 - -typedef enum { - KDB_REPEAT_NONE = 0, /* Do not repeat this command */ - KDB_REPEAT_NO_ARGS, /* Repeat the command without arguments */ - KDB_REPEAT_WITH_ARGS, /* Repeat the command including its arguments */ -} kdb_repeat_t; - -typedef int (*kdb_func_t)(int, const char **); - /* Symbol table format returned by kallsyms. */ typedef struct __ksymtab { unsigned long value; /* Address of symbol */ @@ -123,11 +89,6 @@ extern int kallsyms_symbol_next(char *prefix_name, int flag); extern int kallsyms_symbol_complete(char *prefix_name, int max_len); /* Exported Symbols for kernel loadable modules to use.
*/ -extern int kdb_register(char *, kdb_func_t, char *, char *, short); -extern int kdb_register_repeat(char *, kdb_func_t, char *, char *, - short, kdb_repeat_t); -extern int kdb_unregister(char *); - extern int kdb_getarea_size(void *, unsigned long, size_t); extern int kdb_putarea_size(unsigned long, void *, size_t); -- cgit v1.2.3-70-g09d2 From e3bda3ac33d3bf3e5a4049e2cabe82d3caaffc26 Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Mon, 11 Oct 2010 10:20:14 -0500 Subject: kdb,ftdump: Remove reference to internal kdb include Now that include/linux/kdb.h properly exports all the functions required to dynamically add a kdb shell command, the reference to the private kdb header can be removed. Signed-off-by: Jason Wessel --- kernel/trace/trace_kdb.c | 1 - 1 file changed, 1 deletion(-) (limited to 'kernel') diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c index 7b8ecd751d9..3c5c5dfea0b 100644 --- a/kernel/trace/trace_kdb.c +++ b/kernel/trace/trace_kdb.c @@ -13,7 +13,6 @@ #include #include -#include "../debug/kdb/kdb_private.h" #include "trace.h" #include "trace_output.h" -- cgit v1.2.3-70-g09d2 From 75d14edee5689716b55afe467acfc13206a31f95 Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Mon, 11 Oct 2010 10:20:14 -0500 Subject: kdb: Fix oops in kdb_unregister Nothing should try to use kdb_commands directly as sometimes it is null. Instead, use the for_each_kdbcmd() iterator. This particular problem dates back to the initial kdb merge (2.6.35), but at that point nothing was dynamically unregistering commands from the kdb shell. Signed-off-by: Jason Wessel --- kernel/debug/kdb/kdb_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 5448990a299..4226f32517d 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -2826,7 +2826,7 @@ int kdb_unregister(char *cmd) /* * find the command. */ - for (i = 0, kp = kdb_commands; i < kdb_max_commands; i++, kp++) { + for_each_kdbcmd(kp, i) { if (kp->cmd_name && (strcmp(kp->cmd_name, cmd) == 0)) { kp->cmd_name = NULL; return 0; -- cgit v1.2.3-70-g09d2 From 91b152aa85bbcf076e269565394c31964f940371 Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Mon, 23 Aug 2010 09:20:14 -0500 Subject: kdb,kgdb: fix sparse fixups Fix the following sparse warnings: kdb_main.c:328:5: warning: symbol 'kdbgetu64arg' was not declared. Should it be static? kgdboc.c:246:12: warning: symbol 'kgdboc_early_init' was not declared. Should it be static? 
kgdb.c:652:26: warning: incorrect type in argument 1 (different address spaces) kgdb.c:652:26: expected void const *ptr kgdb.c:652:26: got struct perf_event *[noderef] *pev The one in kgdb.c required the (void * __force) because of the return code from register_wide_hw_breakpoint looking like: return (void __percpu __force *)ERR_PTR(err); Signed-off-by: Jason Wessel --- arch/x86/kernel/kgdb.c | 2 +- drivers/serial/kgdboc.c | 2 +- include/linux/kdb.h | 8 ++++++++ kernel/debug/kdb/kdb_private.h | 9 +-------- 4 files changed, 11 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index 497f9738641..101bf22cf16 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c @@ -649,7 +649,7 @@ void kgdb_arch_late(void) if (breakinfo[i].pev) continue; breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL); - if (IS_ERR(breakinfo[i].pev)) { + if (IS_ERR((void * __force)breakinfo[i].pev)) { printk(KERN_ERR "kgdb: Could not allocate hw" "breakpoints\nDisabling the kernel debugger\n"); breakinfo[i].pev = NULL; diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c index 39f9a1adaa7..d4b711c9a41 100644 --- a/drivers/serial/kgdboc.c +++ b/drivers/serial/kgdboc.c @@ -243,7 +243,7 @@ static struct kgdb_io kgdboc_io_ops = { #ifdef CONFIG_KGDB_SERIAL_CONSOLE /* This is only available if kgdboc is a built in for early debugging */ -int __init kgdboc_early_init(char *opt) +static int __init kgdboc_early_init(char *opt) { /* save the first character of the config string because the * init routine can destroy it. diff --git a/include/linux/kdb.h b/include/linux/kdb.h index deda197ced6..aadff7cc2b8 100644 --- a/include/linux/kdb.h +++ b/include/linux/kdb.h @@ -139,6 +139,14 @@ int kdb_process_cpu(const struct task_struct *p) /* kdb access to register set for stack dumping */ extern struct pt_regs *kdb_current_regs; +#ifdef CONFIG_KALLSYMS +extern const char *kdb_walk_kallsyms(loff_t *pos); +#else /* ! CONFIG_KALLSYMS */ +static inline const char *kdb_walk_kallsyms(loff_t *pos) +{ + return NULL; +} +#endif /* ! CONFIG_KALLSYMS */ /* Dynamic kdb shell command registration */ extern int kdb_register(char *, kdb_func_t, char *, char *, short); extern int kdb_register_repeat(char *, kdb_func_t, char *, char *, short, kdb_repeat_t); extern int kdb_unregister(char *); diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h index 1921e6e4c0b..35d69ed1dfb 100644 --- a/kernel/debug/kdb/kdb_private.h +++ b/kernel/debug/kdb/kdb_private.h @@ -105,6 +105,7 @@ extern int kdb_getword(unsigned long *, unsigned long, size_t); extern int kdb_putword(unsigned long, unsigned long, size_t); extern int kdbgetularg(const char *, unsigned long *); +extern int kdbgetu64arg(const char *, u64 *); extern char *kdbgetenv(const char *); extern int kdbgetaddrarg(int, const char **, int*, unsigned long *, long *, char **); @@ -216,14 +217,6 @@ extern void kdb_ps1(const struct task_struct *p); extern void kdb_print_nameval(const char *name, unsigned long val); extern void kdb_send_sig_info(struct task_struct *p, struct siginfo *info); extern void kdb_meminfo_proc_show(void); -#ifdef CONFIG_KALLSYMS -extern const char *kdb_walk_kallsyms(loff_t *pos); -#else /* ! CONFIG_KALLSYMS */ -static inline const char *kdb_walk_kallsyms(loff_t *pos) -{ - return NULL; -} -#endif /* ! CONFIG_KALLSYMS */ extern char *kdb_getstr(char *, size_t, char *); /* Defines for kdb_symbol_print */ -- cgit v1.2.3-70-g09d2 From c1bb9a9c1911036549c5cdfb23f32d7d20ffdc5a Mon Sep 17 00:00:00 2001 From: Dongdong Deng Date: Mon, 13 Sep 2010 06:58:00 -0500 Subject: debug_core: disable hw_breakpoints on all cores in kgdb_cpu_enter() The slave cpus do not have the hw breakpoints disabled upon entry to the debug_core and as a result could cause unrecoverable recursive faults on badly placed breakpoints, or get out of sync with the arch specific hw breakpoint operations. This patch addresses the problem by invoking kgdb_disable_hw_debug() earlier in kgdb_cpu_enter() for each cpu that enters the debug core. The hw breakpoint dis/enable flow should be: master_debug_cpu slave_debug_cpu \ / kgdb_cpu_enter | kgdb_disable_hw_debug --> uninstall pre-enabled hw_breakpoint | do add/rm dis/enable operates to hw_breakpoints on master_debug_cpu.. | correct_hw_break --> correct/install the enabled hw_breakpoint | leave_kgdb Signed-off-by: Dongdong Deng Signed-off-by: Jason Wessel --- kernel/debug/debug_core.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index 5a3b04d2049..bb949772480 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -485,6 +485,9 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs) int error; int i, cpu; int trace_on = 0; + + kgdb_disable_hw_debug(ks->linux_regs); + acquirelock: /* * Interrupts will be restored by the 'trap return' code, except when @@ -569,8 +572,6 @@ return_normal: if (dbg_io_ops->pre_exception) dbg_io_ops->pre_exception(); - kgdb_disable_hw_debug(ks->linux_regs); - /* * Get the passive CPU lock which will hold all the non-primary * CPU in a spin state while the debugger is active @@ -661,6 +662,8 @@ kgdb_restore: else kgdb_sstep_pid = 0; } + if (arch_kgdb_ops.correct_hw_break) + arch_kgdb_ops.correct_hw_break(); if (trace_on) tracing_on(); /* Free kgdb_active */ atomic_set(&kgdb_active, -1); -- cgit v1.2.3-70-g09d2 From dfee3a7b92208b30f77876068aece9ea571270c2 Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Fri, 21 May 2010 08:46:00 -0500 Subject: debug_core: refactor locking for master/slave cpus For quite some time there have been problems with memory barriers and various races with NMI on multi processor systems using the kernel debugger. The algorithm for entering the kernel debug core and resuming kernel execution was racy and had several known edge case problems with attempting to debug something on a heavily loaded system using breakpoints that are hit repeatedly and quickly. The prior "locking" design entry worked as follows: * The atomic counter kgdb_active was used with atomic exchange in order to elect a master cpu out of all the cpus that may have taken a debug exception. * The master cpu increments all elements of passive_cpu_wait[]. * The master cpu issues the round up cpus message. * Each "slave cpu" that enters the debug core increments its own element in cpu_in_kgdb[]. * Each "slave cpu" spins on passive_cpu_wait[] until it becomes 0. * The master cpu debugs the system. The new scheme removes the two arrays of atomic counters and replaces them with 2 single counters. One counter is used to count the number of cpus waiting to become a master cpu (because one or more hit an exception). The second counter is used to indicate how many cpus have entered as slave cpus.
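Condensed from the diff below, the rendezvous the two counters provide looks roughly like this (error handling and the slave loop omitted):

	if (exception_state == DCPU_WANT_MASTER)
		atomic_inc(&masters_in_kgdb);
	else
		atomic_inc(&slaves_in_kgdb);
	/* ... master elected via raw_spin_trylock(&dbg_master_lock) ... */
	while (kgdb_do_roundup && (atomic_read(&masters_in_kgdb) +
	       atomic_read(&slaves_in_kgdb)) != online_cpus)
		cpu_relax();	/* wait until every online cpu has checked in */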
CONFIG_KALLSYMS */ extern char *kdb_getstr(char *, size_t, char *); /* Defines for kdb_symbol_print */ -- cgit v1.2.3-70-g09d2 From c1bb9a9c1911036549c5cdfb23f32d7d20ffdc5a Mon Sep 17 00:00:00 2001 From: Dongdong Deng Date: Mon, 13 Sep 2010 06:58:00 -0500 Subject: debug_core: disable hw_breakpoints on all cores in kgdb_cpu_enter() The slave cpus do not have the hw breakpoints disabled upon entry to the debug_core and as a result could cause unrecoverable recursive faults on badly placed breakpoints, or get out of sync with the arch specific hw breakpoint operations. This patch addresses the problem by invoking kgdb_disable_hw_debug() earlier in kgdb_enter_cpu for each cpu that enters the debug core. The hw breakpoint dis/enable flow should be: master_debug_cpu slave_debug_cpu \ / kgdb_cpu_enter | kgdb_disable_hw_debug --> uninstall pre-enabled hw_breakpoint | do add/rm dis/enable operates to hw_breakpoints on master_debug_cpu.. | correct_hw_break --> correct/install the enabled hw_breakpoint | leave_kgdb Signed-off-by: Dongdong Deng Signed-off-by: Jason Wessel --- kernel/debug/debug_core.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index 5a3b04d2049..bb949772480 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -485,6 +485,9 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs) int error; int i, cpu; int trace_on = 0; + + kgdb_disable_hw_debug(ks->linux_regs); + acquirelock: /* * Interrupts will be restored by the 'trap return' code, except when @@ -569,8 +572,6 @@ return_normal: if (dbg_io_ops->pre_exception) dbg_io_ops->pre_exception(); - kgdb_disable_hw_debug(ks->linux_regs); - /* * Get the passive CPU lock which will hold all the non-primary * CPU in a spin state while the debugger is active @@ -661,6 +662,8 @@ kgdb_restore: else kgdb_sstep_pid = 0; } + if (arch_kgdb_ops.correct_hw_break) + arch_kgdb_ops.correct_hw_break(); if (trace_on) tracing_on(); /* Free kgdb_active */ -- cgit v1.2.3-70-g09d2 From dfee3a7b92208b30f77876068aece9ea571270c2 Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Fri, 21 May 2010 08:46:00 -0500 Subject: debug_core: refactor locking for master/slave cpus For quite some time there have been problems with memory barriers and various races with NMI on multi processor systems using the kernel debugger. The algorithm for entering the kernel debug core and resuming kernel execution was racy and had several known edge case problems with attempting to debug something on a heavily loaded system using breakpoints that are hit repeatedly and quickly. The prior "locking" design entry worked as follows: * The atomic counter kgdb_active was used with atomic exchange in order to elect a master cpu out of all the cpus that may have taken a debug exception. * The master cpu increments all elements of passive_cpu_wait[]. * The master cpu issues the round up cpus message. * Each "slave cpu" that enters the debug core increments its own element in cpu_in_kgdb[]. * Each "slave cpu" spins on passive_cpu_wait[] until it becomes 0. * The master cpu debugs the system. The new scheme removes the two arrays of atomic counters and replaces them with 2 single counters. One counter is used to count the number of cpus waiting to become a master cpu (because one or more hit an exception). The second counter is use to indicate how many cpus have entered as slave cpus. 
The new entry logic works as follows: * One or more cpus enters via kgdb_handle_exception() and increments the masters_in_kgdb. Each cpu attempts to get the spin lock called dbg_master_lock. * The master cpu sets kgdb_active to the current cpu. * The master cpu takes the spinlock dbg_slave_lock. * The master cpu asks to round up all the other cpus. * Each slave cpu that is not already in kgdb_handle_exception() will enter and increment slaves_in_kgdb. Each slave will now spin try_locking on dbg_slave_lock. * The master cpu waits for the sum of masters_in_kgdb and slaves_in_kgdb to be equal to the sum of the online cpus. * The master cpu debugs the system. In the new design the kgdb_active can only be changed while holding dbg_master_lock. Stress testing has not turned up any further entry/exit races that existed in the prior locking design. The prior locking design suffered from atomic variables not being truly atomic (in the capacity as used by kgdb) along with memory barrier races. Signed-off-by: Jason Wessel Acked-by: Dongdong Deng --- kernel/debug/debug_core.c | 105 ++++++++++++++++++++++++---------------------- kernel/debug/debug_core.h | 1 + 2 files changed, 56 insertions(+), 50 deletions(-) (limited to 'kernel') diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index bb949772480..26dbdc37d21 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -110,13 +110,15 @@ static struct kgdb_bkpt kgdb_break[KGDB_MAX_BREAKPOINTS] = { */ atomic_t kgdb_active = ATOMIC_INIT(-1); EXPORT_SYMBOL_GPL(kgdb_active); +static DEFINE_RAW_SPINLOCK(dbg_master_lock); +static DEFINE_RAW_SPINLOCK(dbg_slave_lock); /* * We use NR_CPUs not PERCPU, in case kgdb is used to debug early * bootup code (which might not have percpu set up yet): */ -static atomic_t passive_cpu_wait[NR_CPUS]; -static atomic_t cpu_in_kgdb[NR_CPUS]; +static atomic_t masters_in_kgdb; +static atomic_t slaves_in_kgdb; static atomic_t kgdb_break_tasklet_var; atomic_t kgdb_setting_breakpoint; @@ -478,14 +480,23 @@ static void dbg_touch_watchdogs(void) rcu_cpu_stall_reset(); } -static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs) +static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs, + int exception_state) { unsigned long flags; int sstep_tries = 100; int error; - int i, cpu; + int cpu; int trace_on = 0; + int online_cpus = num_online_cpus(); + kgdb_info[ks->cpu].enter_kgdb++; + kgdb_info[ks->cpu].exception_state |= exception_state; + + if (exception_state == DCPU_WANT_MASTER) + atomic_inc(&masters_in_kgdb); + else + atomic_inc(&slaves_in_kgdb); kgdb_disable_hw_debug(ks->linux_regs); acquirelock: @@ -500,14 +511,15 @@ acquirelock: kgdb_info[cpu].task = current; kgdb_info[cpu].ret_state = 0; kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT; - /* - * Make sure the above info reaches the primary CPU before - * our cpu_in_kgdb[] flag setting does: - */ - atomic_inc(&cpu_in_kgdb[cpu]); - if (exception_level == 1) + /* Make sure the above info reaches the primary CPU */ + smp_mb(); + + if (exception_level == 1) { + if (raw_spin_trylock(&dbg_master_lock)) + atomic_xchg(&kgdb_active, cpu); goto cpu_master_loop; + } /* * CPU will loop if it is a slave or request to become a kgdb @@ -519,10 +531,12 @@ cpu_loop: kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER; goto cpu_master_loop; } else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) { - if (atomic_cmpxchg(&kgdb_active, -1, cpu) == cpu) + if (raw_spin_trylock(&dbg_master_lock)) { + atomic_xchg(&kgdb_active, cpu); 
break; + } } else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) { - if (!atomic_read(&passive_cpu_wait[cpu])) + if (!raw_spin_is_locked(&dbg_slave_lock)) goto return_normal; } else { return_normal: @@ -533,7 +547,11 @@ return_normal: arch_kgdb_ops.correct_hw_break(); if (trace_on) tracing_on(); - atomic_dec(&cpu_in_kgdb[cpu]); + kgdb_info[cpu].exception_state &= + ~(DCPU_WANT_MASTER | DCPU_IS_SLAVE); + kgdb_info[cpu].enter_kgdb--; + smp_mb__before_atomic_dec(); + atomic_dec(&slaves_in_kgdb); dbg_touch_watchdogs(); local_irq_restore(flags); return 0; @@ -551,6 +569,7 @@ return_normal: (kgdb_info[cpu].task && kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) { atomic_set(&kgdb_active, -1); + raw_spin_unlock(&dbg_master_lock); dbg_touch_watchdogs(); local_irq_restore(flags); @@ -576,10 +595,8 @@ return_normal: * Get the passive CPU lock which will hold all the non-primary * CPU in a spin state while the debugger is active */ - if (!kgdb_single_step) { - for (i = 0; i < NR_CPUS; i++) - atomic_inc(&passive_cpu_wait[i]); - } + if (!kgdb_single_step) + raw_spin_lock(&dbg_slave_lock); #ifdef CONFIG_SMP /* Signal the other CPUs to enter kgdb_wait() */ @@ -590,10 +607,9 @@ return_normal: /* * Wait for the other CPUs to be notified and be waiting for us: */ - for_each_online_cpu(i) { - while (kgdb_do_roundup && !atomic_read(&cpu_in_kgdb[i])) - cpu_relax(); - } + while (kgdb_do_roundup && (atomic_read(&masters_in_kgdb) + + atomic_read(&slaves_in_kgdb)) != online_cpus) + cpu_relax(); /* * At this point the primary processor is completely @@ -634,24 +650,11 @@ cpu_master_loop: if (dbg_io_ops->post_exception) dbg_io_ops->post_exception(); - atomic_dec(&cpu_in_kgdb[ks->cpu]); - if (!kgdb_single_step) { - for (i = NR_CPUS-1; i >= 0; i--) - atomic_dec(&passive_cpu_wait[i]); - /* - * Wait till all the CPUs have quit from the debugger, - * but allow a CPU that hit an exception and is - * waiting to become the master to remain in the debug - * core. - */ - for_each_online_cpu(i) { - while (kgdb_do_roundup && - atomic_read(&cpu_in_kgdb[i]) && - !(kgdb_info[i].exception_state & - DCPU_WANT_MASTER)) - cpu_relax(); - } + raw_spin_unlock(&dbg_slave_lock); + /* Wait till all the CPUs have quit from the debugger. */ + while (kgdb_do_roundup && atomic_read(&slaves_in_kgdb)) + cpu_relax(); } kgdb_restore: @@ -666,8 +669,15 @@ kgdb_restore: arch_kgdb_ops.correct_hw_break(); if (trace_on) tracing_on(); + + kgdb_info[cpu].exception_state &= + ~(DCPU_WANT_MASTER | DCPU_IS_SLAVE); + kgdb_info[cpu].enter_kgdb--; + smp_mb__before_atomic_dec(); + atomic_dec(&masters_in_kgdb); /* Free kgdb_active */ atomic_set(&kgdb_active, -1); + raw_spin_unlock(&dbg_master_lock); dbg_touch_watchdogs(); local_irq_restore(flags); @@ -686,7 +696,6 @@ kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs) { struct kgdb_state kgdb_var; struct kgdb_state *ks = &kgdb_var; - int ret; ks->cpu = raw_smp_processor_id(); ks->ex_vector = evector; @@ -697,11 +706,10 @@ kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs) if (kgdb_reenter_check(ks)) return 0; /* Ouch, double exception ! 
*/ - kgdb_info[ks->cpu].exception_state |= DCPU_WANT_MASTER; - ret = kgdb_cpu_enter(ks, regs); - kgdb_info[ks->cpu].exception_state &= ~(DCPU_WANT_MASTER | - DCPU_IS_SLAVE); - return ret; + if (kgdb_info[ks->cpu].enter_kgdb != 0) + return 0; + + return kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER); } int kgdb_nmicallback(int cpu, void *regs) @@ -714,12 +722,9 @@ int kgdb_nmicallback(int cpu, void *regs) ks->cpu = cpu; ks->linux_regs = regs; - if (!atomic_read(&cpu_in_kgdb[cpu]) && - atomic_read(&kgdb_active) != -1 && - atomic_read(&kgdb_active) != cpu) { - kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE; - kgdb_cpu_enter(ks, regs); - kgdb_info[cpu].exception_state &= ~DCPU_IS_SLAVE; + if (kgdb_info[ks->cpu].enter_kgdb == 0 && + raw_spin_is_locked(&dbg_master_lock)) { + kgdb_cpu_enter(ks, regs, DCPU_IS_SLAVE); return 0; } #endif diff --git a/kernel/debug/debug_core.h b/kernel/debug/debug_core.h index c5d753d80f6..3494c28a7e7 100644 --- a/kernel/debug/debug_core.h +++ b/kernel/debug/debug_core.h @@ -40,6 +40,7 @@ struct debuggerinfo_struct { int exception_state; int ret_state; int irq_depth; + int enter_kgdb; }; extern struct debuggerinfo_struct kgdb_info[]; -- cgit v1.2.3-70-g09d2 From 495363d380b4f4745bd8677912688654afc44020 Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Fri, 21 May 2010 08:46:00 -0500 Subject: kdb,debug_core: adjust master cpu switch logic against new debug_core locking The kdb shell needs to enforce switching back to the original CPU that took the exception before restoring normal kernel execution. Resuming from a different CPU than what took the original exception will cause problems with spin locks that are freed from a different processor than had taken the lock. The special logic in dbg_cpu_switch() can go away entirely because the state of what cpus want to be masters or slaves will remain unchanged between entry and exit of the debug_core exception context. Signed-off-by: Jason Wessel --- kernel/debug/debug_core.c | 16 ++-------------- kernel/debug/kdb/kdb_debugger.c | 3 +-- kernel/debug/kdb/kdb_main.c | 12 ++++++------ 3 files changed, 9 insertions(+), 22 deletions(-) (limited to 'kernel') diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index 26dbdc37d21..fec596da9bd 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -460,19 +460,6 @@ static int kgdb_reenter_check(struct kgdb_state *ks) return 1; } -static void dbg_cpu_switch(int cpu, int next_cpu) -{ - /* Mark the cpu we are switching away from as a slave when it - * holds the kgdb_active token. This must be done so that the - * that all the cpus wait in for the debug core will not enter - * again as the master.
*/ - if (cpu == atomic_read(&kgdb_active)) { - kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE; - kgdb_info[cpu].exception_state &= ~DCPU_WANT_MASTER; - } - kgdb_info[next_cpu].exception_state |= DCPU_NEXT_MASTER; -} - static void dbg_touch_watchdogs(void) { touch_softlockup_watchdog_sync(); @@ -638,7 +625,8 @@ cpu_master_loop: if (error == DBG_PASS_EVENT) { dbg_kdb_mode = !dbg_kdb_mode; } else if (error == DBG_SWITCH_CPU_EVENT) { - dbg_cpu_switch(cpu, dbg_switch_cpu); + kgdb_info[dbg_switch_cpu].exception_state |= + DCPU_NEXT_MASTER; goto cpu_loop; } else { kgdb_info[cpu].ret_state = error; diff --git a/kernel/debug/kdb/kdb_debugger.c b/kernel/debug/kdb/kdb_debugger.c index bf6e8270e95..dd0b1b7dd02 100644 --- a/kernel/debug/kdb/kdb_debugger.c +++ b/kernel/debug/kdb/kdb_debugger.c @@ -86,7 +86,7 @@ int kdb_stub(struct kgdb_state *ks) } /* Set initial kdb state variables */ KDB_STATE_CLEAR(KGDB_TRANS); - kdb_initial_cpu = ks->cpu; + kdb_initial_cpu = atomic_read(&kgdb_active); kdb_current_task = kgdb_info[ks->cpu].task; kdb_current_regs = kgdb_info[ks->cpu].debuggerinfo; /* Remove any breakpoints as needed by kdb and clear single step */ @@ -105,7 +105,6 @@ int kdb_stub(struct kgdb_state *ks) ks->pass_exception = 1; KDB_FLAG_SET(CATASTROPHIC); } - kdb_initial_cpu = ks->cpu; if (KDB_STATE(SSBPT) && reason == KDB_REASON_SSTEP) { KDB_STATE_CLEAR(SSBPT); KDB_STATE_CLEAR(DOING_SS); diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 4226f32517d..d7bda21a106 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -1749,13 +1749,13 @@ static int kdb_go(int argc, const char **argv) int nextarg; long offset; + if (raw_smp_processor_id() != kdb_initial_cpu) { + kdb_printf("go must execute on the entry cpu, " + "please use \"cpu %d\" and then execute go\n", + kdb_initial_cpu); + return KDB_BADCPUNUM; + } if (argc == 1) { - if (raw_smp_processor_id() != kdb_initial_cpu) { - kdb_printf("go
must be issued from the " - "initial cpu, do cpu %d first\n", - kdb_initial_cpu); - return KDB_ARGCOUNT; - } nextarg = 1; diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL); -- cgit v1.2.3-70-g09d2 From aa7b250c252cc8e6b1daf0e1eada5eba42a1a68d Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Thu, 21 Oct 2010 22:17:19 -0700 Subject: tracing: Fix 'faild' -> 'failed' typo Signed-off-by: Joe Perches Cc: Steven Rostedt Cc: Frederic Weisbecker Cc: Jiri Kosina LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/trace/trace_kprobe.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 544301d29de..b8d2852baa4 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -648,7 +648,7 @@ static int register_trace_probe(struct trace_probe *tp) } ret = register_probe_event(tp); if (ret) { - pr_warning("Faild to register probe event(%d)\n", ret); + pr_warning("Failed to register probe event(%d)\n", ret); goto end; } -- cgit v1.2.3-70-g09d2 From d4a6f3c32c39132318454e77d59ab14b06f6eb02 Mon Sep 17 00:00:00 2001 From: Rakib Mullick Date: Sun, 24 Oct 2010 16:28:47 +0600 Subject: sched_stat: Update sched_info_queue/dequeue() code comments Remove some sched_info_queue(), sched_info_dequeue() code comments. We no longer belong to the era of O(1) and we don't use active or expired array anymore. Signed-off-by: Rakib Mullick Cc: Oleg Nesterov Cc: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/sched_stats.h | 20 +------------------- 1 file changed, 1 insertion(+), 19 deletions(-) (limited to 'kernel') diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h index 25c2f962f6f..48ddf431db0 100644 --- a/kernel/sched_stats.h +++ b/kernel/sched_stats.h @@ -157,15 +157,7 @@ static inline void sched_info_reset_dequeued(struct task_struct *t) } /* - * Called when a process is dequeued from the active array and given - * the cpu. We should note that with the exception of interactive - * tasks, the expired queue will become the active queue after the active - * queue is empty, without explicitly dequeuing and requeuing tasks in the - * expired queue. (Interactive tasks may be requeued directly to the - * active queue, thus delaying tasks in the expired queue from running; - * see scheduler_tick()). - * - * Though we are interested in knowing how long it was from the *first* time a + * We are interested in knowing how long it was from the *first* time a * task was queued to the time that it finally hit a cpu, we call this routine * from dequeue_task() to account for possible rq->clock skew across cpus. The * delta taken on each cpu would annul the skew. @@ -203,16 +195,6 @@ static void sched_info_arrive(struct task_struct *t) } /* - * Called when a process is queued into either the active or expired - * array. The time is noted and later used to determine how long we - * had to wait for us to reach the cpu. Since the expired queue will - * become the active queue after active queue is empty, without dequeuing - * and requeuing any tasks, we are interested in queuing to either. It - * is unusual but not impossible for tasks to be dequeued and immediately - * requeued in the same or another array: this can happen in sched_yield(), - * set_user_nice(), and even load_balance() as it moves tasks from runqueue - * to runqueue. - * * This function is only called from enqueue_task(), but also only updates * the timestamp if it is already not set.
It's assumed that * sched_info_dequeued() will clear that stamp when appropriate. -- cgit v1.2.3-70-g09d2 From 43948f50276eca010a22726860dfe9a4e8130136 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Mon, 25 Oct 2010 22:18:01 +0900 Subject: kprobes: Remove redundant text_mutex lock in optimize Remove text_mutex locking in optimize_all_kprobes, because this function doesn't modify text. It simply queues probes on the optimization list for the kprobe_optimizer worker thread. Signed-off-by: Masami Hiramatsu Cc: Ananth N Mavinakayanahalli Cc: Anil S Keshavamurthy Cc: David S. Miller Cc: Namhyung Kim Cc: Jason Baron Cc: Peter Zijlstra LKML-Reference: <20101025131801.19160.70939.stgit@ltc236.sdl.hitachi.co.jp> Signed-off-by: Ingo Molnar --- kernel/kprobes.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/kprobes.c b/kernel/kprobes.c index ec4210c6501..7c44133f51e 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -74,7 +74,8 @@ static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE]; /* NOTE: change this value only with kprobe_mutex held */ static bool kprobes_all_disarmed; -static DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */ +/* This protects kprobe_table and optimizing_list */ +static DEFINE_MUTEX(kprobe_mutex); static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; static struct { spinlock_t lock ____cacheline_aligned_in_smp; @@ -595,6 +596,7 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p) } #ifdef CONFIG_SYSCTL +/* This should be called with kprobe_mutex locked */ static void __kprobes optimize_all_kprobes(void) { struct hlist_head *head; @@ -607,17 +609,16 @@ static void __kprobes optimize_all_kprobes(void) return; kprobes_allow_optimization = true; - mutex_lock(&text_mutex); for (i = 0; i < KPROBE_TABLE_SIZE; i++) { head = &kprobe_table[i]; hlist_for_each_entry_rcu(p, node, head, hlist) if (!kprobe_disabled(p)) optimize_kprobe(p); } - mutex_unlock(&text_mutex); printk(KERN_INFO "Kprobes globally optimized\n"); } +/* This should be called with kprobe_mutex locked */ static void __kprobes unoptimize_all_kprobes(void) { struct hlist_head *head; -- cgit v1.2.3-70-g09d2 From 5260562754c0aa4b95eebb1f851eaccce7286365 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 25 Oct 2010 23:41:11 +0100 Subject: MN10300: Fix the PERCPU() alignment to allow for workqueues In the MN10300 arch, we occasionally see an assertion being tripped in alloc_cwqs() at the following line: /* just in case, make sure it's actually aligned */ ---> BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align)); return wq->cpu_wq.v ? 0 : -ENOMEM; The values are: wq->cpu_wq.v => 0x902776e0 align => 0x100 and align is calculated by the following: const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS, __alignof__(unsigned long long)); This is because the pointer in question (wq->cpu_wq.v) loses some of its lower bits to control flags, and so the object it points to must be sufficiently aligned to avoid the need to use those bits for pointing to things. Currently, 4 control bits and 4 colour bits are used in normal circumstances, plus a debugging bit if debugging is set. This requires the cpu_workqueue_struct struct to be at least 256 bytes aligned (or 512 bytes aligned with debugging). PERCPU() alignment on MN10300, however, is only 32 bytes as set in vmlinux.lds.S. So we set this to PAGE_SIZE (4096) to match most other arches and stick a comment in alloc_cwqs() for anyone else who triggers the assertion.
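The arithmetic, spelled out (WORK_STRUCT_FLAG_BITS = 8 here follows from the 4 control + 4 colour bits described above):

	/*
	 * align = max(1 << 8, __alignof__(unsigned long long)) = 256
	 *
	 * 0x902776e0 % 256 = 0xe0 != 0, hence the BUG_ON; a PERCPU(32)
	 * section only guarantees 32-byte alignment, while
	 * PERCPU(PAGE_SIZE) covers both the 256- and the 512-byte
	 * (debug) case.
	 */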
Reported-by: Akira Takeuchi Signed-off-by: David Howells Acked-by: Mark Salter Cc: Tejun Heo Signed-off-by: Linus Torvalds --- arch/mn10300/kernel/vmlinux.lds.S | 2 +- kernel/workqueue.c | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/arch/mn10300/kernel/vmlinux.lds.S b/arch/mn10300/kernel/vmlinux.lds.S index 10549dcfb61..febbeee7f2f 100644 --- a/arch/mn10300/kernel/vmlinux.lds.S +++ b/arch/mn10300/kernel/vmlinux.lds.S @@ -70,7 +70,7 @@ SECTIONS .exit.text : { EXIT_TEXT; } .exit.data : { EXIT_DATA; } - PERCPU(32) + PERCPU(PAGE_SIZE) . = ALIGN(PAGE_SIZE); __init_end = .; /* freed after init ends here */ diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 30acdb74cc2..e5ff2cbaadc 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2791,7 +2791,9 @@ static int alloc_cwqs(struct workqueue_struct *wq) } } - /* just in case, make sure it's actually aligned */ + /* just in case, make sure it's actually aligned + * - this is affected by PERCPU() alignment in vmlinux.lds.S + */ BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align)); return wq->cpu_wq.v ? 0 : -ENOMEM; } -- cgit v1.2.3-70-g09d2 From 7e360c38abe2c70eae3ba5a8a17f17671d8b77c5 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 5 Oct 2010 09:32:55 +0200 Subject: fs: allow for more than 2^31 files Andrew, Could you please review this patch, you probably are the right guy to take it, because it crosses fs and net trees. Note : /proc/sys/fs/file-nr is a read-only file, so this patch doesn't depend on previous patch (sysctl: fix min/max handling in __do_proc_doulongvec_minmax()) Thanks ! [PATCH V4] fs: allow for more than 2^31 files Robin Holt tried to boot a 16TB system and found af_unix was overflowing a 32bit value : We were seeing a failure which prevented boot. The kernel was incapable of creating either a named pipe or unix domain socket. This comes down to a common kernel function called unix_create1() which does: atomic_inc(&unix_nr_socks); if (atomic_read(&unix_nr_socks) > 2 * get_max_files()) goto out; The function get_max_files() is a simple return of files_stat.max_files. files_stat.max_files is a signed integer and is computed in fs/file_table.c's files_init(). n = (mempages * (PAGE_SIZE / 1024)) / 10; files_stat.max_files = n; In our case, mempages (total_ram_pages) is approx 3,758,096,384 (0xe0000000). That leaves max_files at approximately 1,503,238,553. This causes 2 * get_max_files() to integer overflow. Fix is to let /proc/sys/fs/file-nr & /proc/sys/fs/file-max use long integers, and change af_unix to use an atomic_long_t instead of atomic_t. get_max_files() is changed to return an unsigned long. get_nr_files() is changed to return a long. unix_nr_socks is changed from atomic_t to atomic_long_t, while not strictly needed to address Robin's problem.
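The overflow is easy to reproduce in isolation (a stand-alone demo with the values from the report; the wrapped result assumes the usual two's-complement behaviour):

	#include <stdio.h>

	int main(void)
	{
		unsigned long mempages = 3758096384UL;	/* 0xe0000000, from the report */
		int max_files = (mempages * (4096 / 1024)) / 10;	/* 1503238553 */

		/* 2 * 1503238553 = 3006477106 does not fit in a signed int: */
		printf("2 * max_files = %d\n", 2 * max_files);	/* -1288490190 */
		return 0;
	}

Any positive socket count compares greater than that negative bound, so unix_create1() always bails out.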
Before patch (on a 64bit kernel) : # echo 2147483648 >/proc/sys/fs/file-max # cat /proc/sys/fs/file-max -18446744071562067968 After patch: # echo 2147483648 >/proc/sys/fs/file-max # cat /proc/sys/fs/file-max 2147483648 # cat /proc/sys/fs/file-nr 704 0 2147483648 Reported-by: Robin Holt Signed-off-by: Eric Dumazet Acked-by: David Miller Reviewed-by: Robin Holt Tested-by: Robin Holt Signed-off-by: Al Viro --- fs/file_table.c | 17 +++++++---------- include/linux/fs.h | 8 ++++---- kernel/sysctl.c | 6 +++--- net/unix/af_unix.c | 14 +++++++------- 4 files changed, 21 insertions(+), 24 deletions(-) (limited to 'kernel') diff --git a/fs/file_table.c b/fs/file_table.c index a04bdd81c11..c3dee381f1b 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -60,7 +60,7 @@ static inline void file_free(struct file *f) /* * Return the total number of open files in the system */ -static int get_nr_files(void) +static long get_nr_files(void) { return percpu_counter_read_positive(&nr_files); } @@ -68,7 +68,7 @@ static int get_nr_files(void) /* * Return the maximum number of open files in the system */ -int get_max_files(void) +unsigned long get_max_files(void) { return files_stat.max_files; } @@ -82,7 +82,7 @@ int proc_nr_files(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { files_stat.nr_files = get_nr_files(); - return proc_dointvec(table, write, buffer, lenp, ppos); + return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); } #else int proc_nr_files(ctl_table *table, int write, @@ -105,7 +105,7 @@ int proc_nr_files(ctl_table *table, int write, struct file *get_empty_filp(void) { const struct cred *cred = current_cred(); - static int old_max; + static long old_max; struct file * f; /* @@ -140,8 +140,7 @@ struct file *get_empty_filp(void) over: /* Ran out of filps - report that */ if (get_nr_files() > old_max) { - printk(KERN_INFO "VFS: file-max limit %d reached\n", - get_max_files()); + pr_info("VFS: file-max limit %lu reached\n", get_max_files()); old_max = get_nr_files(); } goto fail; @@ -487,7 +486,7 @@ retry: void __init files_init(unsigned long mempages) { - int n; + unsigned long n; filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); @@ -498,9 +497,7 @@ void __init files_init(unsigned long mempages) */ n = (mempages * (PAGE_SIZE / 1024)) / 10; - files_stat.max_files = n; - if (files_stat.max_files < NR_FILE) - files_stat.max_files = NR_FILE; + files_stat.max_files = max_t(unsigned long, n, NR_FILE); files_defer_init(); lg_lock_init(files_lglock); percpu_counter_init(&nr_files, 0); diff --git a/include/linux/fs.h b/include/linux/fs.h index 0a5d8363388..0cd6821013a 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -34,9 +34,9 @@ /* And dynamically-tunable limits and defaults: */ struct files_stat_struct { - int nr_files; /* read only */ - int nr_free_files; /* read only */ - int max_files; /* tunable */ + unsigned long nr_files; /* read only */ + unsigned long nr_free_files; /* read only */ + unsigned long max_files; /* tunable */ }; struct inodes_stat_t { @@ -400,7 +400,7 @@ extern void __init inode_init_early(void); extern void __init files_init(unsigned long); extern struct files_stat_struct files_stat; -extern int get_max_files(void); +extern unsigned long get_max_files(void); extern int sysctl_nr_open; extern struct inodes_stat_t inodes_stat; extern int leases_enable, lease_break_time; diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 3a45c224770..694b140852c 100644 --- a/kernel/sysctl.c +++ 
b/kernel/sysctl.c @@ -1352,16 +1352,16 @@ static struct ctl_table fs_table[] = { { .procname = "file-nr", .data = &files_stat, - .maxlen = 3*sizeof(int), + .maxlen = sizeof(files_stat), .mode = 0444, .proc_handler = proc_nr_files, }, { .procname = "file-max", .data = &files_stat.max_files, - .maxlen = sizeof(int), + .maxlen = sizeof(files_stat.max_files), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_doulongvec_minmax, }, { .procname = "nr_open", diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 0ebc777a666..3c95304a081 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -117,7 +117,7 @@ static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1]; static DEFINE_SPINLOCK(unix_table_lock); -static atomic_t unix_nr_socks = ATOMIC_INIT(0); +static atomic_long_t unix_nr_socks; #define unix_sockets_unbound (&unix_socket_table[UNIX_HASH_SIZE]) @@ -360,13 +360,13 @@ static void unix_sock_destructor(struct sock *sk) if (u->addr) unix_release_addr(u->addr); - atomic_dec(&unix_nr_socks); + atomic_long_dec(&unix_nr_socks); local_bh_disable(); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); local_bh_enable(); #ifdef UNIX_REFCNT_DEBUG - printk(KERN_DEBUG "UNIX %p is destroyed, %d are still alive.\n", sk, - atomic_read(&unix_nr_socks)); + printk(KERN_DEBUG "UNIX %p is destroyed, %ld are still alive.\n", sk, + atomic_long_read(&unix_nr_socks)); #endif } @@ -606,8 +606,8 @@ static struct sock *unix_create1(struct net *net, struct socket *sock) struct sock *sk = NULL; struct unix_sock *u; - atomic_inc(&unix_nr_socks); - if (atomic_read(&unix_nr_socks) > 2 * get_max_files()) + atomic_long_inc(&unix_nr_socks); + if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) goto out; sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto); @@ -632,7 +632,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock) unix_insert_socket(unix_sockets_unbound, sk); out: if (sk == NULL) - atomic_dec(&unix_nr_socks); + atomic_long_dec(&unix_nr_socks); else { local_bh_disable(); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); -- cgit v1.2.3-70-g09d2 From cffbc8aa334f55c9ed42d25202eb3ebf3a97c195 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Sat, 23 Oct 2010 05:03:02 -0400 Subject: fs: Convert nr_inodes and nr_unused to per-cpu counters The number of inodes allocated does not need to be tied to the addition or removal of an inode to/from a list. If we are not tied to a list lock, we could update the counters when inodes are initialised or destroyed, but to do that we need to convert the counters to be per-cpu (i.e. independent of a lock). This means that we have the freedom to change the list/locking implementation without needing to care about the counters. Based on a patch originally from Eric Dumazet. 
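The underlying pattern (a sketch of the percpu_counter API as used in the diff below; the fast paths take no global lock, the summed read is the slow, accurate one):

	static struct percpu_counter nr_inodes;

	percpu_counter_init(&nr_inodes, 0);		/* at boot */
	percpu_counter_inc(&nr_inodes);			/* inode_init_always() */
	percpu_counter_dec(&nr_inodes);			/* __destroy_inode() */
	n = percpu_counter_sum_positive(&nr_inodes);	/* /proc reads, shrinker */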
[AV: cleaned up a bit, fixed build breakage on weird configs] Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/fs-writeback.c | 5 ++--- fs/inode.c | 64 ++++++++++++++++++++++++++++++++++++++---------------- fs/internal.h | 1 + include/linux/fs.h | 3 ++- kernel/sysctl.c | 4 ++-- 5 files changed, 52 insertions(+), 25 deletions(-) (limited to 'kernel') diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 39f44f2e709..f04d04af84f 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -723,7 +723,7 @@ static long wb_check_old_data_flush(struct bdi_writeback *wb) wb->last_old_flush = jiffies; nr_pages = global_page_state(NR_FILE_DIRTY) + global_page_state(NR_UNSTABLE_NFS) + - (inodes_stat.nr_inodes - inodes_stat.nr_unused); + get_nr_dirty_inodes(); if (nr_pages) { struct wb_writeback_work work = { @@ -1090,8 +1090,7 @@ void writeback_inodes_sb(struct super_block *sb) WARN_ON(!rwsem_is_locked(&sb->s_umount)); - work.nr_pages = nr_dirty + nr_unstable + - (inodes_stat.nr_inodes - inodes_stat.nr_unused); + work.nr_pages = nr_dirty + nr_unstable + get_nr_dirty_inodes(); bdi_queue_work(sb->s_bdi, &work); wait_for_completion(&done); diff --git a/fs/inode.c b/fs/inode.c index 4440cf1034e..0d5aeccbdd9 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -103,8 +103,41 @@ static DECLARE_RWSEM(iprune_sem); */ struct inodes_stat_t inodes_stat; +static struct percpu_counter nr_inodes __cacheline_aligned_in_smp; +static struct percpu_counter nr_inodes_unused __cacheline_aligned_in_smp; + static struct kmem_cache *inode_cachep __read_mostly; +static inline int get_nr_inodes(void) +{ + return percpu_counter_sum_positive(&nr_inodes); +} + +static inline int get_nr_inodes_unused(void) +{ + return percpu_counter_sum_positive(&nr_inodes_unused); +} + +int get_nr_dirty_inodes(void) +{ + int nr_dirty = get_nr_inodes() - get_nr_inodes_unused(); + return nr_dirty > 0 ?
nr_dirty : 0; + +} + +/* + * Handle nr_inode sysctl + */ +#ifdef CONFIG_SYSCTL +int proc_nr_inodes(ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + inodes_stat.nr_inodes = get_nr_inodes(); + inodes_stat.nr_unused = get_nr_inodes_unused(); + return proc_dointvec(table, write, buffer, lenp, ppos); +} +#endif + static void wake_up_inode(struct inode *inode) { /* @@ -192,6 +225,8 @@ int inode_init_always(struct super_block *sb, struct inode *inode) inode->i_fsnotify_mask = 0; #endif + percpu_counter_inc(&nr_inodes); + return 0; out: return -ENOMEM; @@ -232,6 +267,7 @@ void __destroy_inode(struct inode *inode) if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED) posix_acl_release(inode->i_default_acl); #endif + percpu_counter_dec(&nr_inodes); } EXPORT_SYMBOL(__destroy_inode); @@ -286,7 +322,7 @@ void __iget(struct inode *inode) if (!(inode->i_state & (I_DIRTY|I_SYNC))) list_move(&inode->i_list, &inode_in_use); - inodes_stat.nr_unused--; + percpu_counter_dec(&nr_inodes_unused); } void end_writeback(struct inode *inode) @@ -327,8 +363,6 @@ static void evict(struct inode *inode) */ static void dispose_list(struct list_head *head) { - int nr_disposed = 0; - while (!list_empty(head)) { struct inode *inode; @@ -344,11 +378,7 @@ static void dispose_list(struct list_head *head) wake_up_inode(inode); destroy_inode(inode); - nr_disposed++; } - spin_lock(&inode_lock); - inodes_stat.nr_inodes -= nr_disposed; - spin_unlock(&inode_lock); } /* @@ -357,7 +387,7 @@ static void dispose_list(struct list_head *head) static int invalidate_list(struct list_head *head, struct list_head *dispose) { struct list_head *next; - int busy = 0, count = 0; + int busy = 0; next = head->next; for (;;) { @@ -383,13 +413,11 @@ static int invalidate_list(struct list_head *head, struct list_head *dispose) list_move(&inode->i_list, dispose); WARN_ON(inode->i_state & I_NEW); inode->i_state |= I_FREEING; - count++; + percpu_counter_dec(&nr_inodes_unused); continue; } busy = 1; } - /* only unused inodes may be cached with i_count zero */ - inodes_stat.nr_unused -= count; return busy; } @@ -447,7 +475,6 @@ static int can_unuse(struct inode *inode) static void prune_icache(int nr_to_scan) { LIST_HEAD(freeable); - int nr_pruned = 0; int nr_scanned; unsigned long reap = 0; @@ -483,9 +510,8 @@ static void prune_icache(int nr_to_scan) list_move(&inode->i_list, &freeable); WARN_ON(inode->i_state & I_NEW); inode->i_state |= I_FREEING; - nr_pruned++; + percpu_counter_dec(&nr_inodes_unused); } - inodes_stat.nr_unused -= nr_pruned; if (current_is_kswapd()) __count_vm_events(KSWAPD_INODESTEAL, reap); else @@ -517,7 +543,7 @@ static int shrink_icache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) return -1; prune_icache(nr); } - return (inodes_stat.nr_unused / 100) * sysctl_vfs_cache_pressure; + return (get_nr_inodes_unused() / 100) * sysctl_vfs_cache_pressure; } static struct shrinker icache_shrinker = { @@ -594,7 +620,6 @@ static inline void __inode_add_to_lists(struct super_block *sb, struct hlist_head *head, struct inode *inode) { - inodes_stat.nr_inodes++; list_add(&inode->i_list, &inode_in_use); list_add(&inode->i_sb_list, &sb->s_inodes); if (head) @@ -1214,7 +1239,7 @@ static void iput_final(struct inode *inode) if (!drop) { if (!(inode->i_state & (I_DIRTY|I_SYNC))) list_move(&inode->i_list, &inode_unused); - inodes_stat.nr_unused++; + percpu_counter_inc(&nr_inodes_unused); if (sb->s_flags & MS_ACTIVE) { spin_unlock(&inode_lock); return; @@ -1226,14 +1251,13 @@ static void 
iput_final(struct inode *inode) spin_lock(&inode_lock); WARN_ON(inode->i_state & I_NEW); inode->i_state &= ~I_WILL_FREE; - inodes_stat.nr_unused--; + percpu_counter_dec(&nr_inodes_unused); hlist_del_init(&inode->i_hash); } list_del_init(&inode->i_list); list_del_init(&inode->i_sb_list); WARN_ON(inode->i_state & I_NEW); inode->i_state |= I_FREEING; - inodes_stat.nr_inodes--; spin_unlock(&inode_lock); evict(inode); spin_lock(&inode_lock); @@ -1502,6 +1526,8 @@ void __init inode_init(void) SLAB_MEM_SPREAD), init_once); register_shrinker(&icache_shrinker); + percpu_counter_init(&nr_inodes, 0); + percpu_counter_init(&nr_inodes_unused, 0); /* Hash may have been set up in inode_init_early */ if (!hashdist) diff --git a/fs/internal.h b/fs/internal.h index f6dce46d80d..4cc67eb6ed5 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -105,4 +105,5 @@ extern void release_open_intent(struct nameidata *); /* * inode.c */ +extern int get_nr_dirty_inodes(void); extern int invalidate_inodes(struct super_block *); diff --git a/include/linux/fs.h b/include/linux/fs.h index 78043da85e1..a3937a8ee95 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2486,7 +2486,8 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf, struct ctl_table; int proc_nr_files(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); - +int proc_nr_inodes(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); int __init get_filesystem_list(char *buf); #define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE]) diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 694b140852c..99a510cbfbb 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -1340,14 +1340,14 @@ static struct ctl_table fs_table[] = { .data = &inodes_stat, .maxlen = 2*sizeof(int), .mode = 0444, - .proc_handler = proc_dointvec, + .proc_handler = proc_nr_inodes, }, { .procname = "inode-state", .data = &inodes_stat, .maxlen = 7*sizeof(int), .mode = 0444, - .proc_handler = proc_dointvec, + .proc_handler = proc_nr_inodes, }, { .procname = "file-nr", -- cgit v1.2.3-70-g09d2 From 7de9c6ee3ecffd99e1628e81a5ea5468f7581a1f Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 23 Oct 2010 11:11:40 -0400 Subject: new helper: ihold() Clones an existing reference to inode; caller must already hold one. 
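[Ed: the contract of the new helper, sketched as standalone C; the struct and the assert are stand-ins for the kernel's inode and WARN_ON, so this is illustrative rather than the real implementation:

    #include <assert.h>
    #include <stdatomic.h>

    struct inode_sketch {
        atomic_int i_count;    /* reference count */
    };

    /*
     * Take another reference to an inode the caller already pins; the
     * count can therefore never legitimately be raised from zero here.
     */
    static void ihold_sketch(struct inode_sketch *inode)
    {
        int newcount = atomic_fetch_add(&inode->i_count, 1) + 1;
        assert(newcount >= 2);    /* the kernel warns via WARN_ON() instead */
    }

Unlike igrab(), which must cope with an inode that may be on its way out and can fail, ihold() encodes the caller-already-holds-a-reference precondition, which is why the conversions below can replace bare atomic_inc(&inode->i_count) calls one for one.]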
Signed-off-by: Al Viro --- fs/9p/vfs_inode.c | 5 +++-- fs/affs/inode.c | 2 +- fs/afs/dir.c | 2 +- fs/aio.c | 5 ++--- fs/anon_inodes.c | 5 ++--- fs/bfs/dir.c | 2 +- fs/block_dev.c | 8 ++++---- fs/btrfs/inode.c | 2 +- fs/coda/dir.c | 2 +- fs/exofs/namei.c | 2 +- fs/ext2/namei.c | 2 +- fs/ext3/namei.c | 2 +- fs/ext4/namei.c | 2 +- fs/gfs2/ops_inode.c | 2 +- fs/hfsplus/dir.c | 2 +- fs/inode.c | 9 +++++++++ fs/jffs2/dir.c | 4 ++-- fs/jfs/jfs_txnmgr.c | 2 +- fs/jfs/namei.c | 2 +- fs/libfs.c | 2 +- fs/logfs/dir.c | 2 +- fs/minix/namei.c | 2 +- fs/namei.c | 2 +- fs/nfs/dir.c | 2 +- fs/nfs/getroot.c | 3 +-- fs/nilfs2/namei.c | 2 +- fs/ntfs/super.c | 4 ++-- fs/ocfs2/namei.c | 2 +- fs/reiserfs/namei.c | 2 +- fs/sysv/namei.c | 2 +- fs/ubifs/dir.c | 2 +- fs/udf/namei.c | 2 +- fs/ufs/namei.c | 2 +- fs/xfs/linux-2.6/xfs_iops.c | 2 +- fs/xfs/xfs_inode.h | 2 +- include/linux/fs.h | 1 + ipc/mqueue.c | 2 +- kernel/futex.c | 2 +- mm/shmem.c | 2 +- net/socket.c | 2 +- 40 files changed, 57 insertions(+), 49 deletions(-) (limited to 'kernel') diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 9e670d52764..ef5905f7c8a 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -1789,9 +1789,10 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir, kfree(st); } else { /* Caching disabled. No need to get upto date stat info. - * This dentry will be released immediately. So, just i_count++ + * This dentry will be released immediately. So, just hold the + * inode */ - atomic_inc(&old_dentry->d_inode->i_count); + ihold(old_dentry->d_inode); } dentry->d_op = old_dentry->d_op; diff --git a/fs/affs/inode.c b/fs/affs/inode.c index 3a0fdec175b..5d828903ac6 100644 --- a/fs/affs/inode.c +++ b/fs/affs/inode.c @@ -388,7 +388,7 @@ affs_add_entry(struct inode *dir, struct inode *inode, struct dentry *dentry, s3 affs_adjust_checksum(inode_bh, block - be32_to_cpu(chain)); mark_buffer_dirty_inode(inode_bh, inode); inode->i_nlink = 2; - atomic_inc(&inode->i_count); + ihold(inode); } affs_fix_checksum(sb, bh); mark_buffer_dirty_inode(bh, inode); diff --git a/fs/afs/dir.c b/fs/afs/dir.c index 0d38c09bd55..5439e1bc9a8 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -1045,7 +1045,7 @@ static int afs_link(struct dentry *from, struct inode *dir, if (ret < 0) goto link_error; - atomic_inc(&vnode->vfs_inode.i_count); + ihold(&vnode->vfs_inode); d_instantiate(dentry, &vnode->vfs_inode); key_put(key); _leave(" = 0"); diff --git a/fs/aio.c b/fs/aio.c index 9e319a04780..8c8f6c5b6d7 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -1553,10 +1553,9 @@ static void aio_batch_add(struct address_space *mapping, * * When we're called, we always have a reference * on the file, so we must always have a reference - * on the inode, so igrab must always just - * bump the count and move on. + * on the inode, so ihold() is safe here. */ - atomic_inc(&mapping->host->i_count); + ihold(mapping->host); abe->mapping = mapping; hlist_add_head(&abe->list, &batch_hash[bucket]); return; diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c index e4b75d6eda8..9c8e87b0361 100644 --- a/fs/anon_inodes.c +++ b/fs/anon_inodes.c @@ -111,10 +111,9 @@ struct file *anon_inode_getfile(const char *name, path.mnt = mntget(anon_inode_mnt); /* * We know the anon_inode inode count is always greater than zero, - * so we can avoid doing an igrab() and we can use an open-coded - * atomic_inc(). + * so ihold() is safe. 
*/ - atomic_inc(&anon_inode_inode->i_count); + ihold(anon_inode_inode); path.dentry->d_op = &anon_inodefs_dentry_operations; d_instantiate(path.dentry, anon_inode_inode); diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c index d967e052b77..685ecff3ab3 100644 --- a/fs/bfs/dir.c +++ b/fs/bfs/dir.c @@ -176,7 +176,7 @@ static int bfs_link(struct dentry *old, struct inode *dir, inc_nlink(inode); inode->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(inode); - atomic_inc(&inode->i_count); + ihold(inode); d_instantiate(new, inode); mutex_unlock(&info->bfs_lock); return 0; diff --git a/fs/block_dev.c b/fs/block_dev.c index b737451e2e9..81972eb34b3 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -550,7 +550,7 @@ EXPORT_SYMBOL(bdget); */ struct block_device *bdgrab(struct block_device *bdev) { - atomic_inc(&bdev->bd_inode->i_count); + ihold(bdev->bd_inode); return bdev; } @@ -580,7 +580,7 @@ static struct block_device *bd_acquire(struct inode *inode) spin_lock(&bdev_lock); bdev = inode->i_bdev; if (bdev) { - atomic_inc(&bdev->bd_inode->i_count); + ihold(bdev->bd_inode); spin_unlock(&bdev_lock); return bdev; } @@ -591,12 +591,12 @@ static struct block_device *bd_acquire(struct inode *inode) spin_lock(&bdev_lock); if (!inode->i_bdev) { /* - * We take an additional bd_inode->i_count for inode, + * We take an additional reference to bd_inode, * and it's released in clear_inode() of inode. * So, we can access it via ->i_mapping always * without igrab(). */ - atomic_inc(&bdev->bd_inode->i_count); + ihold(bdev->bd_inode); inode->i_bdev = bdev; inode->i_mapping = bdev->bd_inode->i_mapping; list_add(&inode->i_devices, &bdev->bd_inodes); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index f6f2a0da269..64f99cf69ce 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4758,7 +4758,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, } btrfs_set_trans_block_group(trans, dir); - atomic_inc(&inode->i_count); + ihold(inode); err = btrfs_add_nondir(trans, dentry, inode, 1, index); diff --git a/fs/coda/dir.c b/fs/coda/dir.c index 96fbeab77f2..5d8b3553960 100644 --- a/fs/coda/dir.c +++ b/fs/coda/dir.c @@ -276,7 +276,7 @@ static int coda_link(struct dentry *source_de, struct inode *dir_inode, } coda_dir_update_mtime(dir_inode); - atomic_inc(&inode->i_count); + ihold(inode); d_instantiate(de, inode); inc_nlink(inode); return 0; diff --git a/fs/exofs/namei.c b/fs/exofs/namei.c index b7dd0c23686..264e95d0283 100644 --- a/fs/exofs/namei.c +++ b/fs/exofs/namei.c @@ -153,7 +153,7 @@ static int exofs_link(struct dentry *old_dentry, struct inode *dir, inode->i_ctime = CURRENT_TIME; inode_inc_link_count(inode); - atomic_inc(&inode->i_count); + ihold(inode); return exofs_add_nondir(dentry, inode); } diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index 71efb0e9a3f..f8aecd2e329 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c @@ -206,7 +206,7 @@ static int ext2_link (struct dentry * old_dentry, struct inode * dir, inode->i_ctime = CURRENT_TIME_SEC; inode_inc_link_count(inode); - atomic_inc(&inode->i_count); + ihold(inode); err = ext2_add_link(dentry, inode); if (!err) { diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index 2b35ddb70d6..bce9dce639b 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c @@ -2260,7 +2260,7 @@ retry: inode->i_ctime = CURRENT_TIME_SEC; inc_nlink(inode); - atomic_inc(&inode->i_count); + ihold(inode); err = ext3_add_entry(handle, dentry, inode); if (!err) { diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 314c0d3b3fa..bd39885b599 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ 
-2312,7 +2312,7 @@ retry: inode->i_ctime = ext4_current_time(inode); ext4_inc_count(handle, inode); - atomic_inc(&inode->i_count); + ihold(inode); err = ext4_add_entry(handle, dentry, inode); if (!err) { diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c index 48a274f1674..12cbea7502c 100644 --- a/fs/gfs2/ops_inode.c +++ b/fs/gfs2/ops_inode.c @@ -255,7 +255,7 @@ out_parent: gfs2_holder_uninit(ghs); gfs2_holder_uninit(ghs + 1); if (!error) { - atomic_inc(&inode->i_count); + ihold(inode); d_instantiate(dentry, inode); mark_inode_dirty(inode); } diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c index d236d85ec9d..e318bbc0daf 100644 --- a/fs/hfsplus/dir.c +++ b/fs/hfsplus/dir.c @@ -286,7 +286,7 @@ static int hfsplus_link(struct dentry *src_dentry, struct inode *dst_dir, inc_nlink(inode); hfsplus_instantiate(dst_dentry, inode, cnid); - atomic_inc(&inode->i_count); + ihold(inode); inode->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(inode); sbi->file_count++; diff --git a/fs/inode.c b/fs/inode.c index 430d70f2abe..05ea293d5f3 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -320,6 +320,15 @@ void __iget(struct inode *inode) atomic_inc(&inode->i_count); } +/* + * get additional reference to inode; caller must already hold one. + */ +void ihold(struct inode *inode) +{ + WARN_ON(atomic_inc_return(&inode->i_count) < 2); +} +EXPORT_SYMBOL(ihold); + static void inode_lru_list_add(struct inode *inode) { if (list_empty(&inode->i_list)) { diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c index ed78a3cf3cb..79121aa5858 100644 --- a/fs/jffs2/dir.c +++ b/fs/jffs2/dir.c @@ -289,7 +289,7 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de mutex_unlock(&f->sem); d_instantiate(dentry, old_dentry->d_inode); dir_i->i_mtime = dir_i->i_ctime = ITIME(now); - atomic_inc(&old_dentry->d_inode->i_count); + ihold(old_dentry->d_inode); } return ret; } @@ -864,7 +864,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, printk(KERN_NOTICE "jffs2_rename(): Link succeeded, unlink failed (err %d). 
You now have a hard link\n", ret); /* Might as well let the VFS know */ d_instantiate(new_dentry, old_dentry->d_inode); - atomic_inc(&old_dentry->d_inode->i_count); + ihold(old_dentry->d_inode); new_dir_i->i_mtime = new_dir_i->i_ctime = ITIME(now); return ret; } diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c index d945ea76b44..9466957ec84 100644 --- a/fs/jfs/jfs_txnmgr.c +++ b/fs/jfs/jfs_txnmgr.c @@ -1279,7 +1279,7 @@ int txCommit(tid_t tid, /* transaction identifier */ * lazy commit thread finishes processing */ if (tblk->xflag & COMMIT_DELETE) { - atomic_inc(&tblk->u.ip->i_count); + ihold(tblk->u.ip); /* * Avoid a rare deadlock * diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index a9cf8e8675b..231ca4af9bc 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c @@ -839,7 +839,7 @@ static int jfs_link(struct dentry *old_dentry, ip->i_ctime = CURRENT_TIME; dir->i_ctime = dir->i_mtime = CURRENT_TIME; mark_inode_dirty(dir); - atomic_inc(&ip->i_count); + ihold(ip); iplist[0] = ip; iplist[1] = dir; diff --git a/fs/libfs.c b/fs/libfs.c index 2dbf4877d7e..304a5132ca2 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -255,7 +255,7 @@ int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *den inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; inc_nlink(inode); - atomic_inc(&inode->i_count); + ihold(inode); dget(dentry); d_instantiate(dentry, inode); return 0; diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c index 1eb4e89e045..409dfd65e9a 100644 --- a/fs/logfs/dir.c +++ b/fs/logfs/dir.c @@ -569,7 +569,7 @@ static int logfs_link(struct dentry *old_dentry, struct inode *dir, return -EMLINK; inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; - atomic_inc(&inode->i_count); + ihold(inode); inode->i_nlink++; mark_inode_dirty_sync(inode); diff --git a/fs/minix/namei.c b/fs/minix/namei.c index f3f3578393a..c0d35a3acce 100644 --- a/fs/minix/namei.c +++ b/fs/minix/namei.c @@ -101,7 +101,7 @@ static int minix_link(struct dentry * old_dentry, struct inode * dir, inode->i_ctime = CURRENT_TIME_SEC; inode_inc_link_count(inode); - atomic_inc(&inode->i_count); + ihold(inode); return add_nondir(dentry, inode); } diff --git a/fs/namei.c b/fs/namei.c index f1ef97dbc6c..f7dbc06857a 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2285,7 +2285,7 @@ static long do_unlinkat(int dfd, const char __user *pathname) goto slashes; inode = dentry->d_inode; if (inode) - atomic_inc(&inode->i_count); + ihold(inode); error = mnt_want_write(nd.path.mnt); if (error) goto exit2; diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index e257172d438..0fac7fea18e 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1580,7 +1580,7 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) d_drop(dentry); error = NFS_PROTO(dir)->link(inode, dir, &dentry->d_name); if (error == 0) { - atomic_inc(&inode->i_count); + ihold(inode); d_add(dentry, inode); } return error; diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c index a70e446e160..ac7b814ce16 100644 --- a/fs/nfs/getroot.c +++ b/fs/nfs/getroot.c @@ -54,8 +54,7 @@ static int nfs_superblock_set_dummy_root(struct super_block *sb, struct inode *i iput(inode); return -ENOMEM; } - /* Circumvent igrab(): we know the inode is not being freed */ - atomic_inc(&inode->i_count); + ihold(inode); /* * Ensure that this dentry is invisible to d_find_alias(). 
* Otherwise, it may be spliced into the tree by diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c index 185d1607cb0..6e9557ecf16 100644 --- a/fs/nilfs2/namei.c +++ b/fs/nilfs2/namei.c @@ -207,7 +207,7 @@ static int nilfs_link(struct dentry *old_dentry, struct inode *dir, inode->i_ctime = CURRENT_TIME; inode_inc_link_count(inode); - atomic_inc(&inode->i_count); + ihold(inode); err = nilfs_add_nondir(dentry, inode); if (!err) diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c index d4dec2d3211..d3fbe5730bf 100644 --- a/fs/ntfs/super.c +++ b/fs/ntfs/super.c @@ -2911,8 +2911,8 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent) goto unl_upcase_iput_tmp_ino_err_out_now; } if ((sb->s_root = d_alloc_root(vol->root_ino))) { - /* We increment i_count simulating an ntfs_iget(). */ - atomic_inc(&vol->root_ino->i_count); + /* We grab a reference, simulating an ntfs_iget(). */ + ihold(vol->root_ino); ntfs_debug("Exiting, status successful."); /* Release the default upcase if it has no users. */ mutex_lock(&ntfs_lock); diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index e7bde21149a..ff5744e1e36 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -742,7 +742,7 @@ static int ocfs2_link(struct dentry *old_dentry, goto out_commit; } - atomic_inc(&inode->i_count); + ihold(inode); dentry->d_op = &ocfs2_dentry_ops; d_instantiate(dentry, inode); diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c index ee78d4a0086..ba5f51ec345 100644 --- a/fs/reiserfs/namei.c +++ b/fs/reiserfs/namei.c @@ -1156,7 +1156,7 @@ static int reiserfs_link(struct dentry *old_dentry, struct inode *dir, inode->i_ctime = CURRENT_TIME_SEC; reiserfs_update_sd(&th, inode); - atomic_inc(&inode->i_count); + ihold(inode); d_instantiate(dentry, inode); retval = journal_end(&th, dir->i_sb, jbegin_count); reiserfs_write_unlock(dir->i_sb); diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c index 33e047b59b8..11e7f7d11cd 100644 --- a/fs/sysv/namei.c +++ b/fs/sysv/namei.c @@ -126,7 +126,7 @@ static int sysv_link(struct dentry * old_dentry, struct inode * dir, inode->i_ctime = CURRENT_TIME_SEC; inode_inc_link_count(inode); - atomic_inc(&inode->i_count); + ihold(inode); return add_nondir(dentry, inode); } diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index 87ebcce7221..14f64b689d7 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c @@ -550,7 +550,7 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir, lock_2_inodes(dir, inode); inc_nlink(inode); - atomic_inc(&inode->i_count); + ihold(inode); inode->i_ctime = ubifs_current_time(inode); dir->i_size += sz_change; dir_ui->ui_size = dir->i_size; diff --git a/fs/udf/namei.c b/fs/udf/namei.c index bf5fc674193..6d8dc02baeb 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c @@ -1101,7 +1101,7 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir, inc_nlink(inode); inode->i_ctime = current_fs_time(inode->i_sb); mark_inode_dirty(inode); - atomic_inc(&inode->i_count); + ihold(inode); d_instantiate(dentry, inode); unlock_kernel(); diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c index b056f02b1fb..12f39b9e443 100644 --- a/fs/ufs/namei.c +++ b/fs/ufs/namei.c @@ -180,7 +180,7 @@ static int ufs_link (struct dentry * old_dentry, struct inode * dir, inode->i_ctime = CURRENT_TIME_SEC; inode_inc_link_count(inode); - atomic_inc(&inode->i_count); + ihold(inode); error = ufs_add_nondir(dentry, inode); unlock_kernel(); diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index 71d83c93621..96107efc0c6 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ 
b/fs/xfs/linux-2.6/xfs_iops.c @@ -317,7 +317,7 @@ xfs_vn_link( if (unlikely(error)) return -error; - atomic_inc(&inode->i_count); + ihold(inode); d_instantiate(dentry, inode); return 0; } diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index fac52290de9..fb2ca2e4cdc 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -500,7 +500,7 @@ void xfs_mark_inode_dirty_sync(xfs_inode_t *); #define IHOLD(ip) \ do { \ ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \ - atomic_inc(&(VFS_I(ip)->i_count)); \ + ihold(VFS_I(ip)); \ trace_xfs_ihold(ip, _THIS_IP_); \ } while (0) diff --git a/include/linux/fs.h b/include/linux/fs.h index d43e8b6685a..bd6ae6c71fc 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2171,6 +2171,7 @@ extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin); extern int inode_init_always(struct super_block *, struct inode *); extern void inode_init_once(struct inode *); +extern void ihold(struct inode * inode); extern void iput(struct inode *); extern struct inode * igrab(struct inode *); extern ino_t iunique(struct super_block *, ino_t); diff --git a/ipc/mqueue.c b/ipc/mqueue.c index e1e7b9635f5..80b35ffca25 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -769,7 +769,7 @@ SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name) inode = dentry->d_inode; if (inode) - atomic_inc(&inode->i_count); + ihold(inode); err = mnt_want_write(ipc_ns->mq_mnt); if (err) goto out_err; diff --git a/kernel/futex.c b/kernel/futex.c index a118bf160e0..6c683b37f2c 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -169,7 +169,7 @@ static void get_futex_key_refs(union futex_key *key) switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { case FUT_OFF_INODE: - atomic_inc(&key->shared.inode->i_count); + ihold(key->shared.inode); break; case FUT_OFF_MMSHARED: atomic_inc(&key->private.mm->mm_count); diff --git a/mm/shmem.c b/mm/shmem.c index 27a58120dbd..d4e2852526e 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1903,7 +1903,7 @@ static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentr dir->i_size += BOGO_DIRENT_SIZE; inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; inc_nlink(inode); - atomic_inc(&inode->i_count); /* New dentry reference */ + ihold(inode); /* New dentry reference */ dget(dentry); /* Extra pinning count for the created dentry */ d_instantiate(dentry, inode); out: diff --git a/net/socket.c b/net/socket.c index abf3e256152..d223725f99e 100644 --- a/net/socket.c +++ b/net/socket.c @@ -377,7 +377,7 @@ static int sock_alloc_file(struct socket *sock, struct file **f, int flags) &socket_file_ops); if (unlikely(!file)) { /* drop dentry, keep inode */ - atomic_inc(&path.dentry->d_inode->i_count); + ihold(path.dentry->d_inode); path_put(&path); put_unused_fd(fd); return -ENFILE; -- cgit v1.2.3-70-g09d2 From 85fe4025c616a7c0ed07bc2fc8c5371b07f3888c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 23 Oct 2010 11:19:54 -0400 Subject: fs: do not assign default i_ino in new_inode Instead of always assigning an increasing inode number in new_inode, move the call to assign it into those callers that actually need it. For now the set of callers that need it is estimated conservatively; that is, the call is added to all filesystems that do not assign an i_ino by themselves. For a few more filesystems we can avoid assigning any inode number given that they aren't user-visible, and for others it could be done lazily when an inode number is actually needed, but that's left for later patches.
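[Ed: the allocator exported below (see the fs/inode.c hunk) hands out inode numbers from per-CPU batches, so the shared counter is touched only once per 1024 allocations. A userspace sketch of that scheme, with a per-thread variable standing in for the per-CPU one; names and wrap-around details are illustrative:

    #include <stdatomic.h>

    #define LAST_INO_BATCH 1024

    static _Atomic unsigned int shared_last_ino;
    static _Thread_local unsigned int last_ino;

    static unsigned int get_next_ino_sketch(void)
    {
        unsigned int res = last_ino;

        /* Batch exhausted (or first use): reserve a fresh range of 1024. */
        if ((res & (LAST_INO_BATCH - 1)) == 0)
            res = atomic_fetch_add(&shared_last_ino, LAST_INO_BATCH);

        last_ino = ++res;
        return res;
    }

Callers such as pipes, sockets and the pseudo filesystems patched here only need uniqueness in practice, so the occasional 32-bit wrap is tolerated in exchange for the cheap fast path.]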
Signed-off-by: Christoph Hellwig Signed-off-by: Dave Chinner Signed-off-by: Al Viro --- drivers/infiniband/hw/ipath/ipath_fs.c | 1 + drivers/infiniband/hw/qib/qib_fs.c | 1 + drivers/misc/ibmasm/ibmasmfs.c | 1 + drivers/oprofile/oprofilefs.c | 1 + drivers/usb/core/inode.c | 1 + drivers/usb/gadget/f_fs.c | 1 + drivers/usb/gadget/inode.c | 1 + fs/anon_inodes.c | 1 + fs/autofs4/inode.c | 1 + fs/binfmt_misc.c | 1 + fs/configfs/inode.c | 1 + fs/debugfs/inode.c | 1 + fs/ext4/mballoc.c | 1 + fs/freevxfs/vxfs_inode.c | 1 + fs/fuse/control.c | 1 + fs/hugetlbfs/inode.c | 1 + fs/inode.c | 4 ++-- fs/ocfs2/dlmfs/dlmfs.c | 2 ++ fs/pipe.c | 2 ++ fs/proc/base.c | 2 ++ fs/proc/proc_sysctl.c | 2 ++ fs/ramfs/inode.c | 1 + fs/xfs/linux-2.6/xfs_buf.c | 1 + include/linux/fs.h | 1 + ipc/mqueue.c | 1 + kernel/cgroup.c | 1 + mm/shmem.c | 1 + net/socket.c | 1 + net/sunrpc/rpc_pipe.c | 1 + security/inode.c | 1 + security/selinux/selinuxfs.c | 1 + 31 files changed, 36 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c index d13e72685dc..12d5bf76302 100644 --- a/drivers/infiniband/hw/ipath/ipath_fs.c +++ b/drivers/infiniband/hw/ipath/ipath_fs.c @@ -57,6 +57,7 @@ static int ipathfs_mknod(struct inode *dir, struct dentry *dentry, goto bail; } + inode->i_ino = get_next_ino(); inode->i_mode = mode; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; inode->i_private = data; diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c index a0e6613e8be..7e433d75c77 100644 --- a/drivers/infiniband/hw/qib/qib_fs.c +++ b/drivers/infiniband/hw/qib/qib_fs.c @@ -58,6 +58,7 @@ static int qibfs_mknod(struct inode *dir, struct dentry *dentry, goto bail; } + inode->i_ino = get_next_ino(); inode->i_mode = mode; inode->i_uid = 0; inode->i_gid = 0; diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c index af2497ae5fe..0a53500636c 100644 --- a/drivers/misc/ibmasm/ibmasmfs.c +++ b/drivers/misc/ibmasm/ibmasmfs.c @@ -146,6 +146,7 @@ static struct inode *ibmasmfs_make_inode(struct super_block *sb, int mode) struct inode *ret = new_inode(sb); if (ret) { + ret->i_ino = get_next_ino(); ret->i_mode = mode; ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME; } diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c index 95f711b251a..449de59bf35 100644 --- a/drivers/oprofile/oprofilefs.c +++ b/drivers/oprofile/oprofilefs.c @@ -28,6 +28,7 @@ static struct inode *oprofilefs_get_inode(struct super_block *sb, int mode) struct inode *inode = new_inode(sb); if (inode) { + inode->i_ino = get_next_ino(); inode->i_mode = mode; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; } diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c index 095fa536669..e2f63c0ea09 100644 --- a/drivers/usb/core/inode.c +++ b/drivers/usb/core/inode.c @@ -276,6 +276,7 @@ static struct inode *usbfs_get_inode (struct super_block *sb, int mode, dev_t de struct inode *inode = new_inode(sb); if (inode) { + inode->i_ino = get_next_ino(); inode->i_mode = mode; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c index e4f59505520..e093fd8d04d 100644 --- a/drivers/usb/gadget/f_fs.c +++ b/drivers/usb/gadget/f_fs.c @@ -980,6 +980,7 @@ ffs_sb_make_inode(struct super_block *sb, void *data, if (likely(inode)) { struct timespec current_time = CURRENT_TIME; + inode->i_ino = usbfs_get_inode(); inode->i_mode = 
perms->mode; inode->i_uid = perms->uid; inode->i_gid = perms->gid; diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c index d1d72d946b0..ba145e7fbe0 100644 --- a/drivers/usb/gadget/inode.c +++ b/drivers/usb/gadget/inode.c @@ -1991,6 +1991,7 @@ gadgetfs_make_inode (struct super_block *sb, struct inode *inode = new_inode (sb); if (inode) { + inode->i_ino = get_next_ino(); inode->i_mode = mode; inode->i_uid = default_uid; inode->i_gid = default_gid; diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c index 9c8e87b0361..5365527ca43 100644 --- a/fs/anon_inodes.c +++ b/fs/anon_inodes.c @@ -193,6 +193,7 @@ static struct inode *anon_inode_mkinode(void) if (!inode) return ERR_PTR(-ENOMEM); + inode->i_ino = get_next_ino(); inode->i_fop = &anon_inode_fops; inode->i_mapping->a_ops = &anon_aops; diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index 821b2b955da..ac87e49fa70 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c @@ -398,6 +398,7 @@ struct inode *autofs4_get_inode(struct super_block *sb, inode->i_gid = sb->s_root->d_inode->i_gid; } inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; + inode->i_ino = get_next_ino(); if (S_ISDIR(inf->mode)) { inode->i_nlink = 2; diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index 139fc8083f5..29990f0eee0 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c @@ -495,6 +495,7 @@ static struct inode *bm_get_inode(struct super_block *sb, int mode) struct inode * inode = new_inode(sb); if (inode) { + inode->i_ino = get_next_ino(); inode->i_mode = mode; inode->i_atime = inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb); diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c index cf78d44a8d6..253476d78ed 100644 --- a/fs/configfs/inode.c +++ b/fs/configfs/inode.c @@ -135,6 +135,7 @@ struct inode * configfs_new_inode(mode_t mode, struct configfs_dirent * sd) { struct inode * inode = new_inode(configfs_sb); if (inode) { + inode->i_ino = get_next_ino(); inode->i_mapping->a_ops = &configfs_aops; inode->i_mapping->backing_dev_info = &configfs_backing_dev_info; inode->i_op = &configfs_inode_operations; diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 30a87b3dbca..a4ed8380e98 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -40,6 +40,7 @@ static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t d struct inode *inode = new_inode(sb); if (inode) { + inode->i_ino = get_next_ino(); inode->i_mode = mode; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; switch (mode & S_IFMT) { diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 19aa0d44d82..42f77b1dc72 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -2373,6 +2373,7 @@ static int ext4_mb_init_backend(struct super_block *sb) printk(KERN_ERR "EXT4-fs: can't get new inode\n"); goto err_freesgi; } + sbi->s_buddy_cache->i_ino = get_next_ino(); EXT4_I(sbi->s_buddy_cache)->i_disksize = 0; for (i = 0; i < ngroups; i++) { desc = ext4_get_group_desc(sb, i, NULL); diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c index 79d1b4ea13e..8c04eac5079 100644 --- a/fs/freevxfs/vxfs_inode.c +++ b/fs/freevxfs/vxfs_inode.c @@ -260,6 +260,7 @@ vxfs_get_fake_inode(struct super_block *sbp, struct vxfs_inode_info *vip) struct inode *ip = NULL; if ((ip = new_inode(sbp))) { + ip->i_ino = get_next_ino(); vxfs_iinit(ip, vip); ip->i_mapping->a_ops = &vxfs_aops; } diff --git a/fs/fuse/control.c b/fs/fuse/control.c index 7367e177186..4eba07661e5 100644 --- a/fs/fuse/control.c +++ b/fs/fuse/control.c @@ -222,6 +222,7 @@ 
static struct dentry *fuse_ctl_add_dentry(struct dentry *parent, if (!inode) return NULL; + inode->i_ino = get_next_ino(); inode->i_mode = mode; inode->i_uid = fc->user_id; inode->i_gid = fc->group_id; diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 113eba3d3c3..8d0607b3726 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -455,6 +455,7 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, uid_t uid, inode = new_inode(sb); if (inode) { struct hugetlbfs_inode_info *info; + inode->i_ino = get_next_ino(); inode->i_mode = mode; inode->i_uid = uid; inode->i_gid = gid; diff --git a/fs/inode.c b/fs/inode.c index 46a3e120b19..2cd2e48f7a2 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -735,7 +735,7 @@ repeat: #define LAST_INO_BATCH 1024 static DEFINE_PER_CPU(unsigned int, last_ino); -static unsigned int get_next_ino(void) +unsigned int get_next_ino(void) { unsigned int *p = &get_cpu_var(last_ino); unsigned int res = *p; @@ -753,6 +753,7 @@ static unsigned int get_next_ino(void) put_cpu_var(last_ino); return res; } +EXPORT_SYMBOL(get_next_ino); /** * new_inode - obtain an inode @@ -776,7 +777,6 @@ struct inode *new_inode(struct super_block *sb) if (inode) { spin_lock(&inode_lock); __inode_sb_list_add(inode); - inode->i_ino = get_next_ino(); inode->i_state = 0; spin_unlock(&inode_lock); } diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c index a7ebd9d42dc..75e115f1bd7 100644 --- a/fs/ocfs2/dlmfs/dlmfs.c +++ b/fs/ocfs2/dlmfs/dlmfs.c @@ -400,6 +400,7 @@ static struct inode *dlmfs_get_root_inode(struct super_block *sb) if (inode) { ip = DLMFS_I(inode); + inode->i_ino = get_next_ino(); inode->i_mode = mode; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); @@ -425,6 +426,7 @@ static struct inode *dlmfs_get_inode(struct inode *parent, if (!inode) return NULL; + inode->i_ino = get_next_ino(); inode->i_mode = mode; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); diff --git a/fs/pipe.c b/fs/pipe.c index 37eb1ebeaa9..d2d7566ce68 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -954,6 +954,8 @@ static struct inode * get_pipe_inode(void) if (!inode) goto fail_inode; + inode->i_ino = get_next_ino(); + pipe = alloc_pipe_info(inode); if (!pipe) goto fail_iput; diff --git a/fs/proc/base.c b/fs/proc/base.c index fb2a5abd4e4..9883f1e1833 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1603,6 +1603,7 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st /* Common stuff */ ei = PROC_I(inode); + inode->i_ino = get_next_ino(); inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; inode->i_op = &proc_def_inode_operations; @@ -2549,6 +2550,7 @@ static struct dentry *proc_base_instantiate(struct inode *dir, /* Initialize the inode */ ei = PROC_I(inode); + inode->i_ino = get_next_ino(); inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; /* diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 2fc52552271..b652cb00906 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -23,6 +23,8 @@ static struct inode *proc_sys_make_inode(struct super_block *sb, if (!inode) goto out; + inode->i_ino = get_next_ino(); + sysctl_head_get(head); ei = PROC_I(inode); ei->sysctl = head; diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c index a5ebae70dc6..67fadb1ad2c 100644 --- a/fs/ramfs/inode.c +++ b/fs/ramfs/inode.c @@ -58,6 +58,7 @@ struct inode *ramfs_get_inode(struct super_block *sb, struct inode * inode = new_inode(sb); if (inode) { + inode->i_ino = get_next_ino(); 
inode_init_owner(inode, dir, mode); inode->i_mapping->a_ops = &ramfs_aops; inode->i_mapping->backing_dev_info = &ramfs_backing_dev_info; diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index ba5312802aa..63fd2c07cb5 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -1580,6 +1580,7 @@ xfs_mapping_buftarg( XFS_BUFTARG_NAME(btp)); return ENOMEM; } + inode->i_ino = get_next_ino(); inode->i_mode = S_IFBLK; inode->i_bdev = bdev; inode->i_rdev = bdev->bd_dev; diff --git a/include/linux/fs.h b/include/linux/fs.h index bd6ae6c71fc..4a573cf13f5 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2191,6 +2191,7 @@ extern struct inode * iget_locked(struct super_block *, unsigned long); extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *); extern int insert_inode_locked(struct inode *); extern void unlock_new_inode(struct inode *); +extern unsigned int get_next_ino(void); extern void __iget(struct inode * inode); extern void iget_failed(struct inode *); diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 80b35ffca25..3a61ffefe88 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -116,6 +116,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb, inode = new_inode(sb); if (inode) { + inode->i_ino = get_next_ino(); inode->i_mode = mode; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 7b69b8d0313..9270d532ec3 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -777,6 +777,7 @@ static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb) struct inode *inode = new_inode(sb); if (inode) { + inode->i_ino = get_next_ino(); inode->i_mode = mode; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); diff --git a/mm/shmem.c b/mm/shmem.c index d4e2852526e..f6d350e8adc 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1586,6 +1586,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode inode = new_inode(sb); if (inode) { + inode->i_ino = get_next_ino(); inode_init_owner(inode, dir, mode); inode->i_blocks = 0; inode->i_mapping->backing_dev_info = &shmem_backing_dev_info; diff --git a/net/socket.c b/net/socket.c index d223725f99e..5cac1c70775 100644 --- a/net/socket.c +++ b/net/socket.c @@ -480,6 +480,7 @@ static struct socket *sock_alloc(void) sock = SOCKET_I(inode); kmemcheck_annotate_bitfield(sock, type); + inode->i_ino = get_next_ino(); inode->i_mode = S_IFSOCK | S_IRWXUGO; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 52f25243214..7df92d237cb 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -445,6 +445,7 @@ rpc_get_inode(struct super_block *sb, umode_t mode) struct inode *inode = new_inode(sb); if (!inode) return NULL; + inode->i_ino = get_next_ino(); inode->i_mode = mode; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; switch(mode & S_IFMT) { diff --git a/security/inode.c b/security/inode.c index 88839866cbc..cb8f47c66a5 100644 --- a/security/inode.c +++ b/security/inode.c @@ -61,6 +61,7 @@ static struct inode *get_inode(struct super_block *sb, int mode, dev_t dev) struct inode *inode = new_inode(sb); if (inode) { + inode->i_ino = get_next_ino(); inode->i_mode = mode; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; switch (mode & S_IFMT) { diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index 87e0556bae7..55a755c1a1b 100644 
--- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c @@ -978,6 +978,7 @@ static struct inode *sel_make_inode(struct super_block *sb, int mode) struct inode *ret = new_inode(sb); if (ret) { + ret->i_ino = get_next_ino(); ret->i_mode = mode; ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME; } -- cgit v1.2.3-70-g09d2 From 312d3ca856d369bb04d0443846b85b4cdde6fa8a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 10 Oct 2010 05:36:23 -0400 Subject: fs: use percpu counter for nr_dentry and nr_dentry_unused The nr_dentry stat is a globally touched cacheline and an atomic operation twice over the lifetime of a dentry. It is used for the benefit of userspace only. Turn it into a per-cpu counter and always decrement it in d_free instead of doing various batching operations to reduce lock hold times in the callers. Based on an earlier patch from Nick Piggin. Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/dcache.c | 51 ++++++++++++++++++++++++++++++++------------------- include/linux/fs.h | 2 ++ kernel/sysctl.c | 2 +- 3 files changed, 35 insertions(+), 20 deletions(-) (limited to 'kernel') diff --git a/fs/dcache.c b/fs/dcache.c index 028753951e9..c37a656802b 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -67,6 +67,19 @@ struct dentry_stat_t dentry_stat = { .age_limit = 45, }; +static struct percpu_counter nr_dentry __cacheline_aligned_in_smp; +static struct percpu_counter nr_dentry_unused __cacheline_aligned_in_smp; + +#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS) +int proc_nr_dentry(ctl_table *table, int write, void __user *buffer, + size_t *lenp, loff_t *ppos) +{ + dentry_stat.nr_dentry = percpu_counter_sum_positive(&nr_dentry); + dentry_stat.nr_unused = percpu_counter_sum_positive(&nr_dentry_unused); + return proc_dointvec(table, write, buffer, lenp, ppos); +} +#endif + static void __d_free(struct rcu_head *head) { struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu); @@ -78,13 +91,14 @@ static void __d_free(struct rcu_head *head) } /* - * no dcache_lock, please. The caller must decrement dentry_stat.nr_dentry - * inside dcache_lock. + * no dcache_lock, please.
*/ static void d_free(struct dentry *dentry) { + percpu_counter_dec(&nr_dentry); if (dentry->d_op && dentry->d_op->d_release) dentry->d_op->d_release(dentry); + /* if dentry was never inserted into hash, immediate free is OK */ if (hlist_unhashed(&dentry->d_hash)) __d_free(&dentry->d_u.d_rcu); @@ -125,14 +139,14 @@ static void dentry_lru_add(struct dentry *dentry) { list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru); dentry->d_sb->s_nr_dentry_unused++; - dentry_stat.nr_unused++; + percpu_counter_inc(&nr_dentry_unused); } static void dentry_lru_add_tail(struct dentry *dentry) { list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru); dentry->d_sb->s_nr_dentry_unused++; - dentry_stat.nr_unused++; + percpu_counter_inc(&nr_dentry_unused); } static void dentry_lru_del(struct dentry *dentry) @@ -140,7 +154,7 @@ static void dentry_lru_del(struct dentry *dentry) if (!list_empty(&dentry->d_lru)) { list_del(&dentry->d_lru); dentry->d_sb->s_nr_dentry_unused--; - dentry_stat.nr_unused--; + percpu_counter_dec(&nr_dentry_unused); } } @@ -149,7 +163,7 @@ static void dentry_lru_del_init(struct dentry *dentry) if (likely(!list_empty(&dentry->d_lru))) { list_del_init(&dentry->d_lru); dentry->d_sb->s_nr_dentry_unused--; - dentry_stat.nr_unused--; + percpu_counter_dec(&nr_dentry_unused); } } @@ -168,7 +182,6 @@ static struct dentry *d_kill(struct dentry *dentry) struct dentry *parent; list_del(&dentry->d_u.d_child); - dentry_stat.nr_dentry--; /* For d_free, below */ /*drops the locks, at that point nobody can reach this dentry */ dentry_iput(dentry); if (IS_ROOT(dentry)) @@ -314,7 +327,6 @@ int d_invalidate(struct dentry * dentry) EXPORT_SYMBOL(d_invalidate); /* This should be called _only_ with dcache_lock held */ - static inline struct dentry * __dget_locked(struct dentry *dentry) { atomic_inc(&dentry->d_count); @@ -534,7 +546,7 @@ static void prune_dcache(int count) { struct super_block *sb, *p = NULL; int w_count; - int unused = dentry_stat.nr_unused; + int unused = percpu_counter_sum_positive(&nr_dentry_unused); int prune_ratio; int pruned; @@ -699,20 +711,13 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry) * otherwise we ascend to the parent and move to the * next sibling if there is one */ if (!parent) - goto out; - + return; dentry = parent; - } while (list_empty(&dentry->d_subdirs)); dentry = list_entry(dentry->d_subdirs.next, struct dentry, d_u.d_child); } -out: - /* several dentries were freed, need to correct nr_dentry */ - spin_lock(&dcache_lock); - dentry_stat.nr_dentry -= detached; - spin_unlock(&dcache_lock); } /* @@ -896,12 +901,16 @@ EXPORT_SYMBOL(shrink_dcache_parent); */ static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) { + int nr_unused; + if (nr) { if (!(gfp_mask & __GFP_FS)) return -1; prune_dcache(nr); } - return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure; + + nr_unused = percpu_counter_sum_positive(&nr_dentry_unused); + return (nr_unused / 100) * sysctl_vfs_cache_pressure; } static struct shrinker dcache_shrinker = { @@ -968,9 +977,10 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) spin_lock(&dcache_lock); if (parent) list_add(&dentry->d_u.d_child, &parent->d_subdirs); - dentry_stat.nr_dentry++; spin_unlock(&dcache_lock); + percpu_counter_inc(&nr_dentry); + return dentry; } EXPORT_SYMBOL(d_alloc); @@ -2417,6 +2427,9 @@ static void __init dcache_init(void) { int loop; + percpu_counter_init(&nr_dentry, 0); + percpu_counter_init(&nr_dentry_unused, 0); + /* * A constructor could be added 
for stable state like the lists, * but it is probably not worth it because of the cache nature diff --git a/include/linux/fs.h b/include/linux/fs.h index 4a573cf13f5..d5805994480 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2490,6 +2490,8 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf, struct ctl_table; int proc_nr_files(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); +int proc_nr_dentry(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); int proc_nr_inodes(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); int __init get_filesystem_list(char *buf); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 99a510cbfbb..8b77ff5c502 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -1377,7 +1377,7 @@ static struct ctl_table fs_table[] = { .data = &dentry_stat, .maxlen = 6*sizeof(int), .mode = 0444, - .proc_handler = proc_dointvec, + .proc_handler = proc_nr_dentry, }, { .procname = "overflowuid", -- cgit v1.2.3-70-g09d2 From a9cea017411c95ec789092971f9baaef1f826883 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Tue, 26 Oct 2010 15:41:13 -0600 Subject: resources: add a default alignf to simplify find_resource() This removes a test from find_resource(), which is getting cluttered. No functional change. Signed-off-by: Bjorn Helgaas Signed-off-by: Jesse Barnes --- kernel/resource.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/resource.c b/kernel/resource.c index 7b36976e5de..7dc8ad24f91 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -357,6 +357,14 @@ int __weak page_is_ram(unsigned long pfn) return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1; } +static resource_size_t simple_align_resource(void *data, + const struct resource *avail, + resource_size_t size, + resource_size_t align) +{ + return avail->start; +} + /* * Find empty slot in the resource tree given range and alignment. */ @@ -391,8 +399,8 @@ static int find_resource(struct resource *root, struct resource *new, if (tmp.end > max) tmp.end = max; tmp.start = ALIGN(tmp.start, align); - if (alignf) - tmp.start = alignf(alignf_data, &tmp, size, align); + + tmp.start = alignf(alignf_data, &tmp, size, align); if (tmp.start < tmp.end && tmp.end - tmp.start >= size - 1) { new->start = tmp.start; new->end = tmp.start + size - 1; @@ -428,6 +436,9 @@ int allocate_resource(struct resource *root, struct resource *new, { int err; + if (!alignf) + alignf = simple_align_resource; + write_lock(&resource_lock); err = find_resource(root, new, size, min, max, align, alignf, alignf_data); if (err >= 0 && __request_resource(root, new)) -- cgit v1.2.3-70-g09d2 From 5d6b1fa301b13cc651ee717a9b518124dea2f814 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Tue, 26 Oct 2010 15:41:18 -0600 Subject: resources: factor out resource_clip() to simplify find_resource() This factors out the min/max clipping to simplify find_resource(). No functional change. 
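[Ed: the helper factored out here simply clamps a candidate window to the caller's [min, max] range. A standalone sketch with a toy driver; the typedef and main() are only there to make it compile outside the kernel:

    #include <stdio.h>

    typedef unsigned long resource_size_t;
    struct resource { resource_size_t start, end; };

    static void resource_clip(struct resource *res, resource_size_t min,
                              resource_size_t max)
    {
        if (res->start < min)
            res->start = min;
        if (res->end > max)
            res->end = max;
    }

    int main(void)
    {
        struct resource tmp = { 0x1000, 0xffff };

        resource_clip(&tmp, 0x2000, 0x8fff);         /* clamp to the window */
        printf("[%#lx-%#lx]\n", tmp.start, tmp.end); /* [0x2000-0x8fff] */
        return 0;
    }

In find_resource() the clipped window is then aligned and offered to the alignment callback, as the following patches in this series refine.]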
Signed-off-by: Bjorn Helgaas Signed-off-by: Jesse Barnes --- kernel/resource.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/resource.c b/kernel/resource.c index 7dc8ad24f91..26e9f254692 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -365,6 +365,15 @@ static resource_size_t simple_align_resource(void *data, return avail->start; } +static void resource_clip(struct resource *res, resource_size_t min, + resource_size_t max) +{ + if (res->start < min) + res->start = min; + if (res->end > max) + res->end = max; +} + /* * Find empty slot in the resource tree given range and alignment. */ @@ -394,10 +403,8 @@ static int find_resource(struct resource *root, struct resource *new, tmp.end = this->start - 1; else tmp.end = root->end; - if (tmp.start < min) - tmp.start = min; - if (tmp.end > max) - tmp.end = max; + + resource_clip(&tmp, min, max); tmp.start = ALIGN(tmp.start, align); tmp.start = alignf(alignf_data, &tmp, size, align); -- cgit v1.2.3-70-g09d2 From 6909ba14c25b4db6be2ff89f4fa0fac2d70151a0 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Tue, 26 Oct 2010 15:41:23 -0600 Subject: resources: ensure callback doesn't allocate outside available space The alignment callback returns a proposed location, which may have been adjusted to avoid ISA aliases or for other architecture-specific reasons. We already had a check ("tmp.start < tmp.end") to make sure the callback doesn't return an area that extends past the available area. This patch reworks the check to make sure it doesn't return an area that extends either below or above the available area. Signed-off-by: Bjorn Helgaas Signed-off-by: Jesse Barnes --- kernel/resource.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/resource.c b/kernel/resource.c index 26e9f254692..89d50412508 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -374,6 +374,11 @@ static void resource_clip(struct resource *res, resource_size_t min, res->end = max; } +static bool resource_contains(struct resource *res1, struct resource *res2) +{ + return res1->start <= res2->start && res1->end >= res2->end; +} + /* * Find empty slot in the resource tree given range and alignment. */ @@ -387,7 +392,7 @@ static int find_resource(struct resource *root, struct resource *new, void *alignf_data) { struct resource *this = root->child; - struct resource tmp = *new; + struct resource tmp = *new, alloc; tmp.start = root->start; /* @@ -407,10 +412,11 @@ static int find_resource(struct resource *root, struct resource *new, resource_clip(&tmp, min, max); tmp.start = ALIGN(tmp.start, align); - tmp.start = alignf(alignf_data, &tmp, size, align); - if (tmp.start < tmp.end && tmp.end - tmp.start >= size - 1) { - new->start = tmp.start; - new->end = tmp.start + size - 1; + alloc.start = alignf(alignf_data, &tmp, size, align); + alloc.end = alloc.start + size - 1; + if (resource_contains(&tmp, &alloc)) { + new->start = alloc.start; + new->end = alloc.end; return 0; } if (!this) -- cgit v1.2.3-70-g09d2 From a1862e31079149a52b6223776228c3aee493d4a7 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Tue, 26 Oct 2010 15:41:28 -0600 Subject: resources: handle overflow when aligning start of available area If tmp.start is near ~0, ALIGN(tmp.start) may overflow, which would make us think there's more available space than there really is. 
We would likely return something that conflicts with a previous resource, which would cause a failure when allocate_resource() requests the newly-allocated region. Reference: https://bugzilla.redhat.com/show_bug.cgi?id=646027 Reported-by: Fabrice Bellet Signed-off-by: Bjorn Helgaas Signed-off-by: Jesse Barnes --- kernel/resource.c | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/resource.c b/kernel/resource.c index 89d50412508..e15b922d4ba 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -392,7 +392,7 @@ static int find_resource(struct resource *root, struct resource *new, void *alignf_data) { struct resource *this = root->child; - struct resource tmp = *new, alloc; + struct resource tmp = *new, avail, alloc; tmp.start = root->start; /* @@ -410,14 +410,19 @@ static int find_resource(struct resource *root, struct resource *new, tmp.end = root->end; resource_clip(&tmp, min, max); - tmp.start = ALIGN(tmp.start, align); - alloc.start = alignf(alignf_data, &tmp, size, align); - alloc.end = alloc.start + size - 1; - if (resource_contains(&tmp, &alloc)) { - new->start = alloc.start; - new->end = alloc.end; - return 0; + /* Check for overflow after ALIGN() */ + avail = *new; + avail.start = ALIGN(tmp.start, align); + avail.end = tmp.end; + if (avail.start >= tmp.start) { + alloc.start = alignf(alignf_data, &avail, size, align); + alloc.end = alloc.start + size - 1; + if (resource_contains(&avail, &alloc)) { + new->start = alloc.start; + new->end = alloc.end; + return 0; + } } if (!this) break; -- cgit v1.2.3-70-g09d2 From e7f8567db9a7f6b3151b0b275e245c1cef0d9c70 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Tue, 26 Oct 2010 15:41:33 -0600 Subject: resources: support allocating space within a region from the top down Allocate space from the top of a region first, then work downward, if an architecture desires this. When we allocate space from a resource, we look for gaps between children of the resource. Previously, we always looked at gaps from the bottom up. For example, given this: [mem 0xbff00000-0xf7ffffff] PCI Bus 0000:00 [mem 0xbff00000-0xbfffffff] gap -- available [mem 0xc0000000-0xdfffffff] PCI Bus 0000:02 [mem 0xe0000000-0xf7ffffff] gap -- available we attempted to allocate from the [mem 0xbff00000-0xbfffffff] gap first, then the [mem 0xe0000000-0xf7ffffff] gap. With this patch an architecture can choose to allocate from the top gap [mem 0xe0000000-0xf7ffffff] first. We can't do this across the board because iomem_resource.end is initialized to 0xffffffff_ffffffff on 64-bit architectures, and most machines can't address the entire 64-bit physical address space. Therefore, we only allocate top-down if the arch requests it by clearing "resource_alloc_from_bottom". Signed-off-by: Bjorn Helgaas Signed-off-by: Jesse Barnes --- Documentation/kernel-parameters.txt | 5 ++ include/linux/ioport.h | 1 + kernel/resource.c | 98 +++++++++++++++++++++++++++++++++++-- 3 files changed, 100 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 8dd7248508a..fe50cbd315b 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -2156,6 +2156,11 @@ and is between 256 and 4096 characters. It is defined in the file reset_devices [KNL] Force drivers to reset the underlying device during initialization.
+ resource_alloc_from_bottom + Allocate new resources from the beginning of available + space, not the end. If you need to use this, please + report a bug. + resume= [SWSUSP] Specify the partition device for software suspend diff --git a/include/linux/ioport.h b/include/linux/ioport.h index b22790268b6..d377ea815d4 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -112,6 +112,7 @@ struct resource_list { /* PC/ISA/whatever - the normal PC address spaces: IO and memory */ extern struct resource ioport_resource; extern struct resource iomem_resource; +extern int resource_alloc_from_bottom; extern struct resource *request_resource_conflict(struct resource *root, struct resource *new); extern int request_resource(struct resource *root, struct resource *new); diff --git a/kernel/resource.c b/kernel/resource.c index e15b922d4ba..716b6804077 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -40,6 +40,23 @@ EXPORT_SYMBOL(iomem_resource); static DEFINE_RWLOCK(resource_lock); +/* + * By default, we allocate free space bottom-up. The architecture can request + * top-down by clearing this flag. The user can override the architecture's + * choice with the "resource_alloc_from_bottom" kernel boot option, but that + * should only be a debugging tool. + */ +int resource_alloc_from_bottom = 1; + +static __init int setup_alloc_from_bottom(char *s) +{ + printk(KERN_INFO + "resource: allocating from bottom-up; please report a bug\n"); + resource_alloc_from_bottom = 1; + return 0; +} +early_param("resource_alloc_from_bottom", setup_alloc_from_bottom); + static void *r_next(struct seq_file *m, void *v, loff_t *pos) { struct resource *p = v; @@ -379,8 +396,75 @@ static bool resource_contains(struct resource *res1, struct resource *res2) return res1->start <= res2->start && res1->end >= res2->end; } +/* + * Find the resource before "child" in the sibling list of "root" children. + */ +static struct resource *find_sibling_prev(struct resource *root, struct resource *child) +{ + struct resource *this; + + for (this = root->child; this; this = this->sibling) + if (this->sibling == child) + return this; + + return NULL; +} + +/* + * Find empty slot in the resource tree given range and alignment. + * This version allocates from the end of the root resource first. + */ +static int find_resource_from_top(struct resource *root, struct resource *new, + resource_size_t size, resource_size_t min, + resource_size_t max, resource_size_t align, + resource_size_t (*alignf)(void *, + const struct resource *, + resource_size_t, + resource_size_t), + void *alignf_data) +{ + struct resource *this; + struct resource tmp, avail, alloc; + + tmp.start = root->end; + tmp.end = root->end; + + this = find_sibling_prev(root, NULL); + for (;;) { + if (this) { + if (this->end < root->end) + tmp.start = this->end + 1; + } else + tmp.start = root->start; + + resource_clip(&tmp, min, max); + + /* Check for overflow after ALIGN() */ + avail = *new; + avail.start = ALIGN(tmp.start, align); + avail.end = tmp.end; + if (avail.start >= tmp.start) { + alloc.start = alignf(alignf_data, &avail, size, align); + alloc.end = alloc.start + size - 1; + if (resource_contains(&avail, &alloc)) { + new->start = alloc.start; + new->end = alloc.end; + return 0; + } + } + + if (!this || this->start == root->start) + break; + + tmp.end = this->start - 1; + this = find_sibling_prev(root, this); + } + return -EBUSY; +} + /* * Find empty slot in the resource tree given range and alignment. 
+ * This version allocates from the beginning of the root resource first. */ static int find_resource(struct resource *root, struct resource *new, resource_size_t size, resource_size_t min, @@ -396,14 +480,15 @@ static int find_resource(struct resource *root, struct resource *new, tmp.start = root->start; /* - * Skip past an allocated resource that starts at 0, since the assignment - * of this->start - 1 to tmp->end below would cause an underflow. + * Skip past an allocated resource that starts at 0, since the + * assignment of this->start - 1 to tmp->end below would cause an + * underflow. */ if (this && this->start == 0) { tmp.start = this->end + 1; this = this->sibling; } - for(;;) { + for (;;) { if (this) tmp.end = this->start - 1; else @@ -424,8 +509,10 @@ static int find_resource(struct resource *root, struct resource *new, return 0; } } + if (!this) break; + tmp.start = this->end + 1; this = this->sibling; } @@ -458,7 +545,10 @@ int allocate_resource(struct resource *root, struct resource *new, alignf = simple_align_resource; write_lock(&resource_lock); - err = find_resource(root, new, size, min, max, align, alignf, alignf_data); + if (resource_alloc_from_bottom) + err = find_resource(root, new, size, min, max, align, alignf, alignf_data); + else + err = find_resource_from_top(root, new, size, min, max, align, alignf, alignf_data); if (err >= 0 && __request_resource(root, new)) err = -EBUSY; write_unlock(&resource_lock); -- cgit v1.2.3-70-g09d2 From 3d5992d2ac7dc09aed8ab537cba074589f0f0a52 Mon Sep 17 00:00:00 2001 From: Ying Han Date: Tue, 26 Oct 2010 14:21:23 -0700 Subject: oom: add per-mm oom disable count It's pointless to kill a task if another thread sharing its mm cannot be killed to allow future memory freeing. A subsequent patch will prevent kills in such cases, but first it's necessary to have a way to flag a task that shares memory with an OOM_DISABLE task that doesn't incur an additional tasklist scan, which would make select_bad_process() an O(n^2) function. This patch adds an atomic counter to struct mm_struct that follows how many threads attached to it have an oom_score_adj of OOM_SCORE_ADJ_MIN. They cannot be killed by the kernel, so their memory cannot be freed in oom conditions. This only requires task_lock() on the task that we're operating on, it does not require mm->mmap_sem since task_lock() pins the mm and the operation is atomic. 
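The bookkeeping the patch adds at every point where a task starts or stops using an mm follows one pattern; condensed into a sketch (logic only, not a verbatim hunk):

	/* tsk begins using mm (fork, exec, unshare, oom_adj write): */
	task_lock(tsk);
	if (tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
		atomic_inc(&mm->oom_disable_count);
	task_unlock(tsk);

	/* ...and the mirror image when tsk stops using mm (exit, exec): */
	task_lock(tsk);
	if (tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
		atomic_dec(&mm->oom_disable_count);
	task_unlock(tsk);

A later patch can then test atomic_read(&mm->oom_disable_count) in the OOM killer without walking the tasklist.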
[rientjes@google.com: changelog and sys_unshare() code] [rientjes@google.com: protect oom_disable_count with task_lock in fork] [rientjes@google.com: use old_mm for oom_disable_count in exec] Signed-off-by: Ying Han Signed-off-by: David Rientjes Cc: KAMEZAWA Hiroyuki Cc: KOSAKI Motohiro Cc: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/exec.c | 5 +++++ fs/proc/base.c | 30 ++++++++++++++++++++++++++++++ include/linux/mm_types.h | 2 ++ kernel/exit.c | 3 +++ kernel/fork.c | 15 ++++++++++++++- 5 files changed, 54 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/fs/exec.c b/fs/exec.c index 6d2b6f93685..3aa75b8888a 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -54,6 +54,7 @@ #include #include #include +#include #include #include @@ -759,6 +760,10 @@ static int exec_mmap(struct mm_struct *mm) tsk->mm = mm; tsk->active_mm = mm; activate_mm(active_mm, mm); + if (old_mm && tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) { + atomic_dec(&old_mm->oom_disable_count); + atomic_inc(&tsk->mm->oom_disable_count); + } task_unlock(tsk); arch_pick_mmap_layout(mm); if (old_mm) { diff --git a/fs/proc/base.c b/fs/proc/base.c index dc5d5f51f3f..6e50c8e6551 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1047,6 +1047,21 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf, return -EACCES; } + task_lock(task); + if (!task->mm) { + task_unlock(task); + unlock_task_sighand(task, &flags); + put_task_struct(task); + return -EINVAL; + } + + if (oom_adjust != task->signal->oom_adj) { + if (oom_adjust == OOM_DISABLE) + atomic_inc(&task->mm->oom_disable_count); + if (task->signal->oom_adj == OOM_DISABLE) + atomic_dec(&task->mm->oom_disable_count); + } + /* * Warn that /proc/pid/oom_adj is deprecated, see * Documentation/feature-removal-schedule.txt. @@ -1065,6 +1080,7 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf, else task->signal->oom_score_adj = (oom_adjust * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE; + task_unlock(task); unlock_task_sighand(task, &flags); put_task_struct(task); @@ -1133,6 +1149,19 @@ static ssize_t oom_score_adj_write(struct file *file, const char __user *buf, return -EACCES; } + task_lock(task); + if (!task->mm) { + task_unlock(task); + unlock_task_sighand(task, &flags); + put_task_struct(task); + return -EINVAL; + } + if (oom_score_adj != task->signal->oom_score_adj) { + if (oom_score_adj == OOM_SCORE_ADJ_MIN) + atomic_inc(&task->mm->oom_disable_count); + if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) + atomic_dec(&task->mm->oom_disable_count); + } task->signal->oom_score_adj = oom_score_adj; /* * Scale /proc/pid/oom_adj appropriately ensuring that OOM_DISABLE is @@ -1143,6 +1172,7 @@ static ssize_t oom_score_adj_write(struct file *file, const char __user *buf, else task->signal->oom_adj = (oom_score_adj * OOM_ADJUST_MAX) / OOM_SCORE_ADJ_MAX; + task_unlock(task); unlock_task_sighand(task, &flags); put_task_struct(task); return count; diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index cb57d657ce4..bb7288a782f 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -310,6 +310,8 @@ struct mm_struct { #ifdef CONFIG_MMU_NOTIFIER struct mmu_notifier_mm *mmu_notifier_mm; #endif + /* How many tasks sharing this mm are OOM_DISABLE */ + atomic_t oom_disable_count; }; /* Future-safe accessor for struct mm_struct's cpu_vm_mask. 
*/ diff --git a/kernel/exit.c b/kernel/exit.c index e2bdf37f9fd..894179a32ec 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -50,6 +50,7 @@ #include #include #include +#include #include #include @@ -687,6 +688,8 @@ static void exit_mm(struct task_struct * tsk) enter_lazy_tlb(mm, current); /* We don't want this task to be frozen prematurely */ clear_freeze_flag(tsk); + if (tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) + atomic_dec(&mm->oom_disable_count); task_unlock(tsk); mm_update_next_owner(mm); mmput(mm); diff --git a/kernel/fork.c b/kernel/fork.c index c445f8cc408..e87aaaaf513 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -65,6 +65,7 @@ #include #include #include +#include #include #include @@ -488,6 +489,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p) mm->cached_hole_size = ~0UL; mm_init_aio(mm); mm_init_owner(mm, p); + atomic_set(&mm->oom_disable_count, 0); if (likely(!mm_alloc_pgd(mm))) { mm->def_flags = 0; @@ -741,6 +743,8 @@ good_mm: /* Initializing for Swap token stuff */ mm->token_priority = 0; mm->last_interval = 0; + if (tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) + atomic_inc(&mm->oom_disable_count); tsk->mm = mm; tsk->active_mm = mm; @@ -1299,8 +1303,13 @@ bad_fork_cleanup_io: bad_fork_cleanup_namespaces: exit_task_namespaces(p); bad_fork_cleanup_mm: - if (p->mm) + if (p->mm) { + task_lock(p); + if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) + atomic_dec(&p->mm->oom_disable_count); + task_unlock(p); mmput(p->mm); + } bad_fork_cleanup_signal: if (!(clone_flags & CLONE_THREAD)) free_signal_struct(p->signal); @@ -1693,6 +1702,10 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) active_mm = current->active_mm; current->mm = new_mm; current->active_mm = new_mm; + if (current->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) { + atomic_dec(&mm->oom_disable_count); + atomic_inc(&new_mm->oom_disable_count); + } activate_mm(active_mm, new_mm); new_mm = mm; } -- cgit v1.2.3-70-g09d2 From 61ecdb801ef2cd28e32442383106d7837d76deac Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 26 Oct 2010 14:21:47 -0700 Subject: mm: strictly nested kmap_atomic() Ensure kmap_atomic() usage is strictly nested Signed-off-by: Peter Zijlstra Reviewed-by: Rik van Riel Acked-by: Chris Metcalf Cc: David Howells Cc: Hugh Dickins Cc: Ingo Molnar Cc: Thomas Gleixner Cc: "H. 
Peter Anvin" Cc: Steven Rostedt Cc: Russell King Cc: Ralf Baechle Cc: David Miller Cc: Paul Mackerras Cc: Benjamin Herrenschmidt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- crypto/async_tx/async_memcpy.c | 2 +- crypto/blkcipher.c | 2 +- drivers/block/loop.c | 4 ++-- include/linux/highmem.h | 4 ++-- kernel/power/snapshot.c | 4 ++-- 5 files changed, 8 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c index 0ec1fb69d4e..518c22bd956 100644 --- a/crypto/async_tx/async_memcpy.c +++ b/crypto/async_tx/async_memcpy.c @@ -83,8 +83,8 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, memcpy(dest_buf, src_buf, len); - kunmap_atomic(dest_buf, KM_USER0); kunmap_atomic(src_buf, KM_USER1); + kunmap_atomic(dest_buf, KM_USER0); async_tx_sync_epilog(submit); } diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index 90d26c91f4e..7a7219266e3 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c @@ -89,9 +89,9 @@ static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk, memcpy(walk->dst.virt.addr, walk->page, n); blkcipher_unmap_dst(walk); } else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) { - blkcipher_unmap_src(walk); if (walk->flags & BLKCIPHER_WALK_DIFF) blkcipher_unmap_dst(walk); + blkcipher_unmap_src(walk); } scatterwalk_advance(&walk->in, n); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 6c48b3545f8..450c958b514 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -101,8 +101,8 @@ static int transfer_none(struct loop_device *lo, int cmd, else memcpy(raw_buf, loop_buf, size); - kunmap_atomic(raw_buf, KM_USER0); kunmap_atomic(loop_buf, KM_USER1); + kunmap_atomic(raw_buf, KM_USER0); cond_resched(); return 0; } @@ -130,8 +130,8 @@ static int transfer_xor(struct loop_device *lo, int cmd, for (i = 0; i < size; i++) *out++ = *in++ ^ key[(i & 511) % keysize]; - kunmap_atomic(raw_buf, KM_USER0); kunmap_atomic(loop_buf, KM_USER1); + kunmap_atomic(raw_buf, KM_USER0); cond_resched(); return 0; } diff --git a/include/linux/highmem.h b/include/linux/highmem.h index e3060ef85b6..283cd47bb34 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -201,8 +201,8 @@ static inline void copy_user_highpage(struct page *to, struct page *from, vfrom = kmap_atomic(from, KM_USER0); vto = kmap_atomic(to, KM_USER1); copy_user_page(vto, vfrom, vaddr, to); - kunmap_atomic(vfrom, KM_USER0); kunmap_atomic(vto, KM_USER1); + kunmap_atomic(vfrom, KM_USER0); } #endif @@ -214,8 +214,8 @@ static inline void copy_highpage(struct page *to, struct page *from) vfrom = kmap_atomic(from, KM_USER0); vto = kmap_atomic(to, KM_USER1); copy_page(vto, vfrom); - kunmap_atomic(vfrom, KM_USER0); kunmap_atomic(vto, KM_USER1); + kunmap_atomic(vfrom, KM_USER0); } #endif /* _LINUX_HIGHMEM_H */ diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index ac7eb109f19..9e3581f4619 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -984,8 +984,8 @@ static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn) src = kmap_atomic(s_page, KM_USER0); dst = kmap_atomic(d_page, KM_USER1); do_copy_page(dst, src); - kunmap_atomic(src, KM_USER0); kunmap_atomic(dst, KM_USER1); + kunmap_atomic(src, KM_USER0); } else { if (PageHighMem(d_page)) { /* Page pointed to by src may contain some kernel @@ -2273,8 +2273,8 @@ swap_two_pages_data(struct page *p1, struct page *p2, void *buf) memcpy(buf, kaddr1, PAGE_SIZE); memcpy(kaddr1, kaddr2, PAGE_SIZE); 
memcpy(kaddr2, buf, PAGE_SIZE); - kunmap_atomic(kaddr1, KM_USER0); kunmap_atomic(kaddr2, KM_USER1); + kunmap_atomic(kaddr1, KM_USER0); } /** -- cgit v1.2.3-70-g09d2 From 3ecb01df3261d3b1f02ccfcf8384e2a255d2a1d0 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Tue, 26 Oct 2010 14:22:27 -0700 Subject: use clear_page()/copy_page() in favor of memset()/memcpy() on whole pages After all that's what they are intended for. Signed-off-by: Jan Beulich Cc: Miklos Szeredi Cc: "Eric W. Biederman" Cc: "Rafael J. Wysocki" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/fuse/dev.c | 2 +- kernel/kexec.c | 2 +- kernel/power/snapshot.c | 14 +++++++------- kernel/power/swap.c | 6 +++--- mm/memory.c | 2 +- 5 files changed, 13 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index cde755cca56..69037118b2c 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -811,7 +811,7 @@ static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep, if (page && zeroing && count < PAGE_SIZE) { void *mapaddr = kmap_atomic(page, KM_USER1); - memset(mapaddr, 0, PAGE_SIZE); + clear_page(mapaddr); kunmap_atomic(mapaddr, KM_USER1); } while (count) { diff --git a/kernel/kexec.c b/kernel/kexec.c index c0613f7d673..b55045bc756 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -816,7 +816,7 @@ static int kimage_load_normal_segment(struct kimage *image, ptr = kmap(page); /* Start with a clear page */ - memset(ptr, 0, PAGE_SIZE); + clear_page(ptr); ptr += maddr & ~PAGE_MASK; mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK); if (mchunk > mbytes) diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 9e3581f4619..0dac75ea445 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -993,7 +993,7 @@ static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn) */ safe_copy_page(buffer, s_page); dst = kmap_atomic(d_page, KM_USER0); - memcpy(dst, buffer, PAGE_SIZE); + copy_page(dst, buffer); kunmap_atomic(dst, KM_USER0); } else { safe_copy_page(page_address(d_page), s_page); @@ -1687,7 +1687,7 @@ int snapshot_read_next(struct snapshot_handle *handle) memory_bm_position_reset(&orig_bm); memory_bm_position_reset(©_bm); } else if (handle->cur <= nr_meta_pages) { - memset(buffer, 0, PAGE_SIZE); + clear_page(buffer); pack_pfns(buffer, &orig_bm); } else { struct page *page; @@ -1701,7 +1701,7 @@ int snapshot_read_next(struct snapshot_handle *handle) void *kaddr; kaddr = kmap_atomic(page, KM_USER0); - memcpy(buffer, kaddr, PAGE_SIZE); + copy_page(buffer, kaddr); kunmap_atomic(kaddr, KM_USER0); handle->buffer = buffer; } else { @@ -1984,7 +1984,7 @@ static void copy_last_highmem_page(void) void *dst; dst = kmap_atomic(last_highmem_page, KM_USER0); - memcpy(dst, buffer, PAGE_SIZE); + copy_page(dst, buffer); kunmap_atomic(dst, KM_USER0); last_highmem_page = NULL; } @@ -2270,9 +2270,9 @@ swap_two_pages_data(struct page *p1, struct page *p2, void *buf) kaddr1 = kmap_atomic(p1, KM_USER0); kaddr2 = kmap_atomic(p2, KM_USER1); - memcpy(buf, kaddr1, PAGE_SIZE); - memcpy(kaddr1, kaddr2, PAGE_SIZE); - memcpy(kaddr2, buf, PAGE_SIZE); + copy_page(buf, kaddr1); + copy_page(kaddr1, kaddr2); + copy_page(kaddr2, buf); kunmap_atomic(kaddr2, KM_USER1); kunmap_atomic(kaddr1, KM_USER0); } diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 916eaa79039..a0e4a86ccf9 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -251,7 +251,7 @@ static int write_page(void *buf, sector_t offset, struct bio **bio_chain) if (bio_chain) { src = (void 
*)__get_free_page(__GFP_WAIT | __GFP_HIGH); if (src) { - memcpy(src, buf, PAGE_SIZE); + copy_page(src, buf); } else { WARN_ON_ONCE(1); bio_chain = NULL; /* Go synchronous */ @@ -325,7 +325,7 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf, error = write_page(handle->cur, handle->cur_swap, NULL); if (error) goto out; - memset(handle->cur, 0, PAGE_SIZE); + clear_page(handle->cur); handle->cur_swap = offset; handle->k = 0; } @@ -910,7 +910,7 @@ int swsusp_check(void) hib_resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_READ); if (!IS_ERR(hib_resume_bdev)) { set_blocksize(hib_resume_bdev, PAGE_SIZE); - memset(swsusp_header, 0, PAGE_SIZE); + clear_page(swsusp_header); error = hib_bio_read_page(swsusp_resume_block, swsusp_header, NULL); if (error) diff --git a/mm/memory.c b/mm/memory.c index 861f7982dd5..02e48aa0ed1 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2080,7 +2080,7 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo * zeroes. */ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) - memset(kaddr, 0, PAGE_SIZE); + clear_page(kaddr); kunmap_atomic(kaddr, KM_USER0); flush_dcache_page(dst); } else -- cgit v1.2.3-70-g09d2 From ca1cab37d91cbe8a8333732540d43cabb54cfa85 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Tue, 26 Oct 2010 14:22:34 -0700 Subject: workqueues: s/ON_STACK/ONSTACK/ Silly though it is, completions and wait_queue_heads use foo_ONSTACK (COMPLETION_INITIALIZER_ONSTACK, DECLARE_COMPLETION_ONSTACK, __WAIT_QUEUE_HEAD_INIT_ONSTACK and DECLARE_WAIT_QUEUE_HEAD_ONSTACK) so I guess workqueues should do the same thing. s/INIT_WORK_ON_STACK/INIT_WORK_ONSTACK/ s/INIT_DELAYED_WORK_ON_STACK/INIT_DELAYED_WORK_ONSTACK/ Cc: Peter Zijlstra Acked-by: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/kernel/hpet.c | 2 +- arch/x86/kernel/smpboot.c | 2 +- drivers/md/dm-snap-persistent.c | 2 +- include/linux/workqueue.h | 6 +++--- kernel/workqueue.c | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index aff0b3c2750..ae03cab4352 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -713,7 +713,7 @@ static int hpet_cpuhp_notify(struct notifier_block *n, switch (action & 0xf) { case CPU_ONLINE: - INIT_DELAYED_WORK_ON_STACK(&work.work, hpet_work); + INIT_DELAYED_WORK_ONSTACK(&work.work, hpet_work); init_completion(&work.complete); /* FIXME: add schedule_work_on() */ schedule_delayed_work_on(cpu, &work.work, 0); diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 6af118511b4..6c7faecd9e4 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -747,7 +747,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), }; - INIT_WORK_ON_STACK(&c_idle.work, do_fork_idle); + INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle); alternatives_smp_switch(1); diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 0b61792a278..2129cdb115d 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -254,7 +254,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw, * Issue the synchronous I/O from a different thread * to avoid generic_make_request recursion. 
*/ - INIT_WORK_ON_STACK(&req.work, do_metadata); + INIT_WORK_ONSTACK(&req.work, do_metadata); queue_work(ps->metadata_wq, &req.work); flush_workqueue(ps->metadata_wq); diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 070bb7a8893..0c0771f06bf 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -190,7 +190,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } __INIT_WORK((_work), (_func), 0); \ } while (0) -#define INIT_WORK_ON_STACK(_work, _func) \ +#define INIT_WORK_ONSTACK(_work, _func) \ do { \ __INIT_WORK((_work), (_func), 1); \ } while (0) @@ -201,9 +201,9 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } init_timer(&(_work)->timer); \ } while (0) -#define INIT_DELAYED_WORK_ON_STACK(_work, _func) \ +#define INIT_DELAYED_WORK_ONSTACK(_work, _func) \ do { \ - INIT_WORK_ON_STACK(&(_work)->work, (_func)); \ + INIT_WORK_ONSTACK(&(_work)->work, (_func)); \ init_timer_on_stack(&(_work)->timer); \ } while (0) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index e5ff2cbaadc..90db1bd1a97 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2064,7 +2064,7 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, * checks and call back into the fixup functions where we * might deadlock. */ - INIT_WORK_ON_STACK(&barr->work, wq_barrier_func); + INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); init_completion(&barr->done); -- cgit v1.2.3-70-g09d2 From 571428be550fbe37160596995e96ad398873fcbd Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 26 Oct 2010 14:22:43 -0700 Subject: kernel/user.c: add lock release annotation on free_user() free_user() releases uidhash_lock but was missing annotation. Add it. This removes following sparse warnings: include/linux/spinlock.h:339:9: warning: context imbalance in 'free_user' - unexpected unlock kernel/user.c:120:6: warning: context imbalance in 'free_uid' - wrong count at exit Signed-off-by: Namhyung Kim Cc: Ingo Molnar Cc: Dhaval Giani Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/user.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/user.c b/kernel/user.c index 7e72614b736..2c7d8d5914b 100644 --- a/kernel/user.c +++ b/kernel/user.c @@ -91,6 +91,7 @@ static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent) * upon function exit. */ static void free_user(struct user_struct *up, unsigned long flags) + __releases(&uidhash_lock) { uid_hash_remove(up); spin_unlock_irqrestore(&uidhash_lock, flags); -- cgit v1.2.3-70-g09d2 From 518de9b39e854542de59bfb8b9f61c8f7ecf808b Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 26 Oct 2010 14:22:44 -0700 Subject: fs: allow for more than 2^31 files Robin Holt tried to boot a 16TB system and found af_unix was overflowing a 32bit value : We were seeing a failure which prevented boot. The kernel was incapable of creating either a named pipe or unix domain socket. This comes down to a common kernel function called unix_create1() which does: atomic_inc(&unix_nr_socks); if (atomic_read(&unix_nr_socks) > 2 * get_max_files()) goto out; The function get_max_files() is a simple return of files_stat.max_files. files_stat.max_files is a signed integer and is computed in fs/file_table.c's files_init(). n = (mempages * (PAGE_SIZE / 1024)) / 10; files_stat.max_files = n; In our case, mempages (total_ram_pages) is approx 3,758,096,384 (0xe0000000). 
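Spelling the arithmetic out (this assumes 4 KiB pages, so PAGE_SIZE / 1024 = 4):

	n = (mempages * (PAGE_SIZE / 1024)) / 10
	  = (3,758,096,384 * 4) / 10
	  = 1,503,238,553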
That leaves max_files at approximately 1,503,238,553. This causes 2 * get_max_files() to overflow as a signed 32-bit integer. The fix is to let /proc/sys/fs/file-nr & /proc/sys/fs/file-max use long integers, and change af_unix to use an atomic_long_t instead of atomic_t. get_max_files() is changed to return an unsigned long. get_nr_files() is changed to return a long. unix_nr_socks is changed from atomic_t to atomic_long_t, while not strictly needed to address Robin's problem. Before patch (on a 64bit kernel): # echo 2147483648 >/proc/sys/fs/file-max # cat /proc/sys/fs/file-max -18446744071562067968 After patch: # echo 2147483648 >/proc/sys/fs/file-max # cat /proc/sys/fs/file-max 2147483648 # cat /proc/sys/fs/file-nr 704 0 2147483648 Reported-by: Robin Holt Signed-off-by: Eric Dumazet Acked-by: David Miller Reviewed-by: Robin Holt Tested-by: Robin Holt Cc: Al Viro Cc: Christoph Hellwig Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/file_table.c | 17 +++++++---------- include/linux/fs.h | 8 ++++---- kernel/sysctl.c | 6 +++--- net/unix/af_unix.c | 14 +++++++------- 4 files changed, 21 insertions(+), 24 deletions(-) (limited to 'kernel') diff --git a/fs/file_table.c b/fs/file_table.c index a04bdd81c11..c3dee381f1b 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -60,7 +60,7 @@ static inline void file_free(struct file *f) /* * Return the total number of open files in the system */ -static int get_nr_files(void) +static long get_nr_files(void) { return percpu_counter_read_positive(&nr_files); } @@ -68,7 +68,7 @@ static int get_nr_files(void) /* * Return the maximum number of open files in the system */ -int get_max_files(void) +unsigned long get_max_files(void) { return files_stat.max_files; } @@ -82,7 +82,7 @@ int proc_nr_files(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { files_stat.nr_files = get_nr_files(); - return proc_dointvec(table, write, buffer, lenp, ppos); + return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); } #else int proc_nr_files(ctl_table *table, int write, @@ -105,7 +105,7 @@ int proc_nr_files(ctl_table *table, int write, struct file *get_empty_filp(void) { const struct cred *cred = current_cred(); - static int old_max; + static long old_max; struct file * f; /* @@ -140,8 +140,7 @@ struct file *get_empty_filp(void) over: /* Ran out of filps - report that */ if (get_nr_files() > old_max) { - printk(KERN_INFO "VFS: file-max limit %d reached\n", - get_max_files()); + pr_info("VFS: file-max limit %lu reached\n", get_max_files()); old_max = get_nr_files(); } goto fail; @@ -487,7 +486,7 @@ retry: void __init files_init(unsigned long mempages) { - int n; + unsigned long n; filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); @@ -498,9 +497,7 @@ void __init files_init(unsigned long mempages) */ n = (mempages * (PAGE_SIZE / 1024)) / 10; - files_stat.max_files = n; - if (files_stat.max_files < NR_FILE) - files_stat.max_files = NR_FILE; + files_stat.max_files = max_t(unsigned long, n, NR_FILE); files_defer_init(); lg_lock_init(files_lglock); percpu_counter_init(&nr_files, 0); diff --git a/include/linux/fs.h b/include/linux/fs.h index 4f34ff6e555..b2cdb6bc828 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -34,9 +34,9 @@ /* And dynamically-tunable limits and defaults: */ struct files_stat_struct { - int nr_files; /* read only */ - int nr_free_files; /* read only */ - int max_files; /* tunable */ + unsigned long nr_files; /* read only */ + unsigned long nr_free_files; /* read only
*/ + unsigned long max_files; /* tunable */ }; struct inodes_stat_t { @@ -400,7 +400,7 @@ extern void __init inode_init_early(void); extern void __init files_init(unsigned long); extern struct files_stat_struct files_stat; -extern int get_max_files(void); +extern unsigned long get_max_files(void); extern int sysctl_nr_open; extern struct inodes_stat_t inodes_stat; extern int leases_enable, lease_break_time; diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 3a45c224770..694b140852c 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -1352,16 +1352,16 @@ static struct ctl_table fs_table[] = { { .procname = "file-nr", .data = &files_stat, - .maxlen = 3*sizeof(int), + .maxlen = sizeof(files_stat), .mode = 0444, .proc_handler = proc_nr_files, }, { .procname = "file-max", .data = &files_stat.max_files, - .maxlen = sizeof(int), + .maxlen = sizeof(files_stat.max_files), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_doulongvec_minmax, }, { .procname = "nr_open", diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 0ebc777a666..3c95304a081 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -117,7 +117,7 @@ static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1]; static DEFINE_SPINLOCK(unix_table_lock); -static atomic_t unix_nr_socks = ATOMIC_INIT(0); +static atomic_long_t unix_nr_socks; #define unix_sockets_unbound (&unix_socket_table[UNIX_HASH_SIZE]) @@ -360,13 +360,13 @@ static void unix_sock_destructor(struct sock *sk) if (u->addr) unix_release_addr(u->addr); - atomic_dec(&unix_nr_socks); + atomic_long_dec(&unix_nr_socks); local_bh_disable(); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); local_bh_enable(); #ifdef UNIX_REFCNT_DEBUG - printk(KERN_DEBUG "UNIX %p is destroyed, %d are still alive.\n", sk, - atomic_read(&unix_nr_socks)); + printk(KERN_DEBUG "UNIX %p is destroyed, %ld are still alive.\n", sk, + atomic_long_read(&unix_nr_socks)); #endif } @@ -606,8 +606,8 @@ static struct sock *unix_create1(struct net *net, struct socket *sock) struct sock *sk = NULL; struct unix_sock *u; - atomic_inc(&unix_nr_socks); - if (atomic_read(&unix_nr_socks) > 2 * get_max_files()) + atomic_long_inc(&unix_nr_socks); + if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) goto out; sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto); @@ -632,7 +632,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock) unix_insert_socket(unix_sockets_unbound, sk); out: if (sk == NULL) - atomic_dec(&unix_nr_socks); + atomic_long_dec(&unix_nr_socks); else { local_bh_disable(); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); -- cgit v1.2.3-70-g09d2 From ca51c5a76345b28c6f1b742f9f5f0a6fc9afd9ca Mon Sep 17 00:00:00 2001 From: Rakib Mullick Date: Tue, 26 Oct 2010 14:22:44 -0700 Subject: kernel/stop_machine.c: fix unused variable warning kernel/stop_machine.c: In function `cpu_stopper_thread': kernel/stop_machine.c:265: warning: unused variable `ksym_buf' ksym_buf[] is unused if WARN_ON() is a no-op. 
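__maybe_unused is the stock annotation for a variable whose only consumer can be configured away; a minimal sketch of the idiom (the DEBUG block and compute_debug_state() are made up for illustration):

	static void frob_state(void)
	{
		int dbg __maybe_unused;	/* referenced only in DEBUG builds */

	#ifdef DEBUG
		dbg = compute_debug_state();	/* hypothetical helper */
		printk(KERN_DEBUG "frob: state %d\n", dbg);
	#endif
	}

Without the annotation, a !DEBUG build would emit the same kind of 'unused variable' warning; with it, both configurations compile cleanly.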
Signed-off-by: Rakib Mullick Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/stop_machine.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index 090c28812ce..3cb4a9a8ae1 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -262,7 +262,7 @@ repeat: cpu_stop_fn_t fn = work->fn; void *arg = work->arg; struct cpu_stop_done *done = work->done; - char ksym_buf[KSYM_NAME_LEN]; + char ksym_buf[KSYM_NAME_LEN] __maybe_unused; __set_current_state(TASK_RUNNING); -- cgit v1.2.3-70-g09d2 From 4ce6494dbd8909718840bb88d5a699ef6ce5c212 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Tue, 26 Oct 2010 14:22:45 -0700 Subject: stop_machine: convert cpu notifier to return encapsulated errno value In commit e6bde73b07edeb703d4c89c1daabc09c303de11f ("cpu-hotplug: return better errno on cpu hotplug failure"), the cpu notifier can return an encapsulated errno value. This converts the cpu notifier to return an encapsulated errno value for stop_machine(). Signed-off-by: Akinobu Mita Cc: Rusty Russell Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/stop_machine.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index 3cb4a9a8ae1..2df820b03be 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -304,7 +304,7 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb, p = kthread_create(cpu_stopper_thread, stopper, "migration/%d", cpu); if (IS_ERR(p)) - return NOTIFY_BAD; + return notifier_from_errno(PTR_ERR(p)); get_task_struct(p); kthread_bind(p, cpu); sched_set_stop_task(cpu, p); @@ -372,7 +372,7 @@ static int __init cpu_stop_init(void) /* start one for the boot cpu */ err = cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_UP_PREPARE, bcpu); - BUG_ON(err == NOTIFY_BAD); + BUG_ON(err != NOTIFY_OK); cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_ONLINE, bcpu); register_cpu_notifier(&cpu_stop_cpu_notifier); -- cgit v1.2.3-70-g09d2 From 6c095efd82e8f6a98515426a733110f91cf0a709 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 26 Oct 2010 14:22:47 -0700 Subject: printk: fixup declaration of kmsg_reasons Move the redundant 'const' after the '*' to make the pointer itself const Signed-off-by: Namhyung Kim Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/printk.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/printk.c b/kernel/printk.c index 2531017795f..d18933a9281 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -1511,7 +1511,7 @@ int kmsg_dump_unregister(struct kmsg_dumper *dumper) } EXPORT_SYMBOL_GPL(kmsg_dump_unregister); -static const char const *kmsg_reasons[] = { +static const char * const kmsg_reasons[] = { [KMSG_DUMP_OOPS] = "oops", [KMSG_DUMP_PANIC] = "panic", [KMSG_DUMP_KEXEC] = "kexec", -- cgit v1.2.3-70-g09d2 From 8155c02a44a95562e1ae0999360eb31288d7195a Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 26 Oct 2010 14:22:47 -0700 Subject: printk: add lock context annotation acquire_console_semaphore_for_printk() releases logbuf_lock but was missing proper annotation. Add it.
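For reference, sparse's lock-context annotations come in pairs; a minimal sketch of the syntax (demo_lock and both helpers are hypothetical):

	static DEFINE_SPINLOCK(demo_lock);

	/* Tells sparse this function returns with demo_lock held... */
	static void demo_take(void)
		__acquires(&demo_lock)
	{
		spin_lock(&demo_lock);
	}

	/* ...and this one is entered with demo_lock held and releases it. */
	static void demo_drop(void)
		__releases(&demo_lock)
	{
		spin_unlock(&demo_lock);
	}

Functions that take and release a lock asymmetrically, like free_user() earlier or acquire_console_semaphore_for_printk() here, are exactly the ones that need the annotation to keep sparse's context counting balanced.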
Signed-off-by: Namhyung Kim Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/printk.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/printk.c b/kernel/printk.c index d18933a9281..6ff26151f4f 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -647,6 +647,7 @@ static inline int can_use_console(unsigned int cpu) * released but interrupts still disabled. */ static int acquire_console_semaphore_for_printk(unsigned int cpu) + __releases(&logbuf_lock) { int retval = 0; -- cgit v1.2.3-70-g09d2 From 674dff6507d3f9b110219ea125cf5e1213c9acef Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 26 Oct 2010 14:22:48 -0700 Subject: printk: change type of 'boot_delay' to int * get_option() takes its 2nd arg as 'int *', so passing boot_delay to it caused the following warnings from sparse: kernel/printk.c:223:27: warning: incorrect type in argument 2 (different signedness) kernel/printk.c:223:27: expected int *pint kernel/printk.c:223:27: got unsigned int static [toplevel] * Since boot_delay can't grow beyond 10,000, changing it to 'int' will not produce any problem. Signed-off-by: Namhyung Kim Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/printk.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/printk.c b/kernel/printk.c index 6ff26151f4f..b2ebaee8c37 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -210,7 +210,7 @@ __setup("log_buf_len=", log_buf_len_setup); #ifdef CONFIG_BOOT_PRINTK_DELAY -static unsigned int boot_delay; /* msecs delay after each printk during bootup */ +static int boot_delay; /* msecs delay after each printk during bootup */ static unsigned long long loops_per_msec; /* based on boot_delay */ static int __init boot_delay_setup(char *str) -- cgit v1.2.3-70-g09d2 From f5d87d851d76a390d0fab2f77bd1d563d69ee586 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 26 Oct 2010 14:22:49 -0700 Subject: printk: declare printk_ratelimit_state in ratelimit.h Adding a declaration of printk_ratelimit_state in ratelimit.h removes a potential build breakage and the following sparse warning: kernel/printk.c:1426:1: warning: symbol 'printk_ratelimit_state' was not declared. Should it be static?
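Once the declaration is visible, any file can reuse the printk limiter or define its own; a short usage sketch (the names are illustrative):

	/* Allow bursts of up to 10 messages per 5 seconds. */
	static DEFINE_RATELIMIT_STATE(demo_rs, 5 * HZ, 10);

	static void demo_event(void)
	{
		if (__ratelimit(&demo_rs))
			printk(KERN_WARNING "demo: event fired\n");
	}

__ratelimit() returns nonzero when the caller may proceed, so the printk is simply skipped once the budget is exhausted.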
[akpm@linux-foundation.org: remove unneeded ifdef] Signed-off-by: Namhyung Kim Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/ratelimit.h | 2 ++ kernel/sysctl.c | 2 -- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h index 8f69d09a41a..03ff67b0cdf 100644 --- a/include/linux/ratelimit.h +++ b/include/linux/ratelimit.h @@ -36,6 +36,8 @@ static inline void ratelimit_state_init(struct ratelimit_state *rs, rs->begin = 0; } +extern struct ratelimit_state printk_ratelimit_state; + extern int ___ratelimit(struct ratelimit_state *rs, const char *func); #define __ratelimit(state) ___ratelimit(state, __func__) diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 694b140852c..48d9d689498 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -161,8 +161,6 @@ extern int no_unaligned_warning; extern int unaligned_dump_stack; #endif -extern struct ratelimit_state printk_ratelimit_state; - #ifdef CONFIG_PROC_SYSCTL static int proc_do_cad_pid(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); -- cgit v1.2.3-70-g09d2 From ee2f154a598e96df2ebb01648a7699373bc085c7 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 26 Oct 2010 14:17:25 -0700 Subject: docbook: add more wait/wake/completion to device-drivers docbook Add more wait, wake, and completion interfaces to the device-drivers docbook. Fix kernel-doc notation in the added files. Signed-off-by: Randy Dunlap Signed-off-by: Linus Torvalds --- Documentation/DocBook/device-drivers.tmpl | 5 +++++ include/linux/completion.h | 10 +++++----- kernel/wait.c | 6 +++--- 3 files changed, 13 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl index feca0758391..22edcbb9dda 100644 --- a/Documentation/DocBook/device-drivers.tmpl +++ b/Documentation/DocBook/device-drivers.tmpl @@ -51,7 +51,12 @@ Delaying, scheduling, and timer routines !Iinclude/linux/sched.h !Ekernel/sched.c +!Iinclude/linux/completion.h !Ekernel/timer.c + + Wait queues and Wake events +!Iinclude/linux/wait.h +!Ekernel/wait.c High-resolution timers !Iinclude/linux/ktime.h diff --git a/include/linux/completion.h b/include/linux/completion.h index 51e3145196f..36d57f74cd0 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h @@ -10,7 +10,7 @@ #include -/** +/* * struct completion - structure used to maintain state for a "completion" * * This is the opaque structure used to maintain the state for a "completion". @@ -34,7 +34,7 @@ struct completion { ({ init_completion(&work); work; }) /** - * DECLARE_COMPLETION: - declare and initialize a completion structure + * DECLARE_COMPLETION - declare and initialize a completion structure * @work: identifier for the completion structure * * This macro declares and initializes a completion structure. 
Generally used @@ -50,7 +50,7 @@ struct completion { * are on the kernel stack: */ /** - * DECLARE_COMPLETION_ONSTACK: - declare and initialize a completion structure + * DECLARE_COMPLETION_ONSTACK - declare and initialize a completion structure * @work: identifier for the completion structure * * This macro declares and initializes a completion structure on the kernel @@ -64,7 +64,7 @@ struct completion { #endif /** - * init_completion: - Initialize a dynamically allocated completion + * init_completion - Initialize a dynamically allocated completion * @x: completion structure that is to be initialized * * This inline function will initialize a dynamically created completion @@ -92,7 +92,7 @@ extern void complete(struct completion *); extern void complete_all(struct completion *); /** - * INIT_COMPLETION: - reinitialize a completion structure + * INIT_COMPLETION - reinitialize a completion structure * @x: completion structure to be reinitialized * * This macro should be used to reinitialize a completion structure so it can diff --git a/kernel/wait.c b/kernel/wait.c index c4bd3d825f3..b0310eb6cc1 100644 --- a/kernel/wait.c +++ b/kernel/wait.c @@ -92,7 +92,7 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state) } EXPORT_SYMBOL(prepare_to_wait_exclusive); -/* +/** * finish_wait - clean up after waiting in a queue * @q: waitqueue waited on * @wait: wait descriptor @@ -127,11 +127,11 @@ void finish_wait(wait_queue_head_t *q, wait_queue_t *wait) } EXPORT_SYMBOL(finish_wait); -/* +/** * abort_exclusive_wait - abort exclusive waiting in a queue * @q: waitqueue waited on * @wait: wait descriptor - * @state: runstate of the waiter to be woken + * @mode: runstate of the waiter to be woken * @key: key to identify a wait bit queue or %NULL * * Sets current thread back to running state and removes -- cgit v1.2.3-70-g09d2 From abbce906d05ec37289cd0c3b4e35b2db26eab19b Mon Sep 17 00:00:00 2001 From: Michał Mirosław Date: Mon, 20 Sep 2010 01:58:08 +0200 Subject: (trivial) Fix compiler warning in kernel/modules.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Building with CONFIG_KALLSYMS=n gives the following warning: /mnt/src/linux-git/kernel/module.c: In function ‘post_relocation’: /mnt/src/linux-git/kernel/module.c:2534:2: warning: passing argument 2 of ‘add_kallsyms’ discards qualifiers from pointer target type /mnt/src/linux-git/kernel/module.c:2038:13: note: expected ‘struct load_info *’ but argument is of type ‘const struct load_info *’ Signed-off-by: Michał Mirosław Signed-off-by: Rusty Russell --- kernel/module.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index 2df46301a7a..437a74a7524 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -2037,7 +2037,7 @@ static inline void layout_symtab(struct module *mod, struct load_info *info) { } -static void add_kallsyms(struct module *mod, struct load_info *info) +static void add_kallsyms(struct module *mod, const struct load_info *info) { } #endif /* CONFIG_KALLSYMS */ -- cgit v1.2.3-70-g09d2 From 3a5f65df5a0fcbaa35e5417c0420d691fee4ac56 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 27 Oct 2010 17:28:36 +0100 Subject: Typedef SMP call function pointer Typedef the pointer to the function to be called by smp_call_function() and friends: typedef void (*smp_call_func_t)(void *info); as it is used in a fair number of places.
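With the typedef in place, a callback/caller pair reads like this (a sketch; say_hello() is made up):

	static void say_hello(void *info)
	{
		/* Runs on every CPU, in IPI context with interrupts disabled. */
		pr_info("hello from CPU %d\n", smp_processor_id());
	}

	static void greet_all_cpus(void)
	{
		/* say_hello has type smp_call_func_t; wait=1 blocks until all CPUs ran it. */
		on_each_cpu(say_hello, NULL, 1);
	}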
Signed-off-by: David Howells cc: linux-arch@vger.kernel.org --- include/linux/smp.h | 19 ++++++++++--------- kernel/smp.c | 8 ++++---- 2 files changed, 14 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/include/linux/smp.h b/include/linux/smp.h index cfa2d20e35f..6dc95cac6b3 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -13,9 +13,10 @@ extern void cpu_idle(void); +typedef void (*smp_call_func_t)(void *info); struct call_single_data { struct list_head list; - void (*func) (void *info); + smp_call_func_t func; void *info; u16 flags; u16 priv; @@ -24,8 +25,8 @@ struct call_single_data { /* total number of cpus in this system (may exceed NR_CPUS) */ extern unsigned int total_cpus; -int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, - int wait); +int smp_call_function_single(int cpuid, smp_call_func_t func, void *info, + int wait); #ifdef CONFIG_SMP @@ -69,15 +70,15 @@ extern void smp_cpus_done(unsigned int max_cpus); /* * Call a function on all other processors */ -int smp_call_function(void(*func)(void *info), void *info, int wait); +int smp_call_function(smp_call_func_t func, void *info, int wait); void smp_call_function_many(const struct cpumask *mask, - void (*func)(void *info), void *info, bool wait); + smp_call_func_t func, void *info, bool wait); void __smp_call_function_single(int cpuid, struct call_single_data *data, int wait); int smp_call_function_any(const struct cpumask *mask, - void (*func)(void *info), void *info, int wait); + smp_call_func_t func, void *info, int wait); /* * Generic and arch helpers @@ -94,7 +95,7 @@ void ipi_call_unlock_irq(void); /* * Call a function on all processors */ -int on_each_cpu(void (*func) (void *info), void *info, int wait); +int on_each_cpu(smp_call_func_t func, void *info, int wait); #define MSG_ALL_BUT_SELF 0x8000 /* Assume <32768 CPU's */ #define MSG_ALL 0x8001 @@ -122,7 +123,7 @@ static inline void smp_send_stop(void) { } * These macros fold the SMP functionality into a single CPU system */ #define raw_smp_processor_id() 0 -static inline int up_smp_call_function(void (*func)(void *), void *info) +static inline int up_smp_call_function(smp_call_func_t func, void *info) { return 0; } @@ -143,7 +144,7 @@ static inline void smp_send_reschedule(int cpu) { } static inline void init_call_single_data(void) { } static inline int -smp_call_function_any(const struct cpumask *mask, void (*func)(void *info), +smp_call_function_any(const struct cpumask *mask, smp_call_func_t func, void *info, int wait) { return smp_call_function_single(0, func, info, wait); diff --git a/kernel/smp.c b/kernel/smp.c index ed6aacfcb7e..12ed8b013e2 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -267,7 +267,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data); * * Returns 0 on success, else a negative status code. */ -int smp_call_function_single(int cpu, void (*func) (void *info), void *info, +int smp_call_function_single(int cpu, smp_call_func_t func, void *info, int wait) { struct call_single_data d = { @@ -336,7 +336,7 @@ EXPORT_SYMBOL(smp_call_function_single); * 3) any other online cpu in @mask */ int smp_call_function_any(const struct cpumask *mask, - void (*func)(void *info), void *info, int wait) + smp_call_func_t func, void *info, int wait) { unsigned int cpu; const struct cpumask *nodemask; @@ -416,7 +416,7 @@ void __smp_call_function_single(int cpu, struct call_single_data *data, * must be disabled when calling this function. 
*/ void smp_call_function_many(const struct cpumask *mask, - void (*func)(void *), void *info, bool wait) + smp_call_func_t func, void *info, bool wait) { struct call_function_data *data; unsigned long flags; @@ -500,7 +500,7 @@ EXPORT_SYMBOL(smp_call_function_many); * You must not call this function with disabled interrupts or from a * hardware interrupt handler or from a bottom half handler. */ -int smp_call_function(void (*func)(void *), void *info, int wait) +int smp_call_function(smp_call_func_t func, void *info, int wait) { preempt_disable(); smp_call_function_many(cpu_online_mask, func, info, wait); -- cgit v1.2.3-70-g09d2 From d5de4ddb1bc430289bede76c0d87cabee93f749a Mon Sep 17 00:00:00 2001 From: Tomasz Buchert Date: Wed, 27 Oct 2010 15:33:32 -0700 Subject: cgroup_freezer: unnecessary test in cgroup_freezing_or_frozen() The root freezer_state is always CGROUP_THAWED so we can remove the special case from the code. The test itself can be handy and is extracted into a static function. Signed-off-by: Tomasz Buchert Cc: Matt Helsley Cc: Paul Menage Cc: Li Zefan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/cgroup_freezer.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c index ce71ed53e88..321e2a007d2 100644 --- a/kernel/cgroup_freezer.c +++ b/kernel/cgroup_freezer.c @@ -48,20 +48,19 @@ static inline struct freezer *task_freezer(struct task_struct *task) struct freezer, css); } -int cgroup_freezing_or_frozen(struct task_struct *task) +static inline int __cgroup_freezing_or_frozen(struct task_struct *task) { - struct freezer *freezer; - enum freezer_state state; + enum freezer_state state = task_freezer(task)->state; + return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN); +} +int cgroup_freezing_or_frozen(struct task_struct *task) +{ + int result; task_lock(task); - freezer = task_freezer(task); - if (!freezer->css.cgroup->parent) - state = CGROUP_THAWED; /* root cgroup can't be frozen */ - else - state = freezer->state; + result = __cgroup_freezing_or_frozen(task); task_unlock(task); - - return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN); + return result; } /* -- cgit v1.2.3-70-g09d2 From 0bdba580ab052a21e3eda2764ed22d9ee962392b Mon Sep 17 00:00:00 2001 From: Tomasz Buchert Date: Wed, 27 Oct 2010 15:33:33 -0700 Subject: cgroup_freezer: fix can_attach() to prohibit moving from/to freezing/frozen cgroups It is possible to move a task from its cgroup even if this group is 'FREEZING'. This results in a nasty bug - the moved task will become frozen OUTSIDE its original cgroup and will remain in a permanent 'D' state. This patch allows the task to be migrated only between THAWED cgroups. This behavior was observed and easily reproduced on a single-core laptop. Notice that reproducibility depends highly on the machine used.
A program and instructions on how to reproduce the bug can be fetched from: http://pentium.hopto.org/~thinred/repos/linux-misc/freezer_bug.c Signed-off-by: Tomasz Buchert Cc: Matt Helsley Cc: Paul Menage Cc: Li Zefan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/cgroup_freezer.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c index 321e2a007d2..c287627bed3 100644 --- a/kernel/cgroup_freezer.c +++ b/kernel/cgroup_freezer.c @@ -173,24 +173,25 @@ static int freezer_can_attach(struct cgroup_subsys *ss, /* * Anything frozen can't move or be moved to/from. - * - * Since orig_freezer->state == FROZEN means that @task has been - * frozen, so it's sufficient to check the latter condition. */ - if (is_task_frozen_enough(task)) + freezer = cgroup_freezer(new_cgroup); + if (freezer->state != CGROUP_THAWED) return -EBUSY; - freezer = cgroup_freezer(new_cgroup); - if (freezer->state == CGROUP_FROZEN) + rcu_read_lock(); + if (__cgroup_freezing_or_frozen(task)) { + rcu_read_unlock(); return -EBUSY; + } + rcu_read_unlock(); if (threadgroup) { struct task_struct *c; rcu_read_lock(); list_for_each_entry_rcu(c, &task->thread_group, thread_group) { - if (is_task_frozen_enough(c)) { + if (__cgroup_freezing_or_frozen(c)) { rcu_read_unlock(); return -EBUSY; } -- cgit v1.2.3-70-g09d2 From 2d3cbf8bc852ac1bc3d098186143c5973f87b753 Mon Sep 17 00:00:00 2001 From: Tomasz Buchert Date: Wed, 27 Oct 2010 15:33:34 -0700 Subject: cgroup_freezer: update_freezer_state() does incorrect state transitions There are 4 state transitions possible for a freezer. Only the FREEZING -> FROZEN transition is done lazily. This patch allows update_freezer_state() to perform only this transition and renames the function to update_if_frozen. Moreover, the is_task_frozen_enough() function is removed and every occurrence of it is replaced with frozen(). Therefore, for a group to become FROZEN, every task must be frozen. The previous version could trigger the following bug: when a cgroup is in the process of freezing (but none of its tasks are frozen yet), update_freezer_state() (called from freezer_read or freezer_write) would incorrectly report that the group is 'THAWED' (because nfrozen = 0), allowing the transition FREEZING -> THAWED without writing anything to 'freezer.state'. This is incorrect according to the documentation. This could result in a 'THAWED' cgroup with frozen tasks inside.
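For reference, the four transitions mentioned above are (reconstructed from the code; the changelog itself does not list them):

	THAWED   -> FREEZING   write "FROZEN" to freezer.state
	FREEZING -> FROZEN     lazy: update_if_frozen() observes that every task is frozen
	FREEZING -> THAWED     write "THAWED" to freezer.state
	FROZEN   -> THAWED     write "THAWED" to freezer.state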
A code to reproduce this bug is available here: http://pentium.hopto.org/~thinred/repos/linux-misc/freezer_bug2.c [akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Tomasz Buchert Cc: Matt Helsley Cc: Paul Menage Cc: Li Zefan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/cgroup_freezer.c | 38 +++++++++++++++----------------------- 1 file changed, 15 insertions(+), 23 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c index c287627bed3..e7bebb7c6c3 100644 --- a/kernel/cgroup_freezer.c +++ b/kernel/cgroup_freezer.c @@ -153,13 +153,6 @@ static void freezer_destroy(struct cgroup_subsys *ss, kfree(cgroup_freezer(cgroup)); } -/* Task is frozen or will freeze immediately when next it gets woken */ -static bool is_task_frozen_enough(struct task_struct *task) -{ - return frozen(task) || - (task_is_stopped_or_traced(task) && freezing(task)); -} - /* * The call to cgroup_lock() in the freezer.state write method prevents * a write to that file racing against an attach, and hence the @@ -236,31 +229,30 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task) /* * caller must hold freezer->lock */ -static void update_freezer_state(struct cgroup *cgroup, +static void update_if_frozen(struct cgroup *cgroup, struct freezer *freezer) { struct cgroup_iter it; struct task_struct *task; unsigned int nfrozen = 0, ntotal = 0; + enum freezer_state old_state = freezer->state; cgroup_iter_start(cgroup, &it); while ((task = cgroup_iter_next(cgroup, &it))) { ntotal++; - if (is_task_frozen_enough(task)) + if (frozen(task)) nfrozen++; } - /* - * Transition to FROZEN when no new tasks can be added ensures - * that we never exist in the FROZEN state while there are unfrozen - * tasks. - */ - if (nfrozen == ntotal) - freezer->state = CGROUP_FROZEN; - else if (nfrozen > 0) - freezer->state = CGROUP_FREEZING; - else - freezer->state = CGROUP_THAWED; + if (old_state == CGROUP_THAWED) { + BUG_ON(nfrozen > 0); + } else if (old_state == CGROUP_FREEZING) { + if (nfrozen == ntotal) + freezer->state = CGROUP_FROZEN; + } else { /* old_state == CGROUP_FROZEN */ + BUG_ON(nfrozen != ntotal); + } + cgroup_iter_end(cgroup, &it); } @@ -279,7 +271,7 @@ static int freezer_read(struct cgroup *cgroup, struct cftype *cft, if (state == CGROUP_FREEZING) { /* We change from FREEZING to FROZEN lazily if the cgroup was * only partially frozen when we exitted write. */ - update_freezer_state(cgroup, freezer); + update_if_frozen(cgroup, freezer); state = freezer->state; } spin_unlock_irq(&freezer->lock); @@ -301,7 +293,7 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer) while ((task = cgroup_iter_next(cgroup, &it))) { if (!freeze_task(task, true)) continue; - if (is_task_frozen_enough(task)) + if (frozen(task)) continue; if (!freezing(task) && !freezer_should_skip(task)) num_cant_freeze_now++; @@ -335,7 +327,7 @@ static int freezer_change_state(struct cgroup *cgroup, spin_lock_irq(&freezer->lock); - update_freezer_state(cgroup, freezer); + update_if_frozen(cgroup, freezer); if (goal_state == freezer->state) goto out; -- cgit v1.2.3-70-g09d2 From 97978e6d1f2da0073416870410459694fbdbfd9b Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Wed, 27 Oct 2010 15:33:35 -0700 Subject: cgroup: add clone_children control file The ns_cgroup is a control group interacting with the namespaces. When a new namespace is created, a corresponding cgroup is automatically created too. 
The cgroup name is the pid of the process that did 'unshare' or the child of 'clone'. This cgroup is tied to the namespace because it prevents a process from escaping its control group, and it uses the post_clone callback so the child cgroup inherits the values of the parent cgroup. Unfortunately, the more we use this cgroup, the more problems we face with it: (1) when a process unshares, the cgroup name may conflict with a previous cgroup with the same pid, so unshare or clone return -EEXIST (2) cgroup creation is out of control, because an application may create several namespaces, for which the system will automatically create several cgroups behind its back and leave them on the cgroupfs (e.g. a vrf based on the network namespace). (3) the mix of (1) and (2) forces an administrator to regularly check and clean these cgroups. This patchset removes the ns_cgroup by adding a new flag to the cgroup and the cgroupfs mount option. It enables copying of the parent cgroup when a child cgroup is created. We can then safely remove the ns_cgroup as this flag provides compatibility. We now have to manually create a cgroup and add the task to it, which is consistent with the cgroup framework. This patch: Sent as an answer to a previous thread around the ns_cgroup. https://lists.linux-foundation.org/pipermail/containers/2009-June/018627.html It adds a control file 'clone_children' for a cgroup. This control file is a boolean specifying whether the child cgroup should be a clone of the parent cgroup or not. The default value is 'false'. This flag makes the child cgroup call the post_clone callback of each subsystem, if it is available. At present, the cpuset is the only subsystem which has implemented the post_clone callback. The option can be set at mount time by specifying the 'clone_children' mount option. Signed-off-by: Daniel Lezcano Signed-off-by: Serge E. Hallyn Cc: Eric W. Biederman Acked-by: Paul Menage Reviewed-by: Li Zefan Cc: Jamal Hadi Salim Cc: Matt Helsley Acked-by: Balbir Singh Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/cgroups/cgroups.txt | 14 ++++++++++++-- include/linux/cgroup.h | 4 ++++ kernel/cgroup.c | 39 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 55 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt index b34823ff164..190018b0c64 100644 --- a/Documentation/cgroups/cgroups.txt +++ b/Documentation/cgroups/cgroups.txt @@ -18,7 +18,8 @@ CONTENTS: 1.2 Why are cgroups needed ? 1.3 How are cgroups implemented ? 1.4 What does notify_on_release do ? - 1.5 How do I use cgroups ? + 1.5 What does clone_children do ? + 1.6 How do I use cgroups ? 2. Usage Examples and Syntax 2.1 Basic Usage 2.2 Attaching processes @@ -293,7 +294,16 @@ notify_on_release in the root cgroup at system boot is disabled value of their parents notify_on_release setting. The default value of a cgroup hierarchy's release_agent path is empty. -1.5 How do I use cgroups ? +1.5 What does clone_children do ? +--------------------------------- + +If the clone_children flag is enabled (1) in a cgroup, then all +cgroups created beneath will call the post_clone callbacks for each +subsystem of the newly created cgroup. Usually when this callback is +implemented for a subsystem, it copies the values of the parent +subsystem, this is the case for the cpuset. + +1.6 How do I use cgroups ?
-------------------------- To start a new job that is to be contained within a cgroup, using diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 709dfb901d1..ed4ba111bc8 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -154,6 +154,10 @@ enum { * A thread in rmdir() is wating for this cgroup. */ CGRP_WAIT_ON_RMDIR, + /* + * Clone cgroup values when creating a new child cgroup + */ + CGRP_CLONE_CHILDREN, }; /* which pidlist file are we talking about? */ diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 9270d532ec3..4b218a46ddd 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -243,6 +243,11 @@ static int notify_on_release(const struct cgroup *cgrp) return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); } +static int clone_children(const struct cgroup *cgrp) +{ + return test_bit(CGRP_CLONE_CHILDREN, &cgrp->flags); +} + /* * for_each_subsys() allows you to iterate on each subsystem attached to * an active hierarchy @@ -1040,6 +1045,8 @@ static int cgroup_show_options(struct seq_file *seq, struct vfsmount *vfs) seq_puts(seq, ",noprefix"); if (strlen(root->release_agent_path)) seq_printf(seq, ",release_agent=%s", root->release_agent_path); + if (clone_children(&root->top_cgroup)) + seq_puts(seq, ",clone_children"); if (strlen(root->name)) seq_printf(seq, ",name=%s", root->name); mutex_unlock(&cgroup_mutex); @@ -1050,6 +1057,7 @@ struct cgroup_sb_opts { unsigned long subsys_bits; unsigned long flags; char *release_agent; + bool clone_children; char *name; /* User explicitly requested empty subsystem */ bool none; @@ -1097,6 +1105,8 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) opts->none = true; } else if (!strcmp(token, "noprefix")) { set_bit(ROOT_NOPREFIX, &opts->flags); + } else if (!strcmp(token, "clone_children")) { + opts->clone_children = true; } else if (!strncmp(token, "release_agent=", 14)) { /* Specifying two release agents is forbidden */ if (opts->release_agent) @@ -1355,6 +1365,8 @@ static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts) strcpy(root->release_agent_path, opts->release_agent); if (opts->name) strcpy(root->name, opts->name); + if (opts->clone_children) + set_bit(CGRP_CLONE_CHILDREN, &root->top_cgroup.flags); return root; } @@ -3173,6 +3185,23 @@ fail: return ret; } +static u64 cgroup_clone_children_read(struct cgroup *cgrp, + struct cftype *cft) +{ + return clone_children(cgrp); +} + +static int cgroup_clone_children_write(struct cgroup *cgrp, + struct cftype *cft, + u64 val) +{ + if (val) + set_bit(CGRP_CLONE_CHILDREN, &cgrp->flags); + else + clear_bit(CGRP_CLONE_CHILDREN, &cgrp->flags); + return 0; +} + /* * for the common functions, 'private' gives the type of file */ @@ -3203,6 +3232,11 @@ static struct cftype files[] = { .write_string = cgroup_write_event_control, .mode = S_IWUGO, }, + { + .name = "cgroup.clone_children", + .read_u64 = cgroup_clone_children_read, + .write_u64 = cgroup_clone_children_write, + }, }; static struct cftype cft_release_agent = { @@ -3332,6 +3366,9 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, if (notify_on_release(parent)) set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); + if (clone_children(parent)) + set_bit(CGRP_CLONE_CHILDREN, &cgrp->flags); + for_each_subsys(root, ss) { struct cgroup_subsys_state *css = ss->create(ss, cgrp); @@ -3346,6 +3383,8 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, goto err_destroy; } /* At error, ->destroy() callback has to free assigned ID. 
*/ + if (clone_children(parent) && ss->post_clone) + ss->post_clone(ss, cgrp); } cgroup_lock_hierarchy(root); -- cgit v1.2.3-70-g09d2 From 32a8cf235e2f192eb002755076994525cdbaa35a Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Wed, 27 Oct 2010 15:33:37 -0700 Subject: cgroup: make the mount options parsing more accurate Current behavior: ================= (1) When we mount a cgroup, we can specify the 'all' option which means to enable all the cgroup subsystems. This is the default option when no option is specified. (2) If we want to mount a cgroup with a subset of the supported cgroup subsystems, we have to specify a list of subsystem names as the mount option. (3) If we specify another option like 'noprefix' or 'release_agent', the current code also wants 'all' or a subsystem name to be specified. Not critical, but a bit unfriendly, as we should assume (1) in this case. (4) Logically, the 'all' option is mutually exclusive with a subsystem name, but this is not detected. In other words: succeeds : mount -t cgroup -o all,freezer cgroup /cgroup => is it 'all' or 'freezer' ? fails : mount -t cgroup -o noprefix cgroup /cgroup => succeeds if we do '-o noprefix,all' The following patches consolidate the mount options check a bit. New behavior: ============= (1) untouched (2) untouched (3) the 'all' option is assumed by default when options other than a subsystem name are specified (4) raises an error In other words: fails : mount -t cgroup -o all,freezer cgroup /cgroup succeeds : mount -t cgroup -o noprefix cgroup /cgroup (see the userspace sketch after the patch below) For the sake of readability, the if ... then ... else ... if ... indentation when parsing the options has been changed to: if ... then ... continue fi Signed-off-by: Daniel Lezcano Signed-off-by: Serge E. Hallyn Reviewed-by: Li Zefan Reviewed-by: Paul Menage Cc: Eric W.
Biederman Cc: Jamal Hadi Salim Cc: Matt Helsley Cc: Balbir Singh Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/cgroup.c | 90 ++++++++++++++++++++++++++++++++++++++------------------- 1 file changed, 60 insertions(+), 30 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 4b218a46ddd..3e6517e51fd 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1074,7 +1074,8 @@ struct cgroup_sb_opts { */ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) { - char *token, *o = data ?: "all"; + char *token, *o = data; + bool all_ss = false, one_ss = false; unsigned long mask = (unsigned long)-1; int i; bool module_pin_failed = false; @@ -1090,24 +1091,27 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) while ((token = strsep(&o, ",")) != NULL) { if (!*token) return -EINVAL; - if (!strcmp(token, "all")) { - /* Add all non-disabled subsystems */ - opts->subsys_bits = 0; - for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { - struct cgroup_subsys *ss = subsys[i]; - if (ss == NULL) - continue; - if (!ss->disabled) - opts->subsys_bits |= 1ul << i; - } - } else if (!strcmp(token, "none")) { + if (!strcmp(token, "none")) { /* Explicitly have no subsystems */ opts->none = true; - } else if (!strcmp(token, "noprefix")) { + continue; + } + if (!strcmp(token, "all")) { + /* Mutually exclusive option 'all' + subsystem name */ + if (one_ss) + return -EINVAL; + all_ss = true; + continue; + } + if (!strcmp(token, "noprefix")) { set_bit(ROOT_NOPREFIX, &opts->flags); - } else if (!strcmp(token, "clone_children")) { + continue; + } + if (!strcmp(token, "clone_children")) { opts->clone_children = true; - } else if (!strncmp(token, "release_agent=", 14)) { + continue; + } + if (!strncmp(token, "release_agent=", 14)) { /* Specifying two release agents is forbidden */ if (opts->release_agent) return -EINVAL; @@ -1115,7 +1119,9 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL); if (!opts->release_agent) return -ENOMEM; - } else if (!strncmp(token, "name=", 5)) { + continue; + } + if (!strncmp(token, "name=", 5)) { const char *name = token + 5; /* Can't specify an empty name */ if (!strlen(name)) @@ -1137,20 +1143,44 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) GFP_KERNEL); if (!opts->name) return -ENOMEM; - } else { - struct cgroup_subsys *ss; - for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { - ss = subsys[i]; - if (ss == NULL) - continue; - if (!strcmp(token, ss->name)) { - if (!ss->disabled) - set_bit(i, &opts->subsys_bits); - break; - } - } - if (i == CGROUP_SUBSYS_COUNT) - return -ENOENT; + + continue; + } + + for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { + struct cgroup_subsys *ss = subsys[i]; + if (ss == NULL) + continue; + if (strcmp(token, ss->name)) + continue; + if (ss->disabled) + continue; + + /* Mutually exclusive option 'all' + subsystem name */ + if (all_ss) + return -EINVAL; + set_bit(i, &opts->subsys_bits); + one_ss = true; + + break; + } + if (i == CGROUP_SUBSYS_COUNT) + return -ENOENT; + } + + /* + * If the 'all' option was specified select all the subsystems, + * otherwise 'all, 'none' and a subsystem name options were not + * specified, let's default to 'all' + */ + if (all_ss || (!all_ss && !one_ss && !opts->none)) { + for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { + struct cgroup_subsys *ss = subsys[i]; + if (ss == NULL) + continue; + if (ss->disabled) + continue; + set_bit(i, &opts->subsys_bits); } } 
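As a userspace illustration of the new semantics, here is a minimal editorial sketch (not part of the patch; the mount points /mnt/cg1 and /mnt/cg2 are hypothetical and must already exist):

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/mount.h>

int main(void)
{
	/* 'noprefix' alone is now accepted and implies 'all' */
	if (mount("cgroup", "/mnt/cg1", "cgroup", 0, "noprefix"))
		perror("mount -o noprefix");
	else
		printf("noprefix mount succeeded (all subsystems enabled)\n");

	/* 'all' combined with an explicit subsystem name is now rejected */
	if (mount("cgroup", "/mnt/cg2", "cgroup", 0, "all,freezer"))
		printf("all,freezer rejected as expected: %s\n", strerror(errno));

	return 0;
}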
-- cgit v1.2.3-70-g09d2 From f4a2589feaef0a9b737a3e582b37ee96695bb25f Mon Sep 17 00:00:00 2001 From: Evgeny Kuznetsov Date: Wed, 27 Oct 2010 15:33:37 -0700 Subject: cgroups: add check for strcpy destination string overflow Function "strcpy" is used without a check for the maximum allowed source string length and could overflow the destination string. A length check is added before using "strcpy". The function now returns an error if the source string is longer than the maximum. akpm: presently considered NotABug, but add the check for general future-safeness and robustness. Signed-off-by: Evgeny Kuznetsov Acked-by: Paul Menage Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/cgroup.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 3e6517e51fd..5cf366965d0 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1922,6 +1922,8 @@ static int cgroup_release_agent_write(struct cgroup *cgrp, struct cftype *cft, const char *buffer) { BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX); + if (strlen(buffer) >= PATH_MAX) + return -EINVAL; if (!cgroup_lock_live_group(cgrp)) return -ENODEV; strcpy(cgrp->root->release_agent_path, buffer); -- cgit v1.2.3-70-g09d2 From 45531757b45cae0ce64c5aff08c2534d5a0fa3e7 Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Wed, 27 Oct 2010 15:33:38 -0700 Subject: cgroup: notify ns_cgroup deprecated The ns_cgroup will be removed very soon. Let's warn, for this version, that ns_cgroup is deprecated. Make ns_cgroup and clone_children mutually exclusive. If clone_children is set and the ns_cgroup is mounted, fail with EINVAL when the ns_cgroup subsys is created (a printk will help the user understand why the creation fails). Update the feature-removal-schedule file with the deprecated ns_cgroup. Signed-off-by: Daniel Lezcano Acked-by: Paul Menage Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/feature-removal-schedule.txt | 17 +++++++++++++++++ kernel/ns_cgroup.c | 8 ++++++++ 2 files changed, 25 insertions(+) (limited to 'kernel') diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index d2af87ba96e..f3da8c0a3af 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt @@ -526,6 +526,23 @@ Who: FUJITA Tomonori ---------------------------- +What: namespace cgroup (ns_cgroup) +When: 2.6.38 +Why: The ns_cgroup leads to some problems: + * cgroup creation is out-of-control + * cgroup name can conflict when pids are looping + * it is not possible to have a single process handling + a lot of namespaces without falling in a exponential creation time + * we may want to create a namespace without creating a cgroup + + The ns_cgroup is replaced by a compatibility flag 'clone_children', + where a newly created cgroup will copy the parent cgroup values. + The userspace has to manually create a cgroup and add a task to + the 'tasks' file.
+Who: Daniel Lezcano + +---------------------------- + What: iwlwifi disable_hw_scan module parameters When: 2.6.40 Why: Hareware scan is the prefer method for iwlwifi devices for diff --git a/kernel/ns_cgroup.c b/kernel/ns_cgroup.c index 2a5dfec8efe..2c98ad94ba0 100644 --- a/kernel/ns_cgroup.c +++ b/kernel/ns_cgroup.c @@ -85,6 +85,14 @@ static struct cgroup_subsys_state *ns_create(struct cgroup_subsys *ss, return ERR_PTR(-EPERM); if (!cgroup_is_descendant(cgroup, current)) return ERR_PTR(-EPERM); + if (test_bit(CGRP_CLONE_CHILDREN, &cgroup->flags)) { + printk("ns_cgroup can't be created with parent " + "'clone_children' set.\n"); + return ERR_PTR(-EINVAL); + } + + printk_once("ns_cgroup deprecated: consider using the " + "'clone_children' flag without the ns_cgroup.\n"); ns_cgroup = kzalloc(sizeof(*ns_cgroup), GFP_KERNEL); if (!ns_cgroup) -- cgit v1.2.3-70-g09d2 From c4b5ed250eebf854d40f27b43362c80f115cb57a Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 27 Oct 2010 15:33:44 -0700 Subject: ptrace: annotate lock context change on exit_ptrace() exit_ptrace() releases and regrabs tasklist_lock but was missing proper annotation. Add it. Signed-off-by: Namhyung Kim Acked-by: Roland McGrath Cc: Ingo Molnar Cc: Oleg Nesterov Signed-off-by: Linus Torvalds --- kernel/ptrace.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/ptrace.c b/kernel/ptrace.c index f34d798ef4a..4afd9b86cc0 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -329,6 +329,8 @@ int ptrace_detach(struct task_struct *child, unsigned int data) * and reacquire the lock. */ void exit_ptrace(struct task_struct *tracer) + __releases(&tasklist_lock) + __acquires(&tasklist_lock) { struct task_struct *p, *n; LIST_HEAD(ptrace_dead); -- cgit v1.2.3-70-g09d2 From 4abf986960ecda6a87fc2f795aacf888a2f0127e Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 27 Oct 2010 15:33:45 -0700 Subject: ptrace: change signature of sys_ptrace() and friends Since the userspace API of the ptrace syscall defines @addr and @data as void pointers, it is more appropriate to define them as unsigned long in the kernel, so the related functions are changed as well. 'unsigned long' is typically used elsewhere in the kernel as an opaque data type, and using it helps clean up a lot of sparse warnings.
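For illustration, a minimal userspace sketch (an editorial example, not from the patch) of why @addr is best treated as an opaque, pointer-sized value; the peeked address 0x400000 is hypothetical and may not be mapped:

#include <stdio.h>
#include <errno.h>
#include <signal.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);		/* stop so the parent can peek */
		_exit(0);
	}
	waitpid(child, NULL, 0);

	/* userspace passes an address, not a signed integer */
	errno = 0;
	long word = ptrace(PTRACE_PEEKDATA, child, (void *)0x400000, NULL);
	if (word == -1 && errno)
		perror("PTRACE_PEEKDATA");
	else
		printf("word at 0x400000: %lx\n", word);

	kill(child, SIGKILL);
	return 0;
}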
Suggested-by: Arnd Bergmann Signed-off-by: Namhyung Kim Acked-by: Arnd Bergmann Acked-by: Roland McGrath Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/ptrace.h | 9 ++++++--- include/linux/syscalls.h | 3 ++- kernel/ptrace.c | 16 ++++++++++------ 3 files changed, 18 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 4272521e29e..67a4cd77c35 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -108,7 +108,8 @@ extern int ptrace_attach(struct task_struct *tsk); extern int ptrace_detach(struct task_struct *, unsigned int); extern void ptrace_disable(struct task_struct *); extern int ptrace_check_attach(struct task_struct *task, int kill); -extern int ptrace_request(struct task_struct *child, long request, long addr, long data); +extern int ptrace_request(struct task_struct *child, long request, + unsigned long addr, unsigned long data); extern void ptrace_notify(int exit_code); extern void __ptrace_link(struct task_struct *child, struct task_struct *new_parent); @@ -132,8 +133,10 @@ static inline void ptrace_unlink(struct task_struct *child) __ptrace_unlink(child); } -int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data); -int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data); +int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr, + unsigned long data); +int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr, + unsigned long data); /** * task_ptrace - return %PT_* flags that apply to a task diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index e6319d18a55..cacc27a0e28 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -701,7 +701,8 @@ asmlinkage long sys_nfsservctl(int cmd, asmlinkage long sys_syslog(int type, char __user *buf, int len); asmlinkage long sys_uselib(const char __user *library); asmlinkage long sys_ni_syscall(void); -asmlinkage long sys_ptrace(long request, long pid, long addr, long data); +asmlinkage long sys_ptrace(long request, long pid, unsigned long addr, + unsigned long data); asmlinkage long sys_add_key(const char __user *_type, const char __user *_description, diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 4afd9b86cc0..06981a8b271 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -404,7 +404,7 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds return copied; } -static int ptrace_setoptions(struct task_struct *child, long data) +static int ptrace_setoptions(struct task_struct *child, unsigned long data) { child->ptrace &= ~PT_TRACE_MASK; @@ -483,7 +483,8 @@ static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info) #define is_sysemu_singlestep(request) 0 #endif -static int ptrace_resume(struct task_struct *child, long request, long data) +static int ptrace_resume(struct task_struct *child, long request, + unsigned long data) { if (!valid_signal(data)) return -EIO; @@ -560,7 +561,7 @@ static int ptrace_regset(struct task_struct *task, int req, unsigned int type, #endif int ptrace_request(struct task_struct *child, long request, - long addr, long data) + unsigned long addr, unsigned long data) { int ret = -EIO; siginfo_t siginfo; @@ -693,7 +694,8 @@ static struct task_struct *ptrace_get_task_struct(pid_t pid) #define arch_ptrace_attach(child) do { } while (0) #endif -SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data) +SYSCALL_DEFINE4(ptrace, long, request, long, 
pid, unsigned long, addr, + unsigned long, data) { struct task_struct *child; long ret; @@ -734,7 +736,8 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data) return ret; } -int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data) +int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr, + unsigned long data) { unsigned long tmp; int copied; @@ -745,7 +748,8 @@ int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data) return put_user(tmp, (unsigned long __user *)data); } -int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data) +int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr, + unsigned long data) { int copied; -- cgit v1.2.3-70-g09d2 From 9fed81dc40f5a1ac2783bcc78d4029873be72894 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 27 Oct 2010 15:33:46 -0700 Subject: ptrace: cleanup ptrace_request() Use the new 'datavp' and 'datalp' variables to remove unnecessary casts. Signed-off-by: Namhyung Kim Acked-by: Roland McGrath Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/ptrace.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 06981a8b271..ea7ce0215cd 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -565,6 +565,8 @@ int ptrace_request(struct task_struct *child, long request, { int ret = -EIO; siginfo_t siginfo; + void __user *datavp = (void __user *) data; + unsigned long __user *datalp = datavp; switch (request) { case PTRACE_PEEKTEXT: @@ -581,19 +583,17 @@ int ptrace_request(struct task_struct *child, long request, ret = ptrace_setoptions(child, data); break; case PTRACE_GETEVENTMSG: - ret = put_user(child->ptrace_message, (unsigned long __user *) data); + ret = put_user(child->ptrace_message, datalp); break; case PTRACE_GETSIGINFO: ret = ptrace_getsiginfo(child, &siginfo); if (!ret) - ret = copy_siginfo_to_user((siginfo_t __user *) data, - &siginfo); + ret = copy_siginfo_to_user(datavp, &siginfo); break; case PTRACE_SETSIGINFO: - if (copy_from_user(&siginfo, (siginfo_t __user *) data, - sizeof siginfo)) + if (copy_from_user(&siginfo, datavp, sizeof siginfo)) ret = -EFAULT; else ret = ptrace_setsiginfo(child, &siginfo); @@ -624,7 +624,7 @@ int ptrace_request(struct task_struct *child, long request, } mmput(mm); - ret = put_user(tmp, (unsigned long __user *) data); + ret = put_user(tmp, datalp); break; } #endif @@ -653,7 +653,7 @@ int ptrace_request(struct task_struct *child, long request, case PTRACE_SETREGSET: { struct iovec kiov; - struct iovec __user *uiov = (struct iovec __user *) data; + struct iovec __user *uiov = datavp; if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov))) return -EFAULT; -- cgit v1.2.3-70-g09d2 From b8ed374e202e23caaf9bd77dcadc9de6447faaa8 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 27 Oct 2010 15:34:06 -0700 Subject: signals: annotate lock_task_sighand() lock_task_sighand() grabs sighand->siglock when it returns non-NULL, but unlock_task_sighand() releases it unconditionally. This leads sparse to complain about a lock context imbalance. Rename and wrap lock_task_sighand() using the __cond_lock() macro to make sparse happy.
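The __cond_lock() idiom generalizes to any try-lock style helper. Below is a schematic, compilable sketch of the pattern with illustrative 'foo' names (not kernel code); under sparse the checker sees the lock as acquired only when the helper returned non-NULL:

#include <pthread.h>

/* under sparse (__CHECKER__) the kernel defines __cond_lock() so the
 * checker is told the lock is taken only when the wrapped expression
 * is non-zero; a normal compile reduces it to the bare expression */
#ifndef __CHECKER__
# define __cond_lock(x, c)	(c)
#endif

struct foo {
	pthread_mutex_t lock;
};

/* returns f with f->lock held on success, NULL without taking it */
static struct foo *__foo_trylock(struct foo *f)
{
	return pthread_mutex_trylock(&f->lock) == 0 ? f : NULL;
}

/* wrapper mirroring the new lock_task_sighand() macro above */
#define foo_trylock(f)					\
({	struct foo *__f;				\
	__cond_lock(&(f)->lock,				\
		    (__f = __foo_trylock(f)));		\
	__f;						\
})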
Suggested-by: Eric Dumazet Signed-off-by: Namhyung Kim Cc: Ingo Molnar Cc: Oleg Nesterov Cc: Roland McGrath Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched.h | 9 ++++++++- kernel/signal.c | 3 ++- 2 files changed, 10 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/include/linux/sched.h b/include/linux/sched.h index 393ce94e54b..3ff5c8519ab 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2236,9 +2236,16 @@ static inline void task_unlock(struct task_struct *p) spin_unlock(&p->alloc_lock); } -extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk, +extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, unsigned long *flags); +#define lock_task_sighand(tsk, flags) \ +({ struct sighand_struct *__ss; \ + __cond_lock(&(tsk)->sighand->siglock, \ + (__ss = __lock_task_sighand(tsk, flags))); \ + __ss; \ +}) \ + static inline void unlock_task_sighand(struct task_struct *tsk, unsigned long *flags) { diff --git a/kernel/signal.c b/kernel/signal.c index 919562c3d6b..e921409b85a 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1105,7 +1105,8 @@ int zap_other_threads(struct task_struct *p) return count; } -struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags) +struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, + unsigned long *flags) { struct sighand_struct *sighand; -- cgit v1.2.3-70-g09d2 From b84011508360d6885a9d95a235ec77d56f133377 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 27 Oct 2010 15:34:07 -0700 Subject: signals: annotate lock context change on ptrace_stop() ptrace_stop() releases and regrabs current->sighand->siglock but was missing proper annotation. Add it. Signed-off-by: Namhyung Kim Acked-by: Roland McGrath Cc: Ingo Molnar Cc: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/signal.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/signal.c b/kernel/signal.c index e921409b85a..4e3cff10fdc 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1618,6 +1618,8 @@ static int sigkill_pending(struct task_struct *tsk) * is gone, we keep current->exit_code unless clear_code. */ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info) + __releases(&current->sighand->siglock) + __acquires(&current->sighand->siglock) { if (arch_ptrace_stop_needed(exit_code, info)) { /* -- cgit v1.2.3-70-g09d2 From 9b1bf12d5d51bca178dea21b04a0805e29d60cf1 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Wed, 27 Oct 2010 15:34:08 -0700 Subject: signals: move cred_guard_mutex from task_struct to signal_struct Oleg Nesterov pointed out that we have to prevent multiple-threads-inside-exec itself and that we can reuse ->cred_guard_mutex for it. After all, concurrent execve() has no value. Let's move ->cred_guard_mutex from task_struct to signal_struct. It naturally prevents multiple-threads-inside-exec.
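Schematically, the move relocates the lock from per-thread to per-process state, so every thread of a group serializes on the same mutex; a simplified, runnable model with stand-in types (not the actual kernel structures):

#include <pthread.h>
#include <stdio.h>

struct signal_struct {			/* one per process */
	pthread_mutex_t cred_guard_mutex;
};

struct task_struct {			/* one per thread */
	struct signal_struct *signal;	/* shared by all threads of the group */
};

/* every thread entering execve() now takes the same per-process lock */
static int enter_exec(struct task_struct *tsk)
{
	return pthread_mutex_lock(&tsk->signal->cred_guard_mutex);
}

int main(void)
{
	struct signal_struct sig = { PTHREAD_MUTEX_INITIALIZER };
	struct task_struct t1 = { &sig }, t2 = { &sig };

	enter_exec(&t1);	/* t2 would now block until t1 finishes exec */
	printf("t1 holds the per-process cred_guard_mutex\n");
	(void)t2;
	return 0;
}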
Signed-off-by: KOSAKI Motohiro Reviewed-by: Oleg Nesterov Acked-by: Roland McGrath Acked-by: David Howells Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/exec.c | 10 +++++----- fs/proc/base.c | 8 ++++---- include/linux/init_task.h | 4 ++-- include/linux/sched.h | 7 ++++--- include/linux/tracehook.h | 2 +- kernel/cred.c | 4 +--- kernel/fork.c | 2 ++ kernel/ptrace.c | 4 ++-- 8 files changed, 21 insertions(+), 20 deletions(-) (limited to 'kernel') diff --git a/fs/exec.c b/fs/exec.c index 3aa75b8888a..9722909c4d8 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1083,14 +1083,14 @@ EXPORT_SYMBOL(setup_new_exec); */ int prepare_bprm_creds(struct linux_binprm *bprm) { - if (mutex_lock_interruptible(&current->cred_guard_mutex)) + if (mutex_lock_interruptible(&current->signal->cred_guard_mutex)) return -ERESTARTNOINTR; bprm->cred = prepare_exec_creds(); if (likely(bprm->cred)) return 0; - mutex_unlock(&current->cred_guard_mutex); + mutex_unlock(&current->signal->cred_guard_mutex); return -ENOMEM; } @@ -1098,7 +1098,7 @@ void free_bprm(struct linux_binprm *bprm) { free_arg_pages(bprm); if (bprm->cred) { - mutex_unlock(&current->cred_guard_mutex); + mutex_unlock(&current->signal->cred_guard_mutex); abort_creds(bprm->cred); } kfree(bprm); @@ -1119,13 +1119,13 @@ void install_exec_creds(struct linux_binprm *bprm) * credentials; any time after this it may be unlocked. */ security_bprm_committed_creds(bprm); - mutex_unlock(&current->cred_guard_mutex); + mutex_unlock(&current->signal->cred_guard_mutex); } EXPORT_SYMBOL(install_exec_creds); /* * determine how safe it is to execute the proposed program - * - the caller must hold current->cred_guard_mutex to protect against + * - the caller must hold ->cred_guard_mutex to protect against * PTRACE_ATTACH */ int check_unsafe_exec(struct linux_binprm *bprm) diff --git a/fs/proc/base.c b/fs/proc/base.c index 9b094c1c846..f3d02ca461e 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -226,7 +226,7 @@ struct mm_struct *mm_for_maps(struct task_struct *task) { struct mm_struct *mm; - if (mutex_lock_killable(&task->cred_guard_mutex)) + if (mutex_lock_killable(&task->signal->cred_guard_mutex)) return NULL; mm = get_task_mm(task); @@ -235,7 +235,7 @@ struct mm_struct *mm_for_maps(struct task_struct *task) mmput(mm); mm = NULL; } - mutex_unlock(&task->cred_guard_mutex); + mutex_unlock(&task->signal->cred_guard_mutex); return mm; } @@ -2354,14 +2354,14 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf, goto out_free; /* Guard against adverse ptrace interaction */ - length = mutex_lock_interruptible(&task->cred_guard_mutex); + length = mutex_lock_interruptible(&task->signal->cred_guard_mutex); if (length < 0) goto out_free; length = security_setprocattr(task, (char*)file->f_path.dentry->d_name.name, (void*)page, count); - mutex_unlock(&task->cred_guard_mutex); + mutex_unlock(&task->signal->cred_guard_mutex); out_free: free_page((unsigned long) page); out: diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 2fea6c8ef6b..1f8c06ce0fa 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -29,6 +29,8 @@ extern struct fs_struct init_fs; .running = 0, \ .lock = __SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \ }, \ + .cred_guard_mutex = \ + __MUTEX_INITIALIZER(sig.cred_guard_mutex), \ } extern struct nsproxy init_nsproxy; @@ -145,8 +147,6 @@ extern struct cred init_cred; .group_leader = &tsk, \ RCU_INIT_POINTER(.real_cred, &init_cred), \ RCU_INIT_POINTER(.cred, &init_cred), \ - .cred_guard_mutex = \ - __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
.comm = "swapper", \ .thread = INIT_THREAD, \ .fs = &init_fs, \ diff --git a/include/linux/sched.h b/include/linux/sched.h index 3ff5c8519ab..be7adb7588e 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -626,6 +626,10 @@ struct signal_struct { int oom_adj; /* OOM kill score adjustment (bit shift) */ int oom_score_adj; /* OOM kill score adjustment */ + + struct mutex cred_guard_mutex; /* guard against foreign influences on + * credential calculations + * (notably. ptrace) */ }; /* Context switch must be unlocked if interrupts are to be enabled */ @@ -1305,9 +1309,6 @@ struct task_struct { * credentials (COW) */ const struct cred __rcu *cred; /* effective (overridable) subjective task * credentials (COW) */ - struct mutex cred_guard_mutex; /* guard against foreign influences on - * credential calculations - * (notably. ptrace) */ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */ char comm[TASK_COMM_LEN]; /* executable name excluding path diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index 10db0102a89..3a2e66d88a3 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -150,7 +150,7 @@ static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step) * * Return %LSM_UNSAFE_* bits applied to an exec because of tracing. * - * @task->cred_guard_mutex is held by the caller through the do_execve(). + * @task->signal->cred_guard_mutex is held by the caller through the do_execve(). */ static inline int tracehook_unsafe_exec(struct task_struct *task) { diff --git a/kernel/cred.c b/kernel/cred.c index 9a3e22641fe..6a1aa004e37 100644 --- a/kernel/cred.c +++ b/kernel/cred.c @@ -325,7 +325,7 @@ EXPORT_SYMBOL(prepare_creds); /* * Prepare credentials for current to perform an execve() - * - The caller must hold current->cred_guard_mutex + * - The caller must hold ->cred_guard_mutex */ struct cred *prepare_exec_creds(void) { @@ -384,8 +384,6 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags) struct cred *new; int ret; - mutex_init(&p->cred_guard_mutex); - if ( #ifdef CONFIG_KEYS !p->cred->thread_keyring && diff --git a/kernel/fork.c b/kernel/fork.c index e87aaaaf513..3b159c5991b 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -908,6 +908,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) sig->oom_adj = current->signal->oom_adj; sig->oom_score_adj = current->signal->oom_score_adj; + mutex_init(&sig->cred_guard_mutex); + return 0; } diff --git a/kernel/ptrace.c b/kernel/ptrace.c index ea7ce0215cd..99bbaa3e5b0 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -181,7 +181,7 @@ int ptrace_attach(struct task_struct *task) * under ptrace. */ retval = -ERESTARTNOINTR; - if (mutex_lock_interruptible(&task->cred_guard_mutex)) + if (mutex_lock_interruptible(&task->signal->cred_guard_mutex)) goto out; task_lock(task); @@ -208,7 +208,7 @@ int ptrace_attach(struct task_struct *task) unlock_tasklist: write_unlock_irq(&tasklist_lock); unlock_creds: - mutex_unlock(&task->cred_guard_mutex); + mutex_unlock(&task->signal->cred_guard_mutex); out: return retval; } -- cgit v1.2.3-70-g09d2 From d16e15f5b029fc7d03540ba0e5fb23b0abb0ebe0 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 27 Oct 2010 15:34:10 -0700 Subject: exit: add lock context annotation on find_new_reaper() find_new_reaper() releases and regrabs tasklist_lock but was missing proper annotations. Add it. 
This removes the following sparse warning: warning: context imbalance in 'find_new_reaper' - unexpected unlock Signed-off-by: Namhyung Kim Acked-by: Roland McGrath Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/exit.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/exit.c b/kernel/exit.c index 894179a32ec..b194febf579 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -703,6 +703,8 @@ static void exit_mm(struct task_struct * tsk) * space. */ static struct task_struct *find_new_reaper(struct task_struct *father) + __releases(&tasklist_lock) + __acquires(&tasklist_lock) { struct pid_namespace *pid_ns = task_active_pid_ns(father); struct task_struct *thread; -- cgit v1.2.3-70-g09d2 From 478735e38887077ac77a9756121b6ce0cb956e2f Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Wed, 27 Oct 2010 15:34:15 -0700 Subject: /proc/stat: fix scalability of irq sum of all cpu In /proc/stat, the per-IRQ event counts are shown by summing each irq's events over all cpus. But we can make use of kstat_irqs(). kstat_irqs() does the same calculation. If !CONFIG_GENERIC_HARDIRQS, it's not a big cost. (Both the number of cpus and the number of irqs are small.) If a system is very big and CONFIG_GENERIC_HARDIRQS is set, it does for_each_irq() for_each_cpu() - look up a radix tree - read desc->irq_stat[cpu] This is not efficient. This patch adds kstat_irqs() for CONFIG_GENERIC_HARDIRQS and changes the calculation to for_each_irq() look up radix tree for_each_cpu() - read desc->irq_stat[cpu] This reduces the cost. A test on a (4096 cpus, 256 nodes, 4592 irqs) host (by Jack Steiner): %time cat /proc/stat > /dev/null Before patch: 2.459 sec After patch: .561 sec [akpm@linux-foundation.org: unexport kstat_irqs, coding-style tweaks] [akpm@linux-foundation.org: fix unused variable 'per_irq_sum'] Signed-off-by: KAMEZAWA Hiroyuki Tested-by: Jack Steiner Acked-by: Jack Steiner Cc: Yinghai Lu Cc: Ingo Molnar Cc: Thomas Gleixner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/stat.c | 10 ++-------- include/linux/kernel_stat.h | 4 ++++ kernel/irq/irqdesc.c | 15 +++++++++++++++ 3 files changed, 21 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/fs/proc/stat.c b/fs/proc/stat.c index b80c620565b..e15a19c93ba 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -31,7 +31,6 @@ static int show_stat(struct seq_file *p, void *v) u64 sum_softirq = 0; unsigned int per_softirq_sums[NR_SOFTIRQS] = {0}; struct timespec boottime; - unsigned int per_irq_sum; user = nice = system = idle = iowait = irq = softirq = steal = cputime64_zero; @@ -108,13 +107,8 @@ static int show_stat(struct seq_file *p, void *v) seq_printf(p, "intr %llu", (unsigned long long)sum); /* sum again ? it could be updated?
*/ - for_each_irq_nr(j) { - per_irq_sum = 0; - for_each_possible_cpu(i) - per_irq_sum += kstat_irqs_cpu(j, i); - - seq_printf(p, " %u", per_irq_sum); - } + for_each_irq_nr(j) + seq_printf(p, " %u", kstat_irqs(j)); seq_printf(p, "\nctxt %llu\n" diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 8b9b8908553..ad54c846911 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -86,6 +86,7 @@ static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu) /* * Number of interrupts per specific IRQ source, since bootup */ +#ifndef CONFIG_GENERIC_HARDIRQS static inline unsigned int kstat_irqs(unsigned int irq) { unsigned int sum = 0; @@ -96,6 +97,9 @@ static inline unsigned int kstat_irqs(unsigned int irq) return sum; } +#else +extern unsigned int kstat_irqs(unsigned int irq); +#endif /* * Number of interrupts per cpu, since bootup diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 9d917ff7267..9988d03797f 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -393,3 +393,18 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) struct irq_desc *desc = irq_to_desc(irq); return desc ? desc->kstat_irqs[cpu] : 0; } + +#ifdef CONFIG_GENERIC_HARDIRQS +unsigned int kstat_irqs(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + int cpu; + int sum = 0; + + if (!desc) + return 0; + for_each_possible_cpu(cpu) + sum += desc->kstat_irqs[cpu]; + return sum; +} +#endif /* CONFIG_GENERIC_HARDIRQS */ -- cgit v1.2.3-70-g09d2 From 85893120699f8bae8caa12a8ee18ab5fceac978e Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 27 Oct 2010 15:34:43 -0700 Subject: delayacct: align to 8 byte boundary on 64-bit systems prepare_reply() sets up an skb for the response. The payload contains: +--------------------------------+ | genlmsghdr - 4 bytes | +--------------------------------+ | NLA header - 4 bytes | /* Aggregate header */ +-+------------------------------+ | | NLA header - 4 bytes | /* PID header */ | +------------------------------+ | | pid/tgid - 4 bytes | | +------------------------------+ | | NLA header - 4 bytes | /* stats header */ | + -----------------------------+ <- oops. aligned on 4 byte boundary | | struct taskstats - 328 bytes | +-+------------------------------+ The start of the taskstats struct must be 8 byte aligned on IA64 (and other systems with 8 byte alignment rules for 64-bit types) or runtime alignment warnings will be issued. This patch pads the pid/tgid field out to sizeof(long), which forces the alignment of taskstats. The getdelays userspace code is ok with this since it assumes 32-bit pid/tgid and then honors that header's length field. An array is used to avoid exposing kernel memory contents to userspace in the response. Signed-off-by: Jeff Mahoney Cc: Balbir Singh Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/taskstats.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/taskstats.c b/kernel/taskstats.c index 11281d5792b..5a651aa63d6 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c @@ -360,6 +360,12 @@ static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid) struct nlattr *na, *ret; int aggr; + /* If we don't pad, we end up with alignment on a 4 byte boundary. + * This causes lots of runtime warnings on systems requiring 8 byte + * alignment */ + u32 pids[2] = { pid, 0 }; + int pid_size = ALIGN(sizeof(pid), sizeof(long)); + aggr = (type == TASKSTATS_TYPE_PID) ? 
TASKSTATS_TYPE_AGGR_PID : TASKSTATS_TYPE_AGGR_TGID; @@ -367,7 +373,7 @@ static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid) na = nla_nest_start(skb, aggr); if (!na) goto err; - if (nla_put(skb, type, sizeof(pid), &pid) < 0) + if (nla_put(skb, type, pid_size, pids) < 0) goto err; ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats)); if (!ret) -- cgit v1.2.3-70-g09d2 From 9323312592cca636d7c2580dc85fa4846efa86a2 Mon Sep 17 00:00:00 2001 From: Michael Holzheu Date: Wed, 27 Oct 2010 15:34:44 -0700 Subject: taskstats: separate taskstats commands Move each taskstats command into a single function. This makes the code more readable and makes it easier to add new commands. Signed-off-by: Michael Holzheu Acked-by: Balbir Singh Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/taskstats.c | 118 +++++++++++++++++++++++++++++++++++------------------ 1 file changed, 78 insertions(+), 40 deletions(-) (limited to 'kernel') diff --git a/kernel/taskstats.c b/kernel/taskstats.c index 5a651aa63d6..9970cae04f1 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c @@ -430,39 +430,46 @@ err: return rc; } -static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info) +static int cmd_attr_register_cpumask(struct genl_info *info) { - int rc; - struct sk_buff *rep_skb; - struct taskstats *stats; - size_t size; cpumask_var_t mask; + int rc; if (!alloc_cpumask_var(&mask, GFP_KERNEL)) return -ENOMEM; - rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask); if (rc < 0) - goto free_return_rc; - if (rc == 0) { - rc = add_del_listener(info->snd_pid, mask, REGISTER); - goto free_return_rc; - } + goto out; + rc = add_del_listener(info->snd_pid, mask, REGISTER); +out: + free_cpumask_var(mask); + return rc; +} +static int cmd_attr_deregister_cpumask(struct genl_info *info) +{ + cpumask_var_t mask; + int rc; + + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) + return -ENOMEM; rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask); if (rc < 0) - goto free_return_rc; - if (rc == 0) { - rc = add_del_listener(info->snd_pid, mask, DEREGISTER); -free_return_rc: - free_cpumask_var(mask); - return rc; - } + goto out; + rc = add_del_listener(info->snd_pid, mask, DEREGISTER); +out: free_cpumask_var(mask); + return rc; +} + +static int cmd_attr_pid(struct genl_info *info) +{ + struct taskstats *stats; + struct sk_buff *rep_skb; + size_t size; + u32 pid; + int rc; - /* - * Size includes space for nested attributes - */ size = nla_total_size(sizeof(u32)) + nla_total_size(sizeof(struct taskstats)) + nla_total_size(0); @@ -471,33 +478,64 @@ free_return_rc: return rc; rc = -EINVAL; - if (info->attrs[TASKSTATS_CMD_ATTR_PID]) { - u32 pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]); - stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid); - if (!stats) - goto err; - - rc = fill_pid(pid, NULL, stats); - if (rc < 0) - goto err; - } else if (info->attrs[TASKSTATS_CMD_ATTR_TGID]) { - u32 tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]); - stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid); - if (!stats) - goto err; - - rc = fill_tgid(tgid, NULL, stats); - if (rc < 0) - goto err; - } else + pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]); + stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid); + if (!stats) + goto err; + + rc = fill_pid(pid, NULL, stats); + if (rc < 0) + goto err; + return send_reply(rep_skb, info); +err: + nlmsg_free(rep_skb); + return rc; +} + +static int cmd_attr_tgid(struct genl_info *info) +{ + struct taskstats 
*stats; + struct sk_buff *rep_skb; + size_t size; + u32 tgid; + int rc; + + size = nla_total_size(sizeof(u32)) + + nla_total_size(sizeof(struct taskstats)) + nla_total_size(0); + + rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size); + if (rc < 0) + return rc; + + rc = -EINVAL; + tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]); + stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid); + if (!stats) goto err; + rc = fill_tgid(tgid, NULL, stats); + if (rc < 0) + goto err; return send_reply(rep_skb, info); err: nlmsg_free(rep_skb); return rc; } +static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info) +{ + if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK]) + return cmd_attr_register_cpumask(info); + else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK]) + return cmd_attr_deregister_cpumask(info); + else if (info->attrs[TASKSTATS_CMD_ATTR_PID]) + return cmd_attr_pid(info); + else if (info->attrs[TASKSTATS_CMD_ATTR_TGID]) + return cmd_attr_tgid(info); + else + return -EINVAL; +} + static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk) { struct signal_struct *sig = tsk->signal; -- cgit v1.2.3-70-g09d2 From 3d9e0cf1fe007b88db55d43dfdb6839e1a029ca5 Mon Sep 17 00:00:00 2001 From: Michael Holzheu Date: Wed, 27 Oct 2010 15:34:44 -0700 Subject: taskstats: split fill_pid function Separate the finding of a task_struct by pid or tgid from filling the taskstats data. This makes the code more readable. Signed-off-by: Michael Holzheu Acked-by: Balbir Singh Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/taskstats.c | 50 +++++++++++++++++++++----------------------------- 1 file changed, 21 insertions(+), 29 deletions(-) (limited to 'kernel') diff --git a/kernel/taskstats.c b/kernel/taskstats.c index 9970cae04f1..c8231fb1570 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c @@ -175,22 +175,8 @@ static void send_cpu_listeners(struct sk_buff *skb, up_write(&listeners->sem); } -static int fill_pid(pid_t pid, struct task_struct *tsk, - struct taskstats *stats) +static void fill_stats(struct task_struct *tsk, struct taskstats *stats) { - int rc = 0; - - if (!tsk) { - rcu_read_lock(); - tsk = find_task_by_vpid(pid); - if (tsk) - get_task_struct(tsk); - rcu_read_unlock(); - if (!tsk) - return -ESRCH; - } else - get_task_struct(tsk); - memset(stats, 0, sizeof(*stats)); /* * Each accounting subsystem adds calls to its functions to @@ -209,17 +195,27 @@ static int fill_pid(pid_t pid, struct task_struct *tsk, /* fill in extended acct fields */ xacct_add_tsk(stats, tsk); +} - /* Define err: label here if needed */ - put_task_struct(tsk); - return rc; +static int fill_stats_for_pid(pid_t pid, struct taskstats *stats) +{ + struct task_struct *tsk; + rcu_read_lock(); + tsk = find_task_by_vpid(pid); + if (tsk) + get_task_struct(tsk); + rcu_read_unlock(); + if (!tsk) + return -ESRCH; + fill_stats(tsk, stats); + put_task_struct(tsk); + return 0; } -static int fill_tgid(pid_t tgid, struct task_struct *first, - struct taskstats *stats) +static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats) { - struct task_struct *tsk; + struct task_struct *tsk, *first; unsigned long flags; int rc = -ESRCH; @@ -228,8 +224,7 @@ static int fill_tgid(pid_t tgid, struct task_struct *first, * leaders who are already counted with the dead tasks */ rcu_read_lock(); - if (!first) - first = find_task_by_vpid(tgid); + first = find_task_by_vpid(tgid); if (!first || !lock_task_sighand(first, &flags)) goto out; @@ -268,7 +263,6 @@ out: return rc; } - static 
void fill_tgid_exit(struct task_struct *tsk) { unsigned long flags; @@ -483,7 +477,7 @@ static int cmd_attr_pid(struct genl_info *info) if (!stats) goto err; - rc = fill_pid(pid, NULL, stats); + rc = fill_stats_for_pid(pid, stats); if (rc < 0) goto err; return send_reply(rep_skb, info); @@ -513,7 +507,7 @@ static int cmd_attr_tgid(struct genl_info *info) if (!stats) goto err; - rc = fill_tgid(tgid, NULL, stats); + rc = fill_stats_for_tgid(tgid, stats); if (rc < 0) goto err; return send_reply(rep_skb, info); @@ -599,9 +593,7 @@ void taskstats_exit(struct task_struct *tsk, int group_dead) if (!stats) goto err; - rc = fill_pid(-1, tsk, stats); - if (rc < 0) - goto err; + fill_stats(tsk, stats); /* * Doesn't matter if tsk is the leader or the last group member leaving -- cgit v1.2.3-70-g09d2 From d57af9b2142f31a39dcfdeb30776baadfc802827 Mon Sep 17 00:00:00 2001 From: Michael Holzheu Date: Wed, 27 Oct 2010 15:34:45 -0700 Subject: taskstats: use real microsecond granularity for CPU times The taskstats interface uses microsecond granularity for the user and system time values. The conversion from cputime to the taskstats values uses the cputime_to_msecs primitive which effectively limits the granularity to milliseconds. Add the cputime_to_usecs primitive for architectures that have better, more precise CPU time values. Remove cputime_to_msecs primitive because there are no more users left. Signed-off-by: Michael Holzheu Acked-by: Balbir Singh Cc: Luck Tony Cc: Shailabh Nagar Cc: Martin Schwidefsky Cc: Oleg Nesterov Cc: Benjamin Herrenschmidt Cc: Heiko Carstens Cc: Thomas Gleixner Cc: Shailabh Nagar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/include/asm/cputime.h | 6 +++--- arch/powerpc/include/asm/cputime.h | 12 ++++++------ arch/s390/include/asm/cputime.h | 10 +++++----- include/asm-generic/cputime.h | 6 +++--- kernel/tsacct.c | 10 ++++------ 5 files changed, 21 insertions(+), 23 deletions(-) (limited to 'kernel') diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h index 7fa8a859466..6073b187528 100644 --- a/arch/ia64/include/asm/cputime.h +++ b/arch/ia64/include/asm/cputime.h @@ -56,10 +56,10 @@ typedef u64 cputime64_t; #define jiffies64_to_cputime64(__jif) ((__jif) * (NSEC_PER_SEC / HZ)) /* - * Convert cputime <-> milliseconds + * Convert cputime <-> microseconds */ -#define cputime_to_msecs(__ct) ((__ct) / NSEC_PER_MSEC) -#define msecs_to_cputime(__msecs) ((__msecs) * NSEC_PER_MSEC) +#define cputime_to_usecs(__ct) ((__ct) / NSEC_PER_USEC) +#define usecs_to_cputime(__usecs) ((__usecs) * NSEC_PER_USEC) /* * Convert cputime <-> seconds diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h index 8bdc6a9e577..1cf20bdfbec 100644 --- a/arch/powerpc/include/asm/cputime.h +++ b/arch/powerpc/include/asm/cputime.h @@ -124,23 +124,23 @@ static inline u64 cputime64_to_jiffies64(const cputime_t ct) } /* - * Convert cputime <-> milliseconds + * Convert cputime <-> microseconds */ extern u64 __cputime_msec_factor; -static inline unsigned long cputime_to_msecs(const cputime_t ct) +static inline unsigned long cputime_to_usecs(const cputime_t ct) { - return mulhdu(ct, __cputime_msec_factor); + return mulhdu(ct, __cputime_msec_factor) * USEC_PER_MSEC; } -static inline cputime_t msecs_to_cputime(const unsigned long ms) +static inline cputime_t usecs_to_cputime(const unsigned long us) { cputime_t ct; unsigned long sec; /* have to be a little careful about overflow */ - ct = ms % 1000; - sec = ms / 1000; + ct = us % 1000000; + 
sec = us / 1000000; if (ct) { ct *= tb_ticks_per_sec; do_div(ct, 1000); diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h index 8b1a52a137c..40e2ab0fa3f 100644 --- a/arch/s390/include/asm/cputime.h +++ b/arch/s390/include/asm/cputime.h @@ -73,18 +73,18 @@ cputime64_to_jiffies64(cputime64_t cputime) } /* - * Convert cputime to milliseconds and back. + * Convert cputime to microseconds and back. */ static inline unsigned int -cputime_to_msecs(const cputime_t cputime) +cputime_to_usecs(const cputime_t cputime) { - return cputime_div(cputime, 4096000); + return cputime_div(cputime, 4096); } static inline cputime_t -msecs_to_cputime(const unsigned int m) +usecs_to_cputime(const unsigned int m) { - return (cputime_t) m * 4096000; + return (cputime_t) m * 4096; } /* diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h index ca0f239f0e1..2bcc5c7c22a 100644 --- a/include/asm-generic/cputime.h +++ b/include/asm-generic/cputime.h @@ -33,10 +33,10 @@ typedef u64 cputime64_t; /* - * Convert cputime to milliseconds and back. + * Convert cputime to microseconds and back. */ -#define cputime_to_msecs(__ct) jiffies_to_msecs(__ct) -#define msecs_to_cputime(__msecs) msecs_to_jiffies(__msecs) +#define cputime_to_usecs(__ct) jiffies_to_usecs(__ct); +#define usecs_to_cputime(__msecs) usecs_to_jiffies(__msecs); /* * Convert cputime to seconds and back. diff --git a/kernel/tsacct.c b/kernel/tsacct.c index 0a67e041edf..24dc60d9fa1 100644 --- a/kernel/tsacct.c +++ b/kernel/tsacct.c @@ -63,12 +63,10 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk) stats->ac_ppid = pid_alive(tsk) ? rcu_dereference(tsk->real_parent)->tgid : 0; rcu_read_unlock(); - stats->ac_utime = cputime_to_msecs(tsk->utime) * USEC_PER_MSEC; - stats->ac_stime = cputime_to_msecs(tsk->stime) * USEC_PER_MSEC; - stats->ac_utimescaled = - cputime_to_msecs(tsk->utimescaled) * USEC_PER_MSEC; - stats->ac_stimescaled = - cputime_to_msecs(tsk->stimescaled) * USEC_PER_MSEC; + stats->ac_utime = cputime_to_usecs(tsk->utime); + stats->ac_stime = cputime_to_usecs(tsk->stime); + stats->ac_utimescaled = cputime_to_usecs(tsk->utimescaled); + stats->ac_stimescaled = cputime_to_usecs(tsk->stimescaled); stats->ac_minflt = tsk->min_flt; stats->ac_majflt = tsk->maj_flt; -- cgit v1.2.3-70-g09d2 From 5de1cb2d0f1c1e5475d2bedf65b76828f8cdde22 Mon Sep 17 00:00:00 2001 From: Huang Shijie Date: Wed, 27 Oct 2010 15:34:52 -0700 Subject: kernel/resource.c: handle reinsertion of an already-inserted resource If the same resource is inserted into the resource tree (maybe not on purpose), a dead loop is created. In this situation, the kernel does not report any warning or error :( The command below will print endlessly.
#cat /proc/iomem [akpm@linux-foundation.org: add WARN_ON()] Signed-off-by: Huang Shijie Cc: Jesse Barnes Cc: Bjorn Helgaas Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/resource.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/resource.c b/kernel/resource.c index 7b36976e5de..9c9841cb690 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -453,6 +453,8 @@ static struct resource * __insert_resource(struct resource *parent, struct resou if (first == parent) return first; + if (WARN_ON(first == new)) /* duplicated insertion */ + return first; if ((first->start > new->start) || (first->end < new->end)) break; -- cgit v1.2.3-70-g09d2 From 61d8e11e519ee7912ab59610fba1aaf08e3c1d84 Mon Sep 17 00:00:00 2001 From: Zimny Lech Date: Wed, 27 Oct 2010 15:34:53 -0700 Subject: Remove duplicate includes from many files Signed-off-by: Zimny Lech Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/mach-tegra/timer.c | 1 - arch/arm/plat-nomadik/include/plat/ste_dma40.h | 1 - arch/tile/kernel/setup.c | 2 -- arch/x86/mm/init_64.c | 1 - arch/x86/xen/enlighten.c | 1 - drivers/media/IR/lirc_dev.c | 1 - drivers/platform/x86/intel_pmic_gpio.c | 1 - include/linux/virtio_9p.h | 1 - kernel/trace/trace_kprobe.c | 1 - 9 files changed, 10 deletions(-) (limited to 'kernel') diff --git a/arch/arm/mach-tegra/timer.c b/arch/arm/mach-tegra/timer.c index 2f420210d40..9057d6fd1d3 100644 --- a/arch/arm/mach-tegra/timer.c +++ b/arch/arm/mach-tegra/timer.c @@ -27,7 +27,6 @@ #include #include -#include #include #include diff --git a/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/arch/arm/plat-nomadik/include/plat/ste_dma40.h index 5fbde4b8dc1..93a812672d9 100644 --- a/arch/arm/plat-nomadik/include/plat/ste_dma40.h +++ b/arch/arm/plat-nomadik/include/plat/ste_dma40.h @@ -14,7 +14,6 @@ #include #include #include -#include /* dev types for memcpy */ #define STEDMA40_DEV_DST_MEMORY (-1) diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index f3a50e74f9a..ae51cad12da 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c @@ -30,8 +30,6 @@ #include #include #include -#include -#include #include #include #include diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 84346200e78..71a59296af8 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -51,7 +51,6 @@ #include #include #include -#include static int __init parse_direct_gbpages_off(char *arg) { diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 44ab12dc2a1..0cd12db0b14 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -59,7 +59,6 @@ #include #include #include -#include #include #include diff --git a/drivers/media/IR/lirc_dev.c b/drivers/media/IR/lirc_dev.c index 0acf6396e06..202581808bd 100644 --- a/drivers/media/IR/lirc_dev.c +++ b/drivers/media/IR/lirc_dev.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c index f540ff96c53..e61db9dfebe 100644 --- a/drivers/platform/x86/intel_pmic_gpio.c +++ b/drivers/platform/x86/intel_pmic_gpio.c @@ -29,7 +29,6 @@ #include #include #include -#include #include #include #include diff --git a/include/linux/virtio_9p.h b/include/linux/virtio_9p.h index 1faa80d92f0..e68b439b286 100644 --- a/include/linux/virtio_9p.h +++ b/include/linux/virtio_9p.h @@ -5,7 +5,6 @@ #include #include #include -#include /* The feature bitmap for virtio 9P */ diff --git 
a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index b8d2852baa4..2dec9bcde8b 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -31,7 +31,6 @@ #include #include #include -#include #include #include "trace.h" -- cgit v1.2.3-70-g09d2 From b842f8faf6c7dc2005c6a70631c1a91bac02f180 Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Fri, 1 Oct 2010 17:23:41 -0400 Subject: jump label: Fix module __init section race Jump label uses is_module_text_address() to ensure that the module __init sections are valid before updating them. However, between the check for a valid module __init section and the subsequent jump label update, the module's __init section could be freed out from under us. We fix this potential race by adding a notifier callback to the MODULE_STATE_LIVE state. This notifier is called *after* the __init section has been run but before it is going to be freed. In the callback, the jump label code zeros the key value for any __init jump code within the module, and we add a check for a non-zero key value when we update jump labels. In this way we require no additional data structures. Thanks to Mathieu Desnoyers for pointing out this race condition. Reported-by: Mathieu Desnoyers Cc: Masami Hiramatsu Signed-off-by: Jason Baron LKML-Reference: [ Renamed remove_module_init() to remove_jump_label_module_init() as suggested by Masami Hiramatsu. ] Signed-off-by: Steven Rostedt --- kernel/jump_label.c | 41 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 40 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/jump_label.c b/kernel/jump_label.c index 7be868bf25c..be9e105345e 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -168,7 +168,8 @@ void jump_label_update(unsigned long key, enum jump_label_type type) count = e_module->nr_entries; iter = e_module->table; while (count--) { - if (kernel_text_address(iter->code)) + if (iter->key && + kernel_text_address(iter->code)) arch_jump_label_transform(iter, type); iter++; } @@ -366,6 +367,39 @@ static void remove_jump_label_module(struct module *mod) } } +static void remove_jump_label_module_init(struct module *mod) +{ + struct hlist_head *head; + struct hlist_node *node, *node_next, *module_node, *module_node_next; + struct jump_label_entry *e; + struct jump_label_module_entry *e_module; + struct jump_entry *iter; + int i, count; + + /* if the module doesn't have jump label entries, just return */ + if (!mod->num_jump_entries) + return; + + for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) { + head = &jump_label_table[i]; + hlist_for_each_entry_safe(e, node, node_next, head, hlist) { + hlist_for_each_entry_safe(e_module, module_node, + module_node_next, + &(e->modules), hlist) { + if (e_module->mod != mod) + continue; + count = e_module->nr_entries; + iter = e_module->table; + while (count--) { + if (within_module_init(iter->code, mod)) + iter->key = 0; + iter++; + } + } + } + } +} + static int jump_label_module_notify(struct notifier_block *self, unsigned long val, void *data) @@ -386,6 +420,11 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val, remove_jump_label_module(mod); mutex_unlock(&jump_label_mutex); break; + case MODULE_STATE_LIVE: + mutex_lock(&jump_label_mutex); + remove_jump_label_module_init(mod); + mutex_unlock(&jump_label_mutex); + break; } return ret; } -- cgit v1.2.3-70-g09d2 From 91bad2f8d3057482b9afb599f14421b007136960 Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Fri, 1 Oct 2010 17:23:48 -0400 Subject: jump label: Fix deadlock b/w 
jump_label_mutex vs. text_mutex register_kprobe() downs the 'text_mutex' and then calls jump_label_text_reserved(), which downs the 'jump_label_mutex'. However, the jump label code takes those mutexes in the reverse order. Fix by requiring the caller of jump_label_text_reserved() to do the jump label locking via the newly added: jump_label_lock(), jump_label_unlock(). Currently, kprobes is the only user of jump_label_text_reserved(). Reported-by: Ingo Molnar Acked-by: Masami Hiramatsu Signed-off-by: Jason Baron LKML-Reference: <759032c48d5e30c27f0bba003d09bffa8e9f28bb.1285965957.git.jbaron@redhat.com> Signed-off-by: Steven Rostedt --- include/linux/jump_label.h | 5 +++++ kernel/jump_label.c | 33 +++++++++++++++++++++------------ kernel/kprobes.c | 6 ++++++ 3 files changed, 32 insertions(+), 12 deletions(-) (limited to 'kernel') diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index b67cb180e6e..1947a121267 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -18,6 +18,8 @@ struct module; extern struct jump_entry __start___jump_table[]; extern struct jump_entry __stop___jump_table[]; +extern void jump_label_lock(void); +extern void jump_label_unlock(void); extern void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type); extern void arch_jump_label_text_poke_early(jump_label_t addr); @@ -59,6 +61,9 @@ static inline int jump_label_text_reserved(void *start, void *end) return 0; } +static inline void jump_label_lock(void) {} +static inline void jump_label_unlock(void) {} + #endif #define COND_STMT(key, stmt) \ diff --git a/kernel/jump_label.c b/kernel/jump_label.c index be9e105345e..12cce78e956 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -39,6 +39,16 @@ struct jump_label_module_entry { struct module *mod; }; +void jump_label_lock(void) +{ + mutex_lock(&jump_label_mutex); +} + +void jump_label_unlock(void) +{ + mutex_unlock(&jump_label_mutex); +} + static int jump_label_cmp(const void *a, const void *b) { const struct jump_entry *jea = a; @@ -152,7 +162,7 @@ void jump_label_update(unsigned long key, enum jump_label_type type) struct jump_label_module_entry *e_module; int count; - mutex_lock(&jump_label_mutex); + jump_label_lock(); entry = get_jump_label_entry((jump_label_t)key); if (entry) { count = entry->nr_entries; @@ -175,7 +185,7 @@ void jump_label_update(unsigned long key, enum jump_label_type type) } } } - mutex_unlock(&jump_label_mutex); + jump_label_unlock(); } static int addr_conflict(struct jump_entry *entry, void *start, void *end) @@ -232,6 +242,7 @@ out: * overlaps with any of the jump label patch addresses. Code * that wants to modify kernel text should first verify that * it does not overlap with any of the jump label addresses. + * Caller must hold jump_label_mutex. 
* * returns 1 if there is an overlap, 0 otherwise */ @@ -242,7 +253,6 @@ int jump_label_text_reserved(void *start, void *end) struct jump_entry *iter_stop = __start___jump_table; int conflict = 0; - mutex_lock(&jump_label_mutex); iter = iter_start; while (iter < iter_stop) { if (addr_conflict(iter, start, end)) { @@ -257,7 +267,6 @@ int jump_label_text_reserved(void *start, void *end) conflict = module_conflict(start, end); #endif out: - mutex_unlock(&jump_label_mutex); return conflict; } @@ -268,7 +277,7 @@ static __init int init_jump_label(void) struct jump_entry *iter_stop = __stop___jump_table; struct jump_entry *iter; - mutex_lock(&jump_label_mutex); + jump_label_lock(); ret = build_jump_label_hashtable(__start___jump_table, __stop___jump_table); iter = iter_start; @@ -276,7 +285,7 @@ static __init int init_jump_label(void) arch_jump_label_text_poke_early(iter->code); iter++; } - mutex_unlock(&jump_label_mutex); + jump_label_unlock(); return ret; } early_initcall(init_jump_label); @@ -409,21 +418,21 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val, switch (val) { case MODULE_STATE_COMING: - mutex_lock(&jump_label_mutex); + jump_label_lock(); ret = add_jump_label_module(mod); if (ret) remove_jump_label_module(mod); - mutex_unlock(&jump_label_mutex); + jump_label_unlock(); break; case MODULE_STATE_GOING: - mutex_lock(&jump_label_mutex); + jump_label_lock(); remove_jump_label_module(mod); - mutex_unlock(&jump_label_mutex); + jump_label_unlock(); break; case MODULE_STATE_LIVE: - mutex_lock(&jump_label_mutex); + jump_label_lock(); remove_jump_label_module_init(mod); - mutex_unlock(&jump_label_mutex); + jump_label_unlock(); break; } return ret; diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 99865c33a60..9437e14f36b 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1146,13 +1146,16 @@ int __kprobes register_kprobe(struct kprobe *p) return ret; preempt_disable(); + jump_label_lock(); if (!kernel_text_address((unsigned long) p->addr) || in_kprobes_functions((unsigned long) p->addr) || ftrace_text_reserved(p->addr, p->addr) || jump_label_text_reserved(p->addr, p->addr)) { preempt_enable(); + jump_label_unlock(); return -EINVAL; } + jump_label_unlock(); /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */ p->flags &= KPROBE_FLAG_DISABLED; @@ -1187,6 +1190,8 @@ int __kprobes register_kprobe(struct kprobe *p) INIT_LIST_HEAD(&p->list); mutex_lock(&kprobe_mutex); + jump_label_lock(); /* needed to call jump_label_text_reserved() */ + get_online_cpus(); /* For avoiding text_mutex deadlock. 
*/ mutex_lock(&text_mutex); @@ -1214,6 +1219,7 @@ int __kprobes register_kprobe(struct kprobe *p) out: mutex_unlock(&text_mutex); put_online_cpus(); + jump_label_unlock(); mutex_unlock(&kprobe_mutex); if (probed_mod) -- cgit v1.2.3-70-g09d2 From f7e835710ab5f6e43933c983f38f2d2e262b718c Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 26 Jul 2010 13:23:11 +0400 Subject: convert cgroup and cpuset Signed-off-by: Al Viro --- kernel/cgroup.c | 11 +++++------ kernel/cpuset.c | 13 ++++++------- 2 files changed, 11 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 5cf366965d0..66a416b42c1 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1460,9 +1460,9 @@ static int cgroup_get_rootdir(struct super_block *sb) return 0; } -static int cgroup_get_sb(struct file_system_type *fs_type, +static struct dentry *cgroup_mount(struct file_system_type *fs_type, int flags, const char *unused_dev_name, - void *data, struct vfsmount *mnt) + void *data) { struct cgroup_sb_opts opts; struct cgroupfs_root *root; @@ -1596,10 +1596,9 @@ static int cgroup_get_sb(struct file_system_type *fs_type, drop_parsed_module_refcounts(opts.subsys_bits); } - simple_set_mnt(mnt, sb); kfree(opts.release_agent); kfree(opts.name); - return 0; + return dget(sb->s_root); drop_new_super: deactivate_locked_super(sb); @@ -1608,7 +1607,7 @@ static int cgroup_get_sb(struct file_system_type *fs_type, out_err: kfree(opts.release_agent); kfree(opts.name); - return ret; + return ERR_PTR(ret); } static void cgroup_kill_sb(struct super_block *sb) { @@ -1658,7 +1657,7 @@ static void cgroup_kill_sb(struct super_block *sb) { static struct file_system_type cgroup_fs_type = { .name = "cgroup", - .get_sb = cgroup_get_sb, + .mount = cgroup_mount, .kill_sb = cgroup_kill_sb, }; diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 51b143e2a07..4349935c2ad 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -231,18 +231,17 @@ static DEFINE_SPINLOCK(cpuset_buffer_lock); * users. If someone tries to mount the "cpuset" filesystem, we * silently switch it to mount "cgroup" instead */ -static int cpuset_get_sb(struct file_system_type *fs_type, - int flags, const char *unused_dev_name, - void *data, struct vfsmount *mnt) +static struct dentry *cpuset_mount(struct file_system_type *fs_type, + int flags, const char *unused_dev_name, void *data) { struct file_system_type *cgroup_fs = get_fs_type("cgroup"); - int ret = -ENODEV; + struct dentry *ret = ERR_PTR(-ENODEV); if (cgroup_fs) { char mountopts[] = "cpuset,noprefix," "release_agent=/sbin/cpuset_release_agent"; - ret = cgroup_fs->get_sb(cgroup_fs, flags, - unused_dev_name, mountopts, mnt); + ret = cgroup_fs->mount(cgroup_fs, flags, + unused_dev_name, mountopts); put_filesystem(cgroup_fs); } return ret; @@ -250,7 +249,7 @@ static int cpuset_get_sb(struct file_system_type *fs_type, static struct file_system_type cpuset_fs_type = { .name = "cpuset", - .get_sb = cpuset_get_sb, + .mount = cpuset_mount, }; /* -- cgit v1.2.3-70-g09d2 From de31c3ca8179d7c21def7ecb56e4fec0c8659d36 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 18 Oct 2010 10:38:58 -0400 Subject: jump label: Fix error with preempt disable holding mutex Kprobes and jump label were having a race between mutexes that was fixed by reordering the jump label. But this reordering moved the jump label mutex into a preempt disable location. 
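Taking a mutex with preemption disabled is invalid because a mutex may sleep; schematically, and simplified from the actual code, the broken ordering was:

	preempt_disable();
	jump_label_lock();	/* mutex_lock() - may sleep with preemption off */

	/* the fix takes the mutex first, then disables preemption: */
	jump_label_lock();
	preempt_disable();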
This patch does a little fiddling to move the grabbing of the jump label mutex from inside the preempt disable section and still keep the order correct between the mutex and the kprobes lock. Reported-by: Ingo Molnar Acked-by: Masami Hiramatsu Cc: Jason Baron Signed-off-by: Steven Rostedt --- kernel/kprobes.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 9437e14f36b..9737a76e106 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1145,17 +1145,13 @@ int __kprobes register_kprobe(struct kprobe *p) if (ret) return ret; - preempt_disable(); jump_label_lock(); + preempt_disable(); if (!kernel_text_address((unsigned long) p->addr) || in_kprobes_functions((unsigned long) p->addr) || ftrace_text_reserved(p->addr, p->addr) || - jump_label_text_reserved(p->addr, p->addr)) { - preempt_enable(); - jump_label_unlock(); - return -EINVAL; - } - jump_label_unlock(); + jump_label_text_reserved(p->addr, p->addr)) + goto fail_with_jump_label; /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */ p->flags &= KPROBE_FLAG_DISABLED; @@ -1169,10 +1165,9 @@ int __kprobes register_kprobe(struct kprobe *p) * We must hold a refcount of the probed module while updating * its code to prohibit unexpected unloading. */ - if (unlikely(!try_module_get(probed_mod))) { - preempt_enable(); - return -EINVAL; - } + if (unlikely(!try_module_get(probed_mod))) + goto fail_with_jump_label; + /* * If the module freed .init.text, we couldn't insert * kprobes in there. */ if (within_module_init((unsigned long)p->addr, probed_mod) && probed_mod->state != MODULE_STATE_COMING) { module_put(probed_mod); - preempt_enable(); - return -EINVAL; + goto fail_with_jump_label; } } preempt_enable(); + jump_label_unlock(); p->nmissed = 0; INIT_LIST_HEAD(&p->list); @@ -1226,6 +1221,11 @@ out: module_put(probed_mod); return ret; + +fail_with_jump_label: + preempt_enable(); + jump_label_unlock(); + return -EINVAL; } EXPORT_SYMBOL_GPL(register_kprobe); -- cgit v1.2.3-70-g09d2 From 95bcd683fb694a3e2d0538bf486430a0dfbb4111 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 29 Oct 2010 11:02:43 -0400 Subject: jump label: Make arch_jump_label_text_poke_early() optional Some archs do not need to do anything special for jump labels on startup (like MIPS). This patch adds a weak function stub for arch_jump_label_text_poke_early(). Cc: Jason Baron Cc: David Miller Cc: David Daney Suggested-by: Thomas Gleixner LKML-Reference: <1286218615-24011-2-git-send-email-ddaney@caviumnetworks.com> LKML-Reference: <20101015201037.703989993@goodmis.org> Signed-off-by: Steven Rostedt --- kernel/jump_label.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'kernel') diff --git a/kernel/jump_label.c b/kernel/jump_label.c index 12cce78e956..3b79bd93833 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -270,6 +270,13 @@ out: return conflict; } +/* + * Not all archs need this. + */ +void __weak arch_jump_label_text_poke_early(jump_label_t addr) +{ +} + static __init int init_jump_label(void) { int ret; -- cgit v1.2.3-70-g09d2 From 931ea24819f2bd40cca2dc214558bfcc3c91549e Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Fri, 29 Oct 2010 08:04:16 -0500 Subject: kdb: fix per_cpu command to remove suppress mask Rusty pointed out that the per_cpu command uses up lots of space on the stack and the cpu suppress mask is probably not needed.
This patch removes the need for the suppress mask as well as fixing up the following problems with the kdb per_cpu command: * The per_cpu command should allow an address as an argument * When you have more data than can be displayed on one screen allow the user to break out of the print loop. Reported-by: Rusty Russell Signed-off-by: Jason Wessel --- kernel/debug/kdb/kdb_main.c | 46 +++++++++++---------------------------------- 1 file changed, 11 insertions(+), 35 deletions(-) (limited to 'kernel') diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index d7bda21a106..9755ac05e44 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -2603,20 +2603,17 @@ static int kdb_summary(int argc, const char **argv) */ static int kdb_per_cpu(int argc, const char **argv) { - char buf[256], fmtstr[64]; - kdb_symtab_t symtab; - cpumask_t suppress = CPU_MASK_NONE; - int cpu, diag; - unsigned long addr, val, bytesperword = 0, whichcpu = ~0UL; + char fmtstr[64]; + int cpu, diag, nextarg = 1; + unsigned long addr, symaddr, val, bytesperword = 0, whichcpu = ~0UL; if (argc < 1 || argc > 3) return KDB_ARGCOUNT; - snprintf(buf, sizeof(buf), "per_cpu__%s", argv[1]); - if (!kdbgetsymval(buf, &symtab)) { - kdb_printf("%s is not a per_cpu variable\n", argv[1]); - return KDB_BADADDR; - } + diag = kdbgetaddrarg(argc, argv, &nextarg, &symaddr, NULL, NULL); + if (diag) + return diag; + if (argc >= 2) { diag = kdbgetularg(argv[2], &bytesperword); if (diag) @@ -2649,46 +2646,25 @@ static int kdb_per_cpu(int argc, const char **argv) #define KDB_PCU(cpu) 0 #endif #endif - for_each_online_cpu(cpu) { + if (KDB_FLAG(CMD_INTERRUPT)) + return 0; + if (whichcpu != ~0UL && whichcpu != cpu) continue; - addr = symtab.sym_start + KDB_PCU(cpu); + addr = symaddr + KDB_PCU(cpu); diag = kdb_getword(&val, addr, bytesperword); if (diag) { kdb_printf("%5d " kdb_bfd_vma_fmt0 " - unable to " "read, diag=%d\n", cpu, addr, diag); continue; } -#ifdef CONFIG_SMP - if (!val) { - cpu_set(cpu, suppress); - continue; - } -#endif /* CONFIG_SMP */ kdb_printf("%5d ", cpu); kdb_md_line(fmtstr, addr, bytesperword == KDB_WORD_SIZE, 1, bytesperword, 1, 1, 0); } - if (cpus_weight(suppress) == 0) - return 0; - kdb_printf("Zero suppressed cpu(s):"); - for (cpu = first_cpu(suppress); cpu < num_possible_cpus(); - cpu = next_cpu(cpu, suppress)) { - kdb_printf(" %d", cpu); - if (cpu == num_possible_cpus() - 1 || - next_cpu(cpu, suppress) != cpu + 1) - continue; - while (cpu < num_possible_cpus() && - next_cpu(cpu, suppress) == cpu + 1) - ++cpu; - kdb_printf("-%d", cpu); - } - kdb_printf("\n"); - #undef KDB_PCU - return 0; } -- cgit v1.2.3-70-g09d2 From 578bd4dfcda63d2ef15f025f1d5d55c0e56b9660 Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Fri, 29 Oct 2010 13:14:41 -0500 Subject: kdb: Fix early debugging crash regression kdb_current can legally be equal to NULL in the early boot of the x86 arch. The problem can be observed by booting with the kernel arguments: earlyprintk=vga ekgdboc=kbd kgdbwait The kdb shell will oops on entry and recursively fault because it cannot get past the final stage of shell initialization.
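The cure is the usual NULL-safe print pattern; in sketch form (the one-line diff below does the same thing with a conditional expression):

	if (kdb_current)
		kdb_printf("current=0x%p, pid %d", kdb_current, kdb_current->pid);
	else
		kdb_printf("current=0x%p, pid 0", kdb_current);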
Signed-off-by: Jason Wessel --- kernel/debug/kdb/kdb_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 9755ac05e44..37755d62192 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -1127,7 +1127,7 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs, /* special case below */ } else { kdb_printf("\nEntering kdb (current=0x%p, pid %d) ", - kdb_current, kdb_current->pid); + kdb_current, kdb_current ? kdb_current->pid : 0); #if defined(CONFIG_SMP) kdb_printf("on processor %d ", raw_smp_processor_id()); #endif -- cgit v1.2.3-70-g09d2 From d7ba979d45272385ce0fdf141d922e61ff48e07b Mon Sep 17 00:00:00 2001 From: Dongdong Deng Date: Wed, 18 Aug 2010 06:02:00 -0500 Subject: debug_core,x86,blackfin: Clean up hw debug disable API kgdb_disable_hw_debug() was an architecture-specific function for disabling all hardware breakpoints on a per-cpu basis when entering the debug core. This patch will remove the weak function kgdb_disable_hw_debug() and change it into a callback which lives with the rest of the hw breakpoint callbacks in struct kgdb_arch. Signed-off-by: Dongdong Deng Signed-off-by: Jason Wessel --- arch/blackfin/kernel/kgdb.c | 3 ++- arch/x86/kernel/kgdb.c | 3 ++- include/linux/kgdb.h | 13 +++---------- kernel/debug/debug_core.c | 16 +++------------- 4 files changed, 10 insertions(+), 25 deletions(-) (limited to 'kernel') diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c index 08bc44ea688..edae461b1c5 100644 --- a/arch/blackfin/kernel/kgdb.c +++ b/arch/blackfin/kernel/kgdb.c @@ -320,7 +320,7 @@ static void bfin_correct_hw_break(void) } } -void kgdb_disable_hw_debug(struct pt_regs *regs) +static void bfin_disable_hw_debug(struct pt_regs *regs) { /* Disable hardware debugging while we are in kgdb */ bfin_write_WPIACTL(0); @@ -406,6 +406,7 @@ struct kgdb_arch arch_kgdb_ops = { #endif .set_hw_breakpoint = bfin_set_hw_break, .remove_hw_breakpoint = bfin_remove_hw_break, + .disable_hw_break = bfin_disable_hw_debug, .remove_all_hw_break = bfin_remove_all_hw_break, .correct_hw_break = bfin_correct_hw_break, }; diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index d81cfebb848..ec592caac4b 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c @@ -387,7 +387,7 @@ kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype) * disable hardware debugging while it is processing gdb packets or * handling exception. */ -void kgdb_disable_hw_debug(struct pt_regs *regs) +static void kgdb_disable_hw_debug(struct pt_regs *regs) { int i; int cpu = raw_smp_processor_id(); @@ -724,6 +724,7 @@ struct kgdb_arch arch_kgdb_ops = { .flags = KGDB_HW_BREAKPOINT, .set_hw_breakpoint = kgdb_set_hw_break, .remove_hw_breakpoint = kgdb_remove_hw_break, + .disable_hw_break = kgdb_disable_hw_debug, .remove_all_hw_break = kgdb_remove_all_hw_break, .correct_hw_break = kgdb_correct_hw_break, }; diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h index cc96f0f23e0..092e4250a45 100644 --- a/include/linux/kgdb.h +++ b/include/linux/kgdb.h @@ -35,16 +35,6 @@ struct pt_regs; */ extern int kgdb_skipexception(int exception, struct pt_regs *regs); -/** - * kgdb_disable_hw_debug - (optional) Disable hardware debugging hook - * @regs: Current &struct pt_regs. - * - * This function will be called if the particular architecture must - * disable hardware debugging while it is processing gdb packets or - * handling exception.
- */ -extern void kgdb_disable_hw_debug(struct pt_regs *regs); - struct tasklet_struct; struct task_struct; struct uart_port; @@ -243,6 +233,8 @@ extern void kgdb_arch_late(void); * breakpoint. * @remove_hw_breakpoint: Allow an architecture to specify how to remove a * hardware breakpoint. + * @disable_hw_break: Allow an architecture to specify how to disable + * hardware breakpoints for a single cpu. * @remove_all_hw_break: Allow an architecture to specify how to remove all * hardware breakpoints. * @correct_hw_break: Allow an architecture to specify how to correct the @@ -256,6 +248,7 @@ struct kgdb_arch { int (*remove_breakpoint)(unsigned long, char *); int (*set_hw_breakpoint)(unsigned long, int, enum kgdb_bptype); int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype); + void (*disable_hw_break)(struct pt_regs *regs); void (*remove_all_hw_break)(void); void (*correct_hw_break)(void); }; diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index fec596da9bd..cefd4a11f6d 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -209,18 +209,6 @@ int __weak kgdb_skipexception(int exception, struct pt_regs *regs) return 0; } -/** - * kgdb_disable_hw_debug - Disable hardware debugging while we in kgdb. - * @regs: Current &struct pt_regs. - * - * This function will be called if the particular architecture must - * disable hardware debugging while it is processing gdb packets or - * handling exception. - */ -void __weak kgdb_disable_hw_debug(struct pt_regs *regs) -{ -} - /* * Some architectures need cache flushes when we set/clear a * breakpoint: */ @@ -484,7 +472,9 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs, atomic_inc(&masters_in_kgdb); else atomic_inc(&slaves_in_kgdb); - kgdb_disable_hw_debug(ks->linux_regs); + + if (arch_kgdb_ops.disable_hw_break) + arch_kgdb_ops.disable_hw_break(regs); acquirelock: /* -- cgit v1.2.3-70-g09d2 From d29be158a68254f58cf1fbf60ce1e89557a321aa Mon Sep 17 00:00:00 2001 From: Miloslav Trmac Date: Thu, 16 Sep 2010 18:14:11 -0400 Subject: Audit: add support to match lsm labels on user audit messages Add support for matching by security label (e.g. SELinux context) of the sender of a user-space audit record. The audit filter code already allows user space to configure such filters, but they were ignored during evaluation. This patch implements evaluation of these filters.
For example, after application of this patch, PAM authentication logs caused by cron can be disabled using auditctl -a user,never -F subj_type=crond_t Signed-off-by: Miloslav Trmac Acked-by: Eric Paris Signed-off-by: Al Viro --- kernel/auditfilter.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'kernel') diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index eb7675499fb..add2819af71 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c @@ -1252,6 +1252,18 @@ static int audit_filter_user_rules(struct netlink_skb_parms *cb, case AUDIT_LOGINUID: result = audit_comparator(cb->loginuid, f->op, f->val); break; + case AUDIT_SUBJ_USER: + case AUDIT_SUBJ_ROLE: + case AUDIT_SUBJ_TYPE: + case AUDIT_SUBJ_SEN: + case AUDIT_SUBJ_CLR: + if (f->lsm_rule) + result = security_audit_rule_match(cb->sid, + f->type, + f->op, + f->lsm_rule, + NULL); + break; } if (!result) -- cgit v1.2.3-70-g09d2 From b8800aa5d9c7e4e2869321c77b80f322a0d9663a Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Wed, 20 Oct 2010 17:23:50 -0700 Subject: audit: make functions static I was doing some namespace checks and found some simple stuff in audit that could be cleaned up. Make some functions static, and put const on make_reply payload arg. Signed-off-by: Stephen Hemminger Signed-off-by: Al Viro --- kernel/audit.c | 6 +++--- kernel/audit.h | 5 +---- kernel/audit_watch.c | 4 ++-- 3 files changed, 6 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/audit.c b/kernel/audit.c index d96045789b5..a300931fc45 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -506,7 +506,7 @@ int audit_send_list(void *_dest) } struct sk_buff *audit_make_reply(int pid, int seq, int type, int done, - int multi, void *payload, int size) + int multi, const void *payload, int size) { struct sk_buff *skb; struct nlmsghdr *nlh; @@ -555,8 +555,8 @@ static int audit_send_reply_thread(void *arg) * Allocates an skb, builds the netlink message, and sends it to the pid. * No failure notifications. */ -void audit_send_reply(int pid, int seq, int type, int done, int multi, - void *payload, int size) +static void audit_send_reply(int pid, int seq, int type, int done, int multi, + const void *payload, int size) { struct sk_buff *skb; struct task_struct *tsk; diff --git a/kernel/audit.h b/kernel/audit.h index f7206db4e13..91e7071c4d2 100644 --- a/kernel/audit.h +++ b/kernel/audit.h @@ -84,10 +84,7 @@ extern int audit_compare_dname_path(const char *dname, const char *path, int *dirlen); extern struct sk_buff * audit_make_reply(int pid, int seq, int type, int done, int multi, - void *payload, int size); -extern void audit_send_reply(int pid, int seq, int type, - int done, int multi, - void *payload, int size); + const void *payload, int size); extern void audit_panic(const char *message); struct audit_netlink_list { diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index f0c9b2e7542..d2e3c786646 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -60,7 +60,7 @@ struct audit_parent { }; /* fsnotify handle. */ -struct fsnotify_group *audit_watch_group; +static struct fsnotify_group *audit_watch_group; /* fsnotify events we care about. 
*/ #define AUDIT_FS_WATCH (FS_MOVE | FS_CREATE | FS_DELETE | FS_DELETE_SELF |\ @@ -123,7 +123,7 @@ void audit_put_watch(struct audit_watch *watch) } } -void audit_remove_watch(struct audit_watch *watch) +static void audit_remove_watch(struct audit_watch *watch) { list_del(&watch->wlist); audit_put_parent(watch->parent); -- cgit v1.2.3-70-g09d2 From f7a998a9491f2da1d3e44d150aa611d10093da4f Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 30 Oct 2010 02:18:32 -0400 Subject: in untag_chunk() we need to do alloc_chunk() a bit earlier ... while we are not holding spinlocks. Signed-off-by: Al Viro --- kernel/audit_tree.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index 7f18d3a4527..37b2bea170c 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -223,7 +223,7 @@ static void untag_chunk(struct node *p) { struct audit_chunk *chunk = find_chunk(p); struct fsnotify_mark *entry = &chunk->mark; - struct audit_chunk *new; + struct audit_chunk *new = NULL; struct audit_tree *owner; int size = chunk->count - 1; int i, j; @@ -232,9 +232,14 @@ static void untag_chunk(struct node *p) spin_unlock(&hash_lock); + if (size) + new = alloc_chunk(size); + spin_lock(&entry->lock); if (chunk->dead || !entry->i.inode) { spin_unlock(&entry->lock); + if (new) + free_chunk(new); goto out; } @@ -255,9 +260,9 @@ static void untag_chunk(struct node *p) goto out; } - new = alloc_chunk(size); if (!new) goto Fallback; + fsnotify_duplicate_mark(&new->mark, entry); if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) { free_chunk(new); -- cgit v1.2.3-70-g09d2 From 3c80fe4ac9cfb13b1bfa4edf1544e8b656716694 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 9 Dec 2009 14:19:31 +0000 Subject: audit: Call tty_audit_push_task() outside preempt disabled While auditing all tasklist_lock read_lock sites I stumbled over the following call chain: audit_prepare_user_tty() read_lock(&tasklist_lock); tty_audit_push_task(); mutex_lock(&buf->mutex); --> buf->mutex is locked with preemption disabled. Solve this by acquiring a reference to the task struct under rcu_read_lock and calling tty_audit_push_task() outside of the preempt disabled region. Move all code which needs to be protected by sighand lock into tty_audit_push_task() and use lock/unlock_sighand as we do not hold tasklist_lock. Signed-off-by: Thomas Gleixner Cc: Al Viro Cc: Eric Paris Cc: Oleg Nesterov Signed-off-by: Al Viro --- drivers/char/tty_audit.c | 38 ++++++++++++++++++++++++++++---------- include/linux/tty.h | 9 +++++---- kernel/audit.c | 25 +++++++++---------------- 3 files changed, 42 insertions(+), 30 deletions(-) (limited to 'kernel') diff --git a/drivers/char/tty_audit.c b/drivers/char/tty_audit.c index 1b8ee590b4c..f64582b0f62 100644 --- a/drivers/char/tty_audit.c +++ b/drivers/char/tty_audit.c @@ -188,25 +188,43 @@ void tty_audit_tiocsti(struct tty_struct *tty, char ch) } /** - * tty_audit_push_task - Flush task's pending audit data + * tty_audit_push_task - Flush task's pending audit data + * @tsk: task pointer + * @loginuid: sender login uid + * @sessionid: sender session id + * + * Called with a ref on @tsk held. Try to lock sighand and get a + * reference to the tty audit buffer if available. + * Flush the buffer or return an appropriate error code.
*/ -void tty_audit_push_task(struct task_struct *tsk, uid_t loginuid, u32 sessionid) +int tty_audit_push_task(struct task_struct *tsk, uid_t loginuid, u32 sessionid) { - struct tty_audit_buf *buf; + struct tty_audit_buf *buf = ERR_PTR(-EPERM); + unsigned long flags; - spin_lock_irq(&tsk->sighand->siglock); - buf = tsk->signal->tty_audit_buf; - if (buf) - atomic_inc(&buf->count); - spin_unlock_irq(&tsk->sighand->siglock); - if (!buf) - return; + if (!lock_task_sighand(tsk, &flags)) + return -ESRCH; + + if (tsk->signal->audit_tty) { + buf = tsk->signal->tty_audit_buf; + if (buf) + atomic_inc(&buf->count); + } + unlock_task_sighand(tsk, &flags); + + /* + * Return 0 when signal->audit_tty set + * but tsk->signal->tty_audit_buf == NULL. + */ + if (!buf || IS_ERR(buf)) + return PTR_ERR(buf); mutex_lock(&buf->mutex); tty_audit_buf_push(tsk, loginuid, sessionid, buf); mutex_unlock(&buf->mutex); tty_audit_buf_put(buf); + return 0; } /** diff --git a/include/linux/tty.h b/include/linux/tty.h index e500171c745..2a754748dd5 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -541,8 +541,8 @@ extern void tty_audit_exit(void); extern void tty_audit_fork(struct signal_struct *sig); extern void tty_audit_tiocsti(struct tty_struct *tty, char ch); extern void tty_audit_push(struct tty_struct *tty); -extern void tty_audit_push_task(struct task_struct *tsk, - uid_t loginuid, u32 sessionid); +extern int tty_audit_push_task(struct task_struct *tsk, + uid_t loginuid, u32 sessionid); #else static inline void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, size_t size) @@ -560,9 +560,10 @@ static inline void tty_audit_fork(struct signal_struct *sig) static inline void tty_audit_push(struct tty_struct *tty) { } -static inline void tty_audit_push_task(struct task_struct *tsk, - uid_t loginuid, u32 sessionid) +static inline int tty_audit_push_task(struct task_struct *tsk, + uid_t loginuid, u32 sessionid) { + return 0; } #endif diff --git a/kernel/audit.c b/kernel/audit.c index a300931fc45..8429afea37b 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -467,23 +467,16 @@ static int audit_prepare_user_tty(pid_t pid, uid_t loginuid, u32 sessionid) struct task_struct *tsk; int err; - read_lock(&tasklist_lock); + rcu_read_lock(); tsk = find_task_by_vpid(pid); - err = -ESRCH; - if (!tsk) - goto out; - err = 0; - - spin_lock_irq(&tsk->sighand->siglock); - if (!tsk->signal->audit_tty) - err = -EPERM; - spin_unlock_irq(&tsk->sighand->siglock); - if (err) - goto out; - - tty_audit_push_task(tsk, loginuid, sessionid); -out: - read_unlock(&tasklist_lock); + if (!tsk) { + rcu_read_unlock(); + return -ESRCH; + } + get_task_struct(tsk); + rcu_read_unlock(); + err = tty_audit_push_task(tsk, loginuid, sessionid); + put_task_struct(tsk); return err; } -- cgit v1.2.3-70-g09d2 From 207032051a5ed38df332729ba42e98e9a1e60434 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 9 Dec 2009 14:19:35 +0000 Subject: audit: Do not send uninitialized data for AUDIT_TTY_GET audit_receive_msg() sends uninitialized data for AUDIT_TTY_GET when the task was not found. Send reply only when task was found. 
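The underlying bug is the classic uninitialized-stack infoleak shape; in outline (an illustration, not the exact audit code):

	struct audit_tty_status s;		/* stack memory, never zeroed */

	tsk = find_task_by_vpid(pid);
	if (!tsk)
		err = -ESRCH;			/* s.enabled is never written... */
	audit_send_reply(..., &s, sizeof(s));	/* ...yet s was copied out anyway */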
Signed-off-by: Thomas Gleixner Cc: Al Viro Cc: Eric Paris Signed-off-by: Al Viro --- kernel/audit.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/audit.c b/kernel/audit.c index 8429afea37b..57f4038694d 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -884,8 +884,10 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) spin_unlock_irq(&tsk->sighand->siglock); } read_unlock(&tasklist_lock); - audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0, - &s, sizeof(s)); + + if (!err) + audit_send_reply(NETLINK_CB(skb).pid, seq, + AUDIT_TTY_GET, 0, 0, &s, sizeof(s)); break; } case AUDIT_TTY_SET: { -- cgit v1.2.3-70-g09d2 From ab263f47c9781a644de8b28013434b645082922e Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 9 Dec 2009 14:19:41 +0000 Subject: audit: Use rcu for task lookup protection Protect the task lookups in audit_receive_msg() with rcu_read_lock() instead of tasklist_lock and use lock/unlock_sighand to protect against the exit race. Signed-off-by: Thomas Gleixner Cc: Al Viro Cc: Eric Paris Cc: Oleg Nesterov Signed-off-by: Al Viro --- kernel/audit.c | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) (limited to 'kernel') diff --git a/kernel/audit.c b/kernel/audit.c index 57f4038694d..77770a034d5 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -873,17 +873,16 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) case AUDIT_TTY_GET: { struct audit_tty_status s; struct task_struct *tsk; + unsigned long flags; - read_lock(&tasklist_lock); + rcu_read_lock(); tsk = find_task_by_vpid(pid); - if (!tsk) - err = -ESRCH; - else { - spin_lock_irq(&tsk->sighand->siglock); + if (tsk && lock_task_sighand(tsk, &flags)) { s.enabled = tsk->signal->audit_tty != 0; - spin_unlock_irq(&tsk->sighand->siglock); - } - read_unlock(&tasklist_lock); + unlock_task_sighand(tsk, &flags); + } else + err = -ESRCH; + rcu_read_unlock(); if (!err) audit_send_reply(NETLINK_CB(skb).pid, seq, @@ -893,22 +892,21 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) case AUDIT_TTY_SET: { struct audit_tty_status *s; struct task_struct *tsk; + unsigned long flags; if (nlh->nlmsg_len < sizeof(struct audit_tty_status)) return -EINVAL; s = data; if (s->enabled != 0 && s->enabled != 1) return -EINVAL; - read_lock(&tasklist_lock); + rcu_read_lock(); tsk = find_task_by_vpid(pid); - if (!tsk) - err = -ESRCH; - else { - spin_lock_irq(&tsk->sighand->siglock); + if (tsk && lock_task_sighand(tsk, &flags)) { tsk->signal->audit_tty = s->enabled != 0; - spin_unlock_irq(&tsk->sighand->siglock); - } - read_unlock(&tasklist_lock); + unlock_task_sighand(tsk, &flags); + } else + err = -ESRCH; + rcu_read_unlock(); break; } default: -- cgit v1.2.3-70-g09d2 From 120a795da07c9a02221ca23464c28a7c6ad7de1d Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 30 Oct 2010 02:54:44 -0400 Subject: audit mmap Normal syscall audit doesn't catch the 5th argument of a syscall. It also doesn't catch the contents of userland structures pointed to by a syscall argument, so for both the old and the new mmap(2) ABI it doesn't record the descriptor we are mapping. For the old one it also misses the flags.
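To recap which arguments ordinary syscall auditing records (an annotated sketch, not kernel code):

	/*
	 * sys_mmap(addr, len, prot, flags, fd, off)
	 *           a0   a1   a2    a3    a4  a5
	 *
	 * The generic syscall record carries only a0..a3, so the
	 * descriptor (a4) never appears; hence the dedicated AUDIT_MMAP
	 * record added here, which logs fd and flags explicitly.
	 */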
Signed-off-by: Al Viro --- include/linux/audit.h | 9 +++++++++ kernel/auditsc.c | 16 ++++++++++++++++ mm/mmap.c | 2 ++ mm/nommu.c | 2 ++ 4 files changed, 29 insertions(+) (limited to 'kernel') diff --git a/include/linux/audit.h b/include/linux/audit.h index e24afabc548..8b5c0620abf 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -102,6 +102,7 @@ #define AUDIT_EOE 1320 /* End of multi-record event */ #define AUDIT_BPRM_FCAPS 1321 /* Information about fcaps increasing perms */ #define AUDIT_CAPSET 1322 /* Record showing argument to sys_capset */ +#define AUDIT_MMAP 1323 /* Record showing descriptor and flags in mmap */ #define AUDIT_AVC 1400 /* SE Linux avc denial or grant */ #define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */ @@ -478,6 +479,7 @@ extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm, const struct cred *new, const struct cred *old); extern void __audit_log_capset(pid_t pid, const struct cred *new, const struct cred *old); +extern void __audit_mmap_fd(int fd, int flags); static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) { @@ -531,6 +533,12 @@ static inline void audit_log_capset(pid_t pid, const struct cred *new, __audit_log_capset(pid, new, old); } +static inline void audit_mmap_fd(int fd, int flags) +{ + if (unlikely(!audit_dummy_context())) + __audit_mmap_fd(fd, flags); +} + extern int audit_n_rules; extern int audit_signals; #else @@ -564,6 +572,7 @@ extern int audit_signals; #define audit_mq_getsetattr(d,s) ((void)0) #define audit_log_bprm_fcaps(b, ncr, ocr) ({ 0; }) #define audit_log_capset(pid, ncr, ocr) ((void)0) +#define audit_mmap_fd(fd, flags) ((void)0) #define audit_ptrace(t) ((void)0) #define audit_n_rules 0 #define audit_signals 0 diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 1b31c130d03..f49a0318c2e 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -241,6 +241,10 @@ struct audit_context { pid_t pid; struct audit_cap_data cap; } capset; + struct { + int fd; + int flags; + } mmap; }; int fds[2]; @@ -1305,6 +1309,10 @@ static void show_special(struct audit_context *context, int *call_panic) audit_log_cap(ab, "cap_pp", &context->capset.cap.permitted); audit_log_cap(ab, "cap_pe", &context->capset.cap.effective); break; } + case AUDIT_MMAP: { + audit_log_format(ab, "fd=%d flags=0x%x", context->mmap.fd, + context->mmap.flags); + break; } } audit_log_end(ab); } @@ -2476,6 +2484,14 @@ void __audit_log_capset(pid_t pid, context->type = AUDIT_CAPSET; } +void __audit_mmap_fd(int fd, int flags) +{ + struct audit_context *context = current->audit_context; + context->mmap.fd = fd; + context->mmap.flags = flags; + context->type = AUDIT_MMAP; +} + /** * audit_core_dumps - record information about processes that end abnormally * @signr: signal value diff --git a/mm/mmap.c b/mm/mmap.c index 00161a48a45..b179abb1474 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -1108,6 +1109,7 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, unsigned long retval = -EBADF; if (!(flags & MAP_ANONYMOUS)) { + audit_mmap_fd(fd, flags); if (unlikely(flags & MAP_HUGETLB)) return -EINVAL; file = fget(fd); diff --git a/mm/nommu.c b/mm/nommu.c index 30b5c20eec1..3613517c759 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -1458,6 +1459,7 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, struct file *file = NULL; unsigned long retval = -EBADF; + audit_mmap_fd(fd, flags); 
if (!(flags & MAP_ANONYMOUS)) { file = fget(fd); if (!file) -- cgit v1.2.3-70-g09d2 From 408af87a397a8ddef56ad39a79481f592aa1ac1a Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Thu, 4 Nov 2010 21:44:41 +0100 Subject: Clean up relay_alloc_page_array() slightly by using vzalloc rather than vmalloc and memset We can optimize kernel/relay.c::relay_alloc_page_array() slightly by using vzalloc. The patch makes these changes: - use vzalloc instead of vmalloc+memset. - remove redundant local variable 'array'. - declare local 'pa_size' as const. Cuts down nicely on both source and object-code size. Signed-off-by: Jesper Juhl Acked-by: Pekka Enberg Acked-by: Mathieu Desnoyers Signed-off-by: Linus Torvalds --- kernel/relay.c | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/kernel/relay.c b/kernel/relay.c index c7cf397fb92..859ea5a9605 100644 --- a/kernel/relay.c +++ b/kernel/relay.c @@ -70,17 +70,10 @@ static const struct vm_operations_struct relay_file_mmap_ops = { */ static struct page **relay_alloc_page_array(unsigned int n_pages) { - struct page **array; - size_t pa_size = n_pages * sizeof(struct page *); - - if (pa_size > PAGE_SIZE) { - array = vmalloc(pa_size); - if (array) - memset(array, 0, pa_size); - } else { - array = kzalloc(pa_size, GFP_KERNEL); - } - return array; + const size_t pa_size = n_pages * sizeof(struct page *); + if (pa_size > PAGE_SIZE) + return vzalloc(pa_size); + return kzalloc(pa_size, GFP_KERNEL); } /* -- cgit v1.2.3-70-g09d2 From e0a70217107e6f9844628120412cb27bb4cea194 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 5 Nov 2010 16:53:42 +0100 Subject: posix-cpu-timers: workaround to suppress the problems with mt exec posix-cpu-timers.c correctly assumes that the dying process does posix_cpu_timers_exit_group() and removes all !CPUCLOCK_PERTHREAD timers from signal->cpu_timers list. But, it also assumes that timer->it.cpu.task is always the group leader, and thus the dead ->task means the dead thread group. This is obviously not true after de_thread() changes the leader. After that almost every posix_cpu_timer_ method has problems. It is not simple to fix this bug correctly. First of all, I think that timer->it.cpu should use struct pid instead of task_struct. Also, the locking should be reworked completely. In particular, tasklist_lock should not be used at all. This all needs a lot of nontrivial and hard-to-test changes. Change __exit_signal() to do posix_cpu_timers_exit_group() when the old leader dies during exec. This is not the fix, just the temporary hack to hide the problem for 2.6.37 and stable. IOW, this is obviously wrong but this is what we currently have anyway: cpu timers do not work after mt exec. In theory this change adds another race. The exiting leader can detach the timers which were attached to the new leader. However, the window between de_thread() and release_task() is small, we can pretend that sys_timer_create() was called before de_thread(). Signed-off-by: Oleg Nesterov Signed-off-by: Linus Torvalds --- kernel/exit.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'kernel') diff --git a/kernel/exit.c b/kernel/exit.c index b194febf579..21aa7b3001f 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -95,6 +95,14 @@ static void __exit_signal(struct task_struct *tsk) tty = sig->tty; sig->tty = NULL; } else { + /* + * This can only happen if the caller is de_thread(). + * FIXME: this is the temporary hack, we should teach + * posix-cpu-timers to handle this case correctly. 
+ */ + if (unlikely(has_group_leader_pid(tsk))) + posix_cpu_timers_exit_group(tsk); + /* * If there is any task waiting for the group exit * then notify it: -- cgit v1.2.3-70-g09d2 From 433039e97f672b81e6c8f6daef385dcf035c6e29 Mon Sep 17 00:00:00 2001 From: David Daney Date: Fri, 5 Nov 2010 16:17:39 -0700 Subject: watchdog: Fix section mismatch and potential undefined behavior. Commit d9ca07a05ce1 ("watchdog: Avoid kernel crash when disabling watchdog") introduces a section mismatch. Now that we reference no_watchdog from non-__init code it can no longer be __initdata. Signed-off-by: David Daney Cc: Stephane Eranian Cc: Peter Zijlstra Cc: Ingo Molnar Signed-off-by: Linus Torvalds --- kernel/watchdog.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index bafba687a6d..6e3c41a4024 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -43,7 +43,7 @@ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved); static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); #endif -static int __initdata no_watchdog; +static int no_watchdog; /* boot commands */ -- cgit v1.2.3-70-g09d2 From 02e031cbc843b010e72fcc05c76113c688b2860f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 10 Nov 2010 14:54:09 +0100 Subject: block: remove REQ_HARDBARRIER REQ_HARDBARRIER is dead now, so remove the leftovers. What's left at this point is: - various checks inside the block layer. - sanity checks in bio based drivers. - now unused bio_empty_barrier helper. - Xen blockfront use of BLKIF_OP_WRITE_BARRIER - it's been dead for a while, but Xen really needs to sort out its barrier situation. - setting of ordered tags in uas - dead code copied from old scsi drivers. - scsi's different retry for barriers - it's dead and should have been removed when flushes were converted to FS requests. - blktrace handling of barriers - removed. Someone who knows blktrace better should add support for REQ_FLUSH and REQ_FUA, though.
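For code that used empty barriers, the replacement is an explicit cache flush; a minimal sketch, assuming the WRITE_FLUSH/WRITE_FUA helpers from the flush conversion (error and completion handling omitted):

	/* commit the device's volatile write cache to stable media */
	struct bio *bio = bio_alloc(GFP_NOIO, 0);	/* empty bio, no data */

	bio->bi_bdev = bdev;
	submit_bio(WRITE_FLUSH, bio);
	/* a data write that must itself hit stable media uses WRITE_FUA */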
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-core.c | 7 ------- block/elevator.c | 4 ++-- drivers/block/aoe/aoeblk.c | 3 --- drivers/block/loop.c | 6 ------ drivers/block/xen-blkfront.c | 2 -- drivers/scsi/scsi_error.c | 18 +++++------------- drivers/usb/storage/uas.c | 5 +---- include/linux/bio.h | 4 ---- include/linux/blk_types.h | 6 ++---- include/linux/blkdev.h | 3 +-- kernel/trace/blktrace.c | 4 ---- 11 files changed, 11 insertions(+), 51 deletions(-) (limited to 'kernel') diff --git a/block/blk-core.c b/block/blk-core.c index 17fcb83670c..4ce953f1b39 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1194,13 +1194,6 @@ static int __make_request(struct request_queue *q, struct bio *bio) int where = ELEVATOR_INSERT_SORT; int rw_flags; - /* REQ_HARDBARRIER is no more */ - if (WARN_ONCE(bio->bi_rw & REQ_HARDBARRIER, - "block: HARDBARRIER is deprecated, use FLUSH/FUA instead\n")) { - bio_endio(bio, -EOPNOTSUPP); - return 0; - } - /* * low level driver can indicate that it wants pages above a * certain limit bounced to low memory (ie for highmem, or even diff --git a/block/elevator.c b/block/elevator.c index 282e8308f7e..2569512830d 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -429,7 +429,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq) q->nr_sorted--; boundary = q->end_sector; - stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED; + stop_flags = REQ_SOFTBARRIER | REQ_STARTED; list_for_each_prev(entry, &q->queue_head) { struct request *pos = list_entry_rq(entry); @@ -691,7 +691,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where) void __elv_add_request(struct request_queue *q, struct request *rq, int where, int plug) { - if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) { + if (rq->cmd_flags & REQ_SOFTBARRIER) { /* barriers are scheduling boundary, update end_sector */ if (rq->cmd_type == REQ_TYPE_FS || (rq->cmd_flags & REQ_DISCARD)) { diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index 541e1887996..528f6318ded 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c @@ -180,9 +180,6 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio) BUG(); bio_endio(bio, -ENXIO); return 0; - } else if (bio->bi_rw & REQ_HARDBARRIER) { - bio_endio(bio, -EOPNOTSUPP); - return 0; } else if (bio->bi_io_vec == NULL) { printk(KERN_ERR "aoe: bi_io_vec is NULL\n"); BUG(); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 1e5284ef65f..7ea0bea2f7e 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -481,12 +481,6 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio) if (bio_rw(bio) == WRITE) { struct file *file = lo->lo_backing_file; - /* REQ_HARDBARRIER is deprecated */ - if (bio->bi_rw & REQ_HARDBARRIER) { - ret = -EOPNOTSUPP; - goto out; - } - if (bio->bi_rw & REQ_FLUSH) { ret = vfs_fsync(file, 0); if (unlikely(ret && ret != -EINVAL)) { diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 06e2812ba12..255035cfc88 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -289,8 +289,6 @@ static int blkif_queue_request(struct request *req) ring_req->operation = rq_data_dir(req) ? 
BLKIF_OP_WRITE : BLKIF_OP_READ; - if (req->cmd_flags & REQ_HARDBARRIER) - ring_req->operation = BLKIF_OP_WRITE_BARRIER; ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 1de30eb83bb..f3cf924a2cd 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -320,19 +320,11 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) "changed. The Linux SCSI layer does not " "automatically adjust these parameters.\n"); - if (scmd->request->cmd_flags & REQ_HARDBARRIER) - /* - * barrier requests should always retry on UA - * otherwise block will get a spurious error - */ - return NEEDS_RETRY; - else - /* - * for normal (non barrier) commands, pass the - * UA upwards for a determination in the - * completion functions - */ - return SUCCESS; + /* + * Pass the UA upwards for a determination in the completion + * functions. + */ + return SUCCESS; /* these three are not supported */ case COPY_ABORTED: diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 2054b1e25a6..d1268191acb 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -331,10 +331,7 @@ static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp, iu->iu_id = IU_ID_COMMAND; iu->tag = cpu_to_be16(stream_id); - if (sdev->ordered_tags && (cmnd->request->cmd_flags & REQ_HARDBARRIER)) - iu->prio_attr = UAS_ORDERED_TAG; - else - iu->prio_attr = UAS_SIMPLE_TAG; + iu->prio_attr = UAS_SIMPLE_TAG; iu->len = len; int_to_scsilun(sdev->lun, &iu->lun); memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len); diff --git a/include/linux/bio.h b/include/linux/bio.h index ba679992d39..35dcdb3589b 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -66,10 +66,6 @@ #define bio_offset(bio) bio_iovec((bio))->bv_offset #define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx) #define bio_sectors(bio) ((bio)->bi_size >> 9) -#define bio_empty_barrier(bio) \ - ((bio->bi_rw & REQ_HARDBARRIER) && \ - !bio_has_data(bio) && \ - !(bio->bi_rw & REQ_DISCARD)) static inline unsigned int bio_cur_bytes(struct bio *bio) { diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 0437ab6bb54..46ad5197537 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -122,7 +122,6 @@ enum rq_flag_bits { __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */ __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */ - __REQ_HARDBARRIER, /* may not be passed by drive either */ __REQ_SYNC, /* request is sync (sync write or read) */ __REQ_META, /* metadata io request */ __REQ_DISCARD, /* request to discard sectors */ @@ -159,7 +158,6 @@ enum rq_flag_bits { #define REQ_FAILFAST_DEV (1 << __REQ_FAILFAST_DEV) #define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT) #define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER) -#define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER) #define REQ_SYNC (1 << __REQ_SYNC) #define REQ_META (1 << __REQ_META) #define REQ_DISCARD (1 << __REQ_DISCARD) @@ -168,8 +166,8 @@ enum rq_flag_bits { #define REQ_FAILFAST_MASK \ (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) #define REQ_COMMON_MASK \ - (REQ_WRITE | REQ_FAILFAST_MASK | REQ_HARDBARRIER | REQ_SYNC | \ - REQ_META | REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA) + (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_DISCARD | \ + REQ_NOIDLE | REQ_FLUSH | REQ_FUA) #define REQ_CLONE_MASK REQ_COMMON_MASK #define REQ_UNPLUG 
(1 << __REQ_UNPLUG) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 5027a599077..aae86fd10c4 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -552,8 +552,7 @@ static inline void blk_clear_queue_full(struct request_queue *q, int sync) * it already be started by driver. */ #define RQ_NOMERGE_FLAGS \ - (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER | \ - REQ_FLUSH | REQ_FUA) + (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA) #define rq_mergeable(rq) \ (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \ (((rq)->cmd_flags & REQ_DISCARD) || \ diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index bc251ed6672..7b8ec028154 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -168,7 +168,6 @@ static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector, static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ), BLK_TC_ACT(BLK_TC_WRITE) }; -#define BLK_TC_HARDBARRIER BLK_TC_BARRIER #define BLK_TC_RAHEAD BLK_TC_AHEAD /* The ilog2() calls fall out because they're constant */ @@ -196,7 +195,6 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, return; what |= ddir_act[rw & WRITE]; - what |= MASK_TC_BIT(rw, HARDBARRIER); what |= MASK_TC_BIT(rw, SYNC); what |= MASK_TC_BIT(rw, RAHEAD); what |= MASK_TC_BIT(rw, META); @@ -1807,8 +1805,6 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes) if (rw & REQ_RAHEAD) rwbs[i++] = 'A'; - if (rw & REQ_HARDBARRIER) - rwbs[i++] = 'B'; if (rw & REQ_SYNC) rwbs[i++] = 'S'; if (rw & REQ_META) -- cgit v1.2.3-70-g09d2 From eed01528a45dc4138e9a08064b4b6cc1a9426899 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Tue, 26 Oct 2010 16:08:01 +0200 Subject: perf_events: Fix time tracking in samples This patch corrects time tracking in samples. Without this patch both time_enabled and time_running are bogus when the user asks for PERF_SAMPLE_READ. One uses PERF_SAMPLE_READ to sample the values of other counters in each sample. Because of multiplexing, it is necessary to know both time_enabled and time_running to be able to scale counts correctly. In this second version of the patch, we maintain a shadow copy of ctx->time which allows us to compute ctx->time without calling update_context_time() from NMI context. We avoid the issue that update_context_time() must always be called with ctx->lock held. We do not keep shadow copies of the other event timings because if the lead event is overflowing then it is active and thus it's been scheduled in via event_sched_in() in which case neither tstamp_stopped nor tstamp_running can be modified. This timing logic only applies to samples when PERF_SAMPLE_READ is used. Note that this patch does not address timing issues related to sampling inheritance between tasks. This will be addressed in a future patch.
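The two times exist so that user space can compensate for multiplexing; the usual scaling step, in sketch form:

	/* estimate what the count would be had the event run the whole time */
	u64 scaled = 0;

	if (running)
		scaled = count * enabled / running;	/* mind 64-bit overflow */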
With this patch, the libpfm4 example task_smpl now reports correct counts (shown on 2.4GHz Core 2): $ task_smpl -p 2400000000 -e unhalted_core_cycles:u,instructions_retired:u,baclears noploop 5 noploop for 5 seconds IIP:0x000000004006d6 PID:5596 TID:5596 TIME:466,210,211,430 STREAM_ID:33 PERIOD:2,400,000,000 ENA=1,010,157,814 RUN=1,010,157,814 NR=3 2,400,000,254 unhalted_core_cycles:u (33) 2,399,273,744 instructions_retired:u (34) 53,340 baclears (35) Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra LKML-Reference: <4cc6e14b.1e07e30a.256e.5190@mx.google.com> Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 10 ++++++++++ kernel/perf_event.c | 42 ++++++++++++++++++++++++++++++++++-------- 2 files changed, 44 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 057bf22a832..40150f34598 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -747,6 +747,16 @@ struct perf_event { u64 tstamp_running; u64 tstamp_stopped; + /* + * timestamp shadows the actual context timing but it can + * be safely used in NMI interrupt context. It reflects the + * context time as it was when the event was last scheduled in. + * + * ctx_time already accounts for ctx->timestamp. Therefore to + * compute ctx_time for a sample, simply add perf_clock(). + */ + u64 shadow_ctx_time; + struct perf_event_attr attr; struct hw_perf_event hw; diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 517d827f498..cb6c0d2af68 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -674,6 +674,8 @@ event_sched_in(struct perf_event *event, event->tstamp_running += ctx->time - event->tstamp_stopped; + event->shadow_ctx_time = ctx->time - ctx->timestamp; + if (!is_software_event(event)) cpuctx->active_oncpu++; ctx->nr_active++; @@ -3396,7 +3398,8 @@ static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) } static void perf_output_read_one(struct perf_output_handle *handle, - struct perf_event *event) + struct perf_event *event, + u64 enabled, u64 running) { u64 read_format = event->attr.read_format; u64 values[4]; @@ -3404,11 +3407,11 @@ static void perf_output_read_one(struct perf_output_handle *handle, values[n++] = perf_event_count(event); if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { - values[n++] = event->total_time_enabled + + values[n++] = enabled + atomic64_read(&event->child_total_time_enabled); } if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { - values[n++] = event->total_time_running + + values[n++] = running + atomic64_read(&event->child_total_time_running); } if (read_format & PERF_FORMAT_ID) @@ -3421,7 +3424,8 @@ static void perf_output_read_one(struct perf_output_handle *handle, * XXX PERF_FORMAT_GROUP vs inherited events seems difficult. 
*/ static void perf_output_read_group(struct perf_output_handle *handle, - struct perf_event *event) + struct perf_event *event, + u64 enabled, u64 running) { struct perf_event *leader = event->group_leader, *sub; u64 read_format = event->attr.read_format; @@ -3431,10 +3435,10 @@ static void perf_output_read_group(struct perf_output_handle *handle, values[n++] = 1 + leader->nr_siblings; if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) - values[n++] = leader->total_time_enabled; + values[n++] = enabled; if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) - values[n++] = leader->total_time_running; + values[n++] = running; if (leader != event) leader->pmu->read(leader); @@ -3459,13 +3463,35 @@ static void perf_output_read_group(struct perf_output_handle *handle, } } +#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\ + PERF_FORMAT_TOTAL_TIME_RUNNING) + static void perf_output_read(struct perf_output_handle *handle, struct perf_event *event) { + u64 enabled = 0, running = 0, now, ctx_time; + u64 read_format = event->attr.read_format; + + /* + * compute total_time_enabled, total_time_running + * based on snapshot values taken when the event + * was last scheduled in. + * + * we cannot simply called update_context_time() + * because of locking issue as we are called in + * NMI context + */ + if (read_format & PERF_FORMAT_TOTAL_TIMES) { + now = perf_clock(); + ctx_time = event->shadow_ctx_time + now; + enabled = ctx_time - event->tstamp_enabled; + running = ctx_time - event->tstamp_running; + } + if (event->attr.read_format & PERF_FORMAT_GROUP) - perf_output_read_group(handle, event); + perf_output_read_group(handle, event, enabled, running); else - perf_output_read_one(handle, event); + perf_output_read_one(handle, event, enabled, running); } void perf_output_sample(struct perf_output_handle *handle, -- cgit v1.2.3-70-g09d2 From 834b40380e93e36f1c9b48ec1d280cebe3d7bd8c Mon Sep 17 00:00:00 2001 From: Alexey Khoroshilov Date: Thu, 11 Nov 2010 14:05:14 -0800 Subject: kernel/range.c: fix clean_sort_range() for the case of full array clean_sort_range() should return the number of nonempty elements of the range array, but if the array is full clean_sort_range() returns 0. The problem is that the number of nonempty elements is evaluated by finding the first empty element of the array. If there is no such element, it returns the initial value of the local variable nr_range, which is zero. The fix is trivial: it changes the initial value of nr_range to the size of the array. The bug can lead to loss of information regarding all ranges, since the returned value of clean_sort_range() is typically taken as the actual number of ranges in the array after a series of add/subtract operations. Found by Analytical Verification project of Linux Verification Center (linuxtesting.org), thanks to Alexander Kolosov. Signed-off-by: Alexey Khoroshilov Cc: Yinghai Lu Cc: "H.
Peter Anvin" Cc: Geert Uytterhoeven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/range.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/range.c b/kernel/range.c index 471b66acabb..37fa9b99ad5 100644 --- a/kernel/range.c +++ b/kernel/range.c @@ -119,7 +119,7 @@ static int cmp_range(const void *x1, const void *x2) int clean_sort_range(struct range *range, int az) { - int i, j, k = az - 1, nr_range = 0; + int i, j, k = az - 1, nr_range = az; for (i = 0; i < k; i++) { if (range[i].end) -- cgit v1.2.3-70-g09d2 From 38715258aa2e8cd94bd4aafadc544e5104efd551 Mon Sep 17 00:00:00 2001 From: Ken Chen Date: Thu, 11 Nov 2010 14:05:16 -0800 Subject: latencytop: fix per task accumulator Per task latencytop accumulator prematurely terminates due to erroneous placement of latency_record_count. It should be incremented whenever a new record is allocated instead of increment on every latencytop event. Also fix search iterator to only search known record events instead of blindly searching all pre-allocated space. Signed-off-by: Ken Chen Reviewed-by: Arjan van de Ven Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/latencytop.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/latencytop.c b/kernel/latencytop.c index 877fb306d41..17110a4a4fc 100644 --- a/kernel/latencytop.c +++ b/kernel/latencytop.c @@ -194,14 +194,7 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter) account_global_scheduler_latency(tsk, &lat); - /* - * short term hack; if we're > 32 we stop; future we recycle: - */ - tsk->latency_record_count++; - if (tsk->latency_record_count >= LT_SAVECOUNT) - goto out_unlock; - - for (i = 0; i < LT_SAVECOUNT; i++) { + for (i = 0; i < tsk->latency_record_count; i++) { struct latency_record *mylat; int same = 1; @@ -227,8 +220,14 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter) } } + /* + * short term hack; if we're > 32 we stop; future we recycle: + */ + if (tsk->latency_record_count >= LT_SAVECOUNT) + goto out_unlock; + /* Allocated a new one: */ - i = tsk->latency_record_count; + i = tsk->latency_record_count++; memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record)); out_unlock: -- cgit v1.2.3-70-g09d2 From eaf06b241b091357e72b76863ba16e89610d31bd Mon Sep 17 00:00:00 2001 From: Dan Rosenberg Date: Thu, 11 Nov 2010 14:05:18 -0800 Subject: Restrict unprivileged access to kernel syslog The kernel syslog contains debugging information that is often useful during exploitation of other vulnerabilities, such as kernel heap addresses. Rather than futilely attempt to sanitize hundreds (or thousands) of printk statements and simultaneously cripple useful debugging functionality, it is far simpler to create an option that prevents unprivileged users from reading the syslog. This patch, loosely based on grsecurity's GRKERNSEC_DMESG, creates the dmesg_restrict sysctl. When set to "0", the default, no restrictions are enforced. When set to "1", only users with CAP_SYS_ADMIN can read the kernel syslog via dmesg(8) or other mechanisms. 
[akpm@linux-foundation.org: explain the config option in kernel.txt] Signed-off-by: Dan Rosenberg Acked-by: Ingo Molnar Acked-by: Eugene Teo Acked-by: Kees Cook Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/sysctl/kernel.txt | 14 ++++++++++++++ include/linux/kernel.h | 1 + kernel/printk.c | 6 ++++++ kernel/sysctl.c | 9 +++++++++ security/Kconfig | 12 ++++++++++++ security/commoncap.c | 2 ++ 6 files changed, 44 insertions(+) (limited to 'kernel') diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt index 3894eaa2348..209e1584c3d 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt @@ -28,6 +28,7 @@ show up in /proc/sys/kernel: - core_uses_pid - ctrl-alt-del - dentry-state +- dmesg_restrict - domainname - hostname - hotplug @@ -213,6 +214,19 @@ to decide what to do with it. ============================================================== +dmesg_restrict: + +This toggle indicates whether unprivileged users are prevented from using +dmesg(8) to view messages from the kernel's log buffer. When +dmesg_restrict is set to (0) there are no restrictions. When +dmesg_restrict is set to (1), users must have CAP_SYS_ADMIN to use +dmesg(8). + +The kernel config option CONFIG_SECURITY_DMESG_RESTRICT sets the default +value of dmesg_restrict. + +============================================================== + domainname & hostname: These files can be used to set the NIS/YP domainname and the diff --git a/include/linux/kernel.h b/include/linux/kernel.h index b526947bdf4..fc3da9e4da1 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -293,6 +293,7 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, unsigned int interval_msec); extern int printk_delay_msec; +extern int dmesg_restrict; /* * Print a one-time message (analogous to WARN_ONCE() et al): diff --git a/kernel/printk.c b/kernel/printk.c index b2ebaee8c37..38e7d5868d6 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -261,6 +261,12 @@ static inline void boot_delay_msec(void) } #endif +#ifdef CONFIG_SECURITY_DMESG_RESTRICT +int dmesg_restrict = 1; +#else +int dmesg_restrict; +#endif + int do_syslog(int type, char __user *buf, int len, bool from_file) { unsigned i, j, limit, count; diff --git a/kernel/sysctl.c b/kernel/sysctl.c index c33a1edb799..b65bf634035 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -703,6 +703,15 @@ static struct ctl_table kern_table[] = { .extra2 = &ten_thousand, }, #endif + { + .procname = "dmesg_restrict", + .data = &dmesg_restrict, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &one, + }, { .procname = "ngroups_max", .data = &ngroups_max, diff --git a/security/Kconfig b/security/Kconfig index bd72ae62349..e80da955e68 100644 --- a/security/Kconfig +++ b/security/Kconfig @@ -39,6 +39,18 @@ config KEYS_DEBUG_PROC_KEYS If you are unsure as to whether this is required, answer N. +config SECURITY_DMESG_RESTRICT + bool "Restrict unprivileged access to the kernel syslog" + default n + help + This enforces restrictions on unprivileged users reading the kernel + syslog via dmesg(8). + + If this option is not selected, no restrictions will be enforced + unless the dmesg_restrict sysctl is explicitly set to (1). + + If you are unsure how to answer this question, answer N.
+ config SECURITY bool "Enable different security models" depends on SYSFS diff --git a/security/commoncap.c b/security/commoncap.c index 5e632b4857e..04b80f9912b 100644 --- a/security/commoncap.c +++ b/security/commoncap.c @@ -895,6 +895,8 @@ int cap_syslog(int type, bool from_file) { if (type != SYSLOG_ACTION_OPEN && from_file) return 0; + if (dmesg_restrict && !capable(CAP_SYS_ADMIN)) + return -EPERM; if ((type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER) && !capable(CAP_SYS_ADMIN)) return -EPERM; -- cgit v1.2.3-70-g09d2 From 12b3052c3ee8f508b2c7ee4ddd63ed03423409d8 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Mon, 15 Nov 2010 18:36:29 -0500 Subject: capabilities/syslog: open code cap_syslog logic to fix build failure The addition of CONFIG_SECURITY_DMESG_RESTRICT resulted in a build failure when CONFIG_PRINTK=n. This is because the capabilities code which used the new option was built even though the variable in question didn't exist. The patch here fixes this by moving the capabilities checks out of the LSM and into the caller. All (known) LSMs should have been calling the capabilities hook already so it actually makes the code organization better to eliminate the hook altogether. Signed-off-by: Eric Paris Acked-by: James Morris Signed-off-by: Linus Torvalds --- include/linux/security.h | 9 ++++----- kernel/printk.c | 15 ++++++++++++++- security/capability.c | 5 +++++ security/commoncap.c | 21 --------------------- security/security.c | 4 ++-- security/selinux/hooks.c | 6 +----- security/smack/smack_lsm.c | 8 ++------ 7 files changed, 28 insertions(+), 40 deletions(-) (limited to 'kernel') diff --git a/include/linux/security.h b/include/linux/security.h index b8246a8df7d..fd4d55fb884 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -77,7 +77,6 @@ extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, extern int cap_task_setscheduler(struct task_struct *p); extern int cap_task_setioprio(struct task_struct *p, int ioprio); extern int cap_task_setnice(struct task_struct *p, int nice); -extern int cap_syslog(int type, bool from_file); extern int cap_vm_enough_memory(struct mm_struct *mm, long pages); struct msghdr; @@ -1388,7 +1387,7 @@ struct security_operations { int (*sysctl) (struct ctl_table *table, int op); int (*quotactl) (int cmds, int type, int id, struct super_block *sb); int (*quota_on) (struct dentry *dentry); - int (*syslog) (int type, bool from_file); + int (*syslog) (int type); int (*settime) (struct timespec *ts, struct timezone *tz); int (*vm_enough_memory) (struct mm_struct *mm, long pages); @@ -1671,7 +1670,7 @@ int security_real_capable_noaudit(struct task_struct *tsk, int cap); int security_sysctl(struct ctl_table *table, int op); int security_quotactl(int cmds, int type, int id, struct super_block *sb); int security_quota_on(struct dentry *dentry); -int security_syslog(int type, bool from_file); +int security_syslog(int type); int security_settime(struct timespec *ts, struct timezone *tz); int security_vm_enough_memory(long pages); int security_vm_enough_memory_mm(struct mm_struct *mm, long pages); @@ -1901,9 +1900,9 @@ static inline int security_quota_on(struct dentry *dentry) return 0; } -static inline int security_syslog(int type, bool from_file) +static inline int security_syslog(int type) { - return cap_syslog(type, from_file); + return 0; } static inline int security_settime(struct timespec *ts, struct timezone *tz) diff --git a/kernel/printk.c b/kernel/printk.c index 38e7d5868d6..9a2264fc42c 
100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -274,7 +274,20 @@ int do_syslog(int type, char __user *buf, int len, bool from_file) char c; int error = 0; - error = security_syslog(type, from_file); + /* + * If this is from /proc/kmsg we only do the capabilities checks + * at open time. + */ + if (type == SYSLOG_ACTION_OPEN || !from_file) { + if (dmesg_restrict && !capable(CAP_SYS_ADMIN)) + return -EPERM; + if ((type != SYSLOG_ACTION_READ_ALL && + type != SYSLOG_ACTION_SIZE_BUFFER) && + !capable(CAP_SYS_ADMIN)) + return -EPERM; + } + + error = security_syslog(type); if (error) return error; diff --git a/security/capability.c b/security/capability.c index 30ae00fbecd..c773635ca3a 100644 --- a/security/capability.c +++ b/security/capability.c @@ -17,6 +17,11 @@ static int cap_sysctl(ctl_table *table, int op) return 0; } +static int cap_syslog(int type) +{ + return 0; +} + static int cap_quotactl(int cmds, int type, int id, struct super_block *sb) { return 0; diff --git a/security/commoncap.c b/security/commoncap.c index 04b80f9912b..64c2ed9c901 100644 --- a/security/commoncap.c +++ b/security/commoncap.c @@ -27,7 +27,6 @@ #include <linux/sched.h> #include <linux/prctl.h> #include <linux/securebits.h> -#include <linux/syslog.h> /* * If a non-root user executes a setuid-root binary in @@ -883,26 +882,6 @@ error: return error; } -/** - * cap_syslog - Determine whether syslog function is permitted - * @type: Function requested - * @from_file: Whether this request came from an open file (i.e. /proc) - * - * Determine whether the current process is permitted to use a particular - * syslog function, returning 0 if permission is granted, -ve if not. - */ -int cap_syslog(int type, bool from_file) -{ - if (type != SYSLOG_ACTION_OPEN && from_file) - return 0; - if (dmesg_restrict && !capable(CAP_SYS_ADMIN)) - return -EPERM; - if ((type != SYSLOG_ACTION_READ_ALL && - type != SYSLOG_ACTION_SIZE_BUFFER) && !capable(CAP_SYS_ADMIN)) - return -EPERM; - return 0; -} - /** * cap_vm_enough_memory - Determine whether a new virtual mapping is permitted * @mm: The VM space in which the new mapping is to be made diff --git a/security/security.c b/security/security.c index 3ef5e2a7a74..1b798d3df71 100644 --- a/security/security.c +++ b/security/security.c @@ -197,9 +197,9 @@ int security_quota_on(struct dentry *dentry) return security_ops->quota_on(dentry); } -int security_syslog(int type, bool from_file) +int security_syslog(int type) { - return security_ops->syslog(type, from_file); + return security_ops->syslog(type); } int security_settime(struct timespec *ts, struct timezone *tz) diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index d9154cf90ae..65fa8bf596f 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -1973,14 +1973,10 @@ static int selinux_quota_on(struct dentry *dentry) return dentry_has_perm(cred, NULL, dentry, FILE__QUOTAON); } -static int selinux_syslog(int type, bool from_file) +static int selinux_syslog(int type) { int rc; - rc = cap_syslog(type, from_file); - if (rc) - return rc; - switch (type) { case SYSLOG_ACTION_READ_ALL: /* Read last kernel messages */ case SYSLOG_ACTION_SIZE_BUFFER: /* Return size of the log buffer */ diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index bc39f4067af..489a85afa47 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -157,15 +157,11 @@ static int smack_ptrace_traceme(struct task_struct *ptp) * * Returns 0 on success, error code otherwise.
*/ -static int smack_syslog(int type, bool from_file) +static int smack_syslog(int typefrom_file) { - int rc; + int rc = 0; char *sp = current_security(); - rc = cap_syslog(type, from_file); - if (rc != 0) - return rc; - if (capable(CAP_MAC_OVERRIDE)) return 0; -- cgit v1.2.3-70-g09d2
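
A few of the fixes above are easier to appreciate in isolation. The sketches that follow are user-space C written for this log, not kernel code; every structure and function name in them is an illustrative stand-in for the kernel symbol it mirrors. First, the shadow-timestamp arithmetic from the perf time-tracking patch: here now_ns() stands in for perf_clock(), and struct shadow_event mirrors the shadow_ctx_time, tstamp_enabled and tstamp_running fields of struct perf_event.

    #include <stdint.h>
    #include <time.h>

    /* stand-in for perf_clock(): any monotonic nanosecond clock */
    static uint64_t now_ns(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
    }

    struct shadow_event {
            uint64_t shadow_ctx_time;  /* ctx->time - ctx->timestamp, saved at sched-in */
            uint64_t tstamp_enabled;   /* context time when the event was enabled */
            uint64_t tstamp_running;   /* context time when the event last started */
    };

    /*
     * No locks taken: the context time is rebuilt from the sched-in
     * snapshot plus one clock read, mirroring how perf_output_read()
     * derives enabled/running for a sample.
     */
    static void sample_times(const struct shadow_event *ev,
                             uint64_t *enabled, uint64_t *running)
    {
            uint64_t ctx_time = ev->shadow_ctx_time + now_ns();

            *enabled = ctx_time - ev->tstamp_enabled;
            *running = ctx_time - ev->tstamp_running;
    }

The key property is that sample_times() needs nothing but snapshots and a single clock read, which is what makes the equivalent computation safe in NMI context where update_context_time() cannot be called.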
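
The clean_sort_range() fix can likewise be reproduced standalone. This sketch is simplified: qsort() stands in for the kernel's sort(), and the compaction is a single forward pass rather than the two-index scheme in kernel/range.c. It preserves the property the patch corrects: when every slot is occupied, the counting loop never breaks out, so nr_range must start at az rather than 0.

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct range { uint64_t start, end; };  /* end == 0 means "empty", as in kernel/range.c */

    static int cmp_range(const void *a, const void *b)
    {
            const struct range *r1 = a, *r2 = b;

            return (r1->start > r2->start) - (r1->start < r2->start);
    }

    static int clean_sort_range(struct range *range, int az)
    {
            int i, n = 0, nr_range = az;    /* the fix: start at az, not 0 */

            /* compact nonempty ranges to the front, zero the tail */
            for (i = 0; i < az; i++)
                    if (range[i].end)
                            range[n++] = range[i];
            for (i = n; i < az; i++)
                    memset(&range[i], 0, sizeof(range[i]));

            /* the first empty slot is the live count; a full array never breaks out */
            for (i = 0; i < az; i++) {
                    if (!range[i].end) {
                            nr_range = i;
                            break;
                    }
            }

            qsort(range, nr_range, sizeof(range[0]), cmp_range);
            return nr_range;
    }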
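
The corrected latencytop bookkeeping reduces to the pattern below. The types and the single-key match are hypothetical simplifications (the real code compares full backtraces); the point is that the search covers only the populated prefix of the table, and the count advances only when a slot is actually allocated.

    #define LT_SAVECOUNT 32

    struct latency_record { unsigned long key; unsigned int count; };

    struct task {
            int latency_record_count;
            struct latency_record latency_record[LT_SAVECOUNT];
    };

    static void account_latency(struct task *tsk, unsigned long key)
    {
            int i;

            /* search only records known to exist, not the whole array */
            for (i = 0; i < tsk->latency_record_count; i++) {
                    if (tsk->latency_record[i].key == key) {
                            tsk->latency_record[i].count++;
                            return;
                    }
            }

            /* table full: drop the event (the "short term hack" in the patch) */
            if (tsk->latency_record_count >= LT_SAVECOUNT)
                    return;

            /* increment only when a new record is allocated */
            i = tsk->latency_record_count++;
            tsk->latency_record[i].key = key;
            tsk->latency_record[i].count = 1;
    }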
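
Lastly, the dmesg_restrict knob introduced above is driven entirely through /proc/sys/kernel/dmesg_restrict (the procname wired up in kernel/sysctl.c). The small illustrative program below just reads that file and reports what an unprivileged dmesg(8) should expect; it is a convenience for checking a running system, not part of the patches.

    #include <stdio.h>

    int main(void)
    {
            int restrict_val = 0;
            FILE *f = fopen("/proc/sys/kernel/dmesg_restrict", "r");

            if (!f) {
                    perror("dmesg_restrict");  /* kernel predates the knob */
                    return 1;
            }
            if (fscanf(f, "%d", &restrict_val) != 1)
                    restrict_val = -1;
            fclose(f);

            printf("dmesg_restrict = %d: unprivileged dmesg %s\n",
                   restrict_val,
                   restrict_val == 1 ? "needs CAP_SYS_ADMIN" : "is allowed");
            return 0;
    }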