Diffstat (limited to 'kernel/profile.c')
-rw-r--r--	kernel/profile.c	240
1 file changed, 127 insertions(+), 113 deletions(-)
diff --git a/kernel/profile.c b/kernel/profile.c
index 3b7a1b05512..54bf5ba2642 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -8,12 +8,13 @@
  * Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
  *	Red Hat, July 2004
  * Consolidation of architecture support code for profiling,
- *	William Irwin, Oracle, July 2004
+ *	Nadia Yvette Chambers, Oracle, July 2004
  * Amortized hit count accounting via per-cpu open-addressed hashtables
- *	to resolve timer interrupt livelocks, William Irwin, Oracle, 2004
+ *	to resolve timer interrupt livelocks, Nadia Yvette Chambers,
+ *	Oracle, 2004
  */
 
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/profile.h>
 #include <linux/bootmem.h>
 #include <linux/notifier.h>
@@ -22,8 +23,9 @@
 #include <linux/cpu.h>
 #include <linux/highmem.h>
 #include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <asm/sections.h>
-#include <asm/semaphore.h>
 #include <asm/irq_regs.h>
 #include <asm/ptrace.h>
 
@@ -35,27 +37,24 @@ struct profile_hit {
 #define NR_PROFILE_HIT		(PAGE_SIZE/sizeof(struct profile_hit))
 #define NR_PROFILE_GRP		(NR_PROFILE_HIT/PROFILE_GRPSZ)
 
-/* Oprofile timer tick hook */
-static int (*timer_hook)(struct pt_regs *) __read_mostly;
-
 static atomic_t *prof_buffer;
 static unsigned long prof_len, prof_shift;
 
 int prof_on __read_mostly;
 EXPORT_SYMBOL_GPL(prof_on);
 
-static cpumask_t prof_cpu_mask = CPU_MASK_ALL;
+static cpumask_var_t prof_cpu_mask;
 #ifdef CONFIG_SMP
 static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
 static DEFINE_PER_CPU(int, cpu_profile_flip);
 static DEFINE_MUTEX(profile_flip_mutex);
 #endif /* CONFIG_SMP */
 
-static int __init profile_setup(char *str)
+int profile_setup(char *str)
 {
-	static char __initdata schedstr[] = "schedule";
-	static char __initdata sleepstr[] = "sleep";
-	static char __initdata kvmstr[] = "kvm";
+	static const char schedstr[] = "schedule";
+	static const char sleepstr[] = "sleep";
+	static const char kvmstr[] = "kvm";
 	int par;
 
 	if (!strncmp(str, sleepstr, strlen(sleepstr))) {
@@ -65,12 +64,10 @@ static int __init profile_setup(char *str)
 		str += strlen(sleepstr) + 1;
 		if (get_option(&str, &par))
 			prof_shift = par;
-		printk(KERN_INFO
-			"kernel sleep profiling enabled (shift: %ld)\n",
+		pr_info("kernel sleep profiling enabled (shift: %ld)\n",
 			prof_shift);
 #else
-		printk(KERN_WARNING
-			"kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
+		pr_warn("kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
 #endif /* CONFIG_SCHEDSTATS */
 	} else if (!strncmp(str, schedstr, strlen(schedstr))) {
 		prof_on = SCHED_PROFILING;
@@ -78,8 +75,7 @@ static int __init profile_setup(char *str)
 		str += strlen(schedstr) + 1;
 		if (get_option(&str, &par))
 			prof_shift = par;
-		printk(KERN_INFO
-			"kernel schedule profiling enabled (shift: %ld)\n",
+		pr_info("kernel schedule profiling enabled (shift: %ld)\n",
 			prof_shift);
 	} else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
 		prof_on = KVM_PROFILING;
@@ -87,13 +83,12 @@ static int __init profile_setup(char *str)
 		str += strlen(kvmstr) + 1;
 		if (get_option(&str, &par))
 			prof_shift = par;
-		printk(KERN_INFO
-			"kernel KVM profiling enabled (shift: %ld)\n",
+		pr_info("kernel KVM profiling enabled (shift: %ld)\n",
 			prof_shift);
 	} else if (get_option(&str, &par)) {
 		prof_shift = par;
 		prof_on = CPU_PROFILING;
-		printk(KERN_INFO "kernel profiling enabled (shift: %ld)\n",
+		pr_info("kernel profiling enabled (shift: %ld)\n",
 			prof_shift);
 	}
 	return 1;
@@ -101,20 +96,40 @@ static int __init profile_setup(char *str)
 
 __setup("profile=", profile_setup);
 
-void __init profile_init(void)
+int __ref profile_init(void)
 {
+	int buffer_bytes;
 	if (!prof_on)
-		return;
+		return 0;
 
 	/* only text is profiled */
 	prof_len = (_etext - _stext) >> prof_shift;
-	prof_buffer = alloc_bootmem(prof_len*sizeof(atomic_t));
+	buffer_bytes = prof_len*sizeof(atomic_t);
+
+	if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
+		return -ENOMEM;
+
+	cpumask_copy(prof_cpu_mask, cpu_possible_mask);
+
+	prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL|__GFP_NOWARN);
+	if (prof_buffer)
+		return 0;
+
+	prof_buffer = alloc_pages_exact(buffer_bytes,
+					GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
+	if (prof_buffer)
+		return 0;
+
+	prof_buffer = vzalloc(buffer_bytes);
+	if (prof_buffer)
+		return 0;
+
+	free_cpumask_var(prof_cpu_mask);
+	return -ENOMEM;
 }
 
 /* Profile event notifications */
 
-#ifdef CONFIG_PROFILING
-
 static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
 static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
 static BLOCKING_NOTIFIER_HEAD(munmap_notifier);
@@ -186,27 +201,6 @@ int profile_event_unregister(enum profile_type type, struct notifier_block *n)
 }
 EXPORT_SYMBOL_GPL(profile_event_unregister);
 
-int register_timer_hook(int (*hook)(struct pt_regs *))
-{
-	if (timer_hook)
-		return -EBUSY;
-	timer_hook = hook;
-	return 0;
-}
-EXPORT_SYMBOL_GPL(register_timer_hook);
-
-void unregister_timer_hook(int (*hook)(struct pt_regs *))
-{
-	WARN_ON(hook != timer_hook);
-	timer_hook = NULL;
-	/* make sure all CPUs see the NULL hook */
-	synchronize_sched();  /* Allow ongoing interrupts to complete. */
-}
-EXPORT_SYMBOL_GPL(unregister_timer_hook);
-
-#endif /* CONFIG_PROFILING */
-
-
 #ifdef CONFIG_SMP
 /*
  * Each cpu has a pair of open-addressed hashtables for pending
@@ -237,7 +231,7 @@ EXPORT_SYMBOL_GPL(unregister_timer_hook);
  * pagetable hash functions, but uses a full hashtable full of finite
  * collision chains, not just pairs of them.
  *
- * -- wli
+ * -- nyc
  */
 static void __profile_flip_buffers(void *unused)
 {
@@ -253,7 +247,7 @@ static void profile_flip_buffers(void)
 	mutex_lock(&profile_flip_mutex);
 	j = per_cpu(cpu_profile_flip, get_cpu());
 	put_cpu();
-	on_each_cpu(__profile_flip_buffers, NULL, 0, 1);
+	on_each_cpu(__profile_flip_buffers, NULL, 1);
 	for_each_online_cpu(cpu) {
 		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
 		for (i = 0; i < NR_PROFILE_HIT; ++i) {
@@ -276,7 +270,7 @@ static void profile_discard_flip_buffers(void)
 	mutex_lock(&profile_flip_mutex);
 	i = per_cpu(cpu_profile_flip, get_cpu());
 	put_cpu();
-	on_each_cpu(__profile_flip_buffers, NULL, 0, 1);
+	on_each_cpu(__profile_flip_buffers, NULL, 1);
 	for_each_online_cpu(cpu) {
 		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
 		memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit));
@@ -284,14 +278,12 @@ static void profile_discard_flip_buffers(void)
 	mutex_unlock(&profile_flip_mutex);
 }
 
-void profile_hits(int type, void *__pc, unsigned int nr_hits)
+static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
 {
 	unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
 	int i, j, cpu;
 	struct profile_hit *hits;
 
-	if (prof_on != type || !prof_buffer)
-		return;
 	pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
 	i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
 	secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
@@ -335,7 +327,7 @@ out:
 	put_cpu();
 }
 
-static int __devinit profile_cpu_callback(struct notifier_block *info,
+static int profile_cpu_callback(struct notifier_block *info,
 					unsigned long action, void *__cpu)
 {
 	int node, cpu = (unsigned long)__cpu;
@@ -344,18 +336,18 @@ static int __devinit profile_cpu_callback(struct notifier_block *info,
 	switch (action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		node = cpu_to_node(cpu);
+		node = cpu_to_mem(cpu);
 		per_cpu(cpu_profile_flip, cpu) = 0;
 		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
-			page = alloc_pages_node(node,
+			page = alloc_pages_exact_node(node,
 					GFP_KERNEL | __GFP_ZERO,
 					0);
 			if (!page)
-				return NOTIFY_BAD;
+				return notifier_from_errno(-ENOMEM);
 			per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
 		}
 		if (!per_cpu(cpu_profile_hits, cpu)[0]) {
-			page = alloc_pages_node(node,
+			page = alloc_pages_exact_node(node,
 					GFP_KERNEL | __GFP_ZERO,
 					0);
 			if (!page)
@@ -367,16 +359,18 @@ out_free:
 		page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
 		per_cpu(cpu_profile_hits, cpu)[1] = NULL;
 		__free_page(page);
-		return NOTIFY_BAD;
+		return notifier_from_errno(-ENOMEM);
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
-		cpu_set(cpu, prof_cpu_mask);
+		if (prof_cpu_mask != NULL)
+			cpumask_set_cpu(cpu, prof_cpu_mask);
 		break;
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		cpu_clear(cpu, prof_cpu_mask);
+		if (prof_cpu_mask != NULL)
+			cpumask_clear_cpu(cpu, prof_cpu_mask);
 		if (per_cpu(cpu_profile_hits, cpu)[0]) {
 			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
 			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
@@ -396,69 +390,78 @@ out_free:
 #define profile_discard_flip_buffers()	do { } while (0)
 #define profile_cpu_callback		NULL
 
-void profile_hits(int type, void *__pc, unsigned int nr_hits)
+static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
 {
 	unsigned long pc;
-
-	if (prof_on != type || !prof_buffer)
-		return;
 	pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
 	atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
 }
 #endif /* !CONFIG_SMP */
+
+void profile_hits(int type, void *__pc, unsigned int nr_hits)
+{
+	if (prof_on != type || !prof_buffer)
+		return;
+	do_profile_hits(type, __pc, nr_hits);
+}
 EXPORT_SYMBOL_GPL(profile_hits);
 
 void profile_tick(int type)
 {
 	struct pt_regs *regs = get_irq_regs();
 
-	if (type == CPU_PROFILING && timer_hook)
-		timer_hook(regs);
-	if (!user_mode(regs) && cpu_isset(smp_processor_id(), prof_cpu_mask))
+	if (!user_mode(regs) && prof_cpu_mask != NULL &&
+	    cpumask_test_cpu(smp_processor_id(), prof_cpu_mask))
 		profile_hit(type, (void *)profile_pc(regs));
 }
 
 #ifdef CONFIG_PROC_FS
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include <asm/uaccess.h>
-#include <asm/ptrace.h>
 
-static int prof_cpu_mask_read_proc(char *page, char **start, off_t off,
-			int count, int *eof, void *data)
+static int prof_cpu_mask_proc_show(struct seq_file *m, void *v)
 {
-	int len = cpumask_scnprintf(page, count, *(cpumask_t *)data);
-	if (count - len < 2)
-		return -EINVAL;
-	len += sprintf(page + len, "\n");
-	return len;
+	seq_cpumask(m, prof_cpu_mask);
+	seq_putc(m, '\n');
+	return 0;
 }
 
-static int prof_cpu_mask_write_proc(struct file *file,
-			const char __user *buffer, unsigned long count, void *data)
+static int prof_cpu_mask_proc_open(struct inode *inode, struct file *file)
 {
-	cpumask_t *mask = (cpumask_t *)data;
-	unsigned long full_count = count, err;
-	cpumask_t new_value;
+	return single_open(file, prof_cpu_mask_proc_show, NULL);
+}
 
-	err = cpumask_parse_user(buffer, count, new_value);
-	if (err)
-		return err;
+static ssize_t prof_cpu_mask_proc_write(struct file *file,
+	const char __user *buffer, size_t count, loff_t *pos)
+{
+	cpumask_var_t new_value;
+	int err;
 
-	*mask = new_value;
-	return full_count;
+	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
+		return -ENOMEM;
+
+	err = cpumask_parse_user(buffer, count, new_value);
+	if (!err) {
+		cpumask_copy(prof_cpu_mask, new_value);
+		err = count;
+	}
+	free_cpumask_var(new_value);
+	return err;
 }
 
-void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir)
-{
-	struct proc_dir_entry *entry;
+static const struct file_operations prof_cpu_mask_proc_fops = {
+	.open		= prof_cpu_mask_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+	.write		= prof_cpu_mask_proc_write,
+};
 
+void create_prof_cpu_mask(void)
+{
 	/* create /proc/irq/prof_cpu_mask */
-	entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
-	if (!entry)
-		return;
-	entry->data = (void *)&prof_cpu_mask;
-	entry->read_proc = prof_cpu_mask_read_proc;
-	entry->write_proc = prof_cpu_mask_write_proc;
+	proc_create("irq/prof_cpu_mask", 0600, NULL, &prof_cpu_mask_proc_fops);
 }
 
 /*
@@ -525,30 +528,31 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
 static const struct file_operations proc_profile_operations = {
 	.read		= read_profile,
 	.write		= write_profile,
+	.llseek		= default_llseek,
 };
 
 #ifdef CONFIG_SMP
-static void __init profile_nop(void *unused)
+static void profile_nop(void *unused)
 {
 }
 
-static int __init create_hash_tables(void)
+static int create_hash_tables(void)
 {
 	int cpu;
 
 	for_each_online_cpu(cpu) {
-		int node = cpu_to_node(cpu);
+		int node = cpu_to_mem(cpu);
 		struct page *page;
 
-		page = alloc_pages_node(node,
-				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+		page = alloc_pages_exact_node(node,
+				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
 				0);
 		if (!page)
 			goto out_cleanup;
 		per_cpu(cpu_profile_hits, cpu)[1] =
 				(struct profile_hit *)page_address(page);
-		page = alloc_pages_node(node,
-				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+		page = alloc_pages_exact_node(node,
+				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
 				0);
 		if (!page)
 			goto out_cleanup;
@@ -559,7 +563,7 @@ static int __init create_hash_tables(void)
 out_cleanup:
 	prof_on = 0;
 	smp_mb();
-	on_each_cpu(profile_nop, NULL, 0, 1);
+	on_each_cpu(profile_nop, NULL, 1);
 	for_each_online_cpu(cpu) {
 		struct page *page;
 
@@ -580,21 +584,31 @@ out_cleanup:
 #define create_hash_tables()			({ 0; })
 #endif
 
-static int __init create_proc_profile(void)
+int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */
 {
 	struct proc_dir_entry *entry;
+	int err = 0;
 
 	if (!prof_on)
 		return 0;
-	if (create_hash_tables())
-		return -1;
-	entry = create_proc_entry("profile", S_IWUSR | S_IRUGO, NULL);
+
+	cpu_notifier_register_begin();
+
+	if (create_hash_tables()) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	entry = proc_create("profile", S_IWUSR | S_IRUGO,
+			NULL, &proc_profile_operations);
 	if (!entry)
-		return 0;
-	entry->proc_fops = &proc_profile_operations;
-	entry->size = (1+prof_len) * sizeof(atomic_t);
-	hotcpu_notifier(profile_cpu_callback, 0);
-	return 0;
+		goto out;
+	proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t));
+	__hotcpu_notifier(profile_cpu_callback, 0);
+
+out:
+	cpu_notifier_register_done();
+	return err;
 }
-module_init(create_proc_profile);
+subsys_initcall(create_proc_profile);
 #endif /* CONFIG_PROC_FS */
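For context on the allocation change above: the new profile_init() drops alloc_bootmem() in favour of a three-tier fallback, trying kzalloc() first (slab, physically contiguous), then alloc_pages_exact() (page allocator, still contiguous but not bound by slab size limits), and finally vzalloc() (virtually contiguous only). The sketch below restates that pattern outside the diff; the helper name alloc_profile_buffer() is hypothetical and only illustrates the ordering, it is not part of the patch.

#include <linux/slab.h>		/* kzalloc() */
#include <linux/gfp.h>		/* alloc_pages_exact() */
#include <linux/vmalloc.h>	/* vzalloc() */

/* Hypothetical helper mirroring the fallback order used by profile_init(). */
static void *alloc_profile_buffer(size_t buffer_bytes)
{
	void *buf;

	/* 1) slab allocation: cheapest, but the slab allocator caps the request size */
	buf = kzalloc(buffer_bytes, GFP_KERNEL | __GFP_NOWARN);
	if (buf)
		return buf;

	/* 2) exact page allocation: still physically contiguous memory */
	buf = alloc_pages_exact(buffer_bytes,
				GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
	if (buf)
		return buf;

	/* 3) vmalloc: only virtually contiguous, works even when memory is fragmented */
	return vzalloc(buffer_bytes);
}

Each tier passes __GFP_NOWARN so a failed attempt falls through silently to the next one. A caller that ever needed to free such a buffer would have to remember which allocator succeeded (kfree(), free_pages_exact() or vfree() respectively); profile_init() sidesteps this by never freeing prof_buffer.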
