Diffstat (limited to 'drivers/oprofile')
 drivers/oprofile/buffer_sync.c    |  47
 drivers/oprofile/cpu_buffer.c     | 164
 drivers/oprofile/cpu_buffer.h     |   5
 drivers/oprofile/event_buffer.c   |  42
 drivers/oprofile/event_buffer.h   |   2
 drivers/oprofile/nmi_timer_int.c  | 176
 drivers/oprofile/oprof.c          | 122
 drivers/oprofile/oprof.h          |  19
 drivers/oprofile/oprofile_files.c |  92
 drivers/oprofile/oprofile_perf.c  | 327
 drivers/oprofile/oprofile_stats.c |  32
 drivers/oprofile/oprofile_stats.h |   6
 drivers/oprofile/oprofilefs.c     | 143
 drivers/oprofile/timer_int.c      | 100
 14 files changed, 1000 insertions(+), 277 deletions(-)
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index 9da5a4b8113..d93b2b6b1f7 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -30,6 +30,7 @@
 #include <linux/fs.h>
 #include <linux/oprofile.h>
 #include <linux/sched.h>
+#include <linux/gfp.h>
 
 #include "oprofile_stats.h"
 #include "event_buffer.h"
@@ -38,7 +39,7 @@
 static LIST_HEAD(dying_tasks);
 static LIST_HEAD(dead_tasks);
-static cpumask_t marked_cpus = CPU_MASK_NONE;
+static cpumask_var_t marked_cpus;
 static DEFINE_SPINLOCK(task_mortuary);
 static void process_task_mortuary(void);
@@ -140,21 +141,19 @@ static struct notifier_block module_load_nb = {
 	.notifier_call = module_load_notify,
 };
 
-
-static void end_sync(void)
+static void free_all_tasks(void)
 {
-	end_cpu_work();
 	/* make sure we don't leak task structs */
 	process_task_mortuary();
 	process_task_mortuary();
 }
 
-
 int sync_start(void)
 {
 	int err;
 
-	start_cpu_work();
+	if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
+		return -ENOMEM;
 
 	err = task_handoff_register(&task_free_nb);
 	if (err)
@@ -169,6 +168,8 @@ int sync_start(void)
 	if (err)
 		goto out4;
 
+	start_cpu_work();
+
 out:
 	return err;
 out4:
@@ -177,19 +178,26 @@ out3:
 	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
 out2:
 	task_handoff_unregister(&task_free_nb);
+	free_all_tasks();
 out1:
-	end_sync();
+	free_cpumask_var(marked_cpus);
 	goto out;
 }
 
 
 void sync_stop(void)
 {
+	end_cpu_work();
 	unregister_module_notifier(&module_load_nb);
 	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
 	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
 	task_handoff_unregister(&task_free_nb);
-	end_sync();
+	barrier();			/* do all of the above first */
+
+	flush_cpu_work();
+
+	free_all_tasks();
+	free_cpumask_var(marked_cpus);
 }
 
@@ -208,7 +216,7 @@ static inline unsigned long fast_get_dcookie(struct path *path)
 }
 
 
-/* Look up the dcookie for the task's first VM_EXECUTABLE mapping,
+/* Look up the dcookie for the task's mm->exe_file,
  * which corresponds loosely to "application name". This is
  * not strictly necessary but allows oprofile to associate
  * shared-library samples with particular applications
@@ -216,21 +224,10 @@ static inline unsigned long fast_get_dcookie(struct path *path)
 static unsigned long get_exec_dcookie(struct mm_struct *mm)
 {
 	unsigned long cookie = NO_COOKIE;
-	struct vm_area_struct *vma;
-
-	if (!mm)
-		goto out;
 
-	for (vma = mm->mmap; vma; vma = vma->vm_next) {
-		if (!vma->vm_file)
-			continue;
-		if (!(vma->vm_flags & VM_EXECUTABLE))
-			continue;
-		cookie = fast_get_dcookie(&vma->vm_file->f_path);
-		break;
-	}
+	if (mm && mm->exe_file)
+		cookie = fast_get_dcookie(&mm->exe_file->f_path);
 
-out:
 	return cookie;
 }
 
@@ -456,10 +453,10 @@ static void mark_done(int cpu)
 {
 	int i;
 
-	cpu_set(cpu, marked_cpus);
+	cpumask_set_cpu(cpu, marked_cpus);
 
 	for_each_online_cpu(i) {
-		if (!cpu_isset(i, marked_cpus))
+		if (!cpumask_test_cpu(i, marked_cpus))
 			return;
 	}
 
@@ -468,7 +465,7 @@ static void mark_done(int cpu)
 	 */
 	process_task_mortuary();
 
-	cpus_clear(marked_cpus);
+	cpumask_clear(marked_cpus);
 }
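The buffer_sync.c hunks above replace the static marked_cpus bitmap with a dynamically allocated cpumask_var_t, which lives off the stack when CONFIG_CPUMASK_OFFSTACK is enabled and must therefore be freed on every exit path. A minimal sketch of that allocation pattern, using hypothetical module code rather than anything from the patch:

/*
 * Sketch of the cpumask_var_t lifecycle (demo_* names are invented).
 * zalloc_cpumask_var() returns false on allocation failure; with the
 * on-stack configuration, free_cpumask_var() is a no-op.
 */
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/smp.h>

static cpumask_var_t demo_mask;

static int __init demo_init(void)
{
	if (!zalloc_cpumask_var(&demo_mask, GFP_KERNEL))	/* zeroed mask */
		return -ENOMEM;
	cpumask_set_cpu(smp_processor_id(), demo_mask);
	return 0;
}

static void __exit demo_exit(void)
{
	free_cpumask_var(demo_mask);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");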
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index e76d715e434..8aa73fac6ad 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -21,7 +21,6 @@
 #include <linux/sched.h>
 #include <linux/oprofile.h>
-#include <linux/vmalloc.h>
 #include <linux/errno.h>
 
 #include "event_buffer.h"
@@ -31,24 +30,8 @@
 
 #define OP_BUFFER_FLAGS	0
 
-/*
- * Read and write access is using spin locking. Thus, writing to the
- * buffer by NMI handler (x86) could occur also during critical
- * sections when reading the buffer. To avoid this, there are 2
- * buffers for independent read and write access. Read access is in
- * process context only, write access only in the NMI handler. If the
- * read buffer runs empty, both buffers are swapped atomically. There
- * is potentially a small window during swapping where the buffers are
- * disabled and samples could be lost.
- *
- * Using 2 buffers is a little bit overhead, but the solution is clear
- * and does not require changes in the ring buffer implementation. It
- * can be changed to a single buffer solution when the ring buffer
- * access is implemented as non-locking atomic code.
- */
-static struct ring_buffer *op_ring_buffer_read;
-static struct ring_buffer *op_ring_buffer_write;
-DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
+static struct ring_buffer *op_ring_buffer;
+DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
 
 static void wq_sync_buffer(struct work_struct *work);
 
@@ -62,37 +45,34 @@ unsigned long oprofile_get_cpu_buffer_size(void)
 
 void oprofile_cpu_buffer_inc_smpl_lost(void)
 {
-	struct oprofile_cpu_buffer *cpu_buf
-		= &__get_cpu_var(cpu_buffer);
+	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
 
 	cpu_buf->sample_lost_overflow++;
 }
 
 void free_cpu_buffers(void)
 {
-	if (op_ring_buffer_read)
-		ring_buffer_free(op_ring_buffer_read);
-	op_ring_buffer_read = NULL;
-	if (op_ring_buffer_write)
-		ring_buffer_free(op_ring_buffer_write);
-	op_ring_buffer_write = NULL;
+	if (op_ring_buffer)
+		ring_buffer_free(op_ring_buffer);
+	op_ring_buffer = NULL;
 }
 
+#define RB_EVENT_HDR_SIZE  4
+
 int alloc_cpu_buffers(void)
 {
 	int i;
 
 	unsigned long buffer_size = oprofile_cpu_buffer_size;
+	unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
+						 RB_EVENT_HDR_SIZE);
 
-	op_ring_buffer_read = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
-	if (!op_ring_buffer_read)
-		goto fail;
-	op_ring_buffer_write = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
-	if (!op_ring_buffer_write)
+	op_ring_buffer = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
+	if (!op_ring_buffer)
 		goto fail;
 
 	for_each_possible_cpu(i) {
-		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
+		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
 
 		b->last_task = NULL;
 		b->last_is_kernel = -1;
@@ -119,7 +99,7 @@ void start_cpu_work(void)
 	work_enabled = 1;
 
 	for_each_online_cpu(i) {
-		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
+		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
 
 		/*
 		 * Spread the work by 1 jiffy per cpu so they dont all
@@ -131,17 +111,19 @@
 
 void end_cpu_work(void)
 {
-	int i;
-
 	work_enabled = 0;
+}
+
+void flush_cpu_work(void)
+{
+	int i;
 
 	for_each_online_cpu(i) {
-		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
+		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
 
-		cancel_delayed_work(&b->work);
+		/* these works are per-cpu, no need for flush_sync */
+		flush_delayed_work(&b->work);
 	}
-
-	flush_scheduled_work();
 }
 
 /*
@@ -160,16 +142,11 @@ struct op_sample
 *op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
 {
 	entry->event = ring_buffer_lock_reserve
-		(op_ring_buffer_write, sizeof(struct op_sample) +
-		size * sizeof(entry->sample->data[0]), &entry->irq_flags);
-	if (entry->event)
-		entry->sample = ring_buffer_event_data(entry->event);
-	else
-		entry->sample = NULL;
-
-	if (!entry->sample)
+		(op_ring_buffer, sizeof(struct op_sample) +
+		size * sizeof(entry->sample->data[0]));
+	if (!entry->event)
 		return NULL;
-
+	entry->sample = ring_buffer_event_data(entry->event);
 	entry->size = size;
 	entry->data = entry->sample->data;
 
@@ -178,26 +155,16 @@ struct op_sample
 
 int op_cpu_buffer_write_commit(struct op_entry *entry)
 {
-	return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event,
-					 entry->irq_flags);
+	return ring_buffer_unlock_commit(op_ring_buffer, entry->event);
 }
 
 struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
 {
 	struct ring_buffer_event *e;
-	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
-	if (e)
-		goto event;
-	if (ring_buffer_swap_cpu(op_ring_buffer_read,
-				 op_ring_buffer_write,
-				 cpu))
+	e = ring_buffer_consume(op_ring_buffer, cpu, NULL, NULL);
+	if (!e)
 		return NULL;
-	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
-	if (e)
-		goto event;
-	return NULL;
 
-event:
 	entry->event = e;
 	entry->sample = ring_buffer_event_data(e);
 	entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
@@ -208,8 +175,7 @@ event:
 
 unsigned long op_cpu_buffer_entries(int cpu)
 {
-	return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
-		+ ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
+	return ring_buffer_entries_cpu(op_ring_buffer, cpu);
 }
 
 static int
@@ -292,8 +258,10 @@ op_add_sample(struct oprofile_cpu_buffer *cpu_buf,
  */
 static int
 log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
-	   unsigned long backtrace, int is_kernel, unsigned long event)
+	   unsigned long backtrace, int is_kernel, unsigned long event,
+	   struct task_struct *task)
 {
+	struct task_struct *tsk = task ? task : current;
 	cpu_buf->sample_received++;
 
 	if (pc == ESCAPE_CODE) {
@@ -301,7 +269,7 @@ log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
 		return 0;
 	}
 
-	if (op_add_code(cpu_buf, backtrace, is_kernel, current))
+	if (op_add_code(cpu_buf, backtrace, is_kernel, tsk))
 		goto fail;
 
 	if (op_add_sample(cpu_buf, pc, event))
@@ -326,16 +294,17 @@ static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
 
 static inline void
 __oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
-			  unsigned long event, int is_kernel)
+			  unsigned long event, int is_kernel,
+			  struct task_struct *task)
 {
-	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
+	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
 	unsigned long backtrace = oprofile_backtrace_depth;
 
 	/*
 	 * if log_sample() fail we can't backtrace since we lost the
 	 * source of this event
 	 */
-	if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event))
+	if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event, task))
 		/* failed */
 		return;
 
@@ -347,18 +316,33 @@ __oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
 	oprofile_end_trace(cpu_buf);
 }
 
+void oprofile_add_ext_hw_sample(unsigned long pc, struct pt_regs * const regs,
+				unsigned long event, int is_kernel,
+				struct task_struct *task)
+{
+	__oprofile_add_ext_sample(pc, regs, event, is_kernel, task);
+}
+
 void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
 			     unsigned long event, int is_kernel)
 {
-	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
+	__oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL);
 }
 
 void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
 {
-	int is_kernel = !user_mode(regs);
-	unsigned long pc = profile_pc(regs);
+	int is_kernel;
+	unsigned long pc;
+
+	if (likely(regs)) {
+		is_kernel = !user_mode(regs);
+		pc = profile_pc(regs);
+	} else {
+		is_kernel = 0;    /* This value will not be used */
+		pc = ESCAPE_CODE; /* as this causes an early return. */
+	}
 
-	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
+	__oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL);
 }
 
 /*
@@ -373,7 +357,7 @@ oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
 {
 	struct op_sample *sample;
 	int is_kernel = !user_mode(regs);
-	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
+	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
 
 	cpu_buf->sample_received++;
 
@@ -404,6 +388,21 @@ int oprofile_add_data(struct op_entry *entry, unsigned long val)
 	return op_cpu_buffer_add_data(entry, val);
 }
 
+int oprofile_add_data64(struct op_entry *entry, u64 val)
+{
+	if (!entry->event)
+		return 0;
+	if (op_cpu_buffer_get_size(entry) < 2)
+		/*
+		 * the function returns 0 to indicate a too small
+		 * buffer, even if there is some space left
+		 */
+		return 0;
+	if (!op_cpu_buffer_add_data(entry, (u32)val))
+		return 0;
+	return op_cpu_buffer_add_data(entry, (u32)(val >> 32));
+}
+
 int oprofile_write_commit(struct op_entry *entry)
 {
 	if (!entry->event)
@@ -413,13 +412,13 @@ int oprofile_write_commit(struct op_entry *entry)
 
 void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
 {
-	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
-	log_sample(cpu_buf, pc, 0, is_kernel, event);
+	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
+	log_sample(cpu_buf, pc, 0, is_kernel, event, NULL);
 }
 
 void oprofile_add_trace(unsigned long pc)
 {
-	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
+	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
 
 	if (!cpu_buf->tracing)
 		return;
@@ -452,14 +451,9 @@ static void wq_sync_buffer(struct work_struct *work)
 {
 	struct oprofile_cpu_buffer *b =
 		container_of(work, struct oprofile_cpu_buffer, work.work);
-	if (b->cpu != smp_processor_id()) {
-		printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
-		       smp_processor_id(), b->cpu);
-
-		if (!cpu_online(b->cpu)) {
-			cancel_delayed_work(&b->work);
-			return;
-		}
+	if (b->cpu != smp_processor_id() && !cpu_online(b->cpu)) {
+		cancel_delayed_work(&b->work);
+		return;
 	}
 	sync_buffer(b->cpu);
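The cpu_buffer.c changes collapse the old read/write buffer pair into one lockless ring buffer, so writers reserve space, fill it, and commit in three steps. A condensed usage sketch of that reserve/commit cycle, with a hypothetical payload type in place of struct op_sample:

/*
 * Reserve-fill-commit against a single struct ring_buffer, mirroring
 * op_cpu_buffer_write_reserve()/op_cpu_buffer_write_commit() above.
 * demo_sample and demo_write are invented for illustration.
 */
#include <linux/ring_buffer.h>

struct demo_sample {
	unsigned long pc;
	unsigned long event;
};

static int demo_write(struct ring_buffer *rb, unsigned long pc,
		      unsigned long event)
{
	struct ring_buffer_event *e;
	struct demo_sample *s;

	e = ring_buffer_lock_reserve(rb, sizeof(*s));	/* may fail when full */
	if (!e)
		return -ENOMEM;
	s = ring_buffer_event_data(e);
	s->pc = pc;
	s->event = event;
	return ring_buffer_unlock_commit(rb, e);
}

Note that with the buffer pair gone, ring_buffer_lock_reserve() no longer takes an irq_flags argument here, which is why the entry->irq_flags plumbing disappears from the hunks above.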
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index 272995d2029..e1d097e250a 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -25,6 +25,7 @@ void free_cpu_buffers(void);
 
 void start_cpu_work(void);
 void end_cpu_work(void);
+void flush_cpu_work(void);
 
 /* CPU buffer is composed of such entries (which are
  * also used for context switch notes)
@@ -50,7 +51,7 @@ struct oprofile_cpu_buffer {
 	struct delayed_work work;
 };
 
-DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
+DECLARE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
 
 /*
  * Resets the cpu buffer to a sane state.
@@ -60,7 +61,7 @@ DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
  */
 static inline void op_cpu_buffer_reset(int cpu)
 {
-	struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
+	struct oprofile_cpu_buffer *cpu_buf = &per_cpu(op_cpu_buffer, cpu);
 
 	cpu_buf->last_is_kernel = -1;
 	cpu_buf->last_task = NULL;
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
index 2b7ae366ceb..c0cc4e7ff02 100644
--- a/drivers/oprofile/event_buffer.c
+++ b/drivers/oprofile/event_buffer.c
@@ -35,12 +35,23 @@ static size_t buffer_pos;
 /* atomic_t because wait_event checks it outside of buffer_mutex */
 static atomic_t buffer_ready = ATOMIC_INIT(0);
 
-/* Add an entry to the event buffer. When we
- * get near to the end we wake up the process
- * sleeping on the read() of the file.
+/*
+ * Add an entry to the event buffer. When we get near to the end we
+ * wake up the process sleeping on the read() of the file. To protect
+ * the event_buffer this function may only be called when buffer_mutex
+ * is set.
  */
 void add_event_entry(unsigned long value)
 {
+	/*
+	 * This shouldn't happen since all workqueues or handlers are
+	 * canceled or flushed before the event buffer is freed.
+	 */
+	if (!event_buffer) {
+		WARN_ON_ONCE(1);
+		return;
+	}
+
 	if (buffer_pos == buffer_size) {
 		atomic_inc(&oprofile_stats.event_lost_overflow);
 		return;
@@ -69,32 +80,32 @@ void wake_up_buffer_waiter(void)
 
 int alloc_event_buffer(void)
 {
-	int err = -ENOMEM;
 	unsigned long flags;
 
-	spin_lock_irqsave(&oprofilefs_lock, flags);
+	raw_spin_lock_irqsave(&oprofilefs_lock, flags);
 	buffer_size = oprofile_buffer_size;
 	buffer_watershed = oprofile_buffer_watershed;
-	spin_unlock_irqrestore(&oprofilefs_lock, flags);
+	raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);
 
 	if (buffer_watershed >= buffer_size)
 		return -EINVAL;
 
+	buffer_pos = 0;
 	event_buffer = vmalloc(sizeof(unsigned long) * buffer_size);
 	if (!event_buffer)
-		goto out;
+		return -ENOMEM;
 
-	err = 0;
-out:
-	return err;
+	return 0;
 }
 
 void free_event_buffer(void)
 {
+	mutex_lock(&buffer_mutex);
 	vfree(event_buffer);
-
+	buffer_pos = 0;
 	event_buffer = NULL;
+	mutex_unlock(&buffer_mutex);
 }
 
 
@@ -124,7 +135,7 @@ static int event_buffer_open(struct inode *inode, struct file *file)
 	 * echo 1 >/dev/oprofile/enable
 	 */
 
-	return 0;
+	return nonseekable_open(inode, file);
 
 fail:
 	dcookie_unregister(file->private_data);
@@ -167,6 +178,12 @@ static ssize_t event_buffer_read(struct file *file, char __user *buf,
 
 	mutex_lock(&buffer_mutex);
 
+	/* May happen if the buffer is freed during pending reads. */
+	if (!event_buffer) {
+		retval = -EINTR;
+		goto out;
+	}
+
 	atomic_set(&buffer_ready, 0);
 
 	retval = -EFAULT;
@@ -188,4 +205,5 @@ const struct file_operations event_buffer_fops = {
 	.open		= event_buffer_open,
 	.release	= event_buffer_release,
 	.read		= event_buffer_read,
+	.llseek		= no_llseek,
 };
diff --git a/drivers/oprofile/event_buffer.h b/drivers/oprofile/event_buffer.h
index 4e70749f8d1..a8d5bb3cba8 100644
--- a/drivers/oprofile/event_buffer.h
+++ b/drivers/oprofile/event_buffer.h
@@ -11,7 +11,7 @@
 #define EVENT_BUFFER_H
 
 #include <linux/types.h>
-#include <asm/mutex.h>
+#include <linux/mutex.h>
 
 int alloc_event_buffer(void);
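The event-buffer hunks close a use-after-free race: the buffer is now freed and nulled under buffer_mutex, and readers that wake up after a free see the NULL pointer and bail out with -EINTR. A stripped-down sketch of that pattern under the same assumptions (demo_* names are invented):

/*
 * Free and dereference the shared buffer only under one mutex, so a
 * reader that slept across the free can detect it.
 */
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

static DEFINE_MUTEX(demo_mutex);
static unsigned long *demo_buf;

static ssize_t demo_read_check(void)
{
	ssize_t ret = 0;

	mutex_lock(&demo_mutex);
	if (!demo_buf)		/* freed while we waited */
		ret = -EINTR;
	mutex_unlock(&demo_mutex);
	return ret;
}

static void demo_free(void)
{
	mutex_lock(&demo_mutex);
	vfree(demo_buf);	/* vfree(NULL) is safe */
	demo_buf = NULL;
	mutex_unlock(&demo_mutex);
}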
diff --git a/drivers/oprofile/nmi_timer_int.c b/drivers/oprofile/nmi_timer_int.c
new file mode 100644
index 00000000000..9559829fb23
--- /dev/null
+++ b/drivers/oprofile/nmi_timer_int.c
@@ -0,0 +1,176 @@
+/**
+ * @file nmi_timer_int.c
+ *
+ * @remark Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * @author Robert Richter <robert.richter@amd.com>
+ */
+
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/errno.h>
+#include <linux/oprofile.h>
+#include <linux/perf_event.h>
+
+#ifdef CONFIG_OPROFILE_NMI_TIMER
+
+static DEFINE_PER_CPU(struct perf_event *, nmi_timer_events);
+static int ctr_running;
+
+static struct perf_event_attr nmi_timer_attr = {
+	.type		= PERF_TYPE_HARDWARE,
+	.config		= PERF_COUNT_HW_CPU_CYCLES,
+	.size		= sizeof(struct perf_event_attr),
+	.pinned		= 1,
+	.disabled	= 1,
+};
+
+static void nmi_timer_callback(struct perf_event *event,
+			       struct perf_sample_data *data,
+			       struct pt_regs *regs)
+{
+	event->hw.interrupts = 0;	/* don't throttle interrupts */
+	oprofile_add_sample(regs, 0);
+}
+
+static int nmi_timer_start_cpu(int cpu)
+{
+	struct perf_event *event = per_cpu(nmi_timer_events, cpu);
+
+	if (!event) {
+		event = perf_event_create_kernel_counter(&nmi_timer_attr, cpu, NULL,
+							 nmi_timer_callback, NULL);
+		if (IS_ERR(event))
+			return PTR_ERR(event);
+		per_cpu(nmi_timer_events, cpu) = event;
+	}
+
+	if (event && ctr_running)
+		perf_event_enable(event);
+
+	return 0;
+}
+
+static void nmi_timer_stop_cpu(int cpu)
+{
+	struct perf_event *event = per_cpu(nmi_timer_events, cpu);
+
+	if (event && ctr_running)
+		perf_event_disable(event);
+}
+
+static int nmi_timer_cpu_notifier(struct notifier_block *b, unsigned long action,
+				  void *data)
+{
+	int cpu = (unsigned long)data;
+	switch (action) {
+	case CPU_DOWN_FAILED:
+	case CPU_ONLINE:
+		nmi_timer_start_cpu(cpu);
+		break;
+	case CPU_DOWN_PREPARE:
+		nmi_timer_stop_cpu(cpu);
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block nmi_timer_cpu_nb = {
+	.notifier_call = nmi_timer_cpu_notifier
+};
+
+static int nmi_timer_start(void)
+{
+	int cpu;
+
+	get_online_cpus();
+	ctr_running = 1;
+	for_each_online_cpu(cpu)
+		nmi_timer_start_cpu(cpu);
+	put_online_cpus();
+
+	return 0;
+}
+
+static void nmi_timer_stop(void)
+{
+	int cpu;
+
+	get_online_cpus();
+	for_each_online_cpu(cpu)
+		nmi_timer_stop_cpu(cpu);
+	ctr_running = 0;
+	put_online_cpus();
+}
+
+static void nmi_timer_shutdown(void)
+{
+	struct perf_event *event;
+	int cpu;
+
+	cpu_notifier_register_begin();
+	__unregister_cpu_notifier(&nmi_timer_cpu_nb);
+	for_each_possible_cpu(cpu) {
+		event = per_cpu(nmi_timer_events, cpu);
+		if (!event)
+			continue;
+		perf_event_disable(event);
+		per_cpu(nmi_timer_events, cpu) = NULL;
+		perf_event_release_kernel(event);
+	}
+
+	cpu_notifier_register_done();
+}
+
+static int nmi_timer_setup(void)
+{
+	int cpu, err;
+	u64 period;
+
+	/* clock cycles per tick: */
+	period = (u64)cpu_khz * 1000;
+	do_div(period, HZ);
+	nmi_timer_attr.sample_period = period;
+
+	cpu_notifier_register_begin();
+	err = __register_cpu_notifier(&nmi_timer_cpu_nb);
+	if (err)
+		goto out;
+
+	/* can't attach events to offline cpus: */
+	for_each_online_cpu(cpu) {
+		err = nmi_timer_start_cpu(cpu);
+		if (err) {
+			cpu_notifier_register_done();
+			nmi_timer_shutdown();
+			return err;
+		}
+	}
+
+out:
+	cpu_notifier_register_done();
+	return err;
+}
+
+int __init op_nmi_timer_init(struct oprofile_operations *ops)
+{
+	int err = 0;
+
+	err = nmi_timer_setup();
+	if (err)
+		return err;
+	nmi_timer_shutdown();		/* only check, don't alloc */
+
+	ops->create_files	= NULL;
+	ops->setup		= nmi_timer_setup;
+	ops->shutdown		= nmi_timer_shutdown;
+	ops->start		= nmi_timer_start;
+	ops->stop		= nmi_timer_stop;
+	ops->cpu_type		= "timer";
+
+	printk(KERN_INFO "oprofile: using NMI timer interrupt.\n");
+
+	return 0;
+}
+
+#endif
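The NMI timer approximates a HZ-rate tick by programming a pinned cycle counter with a sample period of one tick's worth of clock cycles. The 64-bit division has to go through do_div() because plain 64-bit division is unavailable on 32-bit kernels. A standalone sketch of that computation (cpu_khz is x86-specific, so it is passed in here as a parameter):

/*
 * Cycles per tick = (cpu_khz * 1000) / hz. do_div() divides its first
 * argument in place and returns the remainder, which we discard.
 */
#include <asm/div64.h>
#include <linux/types.h>

static u64 demo_sample_period(unsigned int khz, unsigned int hz)
{
	u64 period = (u64)khz * 1000;	/* clock cycles per second */

	do_div(period, hz);		/* clock cycles per tick */
	return period;
}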
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index 3cffce90f82..ed2c3ec0702 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -12,7 +12,9 @@
 #include <linux/init.h>
 #include <linux/oprofile.h>
 #include <linux/moduleparam.h>
-#include <asm/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/time.h>
+#include <linux/mutex.h>
 
 #include "oprof.h"
 #include "event_buffer.h"
@@ -87,6 +89,69 @@ out:
 	return err;
 }
 
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+
+static void switch_worker(struct work_struct *work);
+static DECLARE_DELAYED_WORK(switch_work, switch_worker);
+
+static void start_switch_worker(void)
+{
+	if (oprofile_ops.switch_events)
+		schedule_delayed_work(&switch_work, oprofile_time_slice);
+}
+
+static void stop_switch_worker(void)
+{
+	cancel_delayed_work_sync(&switch_work);
+}
+
+static void switch_worker(struct work_struct *work)
+{
+	if (oprofile_ops.switch_events())
+		return;
+
+	atomic_inc(&oprofile_stats.multiplex_counter);
+	start_switch_worker();
+}
+
+/* User inputs in ms, converts to jiffies */
+int oprofile_set_timeout(unsigned long val_msec)
+{
+	int err = 0;
+	unsigned long time_slice;
+
+	mutex_lock(&start_mutex);
+
+	if (oprofile_started) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	if (!oprofile_ops.switch_events) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	time_slice = msecs_to_jiffies(val_msec);
+	if (time_slice == MAX_JIFFY_OFFSET) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	oprofile_time_slice = time_slice;
+
+out:
+	mutex_unlock(&start_mutex);
+	return err;
+
+}
+
+#else
+
+static inline void start_switch_worker(void) { }
+static inline void stop_switch_worker(void) { }
+
+#endif
 
 /* Actually start profiling (echo 1>/dev/oprofile/enable) */
 int oprofile_start(void)
@@ -108,6 +173,8 @@ int oprofile_start(void)
 	if ((err = oprofile_ops.start()))
 		goto out;
 
+	start_switch_worker();
+
 	oprofile_started = 1;
 out:
 	mutex_unlock(&start_mutex);
@@ -123,6 +190,9 @@ void oprofile_stop(void)
 		goto out;
 	oprofile_ops.stop();
 	oprofile_started = 0;
+
+	stop_switch_worker();
+
 	/* wake up the daemon to read what remains */
 	wake_up_buffer_waiter();
 out:
@@ -155,53 +225,53 @@ post_sync:
 	mutex_unlock(&start_mutex);
 }
 
-
-int oprofile_set_backtrace(unsigned long val)
+int oprofile_set_ulong(unsigned long *addr, unsigned long val)
 {
-	int err = 0;
+	int err = -EBUSY;
 
 	mutex_lock(&start_mutex);
-
-	if (oprofile_started) {
-		err = -EBUSY;
-		goto out;
+	if (!oprofile_started) {
+		*addr = val;
+		err = 0;
 	}
-
-	if (!oprofile_ops.backtrace) {
-		err = -EINVAL;
-		goto out;
-	}
-
-	oprofile_backtrace_depth = val;
-
-out:
 	mutex_unlock(&start_mutex);
+
 	return err;
 }
 
+static int timer_mode;
+
 static int __init oprofile_init(void)
 {
 	int err;
 
+	/* always init architecture to setup backtrace support */
+	timer_mode = 0;
 	err = oprofile_arch_init(&oprofile_ops);
-
-	if (err < 0 || timer) {
-		printk(KERN_INFO "oprofile: using timer interrupt.\n");
-		oprofile_timer_init(&oprofile_ops);
+	if (!err) {
+		if (!timer && !oprofilefs_register())
+			return 0;
+		oprofile_arch_exit();
 	}
 
-	err = oprofilefs_register();
-	if (err)
-		oprofile_arch_exit();
+	/* setup timer mode: */
+	timer_mode = 1;
+	/* no nmi timer mode if oprofile.timer is set */
+	if (timer || op_nmi_timer_init(&oprofile_ops)) {
+		err = oprofile_timer_init(&oprofile_ops);
+		if (err)
+			return err;
+	}
 
-	return err;
+	return oprofilefs_register();
 }
 
 
 static void __exit oprofile_exit(void)
 {
 	oprofilefs_unregister();
-	oprofile_arch_exit();
+	if (!timer_mode)
+		oprofile_arch_exit();
 }
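The multiplexing worker above is a self-rearming delayed work item: it runs once per time slice, asks the architecture backend to rotate counters, and reschedules itself unless the callback fails; cancel_delayed_work_sync() on the stop path kills the chain. A minimal sketch of that shape, with an invented switch callback standing in for oprofile_ops.switch_events():

/*
 * Self-rearming delayed work (demo_* names are hypothetical).
 */
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static int demo_switch_events(void)	/* stand-in for the real callback */
{
	return 0;			/* 0 = rotated successfully */
}

static void demo_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(demo_work, demo_worker);
static unsigned long demo_slice = HZ / 100;	/* ~10 ms at HZ=1000 */

static void demo_worker(struct work_struct *work)
{
	if (demo_switch_events())
		return;			/* error: stop rearming */
	schedule_delayed_work(&demo_work, demo_slice);
}

static void demo_stop(void)
{
	cancel_delayed_work_sync(&demo_work);	/* waits for a running instance */
}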
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h
index c288d3c24b5..d5412060ab0 100644
--- a/drivers/oprofile/oprof.h
+++ b/drivers/oprofile/oprof.h
@@ -24,16 +24,27 @@ struct oprofile_operations;
 extern unsigned long oprofile_buffer_size;
 extern unsigned long oprofile_cpu_buffer_size;
 extern unsigned long oprofile_buffer_watershed;
+extern unsigned long oprofile_time_slice;
+
 extern struct oprofile_operations oprofile_ops;
 extern unsigned long oprofile_started;
 extern unsigned long oprofile_backtrace_depth;
 
-struct super_block;
 struct dentry;
 
-void oprofile_create_files(struct super_block *sb, struct dentry *root);
-void oprofile_timer_init(struct oprofile_operations *ops);
+void oprofile_create_files(struct dentry *root);
+int oprofile_timer_init(struct oprofile_operations *ops);
+#ifdef CONFIG_OPROFILE_NMI_TIMER
+int op_nmi_timer_init(struct oprofile_operations *ops);
+#else
+static inline int op_nmi_timer_init(struct oprofile_operations *ops)
+{
+	return -ENODEV;
+}
+#endif
 
-int oprofile_set_backtrace(unsigned long depth);
+int oprofile_set_ulong(unsigned long *addr, unsigned long val);
+int oprofile_set_timeout(unsigned long time);
 
 #endif /* OPROF_H */
diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
index 5d36ffc30dd..ee2cfce358b 100644
--- a/drivers/oprofile/oprofile_files.c
+++ b/drivers/oprofile/oprofile_files.c
@@ -9,6 +9,7 @@
 
 #include <linux/fs.h>
 #include <linux/oprofile.h>
+#include <linux/jiffies.h>
 
 #include "event_buffer.h"
 #include "oprofile_stats.h"
@@ -17,10 +18,52 @@
 #define BUFFER_SIZE_DEFAULT		131072
 #define CPU_BUFFER_SIZE_DEFAULT		8192
 #define BUFFER_WATERSHED_DEFAULT	32768	/* FIXME: tune */
+#define TIME_SLICE_DEFAULT		1
 
 unsigned long oprofile_buffer_size;
 unsigned long oprofile_cpu_buffer_size;
 unsigned long oprofile_buffer_watershed;
+unsigned long oprofile_time_slice;
+
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+
+static ssize_t timeout_read(struct file *file, char __user *buf,
+		size_t count, loff_t *offset)
+{
+	return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
+					buf, count, offset);
+}
+
+
+static ssize_t timeout_write(struct file *file, char const __user *buf,
+		size_t count, loff_t *offset)
+{
+	unsigned long val;
+	int retval;
+
+	if (*offset)
+		return -EINVAL;
+
+	retval = oprofilefs_ulong_from_user(&val, buf, count);
+	if (retval <= 0)
+		return retval;
+
+	retval = oprofile_set_timeout(val);
+
+	if (retval)
+		return retval;
+	return count;
+}
+
+
+static const struct file_operations timeout_fops = {
+	.read		= timeout_read,
+	.write		= timeout_write,
+	.llseek		= default_llseek,
+};
+
+#endif
+
 
 static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
 {
@@ -37,21 +80,25 @@ static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
 	if (*offset)
 		return -EINVAL;
 
+	if (!oprofile_ops.backtrace)
+		return -EINVAL;
+
 	retval = oprofilefs_ulong_from_user(&val, buf, count);
-	if (retval)
+	if (retval <= 0)
 		return retval;
 
-	retval = oprofile_set_backtrace(val);
-
+	retval = oprofile_set_ulong(&oprofile_backtrace_depth, val);
 	if (retval)
 		return retval;
+
 	return count;
 }
 
 
 static const struct file_operations depth_fops = {
 	.read		= depth_read,
-	.write		= depth_write
+	.write		= depth_write,
+	.llseek		= default_llseek,
 };
 
 
@@ -63,6 +110,7 @@ static ssize_t pointer_size_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
 
 static const struct file_operations pointer_size_fops = {
 	.read		= pointer_size_read,
+	.llseek		= default_llseek,
 };
 
 
@@ -74,6 +122,7 @@ static ssize_t cpu_type_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
 
 static const struct file_operations cpu_type_fops = {
 	.read		= cpu_type_read,
+	.llseek		= default_llseek,
 };
 
 
@@ -92,9 +141,10 @@ static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
 		return -EINVAL;
 
 	retval = oprofilefs_ulong_from_user(&val, buf, count);
-	if (retval)
+	if (retval <= 0)
 		return retval;
 
+	retval = 0;
 	if (val)
 		retval = oprofile_start();
 	else
@@ -109,6 +159,7 @@ static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
 static const struct file_operations enable_fops = {
 	.read		= enable_read,
 	.write		= enable_write,
+	.llseek		= default_llseek,
 };
 
 
@@ -121,25 +172,30 @@ static ssize_t dump_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
 
 static const struct file_operations dump_fops = {
 	.write		= dump_write,
+	.llseek		= noop_llseek,
 };
 
-void oprofile_create_files(struct super_block *sb, struct dentry *root)
+void oprofile_create_files(struct dentry *root)
 {
 	/* reinitialize default values */
 	oprofile_buffer_size		= BUFFER_SIZE_DEFAULT;
 	oprofile_cpu_buffer_size	= CPU_BUFFER_SIZE_DEFAULT;
 	oprofile_buffer_watershed	= BUFFER_WATERSHED_DEFAULT;
-
-	oprofilefs_create_file(sb, root, "enable", &enable_fops);
-	oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
-	oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
-	oprofilefs_create_ulong(sb, root, "buffer_size", &oprofile_buffer_size);
-	oprofilefs_create_ulong(sb, root, "buffer_watershed", &oprofile_buffer_watershed);
-	oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &oprofile_cpu_buffer_size);
-	oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops);
-	oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops);
-	oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops);
-	oprofile_create_stats_files(sb, root);
+	oprofile_time_slice		= msecs_to_jiffies(TIME_SLICE_DEFAULT);
+
+	oprofilefs_create_file(root, "enable", &enable_fops);
+	oprofilefs_create_file_perm(root, "dump", &dump_fops, 0666);
+	oprofilefs_create_file(root, "buffer", &event_buffer_fops);
+	oprofilefs_create_ulong(root, "buffer_size", &oprofile_buffer_size);
+	oprofilefs_create_ulong(root, "buffer_watershed", &oprofile_buffer_watershed);
+	oprofilefs_create_ulong(root, "cpu_buffer_size", &oprofile_cpu_buffer_size);
+	oprofilefs_create_file(root, "cpu_type", &cpu_type_fops);
+	oprofilefs_create_file(root, "backtrace_depth", &depth_fops);
+	oprofilefs_create_file(root, "pointer_size", &pointer_size_fops);
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+	oprofilefs_create_file(root, "time_slice", &timeout_fops);
+#endif
+	oprofile_create_stats_files(root);
 	if (oprofile_ops.create_files)
-		oprofile_ops.create_files(root);
+		oprofile_ops.create_files(root);
 }
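All of the control files above follow one convention, which also explains the retval <= 0 checks: with the changed oprofilefs_ulong_from_user() semantics, a positive return is the number of bytes consumed, 0 means a zero-length write (report 0, change nothing), and a negative value is an error. A sketch of a file built on the same helpers, with a hypothetical backing variable:

/*
 * Skeleton oprofilefs control file (demo_val is invented).
 */
#include <linux/fs.h>
#include <linux/oprofile.h>

static unsigned long demo_val;

static ssize_t demo_read(struct file *file, char __user *buf,
			 size_t count, loff_t *offset)
{
	return oprofilefs_ulong_to_user(demo_val, buf, count, offset);
}

static ssize_t demo_write(struct file *file, char const __user *buf,
			  size_t count, loff_t *offset)
{
	unsigned long val;
	int retval;

	if (*offset)		/* writes only at offset 0 */
		return -EINVAL;

	retval = oprofilefs_ulong_from_user(&val, buf, count);
	if (retval <= 0)	/* error, or empty write */
		return retval;

	demo_val = val;
	return count;
}

static const struct file_operations demo_fops = {
	.read	= demo_read,
	.write	= demo_write,
	.llseek	= default_llseek,
};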
diff --git a/drivers/oprofile/oprofile_perf.c b/drivers/oprofile/oprofile_perf.c
new file mode 100644
index 00000000000..d5b2732b1b8
--- /dev/null
+++ b/drivers/oprofile/oprofile_perf.c
@@ -0,0 +1,327 @@
+/*
+ * Copyright 2010 ARM Ltd.
+ * Copyright 2012 Advanced Micro Devices, Inc., Robert Richter
+ *
+ * Perf-events backend for OProfile.
+ */
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/oprofile.h>
+#include <linux/slab.h>
+
+/*
+ * Per performance monitor configuration as set via oprofilefs.
+ */
+struct op_counter_config {
+	unsigned long		count;
+	unsigned long		enabled;
+	unsigned long		event;
+	unsigned long		unit_mask;
+	unsigned long		kernel;
+	unsigned long		user;
+	struct perf_event_attr	attr;
+};
+
+static int oprofile_perf_enabled;
+static DEFINE_MUTEX(oprofile_perf_mutex);
+
+static struct op_counter_config *counter_config;
+static DEFINE_PER_CPU(struct perf_event **, perf_events);
+static int num_counters;
+
+/*
+ * Overflow callback for oprofile.
+ */
+static void op_overflow_handler(struct perf_event *event,
+			struct perf_sample_data *data, struct pt_regs *regs)
+{
+	int id;
+	u32 cpu = smp_processor_id();
+
+	for (id = 0; id < num_counters; ++id)
+		if (per_cpu(perf_events, cpu)[id] == event)
+			break;
+
+	if (id != num_counters)
+		oprofile_add_sample(regs, id);
+	else
+		pr_warning("oprofile: ignoring spurious overflow "
+				"on cpu %u\n", cpu);
+}
+
+/*
+ * Called by oprofile_perf_setup to create perf attributes to mirror the
+ * oprofile settings in counter_config. Attributes are created as `pinned'
+ * events and so are permanently scheduled on the PMU.
+ */
+static void op_perf_setup(void)
+{
+	int i;
+	u32 size = sizeof(struct perf_event_attr);
+	struct perf_event_attr *attr;
+
+	for (i = 0; i < num_counters; ++i) {
+		attr = &counter_config[i].attr;
+		memset(attr, 0, size);
+		attr->type		= PERF_TYPE_RAW;
+		attr->size		= size;
+		attr->config		= counter_config[i].event;
+		attr->sample_period	= counter_config[i].count;
+		attr->pinned		= 1;
+	}
+}
+
+static int op_create_counter(int cpu, int event)
+{
+	struct perf_event *pevent;
+
+	if (!counter_config[event].enabled || per_cpu(perf_events, cpu)[event])
+		return 0;
+
+	pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
+						  cpu, NULL,
+						  op_overflow_handler, NULL);
+
+	if (IS_ERR(pevent))
+		return PTR_ERR(pevent);
+
+	if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
+		perf_event_release_kernel(pevent);
+		pr_warning("oprofile: failed to enable event %d "
+				"on CPU %d\n", event, cpu);
+		return -EBUSY;
+	}
+
+	per_cpu(perf_events, cpu)[event] = pevent;
+
+	return 0;
+}
+
+static void op_destroy_counter(int cpu, int event)
+{
+	struct perf_event *pevent = per_cpu(perf_events, cpu)[event];
+
+	if (pevent) {
+		perf_event_release_kernel(pevent);
+		per_cpu(perf_events, cpu)[event] = NULL;
+	}
+}
+
+/*
+ * Called by oprofile_perf_start to create active perf events based on the
+ * previously configured attributes.
+ */
+static int op_perf_start(void)
+{
+	int cpu, event, ret = 0;
+
+	for_each_online_cpu(cpu) {
+		for (event = 0; event < num_counters; ++event) {
+			ret = op_create_counter(cpu, event);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Called by oprofile_perf_stop at the end of a profiling run.
+ */
+static void op_perf_stop(void)
+{
+	int cpu, event;
+
+	for_each_online_cpu(cpu)
+		for (event = 0; event < num_counters; ++event)
+			op_destroy_counter(cpu, event);
+}
+
+static int oprofile_perf_create_files(struct dentry *root)
+{
+	unsigned int i;
+
+	for (i = 0; i < num_counters; i++) {
+		struct dentry *dir;
+		char buf[4];
+
+		snprintf(buf, sizeof buf, "%d", i);
+		dir = oprofilefs_mkdir(root, buf);
+		oprofilefs_create_ulong(dir, "enabled", &counter_config[i].enabled);
+		oprofilefs_create_ulong(dir, "event", &counter_config[i].event);
+		oprofilefs_create_ulong(dir, "count", &counter_config[i].count);
+		oprofilefs_create_ulong(dir, "unit_mask", &counter_config[i].unit_mask);
+		oprofilefs_create_ulong(dir, "kernel", &counter_config[i].kernel);
+		oprofilefs_create_ulong(dir, "user", &counter_config[i].user);
+	}
+
+	return 0;
+}
+
+static int oprofile_perf_setup(void)
+{
+	raw_spin_lock(&oprofilefs_lock);
+	op_perf_setup();
+	raw_spin_unlock(&oprofilefs_lock);
+	return 0;
+}
+
+static int oprofile_perf_start(void)
+{
+	int ret = -EBUSY;
+
+	mutex_lock(&oprofile_perf_mutex);
+	if (!oprofile_perf_enabled) {
+		ret = 0;
+		op_perf_start();
+		oprofile_perf_enabled = 1;
+	}
+	mutex_unlock(&oprofile_perf_mutex);
+	return ret;
+}
+
+static void oprofile_perf_stop(void)
+{
+	mutex_lock(&oprofile_perf_mutex);
+	if (oprofile_perf_enabled)
+		op_perf_stop();
+	oprofile_perf_enabled = 0;
+	mutex_unlock(&oprofile_perf_mutex);
+}
+
+#ifdef CONFIG_PM
+
+static int oprofile_perf_suspend(struct platform_device *dev, pm_message_t state)
+{
+	mutex_lock(&oprofile_perf_mutex);
+	if (oprofile_perf_enabled)
+		op_perf_stop();
+	mutex_unlock(&oprofile_perf_mutex);
+	return 0;
+}
+
+static int oprofile_perf_resume(struct platform_device *dev)
+{
+	mutex_lock(&oprofile_perf_mutex);
+	if (oprofile_perf_enabled && op_perf_start())
+		oprofile_perf_enabled = 0;
+	mutex_unlock(&oprofile_perf_mutex);
+	return 0;
+}
+
+static struct platform_driver oprofile_driver = {
+	.driver		= {
+		.name		= "oprofile-perf",
+	},
+	.resume		= oprofile_perf_resume,
+	.suspend	= oprofile_perf_suspend,
+};
+
+static struct platform_device *oprofile_pdev;
+
+static int __init init_driverfs(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&oprofile_driver);
+	if (ret)
+		return ret;
+
+	oprofile_pdev =	platform_device_register_simple(
+				oprofile_driver.driver.name, 0, NULL, 0);
+	if (IS_ERR(oprofile_pdev)) {
+		ret = PTR_ERR(oprofile_pdev);
+		platform_driver_unregister(&oprofile_driver);
+	}
+
+	return ret;
+}
+
+static void exit_driverfs(void)
+{
+	platform_device_unregister(oprofile_pdev);
+	platform_driver_unregister(&oprofile_driver);
+}
+
+#else
+
+static inline int  init_driverfs(void) { return 0; }
+static inline void exit_driverfs(void) { }
+
+#endif /* CONFIG_PM */
+
+void oprofile_perf_exit(void)
+{
+	int cpu, id;
+	struct perf_event *event;
+
+	for_each_possible_cpu(cpu) {
+		for (id = 0; id < num_counters; ++id) {
+			event = per_cpu(perf_events, cpu)[id];
+			if (event)
+				perf_event_release_kernel(event);
+		}
+
+		kfree(per_cpu(perf_events, cpu));
+	}
+
+	kfree(counter_config);
+	exit_driverfs();
+}
+
+int __init oprofile_perf_init(struct oprofile_operations *ops)
+{
+	int cpu, ret = 0;
+
+	ret = init_driverfs();
+	if (ret)
+		return ret;
+
+	num_counters = perf_num_counters();
+	if (num_counters <= 0) {
+		pr_info("oprofile: no performance counters\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	counter_config = kcalloc(num_counters,
+			sizeof(struct op_counter_config), GFP_KERNEL);
+
+	if (!counter_config) {
+		pr_info("oprofile: failed to allocate %d "
+				"counters\n", num_counters);
+		ret = -ENOMEM;
+		num_counters = 0;
+		goto out;
+	}
+
+	for_each_possible_cpu(cpu) {
+		per_cpu(perf_events, cpu) = kcalloc(num_counters,
+				sizeof(struct perf_event *), GFP_KERNEL);
+		if (!per_cpu(perf_events, cpu)) {
+			pr_info("oprofile: failed to allocate %d perf events "
+					"for cpu %d\n", num_counters, cpu);
+			ret = -ENOMEM;
+			goto out;
+		}
+	}
+
+	ops->create_files	= oprofile_perf_create_files;
+	ops->setup		= oprofile_perf_setup;
+	ops->start		= oprofile_perf_start;
+	ops->stop		= oprofile_perf_stop;
+	ops->shutdown		= oprofile_perf_stop;
+	ops->cpu_type		= op_name_from_perf_id();
+
+	if (!ops->cpu_type)
+		ret = -ENODEV;
+	else
+		pr_info("oprofile: using %s\n", ops->cpu_type);
+
+out:
+	if (ret)
+		oprofile_perf_exit();
+
+	return ret;
+}
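Because the perf backend marks its events pinned, an event that cannot be scheduled on the PMU is not queued for later; it simply ends up inactive. That is why op_create_counter() checks the event state right after creation. A condensed sketch of that create-and-verify step (attr setup omitted; demo_create is invented):

/*
 * Create a pinned kernel counter and confirm it actually went on the
 * PMU; a pinned event that failed to schedule stays inactive.
 */
#include <linux/err.h>
#include <linux/perf_event.h>

static struct perf_event *demo_create(struct perf_event_attr *attr, int cpu,
				       perf_overflow_handler_t handler)
{
	struct perf_event *event;

	event = perf_event_create_kernel_counter(attr, cpu, NULL,
						 handler, NULL);
	if (IS_ERR(event))
		return event;

	if (event->state != PERF_EVENT_STATE_ACTIVE) {
		perf_event_release_kernel(event);
		return ERR_PTR(-EBUSY);
	}
	return event;
}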
diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
index e1f6ce03705..59659cea458 100644
--- a/drivers/oprofile/oprofile_stats.c
+++ b/drivers/oprofile/oprofile_stats.c
@@ -23,7 +23,7 @@ void oprofile_reset_stats(void)
 	int i;
 
 	for_each_possible_cpu(i) {
-		cpu_buf = &per_cpu(cpu_buffer, i);
+		cpu_buf = &per_cpu(op_cpu_buffer, i);
 		cpu_buf->sample_received = 0;
 		cpu_buf->sample_lost_overflow = 0;
 		cpu_buf->backtrace_aborted = 0;
@@ -33,10 +33,12 @@ void oprofile_reset_stats(void)
 	atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
 	atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
 	atomic_set(&oprofile_stats.event_lost_overflow, 0);
+	atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
+	atomic_set(&oprofile_stats.multiplex_counter, 0);
 }
 
 
-void oprofile_create_stats_files(struct super_block *sb, struct dentry *root)
+void oprofile_create_stats_files(struct dentry *root)
 {
 	struct oprofile_cpu_buffer *cpu_buf;
 	struct dentry *cpudir;
@@ -44,35 +46,39 @@ void oprofile_create_stats_files(struct super_block *sb, struct dentry *root)
 	char buf[10];
 	int i;
 
-	dir = oprofilefs_mkdir(sb, root, "stats");
+	dir = oprofilefs_mkdir(root, "stats");
 	if (!dir)
 		return;
 
 	for_each_possible_cpu(i) {
-		cpu_buf = &per_cpu(cpu_buffer, i);
+		cpu_buf = &per_cpu(op_cpu_buffer, i);
 		snprintf(buf, 10, "cpu%d", i);
-		cpudir = oprofilefs_mkdir(sb, dir, buf);
+		cpudir = oprofilefs_mkdir(dir, buf);
 
 		/* Strictly speaking access to these ulongs is racy,
 		 * but we can't simply lock them, and they are
 		 * informational only.
 		 */
-		oprofilefs_create_ro_ulong(sb, cpudir, "sample_received",
+		oprofilefs_create_ro_ulong(cpudir, "sample_received",
 			&cpu_buf->sample_received);
-		oprofilefs_create_ro_ulong(sb, cpudir, "sample_lost_overflow",
+		oprofilefs_create_ro_ulong(cpudir, "sample_lost_overflow",
 			&cpu_buf->sample_lost_overflow);
-		oprofilefs_create_ro_ulong(sb, cpudir, "backtrace_aborted",
+		oprofilefs_create_ro_ulong(cpudir, "backtrace_aborted",
 			&cpu_buf->backtrace_aborted);
-		oprofilefs_create_ro_ulong(sb, cpudir, "sample_invalid_eip",
+		oprofilefs_create_ro_ulong(cpudir, "sample_invalid_eip",
 			&cpu_buf->sample_invalid_eip);
 	}
 
-	oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mm",
+	oprofilefs_create_ro_atomic(dir, "sample_lost_no_mm",
 		&oprofile_stats.sample_lost_no_mm);
-	oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mapping",
+	oprofilefs_create_ro_atomic(dir, "sample_lost_no_mapping",
 		&oprofile_stats.sample_lost_no_mapping);
-	oprofilefs_create_ro_atomic(sb, dir, "event_lost_overflow",
+	oprofilefs_create_ro_atomic(dir, "event_lost_overflow",
 		&oprofile_stats.event_lost_overflow);
-	oprofilefs_create_ro_atomic(sb, dir, "bt_lost_no_mapping",
+	oprofilefs_create_ro_atomic(dir, "bt_lost_no_mapping",
 		&oprofile_stats.bt_lost_no_mapping);
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+	oprofilefs_create_ro_atomic(dir, "multiplex_counter",
+		&oprofile_stats.multiplex_counter);
+#endif
 }
diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
index 3da0d08dc1f..1fc622bd183 100644
--- a/drivers/oprofile/oprofile_stats.h
+++ b/drivers/oprofile/oprofile_stats.h
@@ -10,13 +10,14 @@
 #ifndef OPROFILE_STATS_H
 #define OPROFILE_STATS_H
 
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 
 struct oprofile_stat_struct {
 	atomic_t sample_lost_no_mm;
 	atomic_t sample_lost_no_mapping;
 	atomic_t bt_lost_no_mapping;
 	atomic_t event_lost_overflow;
+	atomic_t multiplex_counter;
 };
 
 extern struct oprofile_stat_struct oprofile_stats;
@@ -24,10 +25,9 @@ extern struct oprofile_stat_struct oprofile_stats;
 /* reset all stats to zero */
 void oprofile_reset_stats(void);
 
-struct super_block;
 struct dentry;
 
 /* create the stats/ dir */
-void oprofile_create_stats_files(struct super_block *sb, struct dentry *root);
+void oprofile_create_stats_files(struct dentry *root);
 
 #endif /* OPROFILE_STATS_H */
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index b7e4cee2426..3f493459378 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -21,13 +21,14 @@
 
 #define OPROFILEFS_MAGIC 0x6f70726f
 
-DEFINE_SPINLOCK(oprofilefs_lock);
+DEFINE_RAW_SPINLOCK(oprofilefs_lock);
 
 static struct inode *oprofilefs_get_inode(struct super_block *sb, int mode)
 {
 	struct inode *inode = new_inode(sb);
 
 	if (inode) {
+		inode->i_ino = get_next_ino();
 		inode->i_mode = mode;
 		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 	}
@@ -35,7 +36,7 @@ static struct inode *oprofilefs_get_inode(struct super_block *sb, int mode)
 }
 
 
-static struct super_operations s_ops = {
+static const struct super_operations s_ops = {
 	.statfs		= simple_statfs,
 	.drop_inode	= generic_delete_inode,
 };
@@ -59,6 +60,13 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user *buf, size_t count, loff_t *offset)
 }
 
 
+/*
+ * Note: If oprofilefs_ulong_from_user() returns 0, then *val remains
+ * unchanged and might be uninitialized. This follows write syscall
+ * implementation when count is zero: "If count is zero ... [and if]
+ * no errors are detected, 0 will be returned without causing any
+ * other effect." (man 2 write)
+ */
 int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_t count)
 {
 	char tmpbuf[TMPBUFSIZE];
@@ -75,10 +83,10 @@ int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_t count)
 	if (copy_from_user(tmpbuf, buf, count))
 		return -EFAULT;
 
-	spin_lock_irqsave(&oprofilefs_lock, flags);
+	raw_spin_lock_irqsave(&oprofilefs_lock, flags);
 	*val = simple_strtoul(tmpbuf, NULL, 0);
-	spin_unlock_irqrestore(&oprofilefs_lock, flags);
-	return 0;
+	raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);
+	return count;
 }
 
 
@@ -91,85 +99,78 @@ static ssize_t ulong_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
 
 static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset)
 {
-	unsigned long *value = file->private_data;
+	unsigned long value;
 	int retval;
 
 	if (*offset)
 		return -EINVAL;
 
-	retval = oprofilefs_ulong_from_user(value, buf, count);
+	retval = oprofilefs_ulong_from_user(&value, buf, count);
+	if (retval <= 0)
+		return retval;
 
+	retval = oprofile_set_ulong(file->private_data, value);
 	if (retval)
 		return retval;
-	return count;
-}
-
 
-static int default_open(struct inode *inode, struct file *filp)
-{
-	if (inode->i_private)
-		filp->private_data = inode->i_private;
-	return 0;
+	return count;
 }
 
 
 static const struct file_operations ulong_fops = {
 	.read		= ulong_read_file,
 	.write		= ulong_write_file,
-	.open		= default_open,
+	.open		= simple_open,
+	.llseek		= default_llseek,
 };
 
 
 static const struct file_operations ulong_ro_fops = {
 	.read		= ulong_read_file,
-	.open		= default_open,
+	.open		= simple_open,
+	.llseek		= default_llseek,
 };
 
 
-static struct dentry *__oprofilefs_create_file(struct super_block *sb,
-	struct dentry *root, char const *name, const struct file_operations *fops,
-	int perm)
+static int __oprofilefs_create_file(struct dentry *root, char const *name,
+	const struct file_operations *fops, int perm, void *priv)
 {
 	struct dentry *dentry;
 	struct inode *inode;
 
+	mutex_lock(&root->d_inode->i_mutex);
 	dentry = d_alloc_name(root, name);
-	if (!dentry)
-		return NULL;
-	inode = oprofilefs_get_inode(sb, S_IFREG | perm);
+	if (!dentry) {
+		mutex_unlock(&root->d_inode->i_mutex);
+		return -ENOMEM;
+	}
+	inode = oprofilefs_get_inode(root->d_sb, S_IFREG | perm);
 	if (!inode) {
 		dput(dentry);
-		return NULL;
+		mutex_unlock(&root->d_inode->i_mutex);
+		return -ENOMEM;
 	}
 	inode->i_fop = fops;
+	inode->i_private = priv;
 	d_add(dentry, inode);
-	return dentry;
+	mutex_unlock(&root->d_inode->i_mutex);
+	return 0;
 }
 
 
-int oprofilefs_create_ulong(struct super_block *sb, struct dentry *root,
+int oprofilefs_create_ulong(struct dentry *root,
 	char const *name, unsigned long *val)
 {
-	struct dentry *d = __oprofilefs_create_file(sb, root, name,
-						     &ulong_fops, 0644);
-	if (!d)
-		return -EFAULT;
-
-	d->d_inode->i_private = val;
-	return 0;
+	return __oprofilefs_create_file(root, name,
+					&ulong_fops, 0644, val);
 }
 
 
-int oprofilefs_create_ro_ulong(struct super_block *sb, struct dentry *root,
+int oprofilefs_create_ro_ulong(struct dentry *root,
	char const *name, unsigned long *val)
 {
-	struct dentry *d = __oprofilefs_create_file(sb, root, name,
-						     &ulong_ro_fops, 0444);
-	if (!d)
-		return -EFAULT;
-
-	d->d_inode->i_private = val;
-	return 0;
+	return __oprofilefs_create_file(root, name,
+					&ulong_ro_fops, 0444, val);
 }
 
 
@@ -182,58 +183,54 @@ static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
 
 static const struct file_operations atomic_ro_fops = {
 	.read		= atomic_read_file,
-	.open		= default_open,
+	.open		= simple_open,
+	.llseek		= default_llseek,
 };
 
 
-int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
+int oprofilefs_create_ro_atomic(struct dentry *root,
 	char const *name, atomic_t *val)
 {
-	struct dentry *d = __oprofilefs_create_file(sb, root, name,
-						     &atomic_ro_fops, 0444);
-	if (!d)
-		return -EFAULT;
-
-	d->d_inode->i_private = val;
-	return 0;
+	return __oprofilefs_create_file(root, name,
+					&atomic_ro_fops, 0444, val);
 }
 
 
-int oprofilefs_create_file(struct super_block *sb, struct dentry *root,
+int oprofilefs_create_file(struct dentry *root,
 	char const *name, const struct file_operations *fops)
 {
-	if (!__oprofilefs_create_file(sb, root, name, fops, 0644))
-		return -EFAULT;
-	return 0;
+	return __oprofilefs_create_file(root, name, fops, 0644, NULL);
 }
 
 
-int oprofilefs_create_file_perm(struct super_block *sb, struct dentry *root,
+int oprofilefs_create_file_perm(struct dentry *root,
 	char const *name, const struct file_operations *fops, int perm)
 {
-	if (!__oprofilefs_create_file(sb, root, name, fops, perm))
-		return -EFAULT;
-	return 0;
+	return __oprofilefs_create_file(root, name, fops, perm, NULL);
 }
 
 
-struct dentry *oprofilefs_mkdir(struct super_block *sb,
-	struct dentry *root, char const *name)
+struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name)
 {
 	struct dentry *dentry;
 	struct inode *inode;
 
-	dentry = d_alloc_name(root, name);
-	if (!dentry)
+	mutex_lock(&parent->d_inode->i_mutex);
+	dentry = d_alloc_name(parent, name);
+	if (!dentry) {
+		mutex_unlock(&parent->d_inode->i_mutex);
 		return NULL;
-	inode = oprofilefs_get_inode(sb, S_IFDIR | 0755);
+	}
+	inode = oprofilefs_get_inode(parent->d_sb, S_IFDIR | 0755);
 	if (!inode) {
 		dput(dentry);
+		mutex_unlock(&parent->d_inode->i_mutex);
 		return NULL;
 	}
 	inode->i_op = &simple_dir_inode_operations;
 	inode->i_fop = &simple_dir_operations;
 	d_add(dentry, inode);
+	mutex_unlock(&parent->d_inode->i_mutex);
 	return dentry;
 }
 
 
@@ -241,7 +238,6 @@ struct dentry *oprofilefs_mkdir(struct super_block *sb,
 static int oprofilefs_fill_super(struct super_block *sb, void *data, int silent)
 {
 	struct inode *root_inode;
-	struct dentry *root_dentry;
 
 	sb->s_blocksize = PAGE_CACHE_SIZE;
 	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
@@ -254,34 +250,31 @@ static int oprofilefs_fill_super(struct super_block *sb, void *data, int silent)
 		return -ENOMEM;
 	root_inode->i_op = &simple_dir_inode_operations;
 	root_inode->i_fop = &simple_dir_operations;
-	root_dentry = d_alloc_root(root_inode);
-	if (!root_dentry) {
-		iput(root_inode);
+	sb->s_root = d_make_root(root_inode);
+	if (!sb->s_root)
 		return -ENOMEM;
-	}
-
-	sb->s_root = root_dentry;
 
-	oprofile_create_files(sb, root_dentry);
+	oprofile_create_files(sb->s_root);
 
 	// FIXME: verify kill_litter_super removes our dentries
 	return 0;
 }
 
 
-static int oprofilefs_get_sb(struct file_system_type *fs_type,
-	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
+static struct dentry *oprofilefs_mount(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
 {
-	return get_sb_single(fs_type, flags, data, oprofilefs_fill_super, mnt);
+	return mount_single(fs_type, flags, data, oprofilefs_fill_super);
 }
 
 
 static struct file_system_type oprofilefs_type = {
	.owner		= THIS_MODULE,
 	.name		= "oprofilefs",
-	.get_sb		= oprofilefs_get_sb,
+	.mount		= oprofilefs_mount,
 	.kill_sb	= kill_litter_super,
 };
+MODULE_ALIAS_FS("oprofilefs");
 
 
 int __init oprofilefs_register(void)
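With the super_block argument gone from the oprofilefs API, a profiling driver's create_files callback only sees the root dentry and derives everything else from it. A sketch of how a backend would consume the reworked interface (demo names are hypothetical; the callback signature follows oprofile_perf_create_files above):

/*
 * Hypothetical create_files callback against the new oprofilefs API.
 */
#include <linux/dcache.h>
#include <linux/errno.h>
#include <linux/oprofile.h>

static unsigned long demo_count;

static int demo_create_files(struct dentry *root)
{
	struct dentry *dir = oprofilefs_mkdir(root, "demo");

	if (!dir)
		return -ENOMEM;
	return oprofilefs_create_ulong(dir, "count", &demo_count);
}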
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
index 333f915568c..61be1d9c16c 100644
--- a/drivers/oprofile/timer_int.c
+++ b/drivers/oprofile/timer_int.c
@@ -13,34 +13,108 @@
 #include <linux/oprofile.h>
 #include <linux/profile.h>
 #include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/hrtimer.h>
+#include <asm/irq_regs.h>
 #include <asm/ptrace.h>
 
 #include "oprof.h"
 
-static int timer_notify(struct pt_regs *regs)
+static DEFINE_PER_CPU(struct hrtimer, oprofile_hrtimer);
+static int ctr_running;
+
+static enum hrtimer_restart oprofile_hrtimer_notify(struct hrtimer *hrtimer)
+{
+	oprofile_add_sample(get_irq_regs(), 0);
+	hrtimer_forward_now(hrtimer, ns_to_ktime(TICK_NSEC));
+	return HRTIMER_RESTART;
+}
+
+static void __oprofile_hrtimer_start(void *unused)
+{
+	struct hrtimer *hrtimer = &__get_cpu_var(oprofile_hrtimer);
+
+	if (!ctr_running)
+		return;
+
+	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	hrtimer->function = oprofile_hrtimer_notify;
+
+	hrtimer_start(hrtimer, ns_to_ktime(TICK_NSEC),
+		      HRTIMER_MODE_REL_PINNED);
+}
+
+static int oprofile_hrtimer_start(void)
 {
-	oprofile_add_sample(regs, 0);
+	get_online_cpus();
+	ctr_running = 1;
+	on_each_cpu(__oprofile_hrtimer_start, NULL, 1);
+	put_online_cpus();
 	return 0;
 }
 
-static int timer_start(void)
+static void __oprofile_hrtimer_stop(int cpu)
 {
-	return register_timer_hook(timer_notify);
+	struct hrtimer *hrtimer = &per_cpu(oprofile_hrtimer, cpu);
+
+	if (!ctr_running)
+		return;
+
+	hrtimer_cancel(hrtimer);
 }
 
+static void oprofile_hrtimer_stop(void)
+{
+	int cpu;
+
+	get_online_cpus();
+	for_each_online_cpu(cpu)
+		__oprofile_hrtimer_stop(cpu);
+	ctr_running = 0;
+	put_online_cpus();
+}
 
-static void timer_stop(void)
+static int oprofile_cpu_notify(struct notifier_block *self,
+			       unsigned long action, void *hcpu)
 {
-	unregister_timer_hook(timer_notify);
+	long cpu = (long) hcpu;
+
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		smp_call_function_single(cpu, __oprofile_hrtimer_start,
+					 NULL, 1);
+		break;
+	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
+		__oprofile_hrtimer_stop(cpu);
+		break;
+	}
+	return NOTIFY_OK;
 }
 
+static struct notifier_block __refdata oprofile_cpu_notifier = {
+	.notifier_call = oprofile_cpu_notify,
+};
 
-void __init oprofile_timer_init(struct oprofile_operations *ops)
+static int oprofile_hrtimer_setup(void)
 {
-	ops->create_files = NULL;
-	ops->setup = NULL;
-	ops->shutdown = NULL;
-	ops->start = timer_start;
-	ops->stop = timer_stop;
-	ops->cpu_type = "timer";
+	return register_hotcpu_notifier(&oprofile_cpu_notifier);
+}
+
+static void oprofile_hrtimer_shutdown(void)
+{
+	unregister_hotcpu_notifier(&oprofile_cpu_notifier);
+}
+
+int oprofile_timer_init(struct oprofile_operations *ops)
+{
+	ops->create_files	= NULL;
+	ops->setup		= oprofile_hrtimer_setup;
+	ops->shutdown		= oprofile_hrtimer_shutdown;
+	ops->start		= oprofile_hrtimer_start;
+	ops->stop		= oprofile_hrtimer_stop;
+	ops->cpu_type		= "timer";
+	printk(KERN_INFO "oprofile: using timer interrupt.\n");
+	return 0;
 }
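The rewritten fallback replaces the removed register_timer_hook() with one pinned hrtimer per CPU that re-arms itself every tick. A reduced sketch of that shape, with invented names and a fixed 1 ms period in place of TICK_NSEC so it stands alone:

/*
 * Per-cpu self-rearming hrtimer. demo_start() is meant to run on each
 * CPU, e.g. via on_each_cpu(demo_start, NULL, 1).
 */
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/percpu.h>

#define DEMO_PERIOD_NS	(1000 * 1000)	/* 1 ms */

static DEFINE_PER_CPU(struct hrtimer, demo_timer);

static enum hrtimer_restart demo_fire(struct hrtimer *t)
{
	/* take a sample here, then re-arm relative to now */
	hrtimer_forward_now(t, ns_to_ktime(DEMO_PERIOD_NS));
	return HRTIMER_RESTART;
}

static void demo_start(void *unused)
{
	struct hrtimer *t = this_cpu_ptr(&demo_timer);

	hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	t->function = demo_fire;
	/* pinned: keep the timer on this CPU even under load balancing */
	hrtimer_start(t, ns_to_ktime(DEMO_PERIOD_NS), HRTIMER_MODE_REL_PINNED);
}

Pinning matters here because each timer samples its own CPU; an unpinned timer could migrate and leave a CPU unsampled.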
