author | Linus Torvalds <torvalds@linux-foundation.org> | 2008-10-13 09:54:35 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-10-13 09:54:45 -0700
commit | e7f2f9918c0e97aa98ba147ca387e2c7238f0711 (patch)
tree | dd85d6d2907bffeda76b42ce55a445e3142fe738 /drivers
parent | 11a96d1820113fde0d55c3487b7da7a9031326b8 (diff)
parent | c00193f9f09f9b852249a66391985f585d066084 (diff)
Merge phase #5 (misc) of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
Merges oprofile, timers/hpet, x86/traps, x86/time, and x86/core misc items.
* 'x86-core-v4-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (132 commits)
x86: change early_ioremap to use slots instead of nesting
x86: adjust dependencies for CONFIG_X86_CMOV
dumpstack: x86: various small unification steps, fix
x86: remove additional_cpus
x86: remove additional_cpus configurability
x86: improve UP kernel when CPU-hotplug and SMP is enabled
dumpstack: x86: various small unification steps
dumpstack: i386: make kstack= an early boot-param and add oops=panic
dumpstack: x86: use log_lvl and unify trace formatting
dumptrace: x86: consistently include loglevel, print stack switch
dumpstack: x86: add "end" parameter to valid_stack_ptr and print_context_stack
dumpstack: x86: make printk_address equal
dumpstack: x86: move die_nmi to dumpstack_32.c
traps: x86: finalize unification of traps.c
traps: x86: make traps_32.c and traps_64.c equal
traps: x86: various noop-changes preparing for unification of traps_xx.c
traps: x86_64: use task_pid_nr(tsk) instead of tsk->pid in do_general_protection
traps: i386: expand clear_mem_error and remove from mach_traps.h
traps: x86_64: make io_check_error equal to the one on i386
traps: i386: use preempt_conditional_sti/cli in do_int3
...
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/char/hpet.c | 159
-rw-r--r-- | drivers/oprofile/buffer_sync.c | 209
-rw-r--r-- | drivers/oprofile/cpu_buffer.c | 74
-rw-r--r-- | drivers/oprofile/cpu_buffer.h | 2
4 files changed, 306 insertions, 138 deletions
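Among the hpet.c changes below, hpet_alloc() now reports the counter frequency as "%u.%06u MHz" by splitting hp_tick_freq with do_div(). A rough userspace illustration of that split follows; the sample values and the plain 64-bit division standing in for the kernel's do_div() are assumptions for illustration, not part of the patch:

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint64_t tick_freq = 14318180;            /* example value: a 14.31818 MHz HPET */
	uint64_t mhz = tick_freq / 1000000;       /* quotient, as do_div() leaves in temp */
	unsigned remainder = tick_freq % 1000000; /* what do_div() returns */

	printf("hpet0: 3 comparators, 64-bit %" PRIu64 ".%06u MHz counter\n",
	       mhz, remainder);
	return 0;
}

The %06u padding matters for small remainders: a remainder of 25000 prints as .025000 rather than .25000.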
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index b3f5dbc6d88..f3cfb4c7612 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -53,6 +53,11 @@ #define HPET_RANGE_SIZE 1024 /* from HPET spec */ + +/* WARNING -- don't get confused. These macros are never used + * to write the (single) counter, and rarely to read it. + * They're badly named; to fix, someday. + */ #if BITS_PER_LONG == 64 #define write_counter(V, MC) writeq(V, MC) #define read_counter(MC) readq(MC) @@ -77,7 +82,7 @@ static struct clocksource clocksource_hpet = { .rating = 250, .read = read_hpet, .mask = CLOCKSOURCE_MASK(64), - .mult = 0, /*to be caluclated*/ + .mult = 0, /* to be calculated */ .shift = 10, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; @@ -86,8 +91,6 @@ static struct clocksource *hpet_clocksource; /* A lock for concurrent access by app and isr hpet activity. */ static DEFINE_SPINLOCK(hpet_lock); -/* A lock for concurrent intermodule access to hpet and isr hpet activity. */ -static DEFINE_SPINLOCK(hpet_task_lock); #define HPET_DEV_NAME (7) @@ -99,7 +102,6 @@ struct hpet_dev { unsigned long hd_irqdata; wait_queue_head_t hd_waitqueue; struct fasync_struct *hd_async_queue; - struct hpet_task *hd_task; unsigned int hd_flags; unsigned int hd_irq; unsigned int hd_hdwirq; @@ -173,11 +175,6 @@ static irqreturn_t hpet_interrupt(int irq, void *data) writel(isr, &devp->hd_hpet->hpet_isr); spin_unlock(&hpet_lock); - spin_lock(&hpet_task_lock); - if (devp->hd_task) - devp->hd_task->ht_func(devp->hd_task->ht_data); - spin_unlock(&hpet_task_lock); - wake_up_interruptible(&devp->hd_waitqueue); kill_fasync(&devp->hd_async_queue, SIGIO, POLL_IN); @@ -185,6 +182,67 @@ static irqreturn_t hpet_interrupt(int irq, void *data) return IRQ_HANDLED; } +static void hpet_timer_set_irq(struct hpet_dev *devp) +{ + unsigned long v; + int irq, gsi; + struct hpet_timer __iomem *timer; + + spin_lock_irq(&hpet_lock); + if (devp->hd_hdwirq) { + spin_unlock_irq(&hpet_lock); + return; + } + + timer = devp->hd_timer; + + /* we prefer level triggered mode */ + v = readl(&timer->hpet_config); + if (!(v & Tn_INT_TYPE_CNF_MASK)) { + v |= Tn_INT_TYPE_CNF_MASK; + writel(v, &timer->hpet_config); + } + spin_unlock_irq(&hpet_lock); + + v = (readq(&timer->hpet_config) & Tn_INT_ROUTE_CAP_MASK) >> + Tn_INT_ROUTE_CAP_SHIFT; + + /* + * In PIC mode, skip IRQ0-4, IRQ6-9, IRQ12-15 which is always used by + * legacy device. In IO APIC mode, we skip all the legacy IRQS. 
+ */ + if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) + v &= ~0xf3df; + else + v &= ~0xffff; + + for (irq = find_first_bit(&v, HPET_MAX_IRQ); irq < HPET_MAX_IRQ; + irq = find_next_bit(&v, HPET_MAX_IRQ, 1 + irq)) { + + if (irq >= NR_IRQS) { + irq = HPET_MAX_IRQ; + break; + } + + gsi = acpi_register_gsi(irq, ACPI_LEVEL_SENSITIVE, + ACPI_ACTIVE_LOW); + if (gsi > 0) + break; + + /* FIXME: Setup interrupt source table */ + } + + if (irq < HPET_MAX_IRQ) { + spin_lock_irq(&hpet_lock); + v = readl(&timer->hpet_config); + v |= irq << Tn_INT_ROUTE_CNF_SHIFT; + writel(v, &timer->hpet_config); + devp->hd_hdwirq = gsi; + spin_unlock_irq(&hpet_lock); + } + return; +} + static int hpet_open(struct inode *inode, struct file *file) { struct hpet_dev *devp; @@ -199,8 +257,7 @@ static int hpet_open(struct inode *inode, struct file *file) for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next) for (i = 0; i < hpetp->hp_ntimer; i++) - if (hpetp->hp_dev[i].hd_flags & HPET_OPEN - || hpetp->hp_dev[i].hd_task) + if (hpetp->hp_dev[i].hd_flags & HPET_OPEN) continue; else { devp = &hpetp->hp_dev[i]; @@ -219,6 +276,8 @@ static int hpet_open(struct inode *inode, struct file *file) spin_unlock_irq(&hpet_lock); unlock_kernel(); + hpet_timer_set_irq(devp); + return 0; } @@ -441,7 +500,11 @@ static int hpet_ioctl_ieon(struct hpet_dev *devp) devp->hd_irq = irq; t = devp->hd_ireqfreq; v = readq(&timer->hpet_config); - g = v | Tn_INT_ENB_CNF_MASK; + + /* 64-bit comparators are not yet supported through the ioctls, + * so force this into 32-bit mode if it supports both modes + */ + g = v | Tn_32MODE_CNF_MASK | Tn_INT_ENB_CNF_MASK; if (devp->hd_flags & HPET_PERIODIC) { write_counter(t, &timer->hpet_compare); @@ -451,6 +514,12 @@ static int hpet_ioctl_ieon(struct hpet_dev *devp) v |= Tn_VAL_SET_CNF_MASK; writeq(v, &timer->hpet_config); local_irq_save(flags); + + /* NOTE: what we modify here is a hidden accumulator + * register supported by periodic-capable comparators. + * We never want to modify the (single) counter; that + * would affect all the comparators. 
+ */ m = read_counter(&hpet->hpet_mc); write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare); } else { @@ -604,57 +673,6 @@ static int hpet_is_known(struct hpet_data *hdp) return 0; } -static inline int hpet_tpcheck(struct hpet_task *tp) -{ - struct hpet_dev *devp; - struct hpets *hpetp; - - devp = tp->ht_opaque; - - if (!devp) - return -ENXIO; - - for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next) - if (devp >= hpetp->hp_dev - && devp < (hpetp->hp_dev + hpetp->hp_ntimer) - && devp->hd_hpet == hpetp->hp_hpet) - return 0; - - return -ENXIO; -} - -#if 0 -int hpet_unregister(struct hpet_task *tp) -{ - struct hpet_dev *devp; - struct hpet_timer __iomem *timer; - int err; - - if ((err = hpet_tpcheck(tp))) - return err; - - spin_lock_irq(&hpet_task_lock); - spin_lock(&hpet_lock); - - devp = tp->ht_opaque; - if (devp->hd_task != tp) { - spin_unlock(&hpet_lock); - spin_unlock_irq(&hpet_task_lock); - return -ENXIO; - } - - timer = devp->hd_timer; - writeq((readq(&timer->hpet_config) & ~Tn_INT_ENB_CNF_MASK), - &timer->hpet_config); - devp->hd_flags &= ~(HPET_IE | HPET_PERIODIC); - devp->hd_task = NULL; - spin_unlock(&hpet_lock); - spin_unlock_irq(&hpet_task_lock); - - return 0; -} -#endif /* 0 */ - static ctl_table hpet_table[] = { { .ctl_name = CTL_UNNUMBERED, @@ -746,6 +764,7 @@ int hpet_alloc(struct hpet_data *hdp) static struct hpets *last = NULL; unsigned long period; unsigned long long temp; + u32 remainder; /* * hpet_alloc can be called by platform dependent code. @@ -809,9 +828,13 @@ int hpet_alloc(struct hpet_data *hdp) printk("%s %d", i > 0 ? "," : "", hdp->hd_irq[i]); printk("\n"); - printk(KERN_INFO "hpet%u: %u %d-bit timers, %Lu Hz\n", - hpetp->hp_which, hpetp->hp_ntimer, - cap & HPET_COUNTER_SIZE_MASK ? 64 : 32, hpetp->hp_tick_freq); + temp = hpetp->hp_tick_freq; + remainder = do_div(temp, 1000000); + printk(KERN_INFO + "hpet%u: %u comparators, %d-bit %u.%06u MHz counter\n", + hpetp->hp_which, hpetp->hp_ntimer, + cap & HPET_COUNTER_SIZE_MASK ? 64 : 32, + (unsigned) temp, remainder); mcfg = readq(&hpet->hpet_config); if ((mcfg & HPET_ENABLE_CNF_MASK) == 0) { @@ -874,8 +897,6 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data) hdp->hd_address = ioremap(addr.minimum, addr.address_length); if (hpet_is_known(hdp)) { - printk(KERN_DEBUG "%s: 0x%lx is busy\n", - __func__, hdp->hd_phys_address); iounmap(hdp->hd_address); return AE_ALREADY_EXISTS; } @@ -891,8 +912,6 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data) HPET_RANGE_SIZE); if (hpet_is_known(hdp)) { - printk(KERN_DEBUG "%s: 0x%lx is busy\n", - __func__, hdp->hd_phys_address); iounmap(hdp->hd_address); return AE_ALREADY_EXISTS; } diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c index 9304c455507..ed982273fb8 100644 --- a/drivers/oprofile/buffer_sync.c +++ b/drivers/oprofile/buffer_sync.c @@ -5,6 +5,7 @@ * @remark Read the file COPYING * * @author John Levon <levon@movementarian.org> + * @author Barry Kasindorf * * This is the core of the buffer management. Each * CPU buffer is processed and entered into the @@ -33,7 +34,7 @@ #include "event_buffer.h" #include "cpu_buffer.h" #include "buffer_sync.h" - + static LIST_HEAD(dying_tasks); static LIST_HEAD(dead_tasks); static cpumask_t marked_cpus = CPU_MASK_NONE; @@ -48,10 +49,11 @@ static void process_task_mortuary(void); * Can be invoked from softirq via RCU callback due to * call_rcu() of the task struct, hence the _irqsave. 
*/ -static int task_free_notify(struct notifier_block * self, unsigned long val, void * data) +static int +task_free_notify(struct notifier_block *self, unsigned long val, void *data) { unsigned long flags; - struct task_struct * task = data; + struct task_struct *task = data; spin_lock_irqsave(&task_mortuary, flags); list_add(&task->tasks, &dying_tasks); spin_unlock_irqrestore(&task_mortuary, flags); @@ -62,13 +64,14 @@ static int task_free_notify(struct notifier_block * self, unsigned long val, voi /* The task is on its way out. A sync of the buffer means we can catch * any remaining samples for this task. */ -static int task_exit_notify(struct notifier_block * self, unsigned long val, void * data) +static int +task_exit_notify(struct notifier_block *self, unsigned long val, void *data) { /* To avoid latency problems, we only process the current CPU, * hoping that most samples for the task are on this CPU */ sync_buffer(raw_smp_processor_id()); - return 0; + return 0; } @@ -77,11 +80,12 @@ static int task_exit_notify(struct notifier_block * self, unsigned long val, voi * we don't lose any. This does not have to be exact, it's a QoI issue * only. */ -static int munmap_notify(struct notifier_block * self, unsigned long val, void * data) +static int +munmap_notify(struct notifier_block *self, unsigned long val, void *data) { unsigned long addr = (unsigned long)data; - struct mm_struct * mm = current->mm; - struct vm_area_struct * mpnt; + struct mm_struct *mm = current->mm; + struct vm_area_struct *mpnt; down_read(&mm->mmap_sem); @@ -99,11 +103,12 @@ static int munmap_notify(struct notifier_block * self, unsigned long val, void * return 0; } - + /* We need to be told about new modules so we don't attribute to a previously * loaded module, or drop the samples on the floor. */ -static int module_load_notify(struct notifier_block * self, unsigned long val, void * data) +static int +module_load_notify(struct notifier_block *self, unsigned long val, void *data) { #ifdef CONFIG_MODULES if (val != MODULE_STATE_COMING) @@ -118,7 +123,7 @@ static int module_load_notify(struct notifier_block * self, unsigned long val, v return 0; } - + static struct notifier_block task_free_nb = { .notifier_call = task_free_notify, }; @@ -135,7 +140,7 @@ static struct notifier_block module_load_nb = { .notifier_call = module_load_notify, }; - + static void end_sync(void) { end_cpu_work(); @@ -208,14 +213,14 @@ static inline unsigned long fast_get_dcookie(struct path *path) * not strictly necessary but allows oprofile to associate * shared-library samples with particular applications */ -static unsigned long get_exec_dcookie(struct mm_struct * mm) +static unsigned long get_exec_dcookie(struct mm_struct *mm) { unsigned long cookie = NO_COOKIE; - struct vm_area_struct * vma; - + struct vm_area_struct *vma; + if (!mm) goto out; - + for (vma = mm->mmap; vma; vma = vma->vm_next) { if (!vma->vm_file) continue; @@ -235,13 +240,14 @@ out: * sure to do this lookup before a mm->mmap modification happens so * we don't lose track. 
*/ -static unsigned long lookup_dcookie(struct mm_struct * mm, unsigned long addr, off_t * offset) +static unsigned long +lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset) { unsigned long cookie = NO_COOKIE; - struct vm_area_struct * vma; + struct vm_area_struct *vma; for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) { - + if (addr < vma->vm_start || addr >= vma->vm_end) continue; @@ -263,9 +269,20 @@ static unsigned long lookup_dcookie(struct mm_struct * mm, unsigned long addr, o return cookie; } +static void increment_tail(struct oprofile_cpu_buffer *b) +{ + unsigned long new_tail = b->tail_pos + 1; + + rmb(); /* be sure fifo pointers are synchromized */ + + if (new_tail < b->buffer_size) + b->tail_pos = new_tail; + else + b->tail_pos = 0; +} static unsigned long last_cookie = INVALID_COOKIE; - + static void add_cpu_switch(int i) { add_event_entry(ESCAPE_CODE); @@ -278,16 +295,16 @@ static void add_kernel_ctx_switch(unsigned int in_kernel) { add_event_entry(ESCAPE_CODE); if (in_kernel) - add_event_entry(KERNEL_ENTER_SWITCH_CODE); + add_event_entry(KERNEL_ENTER_SWITCH_CODE); else - add_event_entry(KERNEL_EXIT_SWITCH_CODE); + add_event_entry(KERNEL_EXIT_SWITCH_CODE); } - + static void -add_user_ctx_switch(struct task_struct const * task, unsigned long cookie) +add_user_ctx_switch(struct task_struct const *task, unsigned long cookie) { add_event_entry(ESCAPE_CODE); - add_event_entry(CTX_SWITCH_CODE); + add_event_entry(CTX_SWITCH_CODE); add_event_entry(task->pid); add_event_entry(cookie); /* Another code for daemon back-compat */ @@ -296,7 +313,7 @@ add_user_ctx_switch(struct task_struct const * task, unsigned long cookie) add_event_entry(task->tgid); } - + static void add_cookie_switch(unsigned long cookie) { add_event_entry(ESCAPE_CODE); @@ -304,13 +321,78 @@ static void add_cookie_switch(unsigned long cookie) add_event_entry(cookie); } - + static void add_trace_begin(void) { add_event_entry(ESCAPE_CODE); add_event_entry(TRACE_BEGIN_CODE); } +#ifdef CONFIG_OPROFILE_IBS + +#define IBS_FETCH_CODE_SIZE 2 +#define IBS_OP_CODE_SIZE 5 +#define IBS_EIP(offset) \ + (((struct op_sample *)&cpu_buf->buffer[(offset)])->eip) +#define IBS_EVENT(offset) \ + (((struct op_sample *)&cpu_buf->buffer[(offset)])->event) + +/* + * Add IBS fetch and op entries to event buffer + */ +static void add_ibs_begin(struct oprofile_cpu_buffer *cpu_buf, int code, + int in_kernel, struct mm_struct *mm) +{ + unsigned long rip; + int i, count; + unsigned long ibs_cookie = 0; + off_t offset; + + increment_tail(cpu_buf); /* move to RIP entry */ + + rip = IBS_EIP(cpu_buf->tail_pos); + +#ifdef __LP64__ + rip += IBS_EVENT(cpu_buf->tail_pos) << 32; +#endif + + if (mm) { + ibs_cookie = lookup_dcookie(mm, rip, &offset); + + if (ibs_cookie == NO_COOKIE) + offset = rip; + if (ibs_cookie == INVALID_COOKIE) { + atomic_inc(&oprofile_stats.sample_lost_no_mapping); + offset = rip; + } + if (ibs_cookie != last_cookie) { + add_cookie_switch(ibs_cookie); + last_cookie = ibs_cookie; + } + } else + offset = rip; + + add_event_entry(ESCAPE_CODE); + add_event_entry(code); + add_event_entry(offset); /* Offset from Dcookie */ + + /* we send the Dcookie offset, but send the raw Linear Add also*/ + add_event_entry(IBS_EIP(cpu_buf->tail_pos)); + add_event_entry(IBS_EVENT(cpu_buf->tail_pos)); + + if (code == IBS_FETCH_CODE) + count = IBS_FETCH_CODE_SIZE; /*IBS FETCH is 2 int64s*/ + else + count = IBS_OP_CODE_SIZE; /*IBS OP is 5 int64s*/ + + for (i = 0; i < count; i++) { + increment_tail(cpu_buf); + 
add_event_entry(IBS_EIP(cpu_buf->tail_pos)); + add_event_entry(IBS_EVENT(cpu_buf->tail_pos)); + } +} + +#endif static void add_sample_entry(unsigned long offset, unsigned long event) { @@ -319,13 +401,13 @@ static void add_sample_entry(unsigned long offset, unsigned long event) } -static int add_us_sample(struct mm_struct * mm, struct op_sample * s) +static int add_us_sample(struct mm_struct *mm, struct op_sample *s) { unsigned long cookie; off_t offset; - - cookie = lookup_dcookie(mm, s->eip, &offset); - + + cookie = lookup_dcookie(mm, s->eip, &offset); + if (cookie == INVALID_COOKIE) { atomic_inc(&oprofile_stats.sample_lost_no_mapping); return 0; @@ -341,13 +423,13 @@ static int add_us_sample(struct mm_struct * mm, struct op_sample * s) return 1; } - + /* Add a sample to the global event buffer. If possible the * sample is converted into a persistent dentry/offset pair * for later lookup from userspace. */ static int -add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel) +add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel) { if (in_kernel) { add_sample_entry(s->eip, s->event); @@ -359,9 +441,9 @@ add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel) } return 0; } - -static void release_mm(struct mm_struct * mm) + +static void release_mm(struct mm_struct *mm) { if (!mm) return; @@ -370,9 +452,9 @@ static void release_mm(struct mm_struct * mm) } -static struct mm_struct * take_tasks_mm(struct task_struct * task) +static struct mm_struct *take_tasks_mm(struct task_struct *task) { - struct mm_struct * mm = get_task_mm(task); + struct mm_struct *mm = get_task_mm(task); if (mm) down_read(&mm->mmap_sem); return mm; @@ -383,10 +465,10 @@ static inline int is_code(unsigned long val) { return val == ESCAPE_CODE; } - + /* "acquire" as many cpu buffer slots as we can */ -static unsigned long get_slots(struct oprofile_cpu_buffer * b) +static unsigned long get_slots(struct oprofile_cpu_buffer *b) { unsigned long head = b->head_pos; unsigned long tail = b->tail_pos; @@ -412,19 +494,6 @@ static unsigned long get_slots(struct oprofile_cpu_buffer * b) } -static void increment_tail(struct oprofile_cpu_buffer * b) -{ - unsigned long new_tail = b->tail_pos + 1; - - rmb(); - - if (new_tail < b->buffer_size) - b->tail_pos = new_tail; - else - b->tail_pos = 0; -} - - /* Move tasks along towards death. 
Any tasks on dead_tasks * will definitely have no remaining references in any * CPU buffers at this point, because we use two lists, @@ -435,8 +504,8 @@ static void process_task_mortuary(void) { unsigned long flags; LIST_HEAD(local_dead_tasks); - struct task_struct * task; - struct task_struct * ttask; + struct task_struct *task; + struct task_struct *ttask; spin_lock_irqsave(&task_mortuary, flags); @@ -493,7 +562,7 @@ void sync_buffer(int cpu) { struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu); struct mm_struct *mm = NULL; - struct task_struct * new; + struct task_struct *new; unsigned long cookie = 0; int in_kernel = 1; unsigned int i; @@ -501,7 +570,7 @@ void sync_buffer(int cpu) unsigned long available; mutex_lock(&buffer_mutex); - + add_cpu_switch(cpu); /* Remember, only we can modify tail_pos */ @@ -509,8 +578,8 @@ void sync_buffer(int cpu) available = get_slots(cpu_buf); for (i = 0; i < available; ++i) { - struct op_sample * s = &cpu_buf->buffer[cpu_buf->tail_pos]; - + struct op_sample *s = &cpu_buf->buffer[cpu_buf->tail_pos]; + if (is_code(s->eip)) { if (s->event <= CPU_IS_KERNEL) { /* kernel/userspace switch */ @@ -521,8 +590,18 @@ void sync_buffer(int cpu) } else if (s->event == CPU_TRACE_BEGIN) { state = sb_bt_start; add_trace_begin(); +#ifdef CONFIG_OPROFILE_IBS + } else if (s->event == IBS_FETCH_BEGIN) { + state = sb_bt_start; + add_ibs_begin(cpu_buf, + IBS_FETCH_CODE, in_kernel, mm); + } else if (s->event == IBS_OP_BEGIN) { + state = sb_bt_start; + add_ibs_begin(cpu_buf, + IBS_OP_CODE, in_kernel, mm); +#endif } else { - struct mm_struct * oldmm = mm; + struct mm_struct *oldmm = mm; /* userspace context switch */ new = (struct task_struct *)s->event; @@ -533,13 +612,11 @@ void sync_buffer(int cpu) cookie = get_exec_dcookie(mm); add_user_ctx_switch(new, cookie); } - } else { - if (state >= sb_bt_start && - !add_sample(mm, s, in_kernel)) { - if (state == sb_bt_start) { - state = sb_bt_ignore; - atomic_inc(&oprofile_stats.bt_lost_no_mapping); - } + } else if (state >= sb_bt_start && + !add_sample(mm, s, in_kernel)) { + if (state == sb_bt_start) { + state = sb_bt_ignore; + atomic_inc(&oprofile_stats.bt_lost_no_mapping); } } diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c index 7ba78e6d210..e1bd5a937f6 100644 --- a/drivers/oprofile/cpu_buffer.c +++ b/drivers/oprofile/cpu_buffer.c @@ -5,6 +5,7 @@ * @remark Read the file COPYING * * @author John Levon <levon@movementarian.org> + * @author Barry Kasindorf <barry.kasindorf@amd.com> * * Each CPU has a local buffer that stores PC value/event * pairs. We also log context switches when we notice them. 
@@ -209,7 +210,7 @@ static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc, return 1; } -static int oprofile_begin_trace(struct oprofile_cpu_buffer * cpu_buf) +static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf) { if (nr_available_slots(cpu_buf) < 4) { cpu_buf->sample_lost_overflow++; @@ -254,6 +255,75 @@ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event) oprofile_add_ext_sample(pc, regs, event, is_kernel); } +#ifdef CONFIG_OPROFILE_IBS + +#define MAX_IBS_SAMPLE_SIZE 14 +static int log_ibs_sample(struct oprofile_cpu_buffer *cpu_buf, + unsigned long pc, int is_kernel, unsigned int *ibs, int ibs_code) +{ + struct task_struct *task; + + cpu_buf->sample_received++; + + if (nr_available_slots(cpu_buf) < MAX_IBS_SAMPLE_SIZE) { + cpu_buf->sample_lost_overflow++; + return 0; + } + + is_kernel = !!is_kernel; + + /* notice a switch from user->kernel or vice versa */ + if (cpu_buf->last_is_kernel != is_kernel) { + cpu_buf->last_is_kernel = is_kernel; + add_code(cpu_buf, is_kernel); + } + + /* notice a task switch */ + if (!is_kernel) { + task = current; + + if (cpu_buf->last_task != task) { + cpu_buf->last_task = task; + add_code(cpu_buf, (unsigned long)task); + } + } + + add_code(cpu_buf, ibs_code); + add_sample(cpu_buf, ibs[0], ibs[1]); + add_sample(cpu_buf, ibs[2], ibs[3]); + add_sample(cpu_buf, ibs[4], ibs[5]); + + if (ibs_code == IBS_OP_BEGIN) { + add_sample(cpu_buf, ibs[6], ibs[7]); + add_sample(cpu_buf, ibs[8], ibs[9]); + add_sample(cpu_buf, ibs[10], ibs[11]); + } + + return 1; +} + +void oprofile_add_ibs_sample(struct pt_regs *const regs, + unsigned int * const ibs_sample, u8 code) +{ + int is_kernel = !user_mode(regs); + unsigned long pc = profile_pc(regs); + + struct oprofile_cpu_buffer *cpu_buf = + &per_cpu(cpu_buffer, smp_processor_id()); + + if (!backtrace_depth) { + log_ibs_sample(cpu_buf, pc, is_kernel, ibs_sample, code); + return; + } + + /* if log_sample() fails we can't backtrace since we lost the source + * of this event */ + if (log_ibs_sample(cpu_buf, pc, is_kernel, ibs_sample, code)) + oprofile_ops.backtrace(regs, backtrace_depth); +} + +#endif + void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event) { struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); @@ -296,7 +366,7 @@ static void wq_sync_buffer(struct work_struct *work) struct oprofile_cpu_buffer * b = container_of(work, struct oprofile_cpu_buffer, work.work); if (b->cpu != smp_processor_id()) { - printk("WQ on CPU%d, prefer CPU%d\n", + printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n", smp_processor_id(), b->cpu); } sync_buffer(b->cpu); diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h index c3e366b5226..9c44d004da6 100644 --- a/drivers/oprofile/cpu_buffer.h +++ b/drivers/oprofile/cpu_buffer.h @@ -55,5 +55,7 @@ void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf); /* transient events for the CPU buffer -> event buffer */ #define CPU_IS_KERNEL 1 #define CPU_TRACE_BEGIN 2 +#define IBS_FETCH_BEGIN 3 +#define IBS_OP_BEGIN 4 #endif /* OPROFILE_CPU_BUFFER_H */ |
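The buffer_sync.c part of the patch moves increment_tail() above the new IBS code so add_ibs_begin() can walk additional ring-buffer slots. Below is a minimal userspace sketch of that wrap-around tail advance; the struct, names, and main() are illustrative only, and the kernel version also issues rmb() before publishing the new tail:

#include <stdio.h>

struct cpu_buffer {
	unsigned long tail_pos;
	unsigned long buffer_size;
};

static void increment_tail(struct cpu_buffer *b)
{
	unsigned long new_tail = b->tail_pos + 1;

	if (new_tail < b->buffer_size)
		b->tail_pos = new_tail;	/* still inside the buffer */
	else
		b->tail_pos = 0;	/* wrap back to slot 0 */
}

int main(void)
{
	struct cpu_buffer b = { .tail_pos = 0, .buffer_size = 4 };

	for (int i = 0; i < 6; i++) {
		increment_tail(&b);
		printf("tail_pos = %lu\n", b.tail_pos);	/* prints 1 2 3 0 1 2 */
	}
	return 0;
}

Keeping the tail strictly behind the producer's head is what lets sync_buffer() consume samples from the per-CPU buffer without taking a lock against the sampling path.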