Diffstat (limited to 'arch/powerpc/oprofile')
-rw-r--r--  arch/powerpc/oprofile/Makefile                 8
-rw-r--r--  arch/powerpc/oprofile/backtrace.c              2
-rw-r--r--  arch/powerpc/oprofile/cell/pr_util.h          26
-rw-r--r--  arch/powerpc/oprofile/cell/spu_profiler.c     65
-rw-r--r--  arch/powerpc/oprofile/cell/spu_task_sync.c   254
-rw-r--r--  arch/powerpc/oprofile/cell/vma_map.c           5
-rw-r--r--  arch/powerpc/oprofile/common.c                54
-rw-r--r--  arch/powerpc/oprofile/op_model_7450.c         23
-rw-r--r--  arch/powerpc/oprofile/op_model_cell.c        776
-rw-r--r--  arch/powerpc/oprofile/op_model_fsl_emb.c      61
-rw-r--r--  arch/powerpc/oprofile/op_model_pa6t.c          7
-rw-r--r--  arch/powerpc/oprofile/op_model_power4.c      147
-rw-r--r--  arch/powerpc/oprofile/op_model_rs64.c          2
13 files changed, 1158 insertions, 272 deletions
diff --git a/arch/powerpc/oprofile/Makefile b/arch/powerpc/oprofile/Makefile
index 2ef6b0dddd8..751ec7bd501 100644
--- a/arch/powerpc/oprofile/Makefile
+++ b/arch/powerpc/oprofile/Makefile
@@ -1,6 +1,6 @@
-ifeq ($(CONFIG_PPC64),y)
-EXTRA_CFLAGS += -mno-minimal-toc
-endif
+subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
+
+ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
obj-$(CONFIG_OPROFILE) += oprofile.o
@@ -14,6 +14,6 @@ oprofile-y := $(DRIVER_OBJS) common.o backtrace.o
oprofile-$(CONFIG_OPROFILE_CELL) += op_model_cell.o \
cell/spu_profiler.o cell/vma_map.o \
cell/spu_task_sync.o
-oprofile-$(CONFIG_PPC64) += op_model_rs64.o op_model_power4.o op_model_pa6t.o
+oprofile-$(CONFIG_PPC_BOOK3S_64) += op_model_rs64.o op_model_power4.o op_model_pa6t.o
oprofile-$(CONFIG_FSL_EMB_PERFMON) += op_model_fsl_emb.o
oprofile-$(CONFIG_6xx) += op_model_7450.o
diff --git a/arch/powerpc/oprofile/backtrace.c b/arch/powerpc/oprofile/backtrace.c
index b4278cfd1f8..f75301f2c85 100644
--- a/arch/powerpc/oprofile/backtrace.c
+++ b/arch/powerpc/oprofile/backtrace.c
@@ -105,7 +105,7 @@ void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth)
}
} else {
#ifdef CONFIG_PPC64
- if (!test_thread_flag(TIF_32BIT)) {
+ if (!is_32bit_task()) {
while (depth--) {
sp = user_getsp64(sp, first_frame);
if (!sp)
diff --git a/arch/powerpc/oprofile/cell/pr_util.h b/arch/powerpc/oprofile/cell/pr_util.h
index 22e4e8d4eb2..964b93974d8 100644
--- a/arch/powerpc/oprofile/cell/pr_util.h
+++ b/arch/powerpc/oprofile/cell/pr_util.h
@@ -24,6 +24,15 @@
#define SKIP_GENERIC_SYNC 0
#define SYNC_START_ERROR -1
#define DO_GENERIC_SYNC 1
+#define SPUS_PER_NODE 8
+#define DEFAULT_TIMER_EXPIRE (HZ / 10)
+
+extern struct delayed_work spu_work;
+extern int spu_prof_running;
+
+#define TRACE_ARRAY_SIZE 1024
+
+extern spinlock_t oprof_spu_smpl_arry_lck;
struct spu_overlay_info { /* map of sections within an SPU overlay */
unsigned int vma; /* SPU virtual memory address from elf */
@@ -62,11 +71,19 @@ struct vma_to_fileoffset_map { /* map of sections within an SPU program */
};
+struct spu_buffer {
+ int last_guard_val;
+ int ctx_sw_seen;
+ unsigned long *buff;
+ unsigned int head, tail;
+};
+
+
/* The three functions below are for maintaining and accessing
* the vma-to-fileoffset map.
*/
struct vma_to_fileoffset_map *create_vma_map(const struct spu *spu,
- u64 objectid);
+ unsigned long objectid);
unsigned int vma_map_lookup(struct vma_to_fileoffset_map *map,
unsigned int vma, const struct spu *aSpu,
int *grd_val);
@@ -76,10 +93,11 @@ void vma_map_free(struct vma_to_fileoffset_map *map);
* Entry point for SPU profiling.
* cycles_reset is the SPU_CYCLES count value specified by the user.
*/
-int start_spu_profiling(unsigned int cycles_reset);
-
-void stop_spu_profiling(void);
+int start_spu_profiling_cycles(unsigned int cycles_reset);
+void start_spu_profiling_events(void);
+void stop_spu_profiling_cycles(void);
+void stop_spu_profiling_events(void);
/* add the necessary profiling hooks */
int spu_sync_start(void);
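The declarations added to pr_util.h above (spu_work, spu_prof_running, DEFAULT_TIMER_EXPIRE and the split start/stop entry points) all serve one pattern: a delayed work item that keeps rescheduling itself while profiling is active and flushes the per-SPU buffers each time it runs. A minimal sketch of that lifecycle, condensed from the spu_profiler.c and spu_task_sync.c hunks below; the start_flush/stop_flush names and the empty flush body are illustrative only:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work spu_work;
static int spu_prof_running;
#define DEFAULT_TIMER_EXPIRE	(HZ / 10)

static void wq_sync_spu_buff(struct work_struct *work)
{
	/* sync_spu_buff() would drain the per-SPU buffers here */

	/* re-arm every HZ/10 jiffies while profiling is active */
	if (spu_prof_running)
		schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
}

static void start_flush(void)
{
	INIT_DELAYED_WORK(&spu_work, wq_sync_spu_buff);
	spu_prof_running = 1;
	schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
}

static void stop_flush(void)
{
	spu_prof_running = 0;		/* stop the self-rescheduling */
	cancel_delayed_work(&spu_work);	/* drop any queued instance */
}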
diff --git a/arch/powerpc/oprofile/cell/spu_profiler.c b/arch/powerpc/oprofile/cell/spu_profiler.c
index 380d7e21753..b129d007e7f 100644
--- a/arch/powerpc/oprofile/cell/spu_profiler.c
+++ b/arch/powerpc/oprofile/cell/spu_profiler.c
@@ -16,24 +16,34 @@
#include <linux/smp.h>
#include <linux/slab.h>
#include <asm/cell-pmu.h>
+#include <asm/time.h>
#include "pr_util.h"
-#define TRACE_ARRAY_SIZE 1024
#define SCALE_SHIFT 14
static u32 *samples;
-static int spu_prof_running;
+/* spu_prof_running is a flag used to indicate if spu profiling is enabled
+ * or not. It is set by the routines start_spu_profiling_cycles() and
+ * start_spu_profiling_events(). The flag is cleared by the routines
+ * stop_spu_profiling_cycles() and stop_spu_profiling_events(). These
+ * routines are called via global_start() and global_stop() which are called in
+ * op_powerpc_start() and op_powerpc_stop(). These routines are called once
+ * per system as a result of the user starting/stopping oprofile. Hence, only
+ * one CPU per user at a time will be changing the value of spu_prof_running.
+ * In general, OProfile does not protect against multiple users trying to run
+ * OProfile at a time.
+ */
+int spu_prof_running;
static unsigned int profiling_interval;
#define NUM_SPU_BITS_TRBUF 16
#define SPUS_PER_TB_ENTRY 4
-#define SPUS_PER_NODE 8
#define SPU_PC_MASK 0xFFFF
-static DEFINE_SPINLOCK(sample_array_lock);
-unsigned long sample_array_lock_flags;
+DEFINE_SPINLOCK(oprof_spu_smpl_arry_lck);
+unsigned long oprof_spu_smpl_arry_lck_flags;
void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset)
{
@@ -50,7 +60,7 @@ void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_rese
* of precision. This is close enough for the purpose at hand.
*
* The value of the timeout should be small enough that the hw
- * trace buffer will not get more then about 1/3 full for the
+ * trace buffer will not get more than about 1/3 full for the
* maximum user specified (the LFSR value) hw sampling frequency.
* This is to ensure the trace buffer will never fill even if the
* kernel thread scheduling varies under a heavy system load.
@@ -146,13 +156,13 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer)
* sample array must be loaded and then processed for a given
* cpu. The sample array is not per cpu.
*/
- spin_lock_irqsave(&sample_array_lock,
- sample_array_lock_flags);
+ spin_lock_irqsave(&oprof_spu_smpl_arry_lck,
+ oprof_spu_smpl_arry_lck_flags);
num_samples = cell_spu_pc_collection(cpu);
if (num_samples == 0) {
- spin_unlock_irqrestore(&sample_array_lock,
- sample_array_lock_flags);
+ spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
+ oprof_spu_smpl_arry_lck_flags);
continue;
}
@@ -163,8 +173,8 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer)
num_samples);
}
- spin_unlock_irqrestore(&sample_array_lock,
- sample_array_lock_flags);
+ spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
+ oprof_spu_smpl_arry_lck_flags);
}
smp_wmb(); /* insure spu event buffer updates are written */
@@ -183,20 +193,20 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer)
static struct hrtimer timer;
/*
- * Entry point for SPU profiling.
+ * Entry point for SPU cycle profiling.
* NOTE: SPU profiling is done system-wide, not per-CPU.
*
* cycles_reset is the count value specified by the user when
* setting up OProfile to count SPU_CYCLES.
*/
-int start_spu_profiling(unsigned int cycles_reset)
+int start_spu_profiling_cycles(unsigned int cycles_reset)
{
ktime_t kt;
pr_debug("timer resolution: %lu\n", TICK_NSEC);
kt = ktime_set(0, profiling_interval);
hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- timer.expires = kt;
+ hrtimer_set_expires(&timer, kt);
timer.function = profile_spus;
/* Allocate arrays for collecting SPU PC samples */
@@ -208,14 +218,35 @@ int start_spu_profiling(unsigned int cycles_reset)
spu_prof_running = 1;
hrtimer_start(&timer, kt, HRTIMER_MODE_REL);
+ schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
return 0;
}
-void stop_spu_profiling(void)
+/*
+ * Entry point for SPU event profiling.
+ * NOTE: SPU profiling is done system-wide, not per-CPU.
+ *
+ * This routine only sets the profiling-active flag and schedules the
+ * delayed work that flushes the per-SPU buffers to the event buffer.
+ */
+void start_spu_profiling_events(void)
+{
+ spu_prof_running = 1;
+ schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
+
+ return;
+}
+
+void stop_spu_profiling_cycles(void)
{
spu_prof_running = 0;
hrtimer_cancel(&timer);
kfree(samples);
- pr_debug("SPU_PROF: stop_spu_profiling issued\n");
+ pr_debug("SPU_PROF: stop_spu_profiling_cycles issued\n");
+}
+
+void stop_spu_profiling_events(void)
+{
+ spu_prof_running = 0;
}
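The cycle-profiling path above keeps using a periodic hrtimer, only switching from the removed direct field assignment to hrtimer_set_expires(). For reference, this is the generic self-restarting hrtimer pattern it relies on; sample_cb and the start/stop helpers are illustrative stand-ins, not the real profile_spus():

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer timer;
static unsigned int profiling_interval;	/* nanoseconds between samples */

static enum hrtimer_restart sample_cb(struct hrtimer *t)
{
	/* read the hardware trace buffers and queue samples here */

	/* push the expiry forward one interval and keep the timer running */
	hrtimer_forward_now(t, ktime_set(0, profiling_interval));
	return HRTIMER_RESTART;
}

static void start_sampling_timer(void)
{
	ktime_t kt = ktime_set(0, profiling_interval);

	hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_set_expires(&timer, kt);	/* was: timer.expires = kt */
	timer.function = sample_cb;
	hrtimer_start(&timer, kt, HRTIMER_MODE_REL);
}

static void stop_sampling_timer(void)
{
	hrtimer_cancel(&timer);
}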
diff --git a/arch/powerpc/oprofile/cell/spu_task_sync.c b/arch/powerpc/oprofile/cell/spu_task_sync.c
index 2a9b4a04932..28f1af2db1f 100644
--- a/arch/powerpc/oprofile/cell/spu_task_sync.c
+++ b/arch/powerpc/oprofile/cell/spu_task_sync.c
@@ -26,6 +26,7 @@
#include <linux/notifier.h>
#include <linux/numa.h>
#include <linux/oprofile.h>
+#include <linux/slab.h>
#include <linux/spinlock.h>
#include "pr_util.h"
@@ -35,7 +36,102 @@ static DEFINE_SPINLOCK(buffer_lock);
static DEFINE_SPINLOCK(cache_lock);
static int num_spu_nodes;
int spu_prof_num_nodes;
-int last_guard_val[MAX_NUMNODES * 8];
+
+struct spu_buffer spu_buff[MAX_NUMNODES * SPUS_PER_NODE];
+struct delayed_work spu_work;
+static unsigned max_spu_buff;
+
+static void spu_buff_add(unsigned long int value, int spu)
+{
+ /* spu buff is a circular buffer. Add entries to the
+ * head. Head is the index to store the next value.
+ * The buffer is considered full when only one free entry remains,
+ * i.e. the head is never allowed to catch up to the tail.
+ * That way we can tell the difference between the
+ * buffer being full versus empty (head == tail means empty).
+ *
+ * ASSUMPTION: the buffer_lock is held when this function
+ * is called to lock the buffer, head and tail.
+ */
+ int full = 1;
+
+ if (spu_buff[spu].head >= spu_buff[spu].tail) {
+ if ((spu_buff[spu].head - spu_buff[spu].tail)
+ < (max_spu_buff - 1))
+ full = 0;
+
+ } else if (spu_buff[spu].tail > spu_buff[spu].head) {
+ if ((spu_buff[spu].tail - spu_buff[spu].head)
+ > 1)
+ full = 0;
+ }
+
+ if (!full) {
+ spu_buff[spu].buff[spu_buff[spu].head] = value;
+ spu_buff[spu].head++;
+
+ if (spu_buff[spu].head >= max_spu_buff)
+ spu_buff[spu].head = 0;
+ } else {
+ /* From the user's perspective make the SPU buffer
+ * size management/overflow look like we are using
+ * per cpu buffers. The user uses the same
+ * per cpu parameter to adjust the SPU buffer size.
+ * Increment the sample_lost_overflow to inform
+ * the user the buffer size needs to be increased.
+ */
+ oprofile_cpu_buffer_inc_smpl_lost();
+ }
+}
+
+/* This function copies the per SPU buffers to the
+ * OProfile kernel buffer.
+ */
+void sync_spu_buff(void)
+{
+ int spu;
+ unsigned long flags;
+ int curr_head;
+
+ for (spu = 0; spu < num_spu_nodes; spu++) {
+ /* In case there was an issue and the buffer didn't
+ * get created skip it.
+ */
+ if (spu_buff[spu].buff == NULL)
+ continue;
+
+ /* Hold the lock to make sure the head/tail
+ * doesn't change while spu_buff_add() is
+ * deciding if the buffer is full or not.
+ * Being a little paranoid.
+ */
+ spin_lock_irqsave(&buffer_lock, flags);
+ curr_head = spu_buff[spu].head;
+ spin_unlock_irqrestore(&buffer_lock, flags);
+
+ /* Transfer the current contents to the kernel buffer.
+ * data can still be added to the head of the buffer.
+ */
+ oprofile_put_buff(spu_buff[spu].buff,
+ spu_buff[spu].tail,
+ curr_head, max_spu_buff);
+
+ spin_lock_irqsave(&buffer_lock, flags);
+ spu_buff[spu].tail = curr_head;
+ spin_unlock_irqrestore(&buffer_lock, flags);
+ }
+
+}
+
+static void wq_sync_spu_buff(struct work_struct *work)
+{
+ /* move data from spu buffers to kernel buffer */
+ sync_spu_buff();
+
+ /* only reschedule if profiling is not done */
+ if (spu_prof_running)
+ schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
+}
/* Container for caching information about an active SPU task. */
struct cached_info {
@@ -202,13 +298,13 @@ static inline unsigned long fast_get_dcookie(struct path *path)
{
unsigned long cookie;
- if (path->dentry->d_cookie)
+ if (path->dentry->d_flags & DCACHE_COOKIE)
return (unsigned long)path->dentry;
get_dcookie(path, &cookie);
return cookie;
}
-/* Look up the dcookie for the task's first VM_EXECUTABLE mapping,
+/* Look up the dcookie for the task's mm->exe_file,
* which corresponds loosely to "application name". Also, determine
* the offset for the SPU ELF object. If computed offset is
* non-zero, it implies an embedded SPU object; otherwise, it's a
@@ -225,7 +321,6 @@ get_exec_dcookie_and_offset(struct spu *spu, unsigned int *offsetp,
{
unsigned long app_cookie = 0;
unsigned int my_offset = 0;
- struct file *app = NULL;
struct vm_area_struct *vma;
struct mm_struct *mm = spu->mm;
@@ -234,16 +329,10 @@ get_exec_dcookie_and_offset(struct spu *spu, unsigned int *offsetp,
down_read(&mm->mmap_sem);
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- if (!vma->vm_file)
- continue;
- if (!(vma->vm_flags & VM_EXECUTABLE))
- continue;
- app_cookie = fast_get_dcookie(&vma->vm_file->f_path);
+ if (mm->exe_file) {
+ app_cookie = fast_get_dcookie(&mm->exe_file->f_path);
pr_debug("got dcookie for %s\n",
- vma->vm_file->f_dentry->d_name.name);
- app = vma->vm_file;
- break;
+ mm->exe_file->f_dentry->d_name.name);
}
for (vma = mm->mmap; vma; vma = vma->vm_next) {
@@ -305,14 +394,21 @@ static int process_context_switch(struct spu *spu, unsigned long objectId)
/* Record context info in event buffer */
spin_lock_irqsave(&buffer_lock, flags);
- add_event_entry(ESCAPE_CODE);
- add_event_entry(SPU_CTX_SWITCH_CODE);
- add_event_entry(spu->number);
- add_event_entry(spu->pid);
- add_event_entry(spu->tgid);
- add_event_entry(app_dcookie);
- add_event_entry(spu_cookie);
- add_event_entry(offset);
+ spu_buff_add(ESCAPE_CODE, spu->number);
+ spu_buff_add(SPU_CTX_SWITCH_CODE, spu->number);
+ spu_buff_add(spu->number, spu->number);
+ spu_buff_add(spu->pid, spu->number);
+ spu_buff_add(spu->tgid, spu->number);
+ spu_buff_add(app_dcookie, spu->number);
+ spu_buff_add(spu_cookie, spu->number);
+ spu_buff_add(offset, spu->number);
+
+ /* Set flag to indicate SPU PC data can now be written out. If
+ * the SPU program counter data is seen before an SPU context
+ * record is seen, the postprocessing will fail.
+ */
+ spu_buff[spu->number].ctx_sw_seen = 1;
+
spin_unlock_irqrestore(&buffer_lock, flags);
smp_wmb(); /* insure spu event buffer updates are written */
/* don't want entries intermingled... */
@@ -360,6 +456,47 @@ static int number_of_online_nodes(void)
return nodes;
}
+static int oprofile_spu_buff_create(void)
+{
+ int spu;
+
+ max_spu_buff = oprofile_get_cpu_buffer_size();
+
+ for (spu = 0; spu < num_spu_nodes; spu++) {
+ /* create circular buffers to store the data in.
+ * use locks to manage accessing the buffers
+ */
+ spu_buff[spu].head = 0;
+ spu_buff[spu].tail = 0;
+
+ /*
+ * Create a buffer for each SPU. Can't reliably
+ * create a single buffer for all spus due to not
+ * enough contiguous kernel memory.
+ */
+
+ spu_buff[spu].buff = kzalloc((max_spu_buff
+ * sizeof(unsigned long)),
+ GFP_KERNEL);
+
+ if (!spu_buff[spu].buff) {
+ printk(KERN_ERR "SPU_PROF: "
+ "%s, line %d: oprofile_spu_buff_create "
+ "failed to allocate spu buffer %d.\n",
+ __func__, __LINE__, spu);
+
+ /* release the spu buffers that have been allocated */
+ while (spu >= 0) {
+ kfree(spu_buff[spu].buff);
+ spu_buff[spu].buff = 0;
+ spu--;
+ }
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
/* The main purpose of this function is to synchronize
* OProfile with SPUFS by registering to be notified of
* SPU task switches.
@@ -372,20 +509,35 @@ static int number_of_online_nodes(void)
*/
int spu_sync_start(void)
{
- int k;
+ int spu;
int ret = SKIP_GENERIC_SYNC;
int register_ret;
unsigned long flags = 0;
spu_prof_num_nodes = number_of_online_nodes();
num_spu_nodes = spu_prof_num_nodes * 8;
+ INIT_DELAYED_WORK(&spu_work, wq_sync_spu_buff);
+
+ /* create buffer for storing the SPU data to put in
+ * the kernel buffer.
+ */
+ ret = oprofile_spu_buff_create();
+ if (ret)
+ goto out;
spin_lock_irqsave(&buffer_lock, flags);
- add_event_entry(ESCAPE_CODE);
- add_event_entry(SPU_PROFILING_CODE);
- add_event_entry(num_spu_nodes);
+ for (spu = 0; spu < num_spu_nodes; spu++) {
+ spu_buff_add(ESCAPE_CODE, spu);
+ spu_buff_add(SPU_PROFILING_CODE, spu);
+ spu_buff_add(num_spu_nodes, spu);
+ }
spin_unlock_irqrestore(&buffer_lock, flags);
+ for (spu = 0; spu < num_spu_nodes; spu++) {
+ spu_buff[spu].ctx_sw_seen = 0;
+ spu_buff[spu].last_guard_val = 0;
+ }
+
/* Register for SPU events */
register_ret = spu_switch_event_register(&spu_active);
if (register_ret) {
@@ -393,8 +545,6 @@ int spu_sync_start(void)
goto out;
}
- for (k = 0; k < (MAX_NUMNODES * 8); k++)
- last_guard_val[k] = 0;
pr_debug("spu_sync_start -- running.\n");
out:
return ret;
@@ -446,13 +596,20 @@ void spu_sync_buffer(int spu_num, unsigned int *samples,
* use. We need to discard samples taken during the time
* period which an overlay occurs (i.e., guard value changes).
*/
- if (grd_val && grd_val != last_guard_val[spu_num]) {
- last_guard_val[spu_num] = grd_val;
+ if (grd_val && grd_val != spu_buff[spu_num].last_guard_val) {
+ spu_buff[spu_num].last_guard_val = grd_val;
/* Drop the rest of the samples. */
break;
}
- add_event_entry(file_offset | spu_num_shifted);
+ /* We must ensure that the SPU context switch has been written
+ * out before samples for the SPU. Otherwise, the SPU context
+ * information is not available and the postprocessing of the
+ * SPU PC will fail with no available anonymous map information.
+ */
+ if (spu_buff[spu_num].ctx_sw_seen)
+ spu_buff_add((file_offset | spu_num_shifted),
+ spu_num);
}
spin_unlock(&buffer_lock);
out:
@@ -463,20 +620,41 @@ out:
int spu_sync_stop(void)
{
unsigned long flags = 0;
- int ret = spu_switch_event_unregister(&spu_active);
- if (ret) {
+ int ret;
+ int k;
+
+ ret = spu_switch_event_unregister(&spu_active);
+
+ if (ret)
printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: spu_switch_event_unregister returned %d\n",
- __func__, __LINE__, ret);
- goto out;
- }
+ "%s, line %d: spu_switch_event_unregister " \
+ "returned %d\n",
+ __func__, __LINE__, ret);
+
+ /* flush any remaining data in the per SPU buffers */
+ sync_spu_buff();
spin_lock_irqsave(&cache_lock, flags);
ret = release_cached_info(RELEASE_ALL);
spin_unlock_irqrestore(&cache_lock, flags);
-out:
+
+ /* remove scheduled work queue item rather than waiting
+ * for every queued entry to execute. Then flush pending
+ * system wide buffer to event buffer.
+ */
+ cancel_delayed_work(&spu_work);
+
+ for (k = 0; k < num_spu_nodes; k++) {
+ spu_buff[k].ctx_sw_seen = 0;
+
+ /*
+ * spu_buff[k].buff will be NULL if there was a problem
+ * allocating the buffer. Only delete if it exists.
+ */
+ kfree(spu_buff[k].buff);
+ spu_buff[k].buff = 0;
+ }
pr_debug("spu_sync_stop -- done.\n");
return ret;
}
-
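The head/tail bookkeeping in spu_buff_add() above amounts to the usual "keep one slot free" circular-buffer test: the buffer is empty when head == tail and full when advancing head would land on tail. A compact equivalent sketch; the helper names are illustrative and the real patch open-codes the test against max_spu_buff:

/* Full when advancing head would land on tail; one slot always stays
 * unused so full and empty remain distinguishable.
 */
static int spu_buff_full(unsigned int head, unsigned int tail,
			 unsigned int max_spu_buff)
{
	return ((head + 1) % max_spu_buff) == tail;
}

static void spu_buff_add_sketch(unsigned long *buff, unsigned int *head,
				unsigned int tail, unsigned int max_spu_buff,
				unsigned long value)
{
	if (spu_buff_full(*head, tail, max_spu_buff))
		return;		/* the patch counts this as a lost sample */

	buff[*head] = value;
	*head = (*head + 1) % max_spu_buff;
}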
diff --git a/arch/powerpc/oprofile/cell/vma_map.c b/arch/powerpc/oprofile/cell/vma_map.c
index fff66662d02..c579b16845d 100644
--- a/arch/powerpc/oprofile/cell/vma_map.c
+++ b/arch/powerpc/oprofile/cell/vma_map.c
@@ -20,6 +20,7 @@
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/elf.h>
+#include <linux/slab.h>
#include "pr_util.h"
@@ -185,7 +186,7 @@ struct vma_to_fileoffset_map *create_vma_map(const struct spu *aSpu,
goto fail;
if (shdr_str.sh_type != SHT_STRTAB)
- goto fail;;
+ goto fail;
for (j = 0; j < shdr.sh_size / sizeof (sym); j++) {
if (copy_from_user(&sym, spu_elf_start +
@@ -229,7 +230,7 @@ struct vma_to_fileoffset_map *create_vma_map(const struct spu *aSpu,
*/
overlay_tbl_offset = vma_map_lookup(map, ovly_table_sym,
aSpu, &grd_val);
- if (overlay_tbl_offset < 0) {
+ if (overlay_tbl_offset > 0x10000000) {
printk(KERN_ERR "SPU_PROF: "
"%s, line %d: Error finding SPU overlay table\n",
__func__, __LINE__);
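The vma_map.c change above replaces a test that could never fire: vma_map_lookup() returns unsigned int (see the pr_util.h hunk), so comparing its result with "< 0" is always false. The new upper-bound check assumes the lookup signals failure with a very large sentinel value, far above any plausible offset into the 256 KB SPU local store. A small standalone illustration; the -1 sentinel here is an assumption for demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int offset = (unsigned int)-1;	/* assumed failure value */

	if (offset < 0)			/* always false for an unsigned type */
		puts("never reached");

	if (offset > 0x10000000)	/* the replacement check */
		puts("lookup failed");

	return 0;
}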
diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c
index 17807acb05d..c77348c5d46 100644
--- a/arch/powerpc/oprofile/common.c
+++ b/arch/powerpc/oprofile/common.c
@@ -18,7 +18,6 @@
#include <linux/smp.h>
#include <linux/errno.h>
#include <asm/ptrace.h>
-#include <asm/system.h>
#include <asm/pmc.h>
#include <asm/cputable.h>
#include <asm/oprofile_impl.h>
@@ -120,7 +119,7 @@ static void op_powerpc_stop(void)
model->global_stop();
}
-static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
+static int op_powerpc_create_files(struct dentry *root)
{
int i;
@@ -129,9 +128,31 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
* There is one mmcr0, mmcr1 and mmcra for setting the events for
* all of the counters.
*/
- oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0);
- oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1);
- oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra);
+ oprofilefs_create_ulong(root, "mmcr0", &sys.mmcr0);
+ oprofilefs_create_ulong(root, "mmcr1", &sys.mmcr1);
+ oprofilefs_create_ulong(root, "mmcra", &sys.mmcra);
+#ifdef CONFIG_OPROFILE_CELL
+ /* create a file the user tool can check to see what level of profiling
+ * support exists with this kernel. Initialize bit mask to indicate
+ * what support the kernel has:
+ * bit 0 - Supports SPU event profiling in addition to PPU
+ * event and cycles; and SPU cycle profiling
+ * bits 1-31 - Currently unused.
+ *
+ * If the file does not exist, then the kernel only supports SPU
+ * cycle profiling, PPU event and cycle profiling.
+ */
+ oprofilefs_create_ulong(root, "cell_support", &sys.cell_support);
+ sys.cell_support = 0x1; /* Note, the user OProfile tool must check
+ * that this bit is set before attempting to
+ * use SPU event profiling. Older kernels
+ * will not have this file, hence the user
+ * tool is not allowed to do SPU event
+ * profiling on older kernels. Older kernels
+ * will accept SPU events but collected data
+ * is garbage.
+ */
+#endif
#endif
for (i = 0; i < model->num_counters; ++i) {
@@ -139,11 +160,11 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
char buf[4];
snprintf(buf, sizeof buf, "%d", i);
- dir = oprofilefs_mkdir(sb, root, buf);
+ dir = oprofilefs_mkdir(root, buf);
- oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
- oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
- oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
+ oprofilefs_create_ulong(dir, "enabled", &ctr[i].enabled);
+ oprofilefs_create_ulong(dir, "event", &ctr[i].event);
+ oprofilefs_create_ulong(dir, "count", &ctr[i].count);
/*
* Classic PowerPC doesn't support per-counter
@@ -152,14 +173,14 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
* Book-E style performance monitors, we do
* support them.
*/
- oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
- oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
+ oprofilefs_create_ulong(dir, "kernel", &ctr[i].kernel);
+ oprofilefs_create_ulong(dir, "user", &ctr[i].user);
- oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
+ oprofilefs_create_ulong(dir, "unit_mask", &ctr[i].unit_mask);
}
- oprofilefs_create_ulong(sb, root, "enable_kernel", &sys.enable_kernel);
- oprofilefs_create_ulong(sb, root, "enable_user", &sys.enable_user);
+ oprofilefs_create_ulong(root, "enable_kernel", &sys.enable_kernel);
+ oprofilefs_create_ulong(root, "enable_user", &sys.enable_user);
/* Default to tracing both kernel and user */
sys.enable_kernel = 1;
@@ -173,11 +194,8 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
if (!cur_cpu_spec->oprofile_cpu_type)
return -ENODEV;
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- return -ENODEV;
-
switch (cur_cpu_spec->oprofile_type) {
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_OPROFILE_CELL
case PPC_OPROFILE_CELL:
if (firmware_has_feature(FW_FEATURE_LPAR))
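The cell_support file described above lets the userspace tool detect whether SPU event profiling is available before enabling it. A rough sketch of that check; the /dev/oprofile mount point and the decimal formatting of oprofilefs ulong files are assumptions about the usual OProfile conventions, not something this patch defines:

#include <stdio.h>

/* Returns 1 if the kernel advertises SPU event profiling (bit 0 set),
 * 0 otherwise, including when the file is missing on older kernels.
 */
static int spu_event_profiling_supported(void)
{
	unsigned long mask = 0;
	FILE *f = fopen("/dev/oprofile/cell_support", "r");

	if (!f)
		return 0;	/* no file: SPU cycles and PPU profiling only */

	if (fscanf(f, "%lu", &mask) != 1)
		mask = 0;
	fclose(f);

	return (mask & 0x1) != 0;
}

int main(void)
{
	printf("SPU event profiling %ssupported\n",
	       spu_event_profiling_supported() ? "" : "not ");
	return 0;
}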
diff --git a/arch/powerpc/oprofile/op_model_7450.c b/arch/powerpc/oprofile/op_model_7450.c
index cc599eb8768..d29b6e4e5e7 100644
--- a/arch/powerpc/oprofile/op_model_7450.c
+++ b/arch/powerpc/oprofile/op_model_7450.c
@@ -16,10 +16,8 @@
*/
#include <linux/oprofile.h>
-#include <linux/init.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
-#include <asm/system.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/page.h>
@@ -29,7 +27,7 @@
static unsigned long reset_value[OP_MAX_COUNTER];
static int oprofile_running;
-static u32 mmcr0_val, mmcr1_val, mmcr2_val;
+static u32 mmcr0_val, mmcr1_val, mmcr2_val, num_pmcs;
#define MMCR0_PMC1_SHIFT 6
#define MMCR0_PMC2_SHIFT 0
@@ -88,13 +86,12 @@ static int fsl7450_cpu_setup(struct op_counter_config *ctr)
mtspr(SPRN_MMCR0, mmcr0_val);
mtspr(SPRN_MMCR1, mmcr1_val);
- mtspr(SPRN_MMCR2, mmcr2_val);
+ if (num_pmcs > 4)
+ mtspr(SPRN_MMCR2, mmcr2_val);
return 0;
}
-#define NUM_CTRS 6
-
/* Configures the global settings for the countes on all CPUs. */
static int fsl7450_reg_setup(struct op_counter_config *ctr,
struct op_system_config *sys,
@@ -102,12 +99,13 @@ static int fsl7450_reg_setup(struct op_counter_config *ctr,
{
int i;
+ num_pmcs = num_ctrs;
/* Our counters count up, and "count" refers to
* how much before the next interrupt, and we interrupt
* on overflow. So we calculate the starting value
* which will give us "count" until overflow.
* Then we set the events on the enabled counters */
- for (i = 0; i < NUM_CTRS; ++i)
+ for (i = 0; i < num_ctrs; ++i)
reset_value[i] = 0x80000000UL - ctr[i].count;
/* Set events for Counters 1 & 2 */
@@ -123,9 +121,10 @@ static int fsl7450_reg_setup(struct op_counter_config *ctr,
/* Set events for Counters 3-6 */
mmcr1_val = mmcr1_event3(ctr[2].event)
- | mmcr1_event4(ctr[3].event)
- | mmcr1_event5(ctr[4].event)
- | mmcr1_event6(ctr[5].event);
+ | mmcr1_event4(ctr[3].event);
+ if (num_ctrs > 4)
+ mmcr1_val |= mmcr1_event5(ctr[4].event)
+ | mmcr1_event6(ctr[5].event);
mmcr2_val = 0;
@@ -139,7 +138,7 @@ static int fsl7450_start(struct op_counter_config *ctr)
mtmsr(mfmsr() | MSR_PMM);
- for (i = 0; i < NUM_CTRS; ++i) {
+ for (i = 0; i < num_pmcs; ++i) {
if (ctr[i].enabled)
classic_ctr_write(i, reset_value[i]);
else
@@ -184,7 +183,7 @@ static void fsl7450_handle_interrupt(struct pt_regs *regs,
pc = mfspr(SPRN_SIAR);
is_kernel = is_kernel_addr(pc);
- for (i = 0; i < NUM_CTRS; ++i) {
+ for (i = 0; i < num_pmcs; ++i) {
val = classic_ctr_read(i);
if (val < 0) {
if (oprofile_running && ctr[i].enabled) {
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
index 5ff4de3eb3b..863d89386f6 100644
--- a/arch/powerpc/oprofile/op_model_cell.c
+++ b/arch/powerpc/oprofile/op_model_cell.c
@@ -16,7 +16,6 @@
#include <linux/cpufreq.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/oprofile.h>
@@ -34,20 +33,20 @@
#include <asm/ptrace.h>
#include <asm/reg.h>
#include <asm/rtas.h>
-#include <asm/system.h>
#include <asm/cell-regs.h>
#include "../platforms/cell/interrupt.h"
#include "cell/pr_util.h"
-static void cell_global_stop_spu(void);
+#define PPU_PROFILING 0
+#define SPU_PROFILING_CYCLES 1
+#define SPU_PROFILING_EVENTS 2
-/*
- * spu_cycle_reset is the number of cycles between samples.
- * This variable is used for SPU profiling and should ONLY be set
- * at the beginning of cell_reg_setup; otherwise, it's read-only.
- */
-static unsigned int spu_cycle_reset;
+#define SPU_EVENT_NUM_START 4100
+#define SPU_EVENT_NUM_STOP 4399
+#define SPU_PROFILE_EVENT_ADDR 4363 /* spu, address trace, decimal */
+#define SPU_PROFILE_EVENT_ADDR_MASK_A 0x146 /* sub unit set to zero */
+#define SPU_PROFILE_EVENT_ADDR_MASK_B 0x186 /* sub unit set to zero */
#define NUM_SPUS_PER_NODE 8
#define SPU_CYCLES_EVENT_NUM 2 /* event number for SPU_CYCLES */
@@ -66,6 +65,21 @@ static unsigned int spu_cycle_reset;
#define MAX_SPU_COUNT 0xFFFFFF /* maximum 24 bit LFSR value */
+/* Minimum HW interval timer setting to send value to trace buffer is 10 cycles.
+ * To configure counter to send value every N cycles set counter to
+ * 2^32 - 1 - N.
+ */
+#define NUM_INTERVAL_CYC 0xFFFFFFFF - 10
+
+/*
+ * spu_cycle_reset is the number of cycles between samples.
+ * This variable is used for SPU profiling and should ONLY be set
+ * at the beginning of cell_reg_setup; otherwise, it's read-only.
+ */
+static unsigned int spu_cycle_reset;
+static unsigned int profiling_mode;
+static int spu_evnt_phys_spu_indx;
+
struct pmc_cntrl_data {
unsigned long vcntr;
unsigned long evnts;
@@ -105,6 +119,8 @@ struct pm_cntrl {
u16 trace_mode;
u16 freeze;
u16 count_mode;
+ u16 spu_addr_trace;
+ u8 trace_buf_ovflw;
};
static struct {
@@ -122,7 +138,7 @@ static struct {
#define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2)
static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values);
-
+static unsigned long spu_pm_cnt[MAX_NUMNODES * NUM_SPUS_PER_NODE];
static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS];
/*
@@ -152,6 +168,7 @@ static u32 hdw_thread;
static u32 virt_cntr_inter_mask;
static struct timer_list timer_virt_cntr;
+static struct timer_list timer_spu_event_swap;
/*
* pm_signal needs to be global since it is initialized in
@@ -165,7 +182,7 @@ static int spu_rtas_token; /* token for SPU cycle profiling */
static u32 reset_value[NR_PHYS_CTRS];
static int num_counters;
static int oprofile_running;
-static DEFINE_SPINLOCK(virt_cntr_lock);
+static DEFINE_SPINLOCK(cntr_lock);
static u32 ctr_enabled;
@@ -229,7 +246,7 @@ static int pm_rtas_activate_signals(u32 node, u32 count)
* There is no debug setup required for the cycles event.
* Note that only events in the same group can be used.
* Otherwise, there will be conflicts in correctly routing
- * the signals on the debug bus. It is the responsiblity
+ * the signals on the debug bus. It is the responsibility
* of the OProfile user tool to check the events are in
* the same group.
*/
@@ -336,13 +353,13 @@ static void set_pm_event(u32 ctr, int event, u32 unit_mask)
for (i = 0; i < NUM_DEBUG_BUS_WORDS; i++) {
if (bus_word & (1 << i)) {
pm_regs.debug_bus_control |=
- (bus_type << (30 - (2 * i)));
+ (bus_type << (30 - (2 * i)));
for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) {
if (input_bus[j] == 0xff) {
input_bus[j] = i;
pm_regs.group_control |=
- (i << (30 - (2 * j)));
+ (i << (30 - (2 * j)));
break;
}
@@ -367,12 +384,16 @@ static void write_pm_cntrl(int cpu)
if (pm_regs.pm_cntrl.stop_at_max == 1)
val |= CBE_PM_STOP_AT_MAX;
- if (pm_regs.pm_cntrl.trace_mode == 1)
+ if (pm_regs.pm_cntrl.trace_mode != 0)
val |= CBE_PM_TRACE_MODE_SET(pm_regs.pm_cntrl.trace_mode);
+ if (pm_regs.pm_cntrl.trace_buf_ovflw == 1)
+ val |= CBE_PM_TRACE_BUF_OVFLW(pm_regs.pm_cntrl.trace_buf_ovflw);
if (pm_regs.pm_cntrl.freeze == 1)
val |= CBE_PM_FREEZE_ALL_CTRS;
+ val |= CBE_PM_SPU_ADDR_TRACE_SET(pm_regs.pm_cntrl.spu_addr_trace);
+
/*
* Routine set_count_mode must be called previously to set
* the count mode based on the user selection of user and kernel.
@@ -404,7 +425,7 @@ set_count_mode(u32 kernel, u32 user)
}
}
-static inline void enable_ctr(u32 cpu, u32 ctr, u32 * pm07_cntrl)
+static inline void enable_ctr(u32 cpu, u32 ctr, u32 *pm07_cntrl)
{
pm07_cntrl[ctr] |= CBE_PM_CTR_ENABLE;
@@ -441,7 +462,7 @@ static void cell_virtual_cntr(unsigned long data)
* not both playing with the counters on the same node.
*/
- spin_lock_irqsave(&virt_cntr_lock, flags);
+ spin_lock_irqsave(&cntr_lock, flags);
prev_hdw_thread = hdw_thread;
@@ -480,7 +501,7 @@ static void cell_virtual_cntr(unsigned long data)
cbe_disable_pm_interrupts(cpu);
for (i = 0; i < num_counters; i++) {
per_cpu(pmc_values, cpu + prev_hdw_thread)[i]
- = cbe_read_ctr(cpu, i);
+ = cbe_read_ctr(cpu, i);
if (per_cpu(pmc_values, cpu + next_hdw_thread)[i]
== 0xFFFFFFFF)
@@ -527,7 +548,7 @@ static void cell_virtual_cntr(unsigned long data)
cbe_enable_pm(cpu);
}
- spin_unlock_irqrestore(&virt_cntr_lock, flags);
+ spin_unlock_irqrestore(&cntr_lock, flags);
mod_timer(&timer_virt_cntr, jiffies + HZ / 10);
}
@@ -541,38 +562,146 @@ static void start_virt_cntrs(void)
add_timer(&timer_virt_cntr);
}
-/* This function is called once for all cpus combined */
-static int cell_reg_setup(struct op_counter_config *ctr,
+static int cell_reg_setup_spu_cycles(struct op_counter_config *ctr,
struct op_system_config *sys, int num_ctrs)
{
- int i, j, cpu;
- spu_cycle_reset = 0;
+ spu_cycle_reset = ctr[0].count;
- if (ctr[0].event == SPU_CYCLES_EVENT_NUM) {
- spu_cycle_reset = ctr[0].count;
+ /*
+ * Each node will need to make the rtas call to start
+ * and stop SPU profiling. Get the token once and store it.
+ */
+ spu_rtas_token = rtas_token("ibm,cbe-spu-perftools");
+
+ if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) {
+ printk(KERN_ERR
+ "%s: rtas token ibm,cbe-spu-perftools unknown\n",
+ __func__);
+ return -EIO;
+ }
+ return 0;
+}
+
+/* Unfortunately, the hardware will only support event profiling
+ * on one SPU per node at a time. Therefore, we must time slice
+ * the profiling across all SPUs in the node. Note, we do this
+ * in parallel for each node. The following routine is called
+ * periodically, based on a kernel timer, to switch which SPU is
+ * being monitored in a round robin fashion.
+ */
+static void spu_evnt_swap(unsigned long data)
+{
+ int node;
+ int cur_phys_spu, nxt_phys_spu, cur_spu_evnt_phys_spu_indx;
+ unsigned long flags;
+ int cpu;
+ int ret;
+ u32 interrupt_mask;
+
+
+ /* enable interrupts on cntr 0 */
+ interrupt_mask = CBE_PM_CTR_OVERFLOW_INTR(0);
+
+ hdw_thread = 0;
+
+ /* Make sure spu event interrupt handler and spu event swap
+ * don't access the counters simultaneously.
+ */
+ spin_lock_irqsave(&cntr_lock, flags);
+
+ cur_spu_evnt_phys_spu_indx = spu_evnt_phys_spu_indx;
+
+ if (++(spu_evnt_phys_spu_indx) == NUM_SPUS_PER_NODE)
+ spu_evnt_phys_spu_indx = 0;
+
+ pm_signal[0].sub_unit = spu_evnt_phys_spu_indx;
+ pm_signal[1].sub_unit = spu_evnt_phys_spu_indx;
+ pm_signal[2].sub_unit = spu_evnt_phys_spu_indx;
+
+ /* switch the SPU being profiled on each node */
+ for_each_online_cpu(cpu) {
+ if (cbe_get_hw_thread_id(cpu))
+ continue;
+
+ node = cbe_cpu_to_node(cpu);
+ cur_phys_spu = (node * NUM_SPUS_PER_NODE)
+ + cur_spu_evnt_phys_spu_indx;
+ nxt_phys_spu = (node * NUM_SPUS_PER_NODE)
+ + spu_evnt_phys_spu_indx;
/*
- * Each node will need to make the rtas call to start
- * and stop SPU profiling. Get the token once and store it.
+ * stop counters, save counter values, restore counts
+ * for previous physical SPU
*/
- spu_rtas_token = rtas_token("ibm,cbe-spu-perftools");
+ cbe_disable_pm(cpu);
+ cbe_disable_pm_interrupts(cpu);
- if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) {
- printk(KERN_ERR
- "%s: rtas token ibm,cbe-spu-perftools unknown\n",
- __func__);
- return -EIO;
- }
+ spu_pm_cnt[cur_phys_spu]
+ = cbe_read_ctr(cpu, 0);
+
+ /* restore previous count for the next spu to sample */
+ /* NOTE, hardware issue, counter will not start if the
+ * counter value is at max (0xFFFFFFFF).
+ */
+ if (spu_pm_cnt[nxt_phys_spu] >= 0xFFFFFFFF)
+ cbe_write_ctr(cpu, 0, 0xFFFFFFF0);
+ else
+ cbe_write_ctr(cpu, 0, spu_pm_cnt[nxt_phys_spu]);
+
+ pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
+
+ /* set up the debug bus to measure the one event and
+ * the two events to route the next SPU's PC on
+ * the debug bus
+ */
+ ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu), 3);
+ if (ret)
+ printk(KERN_ERR "%s: pm_rtas_activate_signals failed, "
+ "SPU event swap\n", __func__);
+
+ /* clear the trace buffer, don't want to take PC for
+ * previous SPU*/
+ cbe_write_pm(cpu, trace_address, 0);
+
+ enable_ctr(cpu, 0, pm_regs.pm07_cntrl);
+
+ /* Enable interrupts on the CPU thread that is starting */
+ cbe_enable_pm_interrupts(cpu, hdw_thread,
+ interrupt_mask);
+ cbe_enable_pm(cpu);
}
- pm_rtas_token = rtas_token("ibm,cbe-perftools");
+ spin_unlock_irqrestore(&cntr_lock, flags);
+
+ /* swap which SPU is profiled roughly every 1/25 of a second (HZ / 25) */
+ mod_timer(&timer_spu_event_swap, jiffies + HZ / 25);
+}
+
+static void start_spu_event_swap(void)
+{
+ init_timer(&timer_spu_event_swap);
+ timer_spu_event_swap.function = spu_evnt_swap;
+ timer_spu_event_swap.data = 0UL;
+ timer_spu_event_swap.expires = jiffies + HZ / 25;
+ add_timer(&timer_spu_event_swap);
+}
+
+static int cell_reg_setup_spu_events(struct op_counter_config *ctr,
+ struct op_system_config *sys, int num_ctrs)
+{
+ int i;
+
+ /* routine is called once for all nodes */
+ spu_evnt_phys_spu_indx = 0;
/*
- * For all events excetp PPU CYCLEs, each node will need to make
+ * For all events except PPU CYCLEs, each node will need to make
* the rtas cbe-perftools call to setup and reset the debug bus.
* Make the token lookup call once and store it in the global
* variable pm_rtas_token.
*/
+ pm_rtas_token = rtas_token("ibm,cbe-perftools");
+
if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
printk(KERN_ERR
"%s: rtas token ibm,cbe-perftools unknown\n",
@@ -580,16 +709,67 @@ static int cell_reg_setup(struct op_counter_config *ctr,
return -EIO;
}
- num_counters = num_ctrs;
+ /* setup the pm_control register settings,
+ * settings will be written per node by the
+ * cell_cpu_setup() function.
+ */
+ pm_regs.pm_cntrl.trace_buf_ovflw = 1;
- pm_regs.group_control = 0;
- pm_regs.debug_bus_control = 0;
+ /* Use the occurrence trace mode to have SPU PC saved
+ * to the trace buffer. Occurrence data in trace buffer
+ * is not used. Bit 2 must be set to store SPU addresses.
+ */
+ pm_regs.pm_cntrl.trace_mode = 2;
+
+ pm_regs.pm_cntrl.spu_addr_trace = 0x1; /* using debug bus
+ event 2 & 3 */
+
+ /* setup the debug bus event array with the SPU PC routing events.
+ * Note, pm_signal[0] will be filled in by set_pm_event() call below.
+ */
+ pm_signal[1].signal_group = SPU_PROFILE_EVENT_ADDR / 100;
+ pm_signal[1].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_A);
+ pm_signal[1].bit = SPU_PROFILE_EVENT_ADDR % 100;
+ pm_signal[1].sub_unit = spu_evnt_phys_spu_indx;
+
+ pm_signal[2].signal_group = SPU_PROFILE_EVENT_ADDR / 100;
+ pm_signal[2].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_B);
+ pm_signal[2].bit = SPU_PROFILE_EVENT_ADDR % 100;
+ pm_signal[2].sub_unit = spu_evnt_phys_spu_indx;
+
+ /* Set the user selected spu event to profile on,
+ * note, only one SPU profiling event is supported
+ */
+ num_counters = 1; /* Only support one SPU event at a time */
+ set_pm_event(0, ctr[0].event, ctr[0].unit_mask);
- /* setup the pm_control register */
- memset(&pm_regs.pm_cntrl, 0, sizeof(struct pm_cntrl));
- pm_regs.pm_cntrl.stop_at_max = 1;
- pm_regs.pm_cntrl.trace_mode = 0;
- pm_regs.pm_cntrl.freeze = 1;
+ reset_value[0] = 0xFFFFFFFF - ctr[0].count;
+
+ /* global, used by cell_cpu_setup */
+ ctr_enabled |= 1;
+
+ /* Initialize the count for each SPU to the reset value */
+ for (i=0; i < MAX_NUMNODES * NUM_SPUS_PER_NODE; i++)
+ spu_pm_cnt[i] = reset_value[0];
+
+ return 0;
+}
+
+static int cell_reg_setup_ppu(struct op_counter_config *ctr,
+ struct op_system_config *sys, int num_ctrs)
+{
+ /* routine is called once for all nodes */
+ int i, j, cpu;
+
+ num_counters = num_ctrs;
+
+ if (unlikely(num_ctrs > NR_PHYS_CTRS)) {
+ printk(KERN_ERR
+ "%s: Oprofile, number of specified events " \
+ "exceeds number of physical counters\n",
+ __func__);
+ return -EIO;
+ }
set_count_mode(sys->enable_kernel, sys->enable_user);
@@ -658,6 +838,63 @@ static int cell_reg_setup(struct op_counter_config *ctr,
}
+/* This function is called once for all cpus combined */
+static int cell_reg_setup(struct op_counter_config *ctr,
+ struct op_system_config *sys, int num_ctrs)
+{
+ int ret=0;
+ spu_cycle_reset = 0;
+
+ /* initialize the spu_addr_trace value, will be reset if
+ * doing spu event profiling.
+ */
+ pm_regs.group_control = 0;
+ pm_regs.debug_bus_control = 0;
+ pm_regs.pm_cntrl.stop_at_max = 1;
+ pm_regs.pm_cntrl.trace_mode = 0;
+ pm_regs.pm_cntrl.freeze = 1;
+ pm_regs.pm_cntrl.trace_buf_ovflw = 0;
+ pm_regs.pm_cntrl.spu_addr_trace = 0;
+
+ /*
+ * For all events except PPU CYCLEs, each node will need to make
+ * the rtas cbe-perftools call to setup and reset the debug bus.
+ * Make the token lookup call once and store it in the global
+ * variable pm_rtas_token.
+ */
+ pm_rtas_token = rtas_token("ibm,cbe-perftools");
+
+ if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
+ printk(KERN_ERR
+ "%s: rtas token ibm,cbe-perftools unknown\n",
+ __func__);
+ return -EIO;
+ }
+
+ if (ctr[0].event == SPU_CYCLES_EVENT_NUM) {
+ profiling_mode = SPU_PROFILING_CYCLES;
+ ret = cell_reg_setup_spu_cycles(ctr, sys, num_ctrs);
+ } else if ((ctr[0].event >= SPU_EVENT_NUM_START) &&
+ (ctr[0].event <= SPU_EVENT_NUM_STOP)) {
+ profiling_mode = SPU_PROFILING_EVENTS;
+ spu_cycle_reset = ctr[0].count;
+
+ /* for SPU event profiling, need to setup the
+ * pm_signal array with the events to route the
+ * SPU PC before making the FW call. Note, only
+ * one SPU event for profiling can be specified
+ * at a time.
+ */
+ cell_reg_setup_spu_events(ctr, sys, num_ctrs);
+ } else {
+ profiling_mode = PPU_PROFILING;
+ ret = cell_reg_setup_ppu(ctr, sys, num_ctrs);
+ }
+
+ return ret;
+}
+
+
/* This function is called once for each cpu */
static int cell_cpu_setup(struct op_counter_config *cntr)
@@ -665,8 +902,13 @@ static int cell_cpu_setup(struct op_counter_config *cntr)
u32 cpu = smp_processor_id();
u32 num_enabled = 0;
int i;
+ int ret;
- if (spu_cycle_reset)
+ /* Cycle based SPU profiling does not use the performance
+ * counters. The trace array is configured to collect
+ * the data.
+ */
+ if (profiling_mode == SPU_PROFILING_CYCLES)
return 0;
/* There is one performance monitor per processor chip (i.e. node),
@@ -679,7 +921,6 @@ static int cell_cpu_setup(struct op_counter_config *cntr)
cbe_disable_pm(cpu);
cbe_disable_pm_interrupts(cpu);
- cbe_write_pm(cpu, pm_interval, 0);
cbe_write_pm(cpu, pm_start_stop, 0);
cbe_write_pm(cpu, group_control, pm_regs.group_control);
cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control);
@@ -696,7 +937,20 @@ static int cell_cpu_setup(struct op_counter_config *cntr)
* The pm_rtas_activate_signals will return -EIO if the FW
* call failed.
*/
- return pm_rtas_activate_signals(cbe_cpu_to_node(cpu), num_enabled);
+ if (profiling_mode == SPU_PROFILING_EVENTS) {
+ /* For SPU event profiling also need to setup the
+ * pm interval timer
+ */
+ ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu),
+ num_enabled+2);
+ /* store PC from debug bus to Trace buffer as often
+ * as possible (every 10 cycles)
+ */
+ cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
+ return ret;
+ } else
+ return pm_rtas_activate_signals(cbe_cpu_to_node(cpu),
+ num_enabled);
}
#define ENTRIES 303
@@ -821,7 +1075,7 @@ static int calculate_lfsr(int n)
index = ENTRIES-1;
/* make sure index is valid */
- if ((index > ENTRIES) || (index < 0))
+ if ((index >= ENTRIES) || (index < 0))
index = ENTRIES-1;
return initial_lfsr[index];
@@ -830,13 +1084,13 @@ static int calculate_lfsr(int n)
static int pm_rtas_activate_spu_profiling(u32 node)
{
int ret, i;
- struct pm_signal pm_signal_local[NR_PHYS_CTRS];
+ struct pm_signal pm_signal_local[NUM_SPUS_PER_NODE];
/*
* Set up the rtas call to configure the debug bus to
* route the SPU PCs. Setup the pm_signal for each SPU
*/
- for (i = 0; i < NUM_SPUS_PER_NODE; i++) {
+ for (i = 0; i < ARRAY_SIZE(pm_signal_local); i++) {
pm_signal_local[i].cpu = node;
pm_signal_local[i].signal_group = 41;
/* spu i on word (i/2) */
@@ -848,7 +1102,7 @@ static int pm_rtas_activate_spu_profiling(u32 node)
ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE,
PASSTHRU_ENABLE, pm_signal_local,
- (NUM_SPUS_PER_NODE
+ (ARRAY_SIZE(pm_signal_local)
* sizeof(struct pm_signal)));
if (unlikely(ret)) {
@@ -867,8 +1121,7 @@ oprof_cpufreq_notify(struct notifier_block *nb, unsigned long val, void *data)
int ret = 0;
struct cpufreq_freqs *frq = data;
if ((val == CPUFREQ_PRECHANGE && frq->old < frq->new) ||
- (val == CPUFREQ_POSTCHANGE && frq->old > frq->new) ||
- (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE))
+ (val == CPUFREQ_POSTCHANGE && frq->old > frq->new))
set_spu_profiling_frequency(frq->new, spu_cycle_reset);
return ret;
}
@@ -878,7 +1131,122 @@ static struct notifier_block cpu_freq_notifier_block = {
};
#endif
-static int cell_global_start_spu(struct op_counter_config *ctr)
+/*
+ * Note the generic OProfile stop calls do not support returning
+ * an error on stop. Hence, will not return an error if the FW
+ * calls fail on stop. Failure to reset the debug bus is not an issue.
+ * Failure to disable the SPU profiling is not an issue. The FW calls
+ * to enable the performance counters and debug bus will work even if
+ * the hardware was not cleanly reset.
+ */
+static void cell_global_stop_spu_cycles(void)
+{
+ int subfunc, rtn_value;
+ unsigned int lfsr_value;
+ int cpu;
+
+ oprofile_running = 0;
+ smp_wmb();
+
+#ifdef CONFIG_CPU_FREQ
+ cpufreq_unregister_notifier(&cpu_freq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+#endif
+
+ for_each_online_cpu(cpu) {
+ if (cbe_get_hw_thread_id(cpu))
+ continue;
+
+ subfunc = 3; /*
+ * 2 - activate SPU tracing,
+ * 3 - deactivate
+ */
+ lfsr_value = 0x8f100000;
+
+ rtn_value = rtas_call(spu_rtas_token, 3, 1, NULL,
+ subfunc, cbe_cpu_to_node(cpu),
+ lfsr_value);
+
+ if (unlikely(rtn_value != 0)) {
+ printk(KERN_ERR
+ "%s: rtas call ibm,cbe-spu-perftools " \
+ "failed, return = %d\n",
+ __func__, rtn_value);
+ }
+
+ /* Deactivate the signals */
+ pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
+ }
+
+ stop_spu_profiling_cycles();
+}
+
+static void cell_global_stop_spu_events(void)
+{
+ int cpu;
+ oprofile_running = 0;
+
+ stop_spu_profiling_events();
+ smp_wmb();
+
+ for_each_online_cpu(cpu) {
+ if (cbe_get_hw_thread_id(cpu))
+ continue;
+
+ cbe_sync_irq(cbe_cpu_to_node(cpu));
+ /* Stop the counters */
+ cbe_disable_pm(cpu);
+ cbe_write_pm07_control(cpu, 0, 0);
+
+ /* Deactivate the signals */
+ pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
+
+ /* Deactivate interrupts */
+ cbe_disable_pm_interrupts(cpu);
+ }
+ del_timer_sync(&timer_spu_event_swap);
+}
+
+static void cell_global_stop_ppu(void)
+{
+ int cpu;
+
+ /*
+ * This routine will be called once for the system.
+ * There is one performance monitor per node, so we
+ * only need to perform this function once per node.
+ */
+ del_timer_sync(&timer_virt_cntr);
+ oprofile_running = 0;
+ smp_wmb();
+
+ for_each_online_cpu(cpu) {
+ if (cbe_get_hw_thread_id(cpu))
+ continue;
+
+ cbe_sync_irq(cbe_cpu_to_node(cpu));
+ /* Stop the counters */
+ cbe_disable_pm(cpu);
+
+ /* Deactivate the signals */
+ pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
+
+ /* Deactivate interrupts */
+ cbe_disable_pm_interrupts(cpu);
+ }
+}
+
+static void cell_global_stop(void)
+{
+ if (profiling_mode == PPU_PROFILING)
+ cell_global_stop_ppu();
+ else if (profiling_mode == SPU_PROFILING_EVENTS)
+ cell_global_stop_spu_events();
+ else
+ cell_global_stop_spu_cycles();
+}
+
+static int cell_global_start_spu_cycles(struct op_counter_config *ctr)
{
int subfunc;
unsigned int lfsr_value;
@@ -944,18 +1312,18 @@ static int cell_global_start_spu(struct op_counter_config *ctr)
/* start profiling */
ret = rtas_call(spu_rtas_token, 3, 1, NULL, subfunc,
- cbe_cpu_to_node(cpu), lfsr_value);
+ cbe_cpu_to_node(cpu), lfsr_value);
if (unlikely(ret != 0)) {
printk(KERN_ERR
- "%s: rtas call ibm,cbe-spu-perftools failed, return = %d\n",
- __func__, ret);
+ "%s: rtas call ibm,cbe-spu-perftools failed, " \
+ "return = %d\n", __func__, ret);
rtas_error = -EIO;
goto out;
}
}
- rtas_error = start_spu_profiling(spu_cycle_reset);
+ rtas_error = start_spu_profiling_cycles(spu_cycle_reset);
if (rtas_error)
goto out_stop;
@@ -963,11 +1331,74 @@ static int cell_global_start_spu(struct op_counter_config *ctr)
return 0;
out_stop:
- cell_global_stop_spu(); /* clean up the PMU/debug bus */
+ cell_global_stop_spu_cycles(); /* clean up the PMU/debug bus */
out:
return rtas_error;
}
+static int cell_global_start_spu_events(struct op_counter_config *ctr)
+{
+ int cpu;
+ u32 interrupt_mask = 0;
+ int rtn = 0;
+
+ hdw_thread = 0;
+
+ /* spu event profiling uses the performance counters to generate
+ * an interrupt. The hardware is setup to store the SPU program
+ * counter into the trace array. The occurrence mode is used to
+ * enable storing data to the trace buffer. The bits are set
+ * to send/store the SPU address in the trace buffer. The debug
+ * bus must be setup to route the SPU program counter onto the
+ * debug bus. The occurrence data in the trace buffer is not used.
+ */
+
+ /* This routine gets called once for the system.
+ * There is one performance monitor per node, so we
+ * only need to perform this function once per node.
+ */
+
+ for_each_online_cpu(cpu) {
+ if (cbe_get_hw_thread_id(cpu))
+ continue;
+
+ /*
+ * Setup SPU event-based profiling.
+ * Set perf_mon_control bit 0 to a zero before
+ * enabling spu collection hardware.
+ *
+ * Only support one SPU event on one SPU per node.
+ */
+ if (ctr_enabled & 1) {
+ cbe_write_ctr(cpu, 0, reset_value[0]);
+ enable_ctr(cpu, 0, pm_regs.pm07_cntrl);
+ interrupt_mask |=
+ CBE_PM_CTR_OVERFLOW_INTR(0);
+ } else {
+ /* Disable counter */
+ cbe_write_pm07_control(cpu, 0, 0);
+ }
+
+ cbe_get_and_clear_pm_interrupts(cpu);
+ cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
+ cbe_enable_pm(cpu);
+
+ /* clear the trace buffer */
+ cbe_write_pm(cpu, trace_address, 0);
+ }
+
+ /* Start the timer to time slice collecting the event profile
+ * on each of the SPUs. Note, can collect profile on one SPU
+ * per node at a time.
+ */
+ start_spu_event_swap();
+ start_spu_profiling_events();
+ oprofile_running = 1;
+ smp_wmb();
+
+ return rtn;
+}
+
static int cell_global_start_ppu(struct op_counter_config *ctr)
{
u32 cpu, i;
@@ -987,8 +1418,7 @@ static int cell_global_start_ppu(struct op_counter_config *ctr)
if (ctr_enabled & (1 << i)) {
cbe_write_ctr(cpu, i, reset_value[i]);
enable_ctr(cpu, i, pm_regs.pm07_cntrl);
- interrupt_mask |=
- CBE_PM_CTR_OVERFLOW_INTR(i);
+ interrupt_mask |= CBE_PM_CTR_OVERFLOW_INTR(i);
} else {
/* Disable counter */
cbe_write_pm07_control(cpu, i, 0);
@@ -1017,99 +1447,162 @@ static int cell_global_start_ppu(struct op_counter_config *ctr)
static int cell_global_start(struct op_counter_config *ctr)
{
- if (spu_cycle_reset)
- return cell_global_start_spu(ctr);
+ if (profiling_mode == SPU_PROFILING_CYCLES)
+ return cell_global_start_spu_cycles(ctr);
+ else if (profiling_mode == SPU_PROFILING_EVENTS)
+ return cell_global_start_spu_events(ctr);
else
return cell_global_start_ppu(ctr);
}
-/*
- * Note the generic OProfile stop calls do not support returning
- * an error on stop. Hence, will not return an error if the FW
- * calls fail on stop. Failure to reset the debug bus is not an issue.
- * Failure to disable the SPU profiling is not an issue. The FW calls
- * to enable the performance counters and debug bus will work even if
- * the hardware was not cleanly reset.
+
+/* The SPU interrupt handler
+ *
+ * SPU event profiling works as follows:
+ * The pm_signal[0] holds the one SPU event to be measured. It is routed on
+ * the debug bus using word 0 or 1. The value of pm_signal[1] and
+ * pm_signal[2] contain the necessary events to route the SPU program
+ * counter for the selected SPU onto the debug bus using words 2 and 3.
+ * The pm_interval register is setup to write the SPU PC value into the
+ * trace buffer at the maximum rate possible. The trace buffer is configured
+ * to store the PCs, wrapping when it is full. The performance counter is
+ * initialized to the max hardware count minus the number of events, N, between
+ * samples. Once the N events have occurred, a HW counter overflow occurs
+ * causing the generation of a HW counter interrupt which also stops the
+ * writing of the SPU PC values to the trace buffer. Hence the last PC
+ * written to the trace buffer is the SPU PC that we want. Unfortunately,
+ * we have to read from the beginning of the trace buffer to get to the
+ * last value written. We just hope the PPU has nothing better to do than
+ * service this interrupt. The PC for the specific SPU being profiled is
+ * extracted from the trace buffer, processed and stored. The trace buffer
+ * is cleared, interrupts are cleared, the counter is reset to max - N.
+ * A kernel timer is used to periodically call the routine spu_evnt_swap()
+ * to switch to the next physical SPU in the node to profile in round robin
+ * order. This way data is collected for all SPUs on the node. It does mean
+ * that we need to use a relatively small value of N to ensure enough samples
+ * are collected on each SPU, since each SPU is only profiled 1/8 of the time.
+ * It may also be necessary to use a longer sample collection period.
*/
-static void cell_global_stop_spu(void)
+static void cell_handle_interrupt_spu(struct pt_regs *regs,
+ struct op_counter_config *ctr)
{
- int subfunc, rtn_value;
- unsigned int lfsr_value;
- int cpu;
+ u32 cpu, cpu_tmp;
+ u64 trace_entry;
+ u32 interrupt_mask;
+ u64 trace_buffer[2];
+ u64 last_trace_buffer;
+ u32 sample;
+ u32 trace_addr;
+ unsigned long sample_array_lock_flags;
+ int spu_num;
+ unsigned long flags;
- oprofile_running = 0;
+ /* Make sure spu event interrupt handler and spu event swap
+ * don't access the counters simultaneously.
+ */
+ cpu = smp_processor_id();
+ spin_lock_irqsave(&cntr_lock, flags);
-#ifdef CONFIG_CPU_FREQ
- cpufreq_unregister_notifier(&cpu_freq_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
-#endif
+ cpu_tmp = cpu;
+ cbe_disable_pm(cpu);
- for_each_online_cpu(cpu) {
- if (cbe_get_hw_thread_id(cpu))
- continue;
+ interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);
- subfunc = 3; /*
- * 2 - activate SPU tracing,
- * 3 - deactivate
- */
- lfsr_value = 0x8f100000;
+ sample = 0xABCDEF;
+ trace_entry = 0xfedcba;
+ last_trace_buffer = 0xdeadbeaf;
- rtn_value = rtas_call(spu_rtas_token, 3, 1, NULL,
- subfunc, cbe_cpu_to_node(cpu),
- lfsr_value);
+ if ((oprofile_running == 1) && (interrupt_mask != 0)) {
+ /* disable writes to trace buff */
+ cbe_write_pm(cpu, pm_interval, 0);
- if (unlikely(rtn_value != 0)) {
- printk(KERN_ERR
- "%s: rtas call ibm,cbe-spu-perftools failed, return = %d\n",
- __func__, rtn_value);
+ /* only have one perf cntr being used, cntr 0 */
+ if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(0))
+ && ctr[0].enabled)
+ /* The SPU PC values will be read
+ * from the trace buffer, reset counter
+ */
+
+ cbe_write_ctr(cpu, 0, reset_value[0]);
+
+ trace_addr = cbe_read_pm(cpu, trace_address);
+
+ while (!(trace_addr & CBE_PM_TRACE_BUF_EMPTY)) {
+ /* There is data in the trace buffer to process.
+ * Read the buffer until you get to the last
+ * entry. This is the value we want.
+ */
+
+ cbe_read_trace_buffer(cpu, trace_buffer);
+ trace_addr = cbe_read_pm(cpu, trace_address);
}
- /* Deactivate the signals */
- pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
- }
+ /* SPU Address 16 bit count format for 128 bit
+ * HW trace buffer is used for the SPU PC storage
+ * HDR bits 0:15
+ * SPU Addr 0 bits 16:31
+ * SPU Addr 1 bits 32:47
+ * unused bits 48:127
+ *
+ * HDR: bit4 = 1 SPU Address 0 valid
+ * HDR: bit5 = 1 SPU Address 1 valid
+ * - unfortunately, the valid bits don't seem to work
+ *
+ * Note trace_buffer[0] holds bits 0:63 of the HW
+ * trace buffer, trace_buffer[1] holds bits 64:127
+ */
- stop_spu_profiling();
-}
+ trace_entry = trace_buffer[0]
+ & 0x00000000FFFF0000;
-static void cell_global_stop_ppu(void)
-{
- int cpu;
+ /* only the top 16 bits of the 18-bit SPU PC address
+ * are stored in the trace buffer, hence shift right
+ * by 16 - 2 = 14 bits to recover the byte address */
+ sample = trace_entry >> 14;
+ last_trace_buffer = trace_buffer[0];
- /*
- * This routine will be called once for the system.
- * There is one performance monitor per node, so we
- * only need to perform this function once per node.
- */
- del_timer_sync(&timer_virt_cntr);
- oprofile_running = 0;
- smp_wmb();
+ spu_num = spu_evnt_phys_spu_indx
+ + (cbe_cpu_to_node(cpu) * NUM_SPUS_PER_NODE);
- for_each_online_cpu(cpu) {
- if (cbe_get_hw_thread_id(cpu))
- continue;
+ /* make sure only one process at a time is calling
+ * spu_sync_buffer()
+ */
+ spin_lock_irqsave(&oprof_spu_smpl_arry_lck,
+ sample_array_lock_flags);
+ spu_sync_buffer(spu_num, &sample, 1);
+ spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
+ sample_array_lock_flags);
- cbe_sync_irq(cbe_cpu_to_node(cpu));
- /* Stop the counters */
- cbe_disable_pm(cpu);
+ smp_wmb(); /* insure spu event buffer updates are written
+ * don't want events intermingled... */
- /* Deactivate the signals */
- pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
+ /* The counters were frozen by the interrupt.
+ * Reenable the interrupt and restart the counters.
+ */
+ cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
+ cbe_enable_pm_interrupts(cpu, hdw_thread,
+ virt_cntr_inter_mask);
- /* Deactivate interrupts */
- cbe_disable_pm_interrupts(cpu);
- }
-}
+ /* clear the trace buffer, re-enable writes to trace buff */
+ cbe_write_pm(cpu, trace_address, 0);
+ cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
-static void cell_global_stop(void)
-{
- if (spu_cycle_reset)
- cell_global_stop_spu();
- else
- cell_global_stop_ppu();
+ /* The writes to the various performance counters only writes
+ * to a latch. The new values (interrupt setting bits, reset
+ * counter value etc.) are not copied to the actual registers
+ * until the performance monitor is enabled. In order to get
+ * this to work as desired, the performance monitor needs to
+ * be disabled while writing to the latches. This is a
+ * HW design issue.
+ */
+ write_pm_cntrl(cpu);
+ cbe_enable_pm(cpu);
+ }
+ spin_unlock_irqrestore(&cntr_lock, flags);
}
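
The sample extraction above can be exercised outside the kernel with plain bit arithmetic. The following is a minimal user-space sketch, not part of the patch: the mask and the 16 - 2 shift are taken from cell_handle_interrupt_spu() above, while the example trace word and the helper name spu_pc_sample() are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Mirror the extraction done in cell_handle_interrupt_spu(): keep the 16 PC
 * bits the handler masks out of the first 64-bit trace word, then shift
 * right by 16 - 2 so the result is the 18-bit, 4-byte-aligned SPU PC sample.
 */
static uint64_t spu_pc_sample(uint64_t trace_word0)
{
	uint64_t trace_entry = trace_word0 & 0x00000000FFFF0000ULL;

	return trace_entry >> 14;
}

int main(void)
{
	uint64_t word0 = 0x0000000012340000ULL;	/* stored PC bits = 0x1234 */

	/* prints "sample = 0x48d0", i.e. 0x1234 shifted left by 2 */
	printf("sample = 0x%llx\n", (unsigned long long)spu_pc_sample(word0));
	return 0;
}
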
-static void cell_handle_interrupt(struct pt_regs *regs,
- struct op_counter_config *ctr)
+static void cell_handle_interrupt_ppu(struct pt_regs *regs,
+ struct op_counter_config *ctr)
{
u32 cpu;
u64 pc;
@@ -1125,7 +1618,7 @@ static void cell_handle_interrupt(struct pt_regs *regs,
* routine are not running at the same time. See the
* cell_virtual_cntr() routine for additional comments.
*/
- spin_lock_irqsave(&virt_cntr_lock, flags);
+ spin_lock_irqsave(&cntr_lock, flags);
/*
* Need to disable and reenable the performance counters
@@ -1160,7 +1653,7 @@ static void cell_handle_interrupt(struct pt_regs *regs,
* The counters were frozen by the interrupt.
* Reenable the interrupt and restart the counters.
* If there was a race between the interrupt handler and
- * the virtual counter routine. The virutal counter
+ * the virtual counter routine. The virtual counter
* routine may have cleared the interrupts. Hence must
* use the virt_cntr_inter_mask to re-enable the interrupts.
*/
@@ -1172,13 +1665,22 @@ static void cell_handle_interrupt(struct pt_regs *regs,
* to a latch. The new values (interrupt setting bits, reset
* counter value etc.) are not copied to the actual registers
* until the performance monitor is enabled. In order to get
- * this to work as desired, the permormance monitor needs to
+ * this to work as desired, the performance monitor needs to
* be disabled while writing to the latches. This is a
* HW design issue.
*/
cbe_enable_pm(cpu);
}
- spin_unlock_irqrestore(&virt_cntr_lock, flags);
+ spin_unlock_irqrestore(&cntr_lock, flags);
+}
+
+static void cell_handle_interrupt(struct pt_regs *regs,
+ struct op_counter_config *ctr)
+{
+ if (profiling_mode == PPU_PROFILING)
+ cell_handle_interrupt_ppu(regs, ctr);
+ else
+ cell_handle_interrupt_spu(regs, ctr);
}
/*
@@ -1188,7 +1690,8 @@ static void cell_handle_interrupt(struct pt_regs *regs,
*/
static int cell_sync_start(void)
{
- if (spu_cycle_reset)
+ if ((profiling_mode == SPU_PROFILING_CYCLES) ||
+ (profiling_mode == SPU_PROFILING_EVENTS))
return spu_sync_start();
else
return DO_GENERIC_SYNC;
@@ -1196,7 +1699,8 @@ static int cell_sync_start(void)
static int cell_sync_stop(void)
{
- if (spu_cycle_reset)
+ if ((profiling_mode == SPU_PROFILING_CYCLES) ||
+ (profiling_mode == SPU_PROFILING_EVENTS))
return spu_sync_stop();
else
return 1;
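
For orientation, the start/stop, interrupt and sync paths above all key off the new profiling_mode selector instead of the old spu_cycle_reset test. Below is a minimal user-space sketch of that dispatch pattern, assuming nothing beyond what the hunks above show; the *_MODEL enum names and sync_path() are illustrative stand-ins, not the kernel's identifiers.

#include <stdio.h>

/* Stand-ins for the profiling_mode values defined earlier in op_model_cell.c;
 * the names and numbering here are illustrative only. */
enum profiling_mode_model {
	PPU_PROFILING_MODEL,
	SPU_PROFILING_CYCLES_MODEL,
	SPU_PROFILING_EVENTS_MODEL,
};

/* Mirrors the test in cell_sync_start()/cell_sync_stop(): both SPU modes
 * take the SPU sync path, PPU profiling falls back to the generic sync. */
static const char *sync_path(enum profiling_mode_model mode)
{
	if (mode == SPU_PROFILING_CYCLES_MODEL ||
	    mode == SPU_PROFILING_EVENTS_MODEL)
		return "spu_sync_start()/spu_sync_stop()";
	return "generic sync";
}

int main(void)
{
	printf("%s\n", sync_path(PPU_PROFILING_MODEL));
	printf("%s\n", sync_path(SPU_PROFILING_EVENTS_MODEL));
	return 0;
}
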
diff --git a/arch/powerpc/oprofile/op_model_fsl_emb.c b/arch/powerpc/oprofile/op_model_fsl_emb.c
index 91596f6ba1f..14cf86fddda 100644
--- a/arch/powerpc/oprofile/op_model_fsl_emb.c
+++ b/arch/powerpc/oprofile/op_model_fsl_emb.c
@@ -2,7 +2,7 @@
* Freescale Embedded oprofile support, based on ppc64 oprofile support
* Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
*
- * Copyright (c) 2004 Freescale Semiconductor, Inc
+ * Copyright (c) 2004, 2010 Freescale Semiconductor, Inc
*
* Author: Andy Fleming
* Maintainer: Kumar Gala <galak@kernel.crashing.org>
@@ -14,10 +14,8 @@
*/
#include <linux/oprofile.h>
-#include <linux/init.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
-#include <asm/system.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/reg_fsl_emb.h>
@@ -47,6 +45,12 @@ static inline u32 get_pmlca(int ctr)
case 3:
pmlca = mfpmr(PMRN_PMLCA3);
break;
+ case 4:
+ pmlca = mfpmr(PMRN_PMLCA4);
+ break;
+ case 5:
+ pmlca = mfpmr(PMRN_PMLCA5);
+ break;
default:
panic("Bad ctr number\n");
}
@@ -69,6 +73,12 @@ static inline void set_pmlca(int ctr, u32 pmlca)
case 3:
mtpmr(PMRN_PMLCA3, pmlca);
break;
+ case 4:
+ mtpmr(PMRN_PMLCA4, pmlca);
+ break;
+ case 5:
+ mtpmr(PMRN_PMLCA5, pmlca);
+ break;
default:
panic("Bad ctr number\n");
}
@@ -85,6 +95,10 @@ static inline unsigned int ctr_read(unsigned int i)
return mfpmr(PMRN_PMC2);
case 3:
return mfpmr(PMRN_PMC3);
+ case 4:
+ return mfpmr(PMRN_PMC4);
+ case 5:
+ return mfpmr(PMRN_PMC5);
default:
return 0;
}
@@ -105,6 +119,12 @@ static inline void ctr_write(unsigned int i, unsigned int val)
case 3:
mtpmr(PMRN_PMC3, val);
break;
+ case 4:
+ mtpmr(PMRN_PMC4, val);
+ break;
+ case 5:
+ mtpmr(PMRN_PMC5, val);
+ break;
default:
break;
}
@@ -134,6 +154,14 @@ static void init_pmc_stop(int ctr)
mtpmr(PMRN_PMLCA3, pmlca);
mtpmr(PMRN_PMLCB3, pmlcb);
break;
+ case 4:
+ mtpmr(PMRN_PMLCA4, pmlca);
+ mtpmr(PMRN_PMLCB4, pmlcb);
+ break;
+ case 5:
+ mtpmr(PMRN_PMLCA5, pmlca);
+ mtpmr(PMRN_PMLCB5, pmlcb);
+ break;
default:
panic("Bad ctr number!\n");
}
@@ -228,20 +256,6 @@ static void pmc_stop_ctrs(void)
mtpmr(PMRN_PMGC0, pmgc0);
}
-static void dump_pmcs(void)
-{
- printk("pmgc0: %x\n", mfpmr(PMRN_PMGC0));
- printk("pmc\t\tpmlca\t\tpmlcb\n");
- printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC0),
- mfpmr(PMRN_PMLCA0), mfpmr(PMRN_PMLCB0));
- printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC1),
- mfpmr(PMRN_PMLCA1), mfpmr(PMRN_PMLCB1));
- printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC2),
- mfpmr(PMRN_PMLCA2), mfpmr(PMRN_PMLCB2));
- printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC3),
- mfpmr(PMRN_PMLCA3), mfpmr(PMRN_PMLCB3));
-}
-
static int fsl_emb_cpu_setup(struct op_counter_config *ctr)
{
int i;
@@ -335,9 +349,6 @@ static void fsl_emb_handle_interrupt(struct pt_regs *regs,
int val;
int i;
- /* set the PMM bit (see comment below) */
- mtmsr(mfmsr() | MSR_PMM);
-
pc = regs->nip;
is_kernel = is_kernel_addr(pc);
@@ -354,9 +365,13 @@ static void fsl_emb_handle_interrupt(struct pt_regs *regs,
}
/* The freeze bit was set by the interrupt. */
- /* Clear the freeze bit, and reenable the interrupt.
- * The counters won't actually start until the rfi clears
- * the PMM bit */
+ /* Clear the freeze bit, and reenable the interrupt. The
+ * counters won't actually start until the rfi clears the PMM
+ * bit. The PMM bit should not be set until after the interrupt
+ * is cleared to avoid it getting lost in some hypervisor
+ * environments.
+ */
+ mtmsr(mfmsr() | MSR_PMM);
pmc_start_ctrs(1);
}
diff --git a/arch/powerpc/oprofile/op_model_pa6t.c b/arch/powerpc/oprofile/op_model_pa6t.c
index c40de461fd4..a114a7c22d4 100644
--- a/arch/powerpc/oprofile/op_model_pa6t.c
+++ b/arch/powerpc/oprofile/op_model_pa6t.c
@@ -22,7 +22,6 @@
*/
#include <linux/oprofile.h>
-#include <linux/init.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/processor.h>
@@ -132,7 +131,7 @@ static int pa6t_reg_setup(struct op_counter_config *ctr,
for (pmc = 0; pmc < cur_cpu_spec->num_pmcs; pmc++) {
/* counters are 40 bit. Move to cputable at some point? */
reset_value[pmc] = (0x1UL << 39) - ctr[pmc].count;
- pr_debug("reset_value for pmc%u inited to 0x%lx\n",
+ pr_debug("reset_value for pmc%u inited to 0x%llx\n",
pmc, reset_value[pmc]);
}
@@ -177,7 +176,7 @@ static int pa6t_start(struct op_counter_config *ctr)
oprofile_running = 1;
- pr_debug("start on cpu %d, mmcr0 %lx\n", smp_processor_id(), mmcr0);
+ pr_debug("start on cpu %d, mmcr0 %llx\n", smp_processor_id(), mmcr0);
return 0;
}
@@ -193,7 +192,7 @@ static void pa6t_stop(void)
oprofile_running = 0;
- pr_debug("stop on cpu %d, mmcr0 %lx\n", smp_processor_id(), mmcr0);
+ pr_debug("stop on cpu %d, mmcr0 %llx\n", smp_processor_id(), mmcr0);
}
/* handle the perfmon overflow vector */
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index 446a8bbb847..962fe7b3e3f 100644
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -10,11 +10,9 @@
*/
#include <linux/oprofile.h>
-#include <linux/init.h>
#include <linux/smp.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
-#include <asm/system.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/rtas.h>
@@ -22,15 +20,78 @@
#include <asm/reg.h>
#define dbg(args...)
+#define OPROFILE_PM_PMCSEL_MSK 0xffULL
+#define OPROFILE_PM_UNIT_SHIFT 60
+#define OPROFILE_PM_UNIT_MSK 0xfULL
+#define OPROFILE_MAX_PMC_NUM 3
+#define OPROFILE_PMSEL_FIELD_WIDTH 8
+#define OPROFILE_UNIT_FIELD_WIDTH 4
+#define MMCRA_SIAR_VALID_MASK 0x10000000ULL
static unsigned long reset_value[OP_MAX_COUNTER];
static int oprofile_running;
+static int use_slot_nums;
/* mmcr values are set in power4_reg_setup, used in power4_cpu_setup */
static u32 mmcr0_val;
static u64 mmcr1_val;
static u64 mmcra_val;
+static u32 cntr_marked_events;
+
+static int power7_marked_instr_event(u64 mmcr1)
+{
+ u64 psel, unit;
+ int pmc, cntr_marked_events = 0;
+
+ /* Given the MMCR1 value, look at the field for each counter to
+ * determine if it is a marked event. Code based on the function
+ * power7_marked_instr_event() in file arch/powerpc/perf/power7-pmu.c.
+ */
+ for (pmc = 0; pmc < 4; pmc++) {
+ psel = mmcr1 & (OPROFILE_PM_PMCSEL_MSK
+ << (OPROFILE_MAX_PMC_NUM - pmc)
+ * OPROFILE_PMSEL_FIELD_WIDTH);
+ psel = (psel >> ((OPROFILE_MAX_PMC_NUM - pmc)
+ * OPROFILE_PMSEL_FIELD_WIDTH)) & ~1ULL;
+ unit = mmcr1 & (OPROFILE_PM_UNIT_MSK
+ << (OPROFILE_PM_UNIT_SHIFT
+ - (pmc * OPROFILE_PMSEL_FIELD_WIDTH )));
+ unit = unit >> (OPROFILE_PM_UNIT_SHIFT
+ - (pmc * OPROFILE_PMSEL_FIELD_WIDTH));
+
+ switch (psel >> 4) {
+ case 2:
+ cntr_marked_events |= (pmc == 1 || pmc == 3) << pmc;
+ break;
+ case 3:
+ if (psel == 0x3c) {
+ cntr_marked_events |= (pmc == 0) << pmc;
+ break;
+ }
+
+ if (psel == 0x3e) {
+ cntr_marked_events |= (pmc != 1) << pmc;
+ break;
+ }
+
+ cntr_marked_events |= 1 << pmc;
+ break;
+ case 4:
+ case 5:
+ cntr_marked_events |= (unit == 0xd) << pmc;
+ break;
+ case 6:
+ if (psel == 0x64)
+ cntr_marked_events |= (pmc >= 2) << pmc;
+ break;
+ case 8:
+ cntr_marked_events |= (unit == 0xd) << pmc;
+ break;
+ }
+ }
+ return cntr_marked_events;
+}
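
The field extraction in power7_marked_instr_event() can be checked in user space. This is a minimal sketch using the same constants as the patch; the MMCR1 value is fabricated for illustration, and the mask-then-shift in the kernel code has been folded into an equivalent shift-then-mask.

#include <stdint.h>
#include <stdio.h>

#define OPROFILE_PM_PMCSEL_MSK		0xffULL
#define OPROFILE_PM_UNIT_SHIFT		60
#define OPROFILE_PM_UNIT_MSK		0xfULL
#define OPROFILE_MAX_PMC_NUM		3
#define OPROFILE_PMSEL_FIELD_WIDTH	8

int main(void)
{
	/* fabricated MMCR1: PMCSEL = 0x2e and unit = 0xd for pmc 0 */
	uint64_t mmcr1 = (0x2eULL << (OPROFILE_MAX_PMC_NUM *
				      OPROFILE_PMSEL_FIELD_WIDTH))
			 | (0xdULL << OPROFILE_PM_UNIT_SHIFT);
	int pmc = 0;

	/* shift-then-mask form of the mask-then-shift in the patch; the
	 * & ~1ULL drops the low PMCSEL bit just as the patch does */
	uint64_t psel = (mmcr1 >> ((OPROFILE_MAX_PMC_NUM - pmc)
				   * OPROFILE_PMSEL_FIELD_WIDTH))
			& OPROFILE_PM_PMCSEL_MSK & ~1ULL;
	uint64_t unit = (mmcr1 >> (OPROFILE_PM_UNIT_SHIFT
				   - pmc * OPROFILE_PMSEL_FIELD_WIDTH))
			& OPROFILE_PM_UNIT_MSK;

	/* prints "pmc0: psel group 2, unit 0xd"; per the switch above, group
	 * 2 marks only pmc 1 and pmc 3, so pmc 0 would stay unmarked here */
	printf("pmc%d: psel group %llu, unit 0x%llx\n", pmc,
	       (unsigned long long)(psel >> 4), (unsigned long long)unit);
	return 0;
}
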
static int power4_reg_setup(struct op_counter_config *ctr,
struct op_system_config *sys,
@@ -47,6 +108,23 @@ static int power4_reg_setup(struct op_counter_config *ctr,
mmcr1_val = sys->mmcr1;
mmcra_val = sys->mmcra;
+ /* Power 7+ and newer architectures:
+ * Determine which counter events in the group (the group of events is
+ * specified by the bit settings in the MMCR1 register) are marked
+ * events for use in the interrupt handler. Do the calculation once
+ * before OProfile starts. Information is used in the interrupt
+ * handler. Starting with Power 7+ we only record the sample for
+ * marked events if the SIAR valid bit is set. For non-marked events
+ * the sample is always recorded.
+ */
+ if (pvr_version_is(PVR_POWER7p))
+ cntr_marked_events = power7_marked_instr_event(mmcr1_val);
+ else
+ cntr_marked_events = 0; /* For older processors, set the bit map
+ * to zero so the sample will always be
+ * recorded.
+ */
+
for (i = 0; i < cur_cpu_spec->num_pmcs; ++i)
reset_value[i] = 0x80000000UL - ctr[i].count;
@@ -61,10 +139,16 @@ static int power4_reg_setup(struct op_counter_config *ctr,
else
mmcr0_val |= MMCR0_PROBLEM_DISABLE;
+ if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p) ||
+ pvr_version_is(PVR_970) || pvr_version_is(PVR_970FX) ||
+ pvr_version_is(PVR_970MP) || pvr_version_is(PVR_970GX) ||
+ pvr_version_is(PVR_POWER5) || pvr_version_is(PVR_POWER5p))
+ use_slot_nums = 1;
+
return 0;
}
-extern void ppc64_enable_pmcs(void);
+extern void ppc_enable_pmcs(void);
/*
* Older CPUs require the MMCRA sample bit to be always set, but newer
@@ -78,9 +162,9 @@ extern void ppc64_enable_pmcs(void);
*/
static inline int mmcra_must_set_sample(void)
{
- if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p) ||
- __is_processor(PV_970) || __is_processor(PV_970FX) ||
- __is_processor(PV_970MP) || __is_processor(PV_970GX))
+ if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p) ||
+ pvr_version_is(PVR_970) || pvr_version_is(PVR_970FX) ||
+ pvr_version_is(PVR_970MP) || pvr_version_is(PVR_970GX))
return 1;
return 0;
@@ -91,7 +175,7 @@ static int power4_cpu_setup(struct op_counter_config *ctr)
unsigned int mmcr0 = mmcr0_val;
unsigned long mmcra = mmcra_val;
- ppc64_enable_pmcs();
+ ppc_enable_pmcs();
/* set the freeze bit */
mmcr0 |= MMCR0_FC;
@@ -200,13 +284,13 @@ static unsigned long get_pc(struct pt_regs *regs)
unsigned long mmcra;
unsigned long slot;
- /* Cant do much about it */
+ /* Can't do much about it */
if (!cur_cpu_spec->oprofile_mmcra_sihv)
return pc;
mmcra = mfspr(SPRN_MMCRA);
- if (mmcra & MMCRA_SAMPLE_ENABLE) {
+ if (use_slot_nums && (mmcra & MMCRA_SAMPLE_ENABLE)) {
slot = ((mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT);
if (slot > 1)
pc += 4 * (slot - 1);
@@ -254,6 +338,28 @@ static int get_kernel(unsigned long pc, unsigned long mmcra)
return is_kernel;
}
+static bool pmc_overflow(unsigned long val)
+{
+ if ((int)val < 0)
+ return true;
+
+ /*
+ * Events on POWER7 can roll back if a speculative event doesn't
+ * eventually complete. Unfortunately in some rare cases they will
+ * raise a performance monitor exception. We need to catch this to
+ * ensure we reset the PMC. In all cases the PMC will be 256 or less
+ * cycles from overflow.
+ *
+ * We only do this if the first pass fails to find any overflowing
+ * PMCs because a user might set a period of less than 256 and we
+ * don't want to mistakenly reset them.
+ */
+ if (pvr_version_is(PVR_POWER7) && ((0x80000000 - val) <= 256))
+ return true;
+
+ return false;
+}
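
The 256-cycle roll-back window described in the comment is easy to sanity-check. Below is a minimal user-space model of the same arithmetic; it applies the window unconditionally, whereas the patch only does so for POWER7 (pvr_version_is(PVR_POWER7)), and the example counter values are invented.

#include <stdbool.h>
#include <stdio.h>

/* User-space model of pmc_overflow(); the real code only applies the 256
 * cycle roll-back window on POWER7, here it is applied unconditionally to
 * keep the sketch self-contained. */
static bool pmc_overflow_model(unsigned long val)
{
	if ((int)val < 0)
		return true;

	if ((0x80000000UL - val) <= 256)
		return true;

	return false;
}

int main(void)
{
	/* 0x80000123: already past 0x80000000, the (int) cast is negative
	 * 0x7fffff80: rolled back to 128 cycles short of overflow, in window
	 * 0x7ffff000: 4096 cycles short of overflow, outside the window */
	unsigned long samples[] = { 0x80000123UL, 0x7fffff80UL, 0x7ffff000UL };
	unsigned int i;

	for (i = 0; i < 3; i++)		/* prints 1, 1, 0 */
		printf("0x%08lx -> %d\n", samples[i],
		       pmc_overflow_model(samples[i]));
	return 0;
}
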
+
static void power4_handle_interrupt(struct pt_regs *regs,
struct op_counter_config *ctr)
{
@@ -263,6 +369,7 @@ static void power4_handle_interrupt(struct pt_regs *regs,
int i;
unsigned int mmcr0;
unsigned long mmcra;
+ bool siar_valid = false;
mmcra = mfspr(SPRN_MMCRA);
@@ -272,11 +379,29 @@ static void power4_handle_interrupt(struct pt_regs *regs,
/* set the PMM bit (see comment below) */
mtmsrd(mfmsr() | MSR_PMM);
+ /* Check that the SIAR valid bit in MMCRA is set to 1. */
+ if ((mmcra & MMCRA_SIAR_VALID_MASK) == MMCRA_SIAR_VALID_MASK)
+ siar_valid = true;
+
for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
val = classic_ctr_read(i);
- if (val < 0) {
+ if (pmc_overflow(val)) {
if (oprofile_running && ctr[i].enabled) {
- oprofile_add_ext_sample(pc, regs, i, is_kernel);
+ /* Power 7+ and newer architectures:
+ * If the event is a marked event, then only
+ * save the sample if the SIAR valid bit is
+ * set. If the event is not marked, then
+ * always save the sample.
+ * Note, the Sample enable bit in the MMCRA
+ * register must be set to 1 if the group
+ * contains a marked event.
+ */
+ if ((siar_valid &&
+ (cntr_marked_events & (1 << i)))
+ || !(cntr_marked_events & (1 << i)))
+ oprofile_add_ext_sample(pc, regs, i,
+ is_kernel);
+
classic_ctr_write(i, reset_value[i]);
} else {
classic_ctr_write(i, 0);
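
A small aside on the save-sample test added in this hunk: (siar_valid && marked) || !marked simplifies to siar_valid || !marked. The patch keeps the longer form, which reads closer to the comment above it; the sketch below merely verifies the equivalence over all four input combinations.

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	int siar_valid, marked;

	/* exhaustively check that the patch's save-sample condition,
	 * (siar_valid && marked) || !marked, equals siar_valid || !marked */
	for (siar_valid = 0; siar_valid <= 1; siar_valid++)
		for (marked = 0; marked <= 1; marked++) {
			bool patch_form = (siar_valid && marked) || !marked;
			bool short_form = siar_valid || !marked;

			printf("siar_valid=%d marked=%d -> %d %d\n",
			       siar_valid, marked, patch_form, short_form);
		}
	return 0;
}
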
diff --git a/arch/powerpc/oprofile/op_model_rs64.c b/arch/powerpc/oprofile/op_model_rs64.c
index a20afe45d93..7e5b8ed3a1b 100644
--- a/arch/powerpc/oprofile/op_model_rs64.c
+++ b/arch/powerpc/oprofile/op_model_rs64.c
@@ -8,10 +8,8 @@
*/
#include <linux/oprofile.h>
-#include <linux/init.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
-#include <asm/system.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/oprofile_impl.h>