Diffstat (limited to 'arch/x86/oprofile/nmi_int.c')
-rw-r--r--  arch/x86/oprofile/nmi_int.c  120
1 file changed, 67 insertions(+), 53 deletions(-)
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index cf9750004a0..379e8bd0dee 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -55,32 +55,21 @@ u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
val |= counter_config->extra;
event &= model->event_mask ? model->event_mask : 0xFF;
val |= event & 0xFF;
- val |= (event & 0x0F00) << 24;
+ val |= (u64)(event & 0x0F00) << 24;
return val;
}
-static int profile_exceptions_notify(struct notifier_block *self,
- unsigned long val, void *data)
+static int profile_exceptions_notify(unsigned int val, struct pt_regs *regs)
{
- struct die_args *args = (struct die_args *)data;
- int ret = NOTIFY_DONE;
-
- switch (val) {
- case DIE_NMI:
- if (ctr_running)
- model->check_ctrs(args->regs, &__get_cpu_var(cpu_msrs));
- else if (!nmi_enabled)
- break;
- else
- model->stop(&__get_cpu_var(cpu_msrs));
- ret = NOTIFY_STOP;
- break;
- default:
- break;
- }
- return ret;
+ if (ctr_running)
+ model->check_ctrs(regs, &__get_cpu_var(cpu_msrs));
+ else if (!nmi_enabled)
+ return NMI_DONE;
+ else
+ model->stop(&__get_cpu_var(cpu_msrs));
+ return NMI_HANDLED;
}
static void nmi_cpu_save_registers(struct op_msrs *msrs)
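The (u64) cast in the hunk above matters because (event & 0x0F00) is evaluated in 32-bit arithmetic, so shifting it left by 24 pushes the high event-select nibble past bit 31 and it never reaches bits 32-35 of the MSR value. A minimal user-space sketch of the bug, with stdint types standing in for the kernel's u64 and an illustrative event value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	unsigned int event = 0x0F76;	/* event select with bits 8-11 set */

    	/* Buggy: the shift happens in 32-bit unsigned arithmetic, so the
    	 * nibble wraps off the top and the result is 0. */
    	uint64_t bad = (event & 0x0F00) << 24;

    	/* Fixed, as in the patch: widen to 64 bits before shifting. */
    	uint64_t good = (uint64_t)(event & 0x0F00) << 24;

    	printf("bad=%#llx good=%#llx\n",
    	       (unsigned long long)bad, (unsigned long long)good);
    	return 0;
    }

This prints bad=0 and good=0xf00000000, which is why the counter control value silently lost its upper event bits before the fix.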
@@ -112,8 +101,10 @@ static void nmi_cpu_start(void *dummy)
static int nmi_start(void)
{
get_online_cpus();
- on_each_cpu(nmi_cpu_start, NULL, 1);
ctr_running = 1;
+ /* make ctr_running visible to the nmi handler: */
+ smp_mb();
+ on_each_cpu(nmi_cpu_start, NULL, 1);
put_online_cpus();
return 0;
}
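Reordering nmi_start() so that ctr_running is set, and published with smp_mb(), before the on_each_cpu() IPI guarantees that an NMI arriving while the counters are being started already observes the flag. A user-space analogue of this publish pattern, assuming C11 atomics stand in for the kernel's smp_mb():

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int ctr_running;	/* analogue of the flag in the patch */

    /* nmi_start() side: publish the flag before kicking the other CPUs. */
    static void start_side(void)
    {
    	atomic_store_explicit(&ctr_running, 1, memory_order_relaxed);
    	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() analogue */
    	/* ... on_each_cpu(nmi_cpu_start, NULL, 1) would run here ... */
    }

    /* NMI-handler side: any handler firing after the fence sees the flag. */
    static bool handler_side(void)
    {
    	return atomic_load_explicit(&ctr_running, memory_order_relaxed) != 0;
    }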
@@ -353,20 +344,14 @@ static void nmi_cpu_setup(void *dummy)
int cpu = smp_processor_id();
struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
nmi_cpu_save_registers(msrs);
- spin_lock(&oprofilefs_lock);
+ raw_spin_lock(&oprofilefs_lock);
model->setup_ctrs(model, msrs);
nmi_cpu_setup_mux(cpu, msrs);
- spin_unlock(&oprofilefs_lock);
+ raw_spin_unlock(&oprofilefs_lock);
per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
apic_write(APIC_LVTPC, APIC_DM_NMI);
}
-static struct notifier_block profile_exceptions_nb = {
- .notifier_call = profile_exceptions_notify,
- .next = NULL,
- .priority = NMI_LOCAL_LOW_PRIOR,
-};
-
static void nmi_cpu_restore_registers(struct op_msrs *msrs)
{
struct op_msr *counters = msrs->counters;
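The spin_lock() to raw_spin_lock() switch in nmi_cpu_setup() is needed because the function runs in atomic (IPI) context, and on PREEMPT_RT a plain spinlock_t becomes a sleeping lock, which is forbidden there; a raw_spinlock_t always spins. A minimal sketch of the pattern, with an illustrative lock name rather than the file's oprofilefs_lock:

    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(setup_lock);	/* illustrative lock */

    static void atomic_context_section(void)
    {
    	/* raw_spinlock_t never turns into a sleeping lock on PREEMPT_RT,
    	 * so it is safe in IPI/NMI-adjacent paths like nmi_cpu_setup(). */
    	raw_spin_lock(&setup_lock);
    	/* ... configure per-CPU counter state ... */
    	raw_spin_unlock(&setup_lock);
    }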
@@ -400,8 +385,6 @@ static void nmi_cpu_shutdown(void *dummy)
apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
apic_write(APIC_LVTERR, v);
nmi_cpu_restore_registers(msrs);
- if (model->cpu_down)
- model->cpu_down();
}
static void nmi_cpu_up(void *dummy)
@@ -420,7 +403,7 @@ static void nmi_cpu_down(void *dummy)
nmi_cpu_shutdown(dummy);
}
-static int nmi_create_files(struct super_block *sb, struct dentry *root)
+static int nmi_create_files(struct dentry *root)
{
unsigned int i;
@@ -437,14 +420,14 @@ static int nmi_create_files(struct super_block *sb, struct dentry *root)
continue;
snprintf(buf, sizeof(buf), "%d", i);
- dir = oprofilefs_mkdir(sb, root, buf);
- oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
- oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
- oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
- oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
- oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
- oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
- oprofilefs_create_ulong(sb, dir, "extra", &counter_config[i].extra);
+ dir = oprofilefs_mkdir(root, buf);
+ oprofilefs_create_ulong(dir, "enabled", &counter_config[i].enabled);
+ oprofilefs_create_ulong(dir, "event", &counter_config[i].event);
+ oprofilefs_create_ulong(dir, "count", &counter_config[i].count);
+ oprofilefs_create_ulong(dir, "unit_mask", &counter_config[i].unit_mask);
+ oprofilefs_create_ulong(dir, "kernel", &counter_config[i].kernel);
+ oprofilefs_create_ulong(dir, "user", &counter_config[i].user);
+ oprofilefs_create_ulong(dir, "extra", &counter_config[i].extra);
}
return 0;
@@ -504,17 +487,26 @@ static int nmi_setup(void)
nmi_enabled = 0;
ctr_running = 0;
- barrier();
- err = register_die_notifier(&profile_exceptions_nb);
+ /* make variables visible to the nmi handler: */
+ smp_mb();
+ err = register_nmi_handler(NMI_LOCAL, profile_exceptions_notify,
+ 0, "oprofile");
if (err)
goto fail;
+ cpu_notifier_register_begin();
+
+ /* Use get/put_online_cpus() to protect 'nmi_enabled' */
get_online_cpus();
- register_cpu_notifier(&oprofile_cpu_nb);
- on_each_cpu(nmi_cpu_setup, NULL, 1);
nmi_enabled = 1;
+ /* make nmi_enabled visible to the nmi handler: */
+ smp_mb();
+ on_each_cpu(nmi_cpu_setup, NULL, 1);
+ __register_cpu_notifier(&oprofile_cpu_nb);
put_online_cpus();
+ cpu_notifier_register_done();
+
return 0;
fail:
free_msrs();
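The register_die_notifier() to register_nmi_handler() conversion follows the x86 NMI infrastructure's handler contract: return NMI_HANDLED to consume the NMI, NMI_DONE to pass it down the chain, replacing the old NOTIFY_STOP/NOTIFY_DONE dance. A minimal sketch of the registration lifecycle, assuming an illustrative handler and flag that are not from this file:

    #include <asm/nmi.h>

    static int example_enabled;	/* illustrative flag */

    static int example_nmi_handler(unsigned int cmd, struct pt_regs *regs)
    {
    	if (!example_enabled)
    		return NMI_DONE;	/* not ours: keep walking the chain */
    	/* ... service the performance-counter NMI here ... */
    	return NMI_HANDLED;		/* consumed: stop further handlers */
    }

    static int example_register(void)
    {
    	/* NMI_LOCAL matches per-CPU sources such as perf counters. */
    	return register_nmi_handler(NMI_LOCAL, example_nmi_handler,
    				    0, "example");
    }

    static void example_unregister(void)
    {
    	unregister_nmi_handler(NMI_LOCAL, "example");
    }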
@@ -525,14 +517,21 @@ static void nmi_shutdown(void)
{
struct op_msrs *msrs;
+ cpu_notifier_register_begin();
+
+ /* Use get/put_online_cpus() to protect 'nmi_enabled' & 'ctr_running' */
get_online_cpus();
- unregister_cpu_notifier(&oprofile_cpu_nb);
on_each_cpu(nmi_cpu_shutdown, NULL, 1);
nmi_enabled = 0;
ctr_running = 0;
+ __unregister_cpu_notifier(&oprofile_cpu_nb);
put_online_cpus();
- barrier();
- unregister_die_notifier(&profile_exceptions_nb);
+
+ cpu_notifier_register_done();
+
+ /* make variables visible to the nmi handler: */
+ smp_mb();
+ unregister_nmi_handler(NMI_LOCAL, "oprofile");
msrs = &get_cpu_var(cpu_msrs);
model->shutdown(msrs);
free_msrs();
@@ -607,24 +606,36 @@ static int __init p4_init(char **cpu_type)
return 0;
}
-static int force_arch_perfmon;
-static int force_cpu_type(const char *str, struct kernel_param *kp)
+enum __force_cpu_type {
+ reserved = 0, /* do not force */
+ timer,
+ arch_perfmon,
+};
+
+static int force_cpu_type;
+
+static int set_cpu_type(const char *str, struct kernel_param *kp)
{
- if (!strcmp(str, "arch_perfmon")) {
- force_arch_perfmon = 1;
+ if (!strcmp(str, "timer")) {
+ force_cpu_type = timer;
+ printk(KERN_INFO "oprofile: forcing NMI timer mode\n");
+ } else if (!strcmp(str, "arch_perfmon")) {
+ force_cpu_type = arch_perfmon;
printk(KERN_INFO "oprofile: forcing architectural perfmon\n");
+ } else {
+ force_cpu_type = 0;
}
return 0;
}
-module_param_call(cpu_type, force_cpu_type, NULL, NULL, 0);
+module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0);
static int __init ppro_init(char **cpu_type)
{
__u8 cpu_model = boot_cpu_data.x86_model;
struct op_x86_model_spec *spec = &op_ppro_spec; /* default */
- if (force_arch_perfmon && cpu_has_arch_perfmon)
+ if (force_cpu_type == arch_perfmon && cpu_has_arch_perfmon)
return 0;
/*
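module_param_call() wires the cpu_type parameter to the custom setter above with no getter and no backing variable of its own. A minimal sketch of that pattern, assuming a standalone module with illustrative names (and the non-const kernel_param setter signature of this kernel era):

    #include <linux/module.h>
    #include <linux/moduleparam.h>
    #include <linux/string.h>

    static int forced_mode;	/* 0 = do not force */

    static int set_mode(const char *str, struct kernel_param *kp)
    {
    	if (!strcmp(str, "timer"))
    		forced_mode = 1;
    	else
    		forced_mode = 0;
    	return 0;	/* a stricter setter could return -EINVAL here */
    }
    /* Setter only: no getter, no backing arg; perm 0 keeps it out of sysfs. */
    module_param_call(mode, set_mode, NULL, NULL, 0);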
@@ -691,6 +702,9 @@ int __init op_nmi_init(struct oprofile_operations *ops)
if (!cpu_has_apic)
return -ENODEV;
+ if (force_cpu_type == timer)
+ return -ENODEV;
+
switch (vendor) {
case X86_VENDOR_AMD:
/* Needs to be at least an Athlon (or hammer in 32bit mode) */