| author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-04-30 07:41:01 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-04-30 07:41:01 -0700 |
| commit | e0972916e8fe943f342b0dd1c9d43dbf5bc261c2 (patch) | |
| tree | 690c436f1f9b839c4ba34d17ab3efa63b97a2dce /arch/x86/kernel/cpu/perf_event_amd.c | |
| parent | 1f889ec62c3f0d8913f3c32f9aff2a1e15099346 (diff) | |
| parent | 5ac2b5c2721501a8f5c5e1cd4116cbc31ace6886 (diff) | |
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates from Ingo Molnar:
"Features:
- Add "uretprobes" - an optimization to uprobes, like kretprobes are
an optimization to kprobes. "perf probe -x file sym%return" now
works like kretprobes. By Oleg Nesterov.
- Introduce per core aggregation in 'perf stat', from Stephane
Eranian.
- Add memory profiling via PEBS, from Stephane Eranian.
- Event group view for 'annotate' in --stdio, --tui and --gtk, from
Namhyung Kim.
- Add support for AMD NB and L2I "uncore" counters, by Jacob Shin.
  - Add Ivy Bridge-EP uncore support, by Zheng Yan.
- IBM zEnterprise EC12 oprofile support patchlet from Robert Richter.
- Add perf test entries for checking breakpoint overflow signal
handler issues, from Jiri Olsa.
  - Add perf test entry for checking number of EXIT events, from
Namhyung Kim.
- Add perf test entries for checking --cpu in record and stat, from
Jiri Olsa.
- Introduce perf stat --repeat forever, from Frederik Deweerdt.
- Add --no-demangle to report/top, from Namhyung Kim.
- PowerPC fixes plus a couple of cleanups/optimizations in uprobes
and trace_uprobes, by Oleg Nesterov.
Various fixes and refactorings:
- Fix dependency of the python binding wrt libtraceevent, from
Naohiro Aota.
  - Simplify some perf_evlist methods to allow 'stat' to share code
with 'record' and 'trace', by Arnaldo Carvalho de Melo.
  - Remove dead code related to libtraceevent integration, from
Namhyung Kim.
- Revert "perf sched: Handle PERF_RECORD_EXIT events" to get 'perf
    sched lat' back working, by Arnaldo Carvalho de Melo.
- We don't use Newt anymore, just plain libslang, by Arnaldo Carvalho
de Melo.
- Kill a bunch of die() calls, from Namhyung Kim.
- Fix build on non-glibc systems due to libio.h absence, from Cody P
Schafer.
- Remove some perf_session and tracing dead code, from David Ahern.
  - Honor parallel jobs, fix from Borislav Petkov.
- Introduce tools/lib/lk library, initially just removing duplication
    among tools/perf and tools/vm, from Borislav Petkov.
  ... and many more I did not list here, see the shortlog and git log for
more details."
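The new tooling features called out above can be exercised directly from the command line. The sketch below is illustrative only: the binary path and symbol name are placeholders, and beyond standard perf-probe options it relies only on the spellings quoted in the message above ("%return", "--repeat forever").

```sh
# Create a return probe ("uretprobe") on a function inside a user-space binary.
# The %return suffix is the new syntax described above; path and symbol are
# placeholders for a real binary and function.
perf probe -x /path/to/mybinary myfunc%return

# Inspect and remove probe points when finished.
perf probe --list
perf probe --del '*'

# Keep re-running 'perf stat' until interrupted, using the new "forever" value.
perf stat --repeat forever -- sleep 1
```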
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (136 commits)
perf/x86/intel/P4: Robistify P4 PMU types
perf/x86/amd: Fix AMD NB and L2I "uncore" support
perf/x86/amd: Remove old-style NB counter support from perf_event_amd.c
perf/x86: Check all MSRs before passing hw check
perf/x86/amd: Add support for AMD NB and L2I "uncore" counters
perf/x86/intel: Add Ivy Bridge-EP uncore support
perf/x86/intel: Fix SNB-EP CBO and PCU uncore PMU filter management
perf/x86: Avoid kfree() in CPU_{STARTING,DYING}
uprobes/perf: Avoid perf_trace_buf_prepare/submit if ->perf_events is empty
uprobes/tracing: Don't pass addr=ip to perf_trace_buf_submit()
uprobes/tracing: Change create_trace_uprobe() to support uretprobes
uprobes/tracing: Make seq_printf() code uretprobe-friendly
uprobes/tracing: Make register_uprobe_event() paths uretprobe-friendly
uprobes/tracing: Make uprobe_{trace,perf}_print() uretprobe-friendly
uprobes/tracing: Introduce is_ret_probe() and uretprobe_dispatcher()
uprobes/tracing: Introduce uprobe_{trace,perf}_print() helpers
uprobes/tracing: Generalize struct uprobe_trace_entry_head
uprobes/tracing: Kill the pointless local_save_flags/preempt_count calls
uprobes/tracing: Kill the pointless seq_print_ip_sym() call
uprobes/tracing: Kill the pointless task_pt_regs() calls
...
Diffstat (limited to 'arch/x86/kernel/cpu/perf_event_amd.c')
-rw-r--r-- | arch/x86/kernel/cpu/perf_event_amd.c | 138
1 files changed, 5 insertions, 133 deletions
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index dfdab42aed2..7e28d9467bb 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -132,14 +132,11 @@ static u64 amd_pmu_event_map(int hw_event)
         return amd_perfmon_event_map[hw_event];
 }

-static struct event_constraint *amd_nb_event_constraint;
-
 /*
  * Previously calculated offsets
  */
 static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly;
 static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;
-static unsigned int rdpmc_indexes[X86_PMC_IDX_MAX] __read_mostly;

 /*
  * Legacy CPUs:
@@ -147,14 +144,10 @@ static unsigned int rdpmc_indexes[X86_PMC_IDX_MAX] __read_mostly;
  *
  * CPUs with core performance counter extensions:
  *   6 counters starting at 0xc0010200 each offset by 2
- *
- * CPUs with north bridge performance counter extensions:
- *   4 additional counters starting at 0xc0010240 each offset by 2
- *   (indexed right above either one of the above core counters)
  */
 static inline int amd_pmu_addr_offset(int index, bool eventsel)
 {
-        int offset, first, base;
+        int offset;

         if (!index)
                 return index;
@@ -167,23 +160,7 @@ static inline int amd_pmu_addr_offset(int index, bool eventsel)
         if (offset)
                 return offset;

-        if (amd_nb_event_constraint &&
-            test_bit(index, amd_nb_event_constraint->idxmsk)) {
-                /*
-                 * calculate the offset of NB counters with respect to
-                 * base eventsel or perfctr
-                 */
-
-                first = find_first_bit(amd_nb_event_constraint->idxmsk,
-                                       X86_PMC_IDX_MAX);
-
-                if (eventsel)
-                        base = MSR_F15H_NB_PERF_CTL - x86_pmu.eventsel;
-                else
-                        base = MSR_F15H_NB_PERF_CTR - x86_pmu.perfctr;
-
-                offset = base + ((index - first) << 1);
-        } else if (!cpu_has_perfctr_core)
+        if (!cpu_has_perfctr_core)
                 offset = index;
         else
                 offset = index << 1;
@@ -196,36 +173,6 @@ static inline int amd_pmu_addr_offset(int index, bool eventsel)
         return offset;
 }

-static inline int amd_pmu_rdpmc_index(int index)
-{
-        int ret, first;
-
-        if (!index)
-                return index;
-
-        ret = rdpmc_indexes[index];
-
-        if (ret)
-                return ret;
-
-        if (amd_nb_event_constraint &&
-            test_bit(index, amd_nb_event_constraint->idxmsk)) {
-                /*
-                 * according to the mnual, ECX value of the NB counters is
-                 * the index of the NB counter (0, 1, 2 or 3) plus 6
-                 */
-
-                first = find_first_bit(amd_nb_event_constraint->idxmsk,
-                                       X86_PMC_IDX_MAX);
-                ret = index - first + 6;
-        } else
-                ret = index;
-
-        rdpmc_indexes[index] = ret;
-
-        return ret;
-}
-
 static int amd_core_hw_config(struct perf_event *event)
 {
         if (event->attr.exclude_host && event->attr.exclude_guest)
@@ -245,34 +192,6 @@ static int amd_core_hw_config(struct perf_event *event)
 }

 /*
- * NB counters do not support the following event select bits:
- *   Host/Guest only
- *   Counter mask
- *   Invert counter mask
- *   Edge detect
- *   OS/User mode
- */
-static int amd_nb_hw_config(struct perf_event *event)
-{
-        /* for NB, we only allow system wide counting mode */
-        if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
-                return -EINVAL;
-
-        if (event->attr.exclude_user || event->attr.exclude_kernel ||
-            event->attr.exclude_host || event->attr.exclude_guest)
-                return -EINVAL;
-
-        event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
-                              ARCH_PERFMON_EVENTSEL_OS);
-
-        if (event->hw.config & ~(AMD64_RAW_EVENT_MASK_NB |
-                                 ARCH_PERFMON_EVENTSEL_INT))
-                return -EINVAL;
-
-        return 0;
-}
-
-/*
  * AMD64 events are detected based on their event codes.
  */
 static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
@@ -285,11 +204,6 @@ static inline int amd_is_nb_event(struct hw_perf_event *hwc)
         return (hwc->config & 0xe0) == 0xe0;
 }

-static inline int amd_is_perfctr_nb_event(struct hw_perf_event *hwc)
-{
-        return amd_nb_event_constraint && amd_is_nb_event(hwc);
-}
-
 static inline int amd_has_nb(struct cpu_hw_events *cpuc)
 {
         struct amd_nb *nb = cpuc->amd_nb;
@@ -315,9 +229,6 @@ static int amd_pmu_hw_config(struct perf_event *event)
         if (event->attr.type == PERF_TYPE_RAW)
                 event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

-        if (amd_is_perfctr_nb_event(&event->hw))
-                return amd_nb_hw_config(event);
-
         return amd_core_hw_config(event);
 }

@@ -341,19 +252,6 @@ static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
         }
 }

-static void amd_nb_interrupt_hw_config(struct hw_perf_event *hwc)
-{
-        int core_id = cpu_data(smp_processor_id()).cpu_core_id;
-
-        /* deliver interrupts only to this core */
-        if (hwc->config & ARCH_PERFMON_EVENTSEL_INT) {
-                hwc->config |= AMD64_EVENTSEL_INT_CORE_ENABLE;
-                hwc->config &= ~AMD64_EVENTSEL_INT_CORE_SEL_MASK;
-                hwc->config |= (u64)(core_id) <<
-                        AMD64_EVENTSEL_INT_CORE_SEL_SHIFT;
-        }
-}
-
 /*
  * AMD64 NorthBridge events need special treatment because
  * counter access needs to be synchronized across all cores
@@ -441,9 +339,6 @@ __amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *ev
         if (new == -1)
                 return &emptyconstraint;

-        if (amd_is_perfctr_nb_event(hwc))
-                amd_nb_interrupt_hw_config(hwc);
-
         return &nb->event_constraints[new];
 }

@@ -543,8 +438,7 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
         if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
                 return &unconstrained;

-        return __amd_get_nb_event_constraints(cpuc, event,
-                                              amd_nb_event_constraint);
+        return __amd_get_nb_event_constraints(cpuc, event, NULL);
 }

 static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
@@ -643,9 +537,6 @@ static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09,
 static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
 static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);

-static struct event_constraint amd_NBPMC96 = EVENT_CONSTRAINT(0, 0x3C0, 0);
-static struct event_constraint amd_NBPMC74 = EVENT_CONSTRAINT(0, 0xF0, 0);
-
 static struct event_constraint *
 amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
 {
@@ -711,8 +602,8 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *ev
                         return &amd_f15_PMC20;
                 }
         case AMD_EVENT_NB:
-                return __amd_get_nb_event_constraints(cpuc, event,
-                                                      amd_nb_event_constraint);
+                /* moved to perf_event_amd_uncore.c */
+                return &emptyconstraint;
         default:
                 return &emptyconstraint;
         }
@@ -738,7 +629,6 @@ static __initconst const struct x86_pmu amd_pmu = {
         .eventsel               = MSR_K7_EVNTSEL0,
         .perfctr                = MSR_K7_PERFCTR0,
         .addr_offset            = amd_pmu_addr_offset,
-        .rdpmc_index            = amd_pmu_rdpmc_index,
         .event_map              = amd_pmu_event_map,
         .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
         .num_counters           = AMD64_NUM_COUNTERS,
@@ -790,23 +680,6 @@ static int setup_perfctr_core(void)
         return 0;
 }

-static int setup_perfctr_nb(void)
-{
-        if (!cpu_has_perfctr_nb)
-                return -ENODEV;
-
-        x86_pmu.num_counters += AMD64_NUM_COUNTERS_NB;
-
-        if (cpu_has_perfctr_core)
-                amd_nb_event_constraint = &amd_NBPMC96;
-        else
-                amd_nb_event_constraint = &amd_NBPMC74;
-
-        printk(KERN_INFO "perf: AMD northbridge performance counters detected\n");
-
-        return 0;
-}
-
 __init int amd_pmu_init(void)
 {
         /* Performance-monitoring supported from K7 and later: */
@@ -817,7 +690,6 @@ __init int amd_pmu_init(void)

         setup_event_constraints();
         setup_perfctr_core();
-        setup_perfctr_nb();

         /* Events are common for all AMDs */
         memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,