Diffstat (limited to 'arch/powerpc/perf/core-book3s.c')
-rw-r--r--  arch/powerpc/perf/core-book3s.c | 770
1 file changed, 682 insertions, 88 deletions
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index aa2465e21f1..fe52db2eea6 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -13,11 +13,18 @@
 #include <linux/perf_event.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
+#include <linux/uaccess.h>
 #include <asm/reg.h>
 #include <asm/pmc.h>
 #include <asm/machdep.h>
 #include <asm/firmware.h>
 #include <asm/ptrace.h>
+#include <asm/code-patching.h>
+
+#define BHRB_MAX_ENTRIES    32
+#define BHRB_TARGET         0x0000000000000002
+#define BHRB_PREDICTION     0x0000000000000001
+#define BHRB_EA             0xFFFFFFFFFFFFFFFCUL
 
 struct cpu_hw_events {
     int n_events;
@@ -38,7 +45,15 @@ struct cpu_hw_events {
 
     unsigned int group_flag;
     int n_txn_start;
+
+    /* BHRB bits */
+    u64                             bhrb_filter;    /* BHRB HW branch filter */
+    int                             bhrb_users;
+    void                            *bhrb_context;
+    struct perf_branch_stack        bhrb_stack;
+    struct perf_branch_entry        bhrb_entries[BHRB_MAX_ENTRIES];
 };
+
 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
 
 struct power_pmu *ppmu;
@@ -60,6 +75,12 @@ static unsigned int freeze_events_kernel = MMCR0_FCS;
 
 #define MMCR0_FCHV          0
 #define MMCR0_PMCjCE        MMCR0_PMCnCE
+#define MMCR0_FC56          0
+#define MMCR0_PMAO          0
+#define MMCR0_EBE           0
+#define MMCR0_BHRBA         0
+#define MMCR0_PMCC          0
+#define MMCR0_PMCC_U6       0
 
 #define SPRN_MMCRA          SPRN_MMCR2
 #define MMCRA_SAMPLE_ENABLE 0
@@ -87,8 +108,27 @@ static inline int siar_valid(struct pt_regs *regs)
     return 1;
 }
 
+static bool is_ebb_event(struct perf_event *event) { return false; }
+static int ebb_event_check(struct perf_event *event) { return 0; }
+static void ebb_event_add(struct perf_event *event) { }
+static void ebb_switch_out(unsigned long mmcr0) { }
+static unsigned long ebb_switch_in(bool ebb, unsigned long mmcr0)
+{
+    return mmcr0;
+}
+
+static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
+static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
+void power_pmu_flush_branch_stack(void) {}
+static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
+static void pmao_restore_workaround(bool ebb) { }
 #endif /* CONFIG_PPC32 */
 
+static bool regs_use_siar(struct pt_regs *regs)
+{
+    return !!regs->result;
+}
+
 /*
  * Things that are specific to 64-bit implementations.
  */
@@ -98,11 +138,12 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
 {
     unsigned long mmcra = regs->dsisr;
 
-    if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
+    if ((ppmu->flags & PPMU_HAS_SSLOT) && (mmcra & MMCRA_SAMPLE_ENABLE)) {
         unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
         if (slot > 1)
             return 4 * (slot - 1);
     }
+
     return 0;
 }
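The rewritten perf_ip_adjust() above gates the slot adjustment on PPMU_HAS_SSLOT rather than on the absence of PPMU_ALT_SIPR. A minimal sketch of the arithmetic, using made-up register values (not part of the patch): on a marked sample, SIAR holds the fetch-group address and MMCRA[SLOT] the slot within that group, with slots past the first sitting 4 bytes apart.

    /* Illustrative values only */
    static unsigned long example_ip_adjust(void)
    {
        unsigned long siar = 0xc000000000012340UL; /* assumed sampled address */
        unsigned long slot = 3;                    /* assumed MMCRA slot field */

        return siar + 4 * (slot - 1);              /* 0xc000000000012348 */
    }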
@@ -111,43 +152,57 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
  * If we're not doing instruction sampling, give them the SDAR
  * (sampled data address).  If we are doing instruction sampling, then
  * only give them the SDAR if it corresponds to the instruction
- * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC or
- * the [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA.
+ * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC, the
+ * [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA, or the SDAR_VALID bit in SIER.
  */
 static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
 {
     unsigned long mmcra = regs->dsisr;
-    unsigned long sdsync;
+    bool sdar_valid;
 
-    if (ppmu->flags & PPMU_SIAR_VALID)
-        sdsync = POWER7P_MMCRA_SDAR_VALID;
-    else if (ppmu->flags & PPMU_ALT_SIPR)
-        sdsync = POWER6_MMCRA_SDSYNC;
-    else
-        sdsync = MMCRA_SDSYNC;
+    if (ppmu->flags & PPMU_HAS_SIER)
+        sdar_valid = regs->dar & SIER_SDAR_VALID;
+    else {
+        unsigned long sdsync;
 
-    if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
+        if (ppmu->flags & PPMU_SIAR_VALID)
+            sdsync = POWER7P_MMCRA_SDAR_VALID;
+        else if (ppmu->flags & PPMU_ALT_SIPR)
+            sdsync = POWER6_MMCRA_SDSYNC;
+        else
+            sdsync = MMCRA_SDSYNC;
+
+        sdar_valid = mmcra & sdsync;
+    }
+
+    if (!(mmcra & MMCRA_SAMPLE_ENABLE) || sdar_valid)
         *addrp = mfspr(SPRN_SDAR);
 }
 
-static bool mmcra_sihv(unsigned long mmcra)
+static bool regs_sihv(struct pt_regs *regs)
 {
     unsigned long sihv = MMCRA_SIHV;
 
+    if (ppmu->flags & PPMU_HAS_SIER)
+        return !!(regs->dar & SIER_SIHV);
+
     if (ppmu->flags & PPMU_ALT_SIPR)
         sihv = POWER6_MMCRA_SIHV;
 
-    return !!(mmcra & sihv);
+    return !!(regs->dsisr & sihv);
 }
 
-static bool mmcra_sipr(unsigned long mmcra)
+static bool regs_sipr(struct pt_regs *regs)
 {
     unsigned long sipr = MMCRA_SIPR;
 
+    if (ppmu->flags & PPMU_HAS_SIER)
+        return !!(regs->dar & SIER_SIPR);
+
     if (ppmu->flags & PPMU_ALT_SIPR)
         sipr = POWER6_MMCRA_SIPR;
 
-    return !!(mmcra & sipr);
+    return !!(regs->dsisr & sipr);
 }
 
 static inline u32 perf_flags_from_msr(struct pt_regs *regs)
@@ -161,8 +216,7 @@ static inline u32 perf_get_misc_flags(struct pt_regs *regs)
 {
-    unsigned long mmcra = regs->dsisr;
-    unsigned long use_siar = regs->result;
+    bool use_siar = regs_use_siar(regs);
 
     if (!use_siar)
         return perf_flags_from_msr(regs);
@@ -181,16 +235,19 @@ static inline u32 perf_get_misc_flags(struct pt_regs *regs)
     }
 
     /* PR has priority over HV, so order below is important */
-    if (mmcra_sipr(mmcra))
+    if (regs_sipr(regs))
         return PERF_RECORD_MISC_USER;
-    if (mmcra_sihv(mmcra) && (freeze_events_kernel != MMCR0_FCHV))
+
+    if (regs_sihv(regs) && (freeze_events_kernel != MMCR0_FCHV))
         return PERF_RECORD_MISC_HYPERVISOR;
+
     return PERF_RECORD_MISC_KERNEL;
 }
 
 /*
  * Overload regs->dsisr to store MMCRA so we only need to read it once
  * on each interrupt.
+ * Overload regs->dar to store SIER if we have it.
  * Overload regs->result to specify whether we should use the MSR (result
  * is zero) or the SIAR (result is non zero).
  */
@@ -200,6 +257,11 @@ static inline void perf_read_regs(struct pt_regs *regs)
     int marked = mmcra & MMCRA_SAMPLE_ENABLE;
     int use_siar;
 
+    regs->dsisr = mmcra;
+
+    if (ppmu->flags & PPMU_HAS_SIER)
+        regs->dar = mfspr(SPRN_SIER);
+
     /*
      * If this isn't a PMU exception (eg a software event) the SIAR is
      * not valid. Use pt_regs.
@@ -223,12 +285,11 @@ static inline void perf_read_regs(struct pt_regs *regs)
         use_siar = 1;
     else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
         use_siar = 0;
-    else if (!(ppmu->flags & PPMU_NO_SIPR) && mmcra_sipr(mmcra))
+    else if (!(ppmu->flags & PPMU_NO_SIPR) && regs_sipr(regs))
         use_siar = 0;
     else
         use_siar = 1;
 
-    regs->dsisr = mmcra;
     regs->result = use_siar;
 }
 
@@ -253,20 +314,353 @@ static inline int siar_valid(struct pt_regs *regs)
     unsigned long mmcra = regs->dsisr;
     int marked = mmcra & MMCRA_SAMPLE_ENABLE;
 
-    if ((ppmu->flags & PPMU_SIAR_VALID) && marked)
-        return mmcra & POWER7P_MMCRA_SIAR_VALID;
+    if (marked) {
+        if (ppmu->flags & PPMU_HAS_SIER)
+            return regs->dar & SIER_SIAR_VALID;
+
+        if (ppmu->flags & PPMU_SIAR_VALID)
+            return mmcra & POWER7P_MMCRA_SIAR_VALID;
+    }
 
     return 1;
 }
 
-#endif /* CONFIG_PPC64 */
-
-static void perf_event_interrupt(struct pt_regs *regs);
+/* Reset all possible BHRB entries */
+static void power_pmu_bhrb_reset(void)
+{
+    asm volatile(PPC_CLRBHRB);
+}
 
-void perf_event_print_debug(void)
+static void power_pmu_bhrb_enable(struct perf_event *event)
 {
+    struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+    if (!ppmu->bhrb_nr)
+        return;
+
+    /* Clear BHRB if we changed task context to avoid data leaks */
+    if (event->ctx->task && cpuhw->bhrb_context != event->ctx) {
+        power_pmu_bhrb_reset();
+        cpuhw->bhrb_context = event->ctx;
+    }
+    cpuhw->bhrb_users++;
 }
+
+static void power_pmu_bhrb_disable(struct perf_event *event)
+{
+    struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+    if (!ppmu->bhrb_nr)
+        return;
+
+    cpuhw->bhrb_users--;
+    WARN_ON_ONCE(cpuhw->bhrb_users < 0);
+
+    if (!cpuhw->disabled && !cpuhw->bhrb_users) {
+        /* BHRB cannot be turned off when other
+         * events are active on the PMU.
+         */
+
+        /* avoid stale pointer */
+        cpuhw->bhrb_context = NULL;
+    }
+}
+
+/* Called from ctxsw to prevent one process's branch entries to
+ * mingle with the other process's entries during context switch.
+ */
+void power_pmu_flush_branch_stack(void)
+{
+    if (ppmu->bhrb_nr)
+        power_pmu_bhrb_reset();
+}
+/* Calculate the to address for a branch */
+static __u64 power_pmu_bhrb_to(u64 addr)
+{
+    unsigned int instr;
+    int ret;
+    __u64 target;
+
+    if (is_kernel_addr(addr))
+        return branch_target((unsigned int *)addr);
+
+    /* Userspace: need copy instruction here then translate it */
+    pagefault_disable();
+    ret = __get_user_inatomic(instr, (unsigned int __user *)addr);
+    if (ret) {
+        pagefault_enable();
+        return 0;
+    }
+    pagefault_enable();
+
+    target = branch_target(&instr);
+    if ((!target) || (instr & BRANCH_ABSOLUTE))
+        return target;
+
+    /* Translate relative branch target from kernel to user address */
+    return target - (unsigned long)&instr + addr;
+}
+
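To make the user-space translation in power_pmu_bhrb_to() above concrete: the instruction word is copied into a kernel stack variable, so branch_target() resolves the relative displacement against that copy, and the result must then be rebased onto the original user address. A sketch with assumed numbers (not part of the patch):

    static u64 example_bhrb_to(void)
    {
        unsigned int instr;                 /* kernel copy of the branch insn */
        u64 addr   = 0x10001000UL;          /* assumed user address of the branch */
        long off   = 0x100;                 /* assumed displacement decoded from instr */
        u64 target = (u64)&instr + off;     /* what branch_target(&instr) returns */

        return target - (u64)&instr + addr; /* 0x10001100, the user-space target */
    }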
+/* Processing BHRB entries */
+void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
+{
+    u64 val;
+    u64 addr;
+    int r_index, u_index, pred;
+
+    r_index = 0;
+    u_index = 0;
+    while (r_index < ppmu->bhrb_nr) {
+        /* Assembly read function */
+        val = read_bhrb(r_index++);
+        if (!val)
+            /* Terminal marker: End of valid BHRB entries */
+            break;
+        else {
+            addr = val & BHRB_EA;
+            pred = val & BHRB_PREDICTION;
+
+            if (!addr)
+                /* invalid entry */
+                continue;
+
+            /* Branches are read most recent first (ie. mfbhrb 0 is
+             * the most recent branch).
+             * There are two types of valid entries:
+             * 1) a target entry which is the to address of a
+             *    computed goto like a blr,bctr,btar.  The next
+             *    entry read from the bhrb will be branch
+             *    corresponding to this target (ie. the actual
+             *    blr/bctr/btar instruction).
+             * 2) a from address which is an actual branch.  If a
+             *    target entry precedes this, then this is the
+             *    matching branch for that target.  If this is not
+             *    following a target entry, then this is a branch
+             *    where the target is given as an immediate field
+             *    in the instruction (ie. an i or b form branch).
+             *    In this case we need to read the instruction from
+             *    memory to determine the target/to address.
+             */
+
+            if (val & BHRB_TARGET) {
+                /* Target branches use two entries
+                 * (ie. computed gotos/XL form)
+                 */
+                cpuhw->bhrb_entries[u_index].to = addr;
+                cpuhw->bhrb_entries[u_index].mispred = pred;
+                cpuhw->bhrb_entries[u_index].predicted = ~pred;
+
+                /* Get from address in next entry */
+                val = read_bhrb(r_index++);
+                addr = val & BHRB_EA;
+                if (val & BHRB_TARGET) {
+                    /* Shouldn't have two targets in a
+                     * row.  Reset index and try again */
+                    r_index--;
+                    addr = 0;
+                }
+                cpuhw->bhrb_entries[u_index].from = addr;
+            } else {
+                /* Branches to immediate field
+                 * (ie I or B form)
+                 */
+                cpuhw->bhrb_entries[u_index].from = addr;
+                cpuhw->bhrb_entries[u_index].to =
+                    power_pmu_bhrb_to(addr);
+                cpuhw->bhrb_entries[u_index].mispred = pred;
+                cpuhw->bhrb_entries[u_index].predicted = ~pred;
+            }
+            u_index++;
+
+        }
+    }
+    cpuhw->bhrb_stack.nr = u_index;
+    return;
+}
+
+static bool is_ebb_event(struct perf_event *event)
+{
+    /*
+     * This could be a per-PMU callback, but we'd rather avoid the cost. We
+     * check that the PMU supports EBB, meaning those that don't can still
+     * use bit 63 of the event code for something else if they wish.
+     */
+    return (ppmu->flags & PPMU_ARCH_207S) &&
+           ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1);
+}
+
+static int ebb_event_check(struct perf_event *event)
+{
+    struct perf_event *leader = event->group_leader;
+
+    /* Event and group leader must agree on EBB */
+    if (is_ebb_event(leader) != is_ebb_event(event))
+        return -EINVAL;
+
+    if (is_ebb_event(event)) {
+        if (!(event->attach_state & PERF_ATTACH_TASK))
+            return -EINVAL;
+
+        if (!leader->attr.pinned || !leader->attr.exclusive)
+            return -EINVAL;
+
+        if (event->attr.freq ||
+            event->attr.inherit ||
+            event->attr.sample_type ||
+            event->attr.sample_period ||
+            event->attr.enable_on_exec)
+            return -EINVAL;
+    }
+
+    return 0;
+}
+
+static void ebb_event_add(struct perf_event *event)
+{
+    if (!is_ebb_event(event) || current->thread.used_ebb)
+        return;
+
+    /*
+     * IFF this is the first time we've added an EBB event, set
+     * PMXE in the user MMCR0 so we can detect when it's cleared by
+     * userspace. We need this so that we can context switch while
+     * userspace is in the EBB handler (where PMXE is 0).
+     */
+    current->thread.used_ebb = 1;
+    current->thread.mmcr0 |= MMCR0_PMXE;
+}
+
+static void ebb_switch_out(unsigned long mmcr0)
+{
+    if (!(mmcr0 & MMCR0_EBE))
+        return;
+
+    current->thread.siar  = mfspr(SPRN_SIAR);
+    current->thread.sier  = mfspr(SPRN_SIER);
+    current->thread.sdar  = mfspr(SPRN_SDAR);
+    current->thread.mmcr0 = mmcr0 & MMCR0_USER_MASK;
+    current->thread.mmcr2 = mfspr(SPRN_MMCR2) & MMCR2_USER_MASK;
+}
+
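ebb_event_check() above spells out what a usable EBB event looks like from userspace. A hedged sketch of a perf_event_attr that would pass those checks, assuming a hypothetical raw event code 0x1001e (the EBB marker is bit 63 of config, per PERF_EVENT_CONFIG_EBB_SHIFT):

    struct perf_event_attr attr = {
        .type      = PERF_TYPE_RAW,
        .config    = 0x1001e | (1ULL << 63), /* made-up event code plus EBB bit */
        .pinned    = 1,                      /* group leader must be pinned ... */
        .exclusive = 1,                      /* ... and exclusive */
        /*
         * freq, inherit, sample_type, sample_period and enable_on_exec must
         * all be left zero, and the event must be attached to a task.
         */
    };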
+static unsigned long ebb_switch_in(bool ebb, unsigned long mmcr0)
+{
+    if (!ebb)
+        goto out;
+
+    /* Enable EBB and read/write to all 6 PMCs and BHRB for userspace */
+    mmcr0 |= MMCR0_EBE | MMCR0_BHRBA | MMCR0_PMCC_U6;
+
+    /*
+     * Add any bits from the user MMCR0, FC or PMAO. This is compatible
+     * with pmao_restore_workaround() because we may add PMAO but we never
+     * clear it here.
+     */
+    mmcr0 |= current->thread.mmcr0;
+
+    /*
+     * Be careful not to set PMXE if userspace had it cleared. This is also
+     * compatible with pmao_restore_workaround() because it has already
+     * cleared PMXE and we leave PMAO alone.
+     */
+    if (!(current->thread.mmcr0 & MMCR0_PMXE))
+        mmcr0 &= ~MMCR0_PMXE;
+
+    mtspr(SPRN_SIAR, current->thread.siar);
+    mtspr(SPRN_SIER, current->thread.sier);
+    mtspr(SPRN_SDAR, current->thread.sdar);
+    mtspr(SPRN_MMCR2, current->thread.mmcr2);
+out:
+    return mmcr0;
+}
+
+static void pmao_restore_workaround(bool ebb)
+{
+    unsigned pmcs[6];
+
+    if (!cpu_has_feature(CPU_FTR_PMAO_BUG))
+        return;
+
+    /*
+     * On POWER8E there is a hardware defect which affects the PMU context
+     * switch logic, ie. power_pmu_disable/enable().
+     *
+     * When a counter overflows PMXE is cleared and FC/PMAO is set in MMCR0
+     * by the hardware. Sometime later the actual PMU exception is
+     * delivered.
+     *
+     * If we context switch, or simply disable/enable, the PMU prior to the
+     * exception arriving, the exception will be lost when we clear PMAO.
+     *
+     * When we reenable the PMU, we will write the saved MMCR0 with PMAO
+     * set, and this _should_ generate an exception. However because of the
+     * defect no exception is generated when we write PMAO, and we get
+     * stuck with no counters counting but no exception delivered.
+     *
+     * The workaround is to detect this case and tweak the hardware to
+     * create another pending PMU exception.
+     *
+     * We do that by setting up PMC6 (cycles) for an imminent overflow and
+     * enabling the PMU. That causes a new exception to be generated in the
+     * chip, but we don't take it yet because we have interrupts hard
+     * disabled. We then write back the PMU state as we want it to be seen
+     * by the exception handler. When we reenable interrupts the exception
+     * handler will be called and see the correct state.
+     *
+     * The logic is the same for EBB, except that the exception is gated by
+     * us having interrupts hard disabled as well as the fact that we are
+     * not in userspace. The exception is finally delivered when we return
+     * to userspace.
+     */
+
+    /* Only if PMAO is set and PMAO_SYNC is clear */
+    if ((current->thread.mmcr0 & (MMCR0_PMAO | MMCR0_PMAO_SYNC)) != MMCR0_PMAO)
+        return;
+
+    /* If we're doing EBB, only if BESCR[GE] is set */
+    if (ebb && !(current->thread.bescr & BESCR_GE))
+        return;
+
+    /*
+     * We are already soft-disabled in power_pmu_enable(). We need to hard
+     * disable to actually prevent the PMU exception from firing.
+     */
+    hard_irq_disable();
+
+    /*
+     * This is a bit gross, but we know we're on POWER8E and have 6 PMCs.
+     * Using read/write_pmc() in a for loop adds 12 function calls and
+     * almost doubles our code size.
+     */
+    pmcs[0] = mfspr(SPRN_PMC1);
+    pmcs[1] = mfspr(SPRN_PMC2);
+    pmcs[2] = mfspr(SPRN_PMC3);
+    pmcs[3] = mfspr(SPRN_PMC4);
+    pmcs[4] = mfspr(SPRN_PMC5);
+    pmcs[5] = mfspr(SPRN_PMC6);
+
+    /* Ensure all freeze bits are unset */
+    mtspr(SPRN_MMCR2, 0);
+
+    /* Set up PMC6 to overflow in one cycle */
+    mtspr(SPRN_PMC6, 0x7FFFFFFE);
+
+    /* Enable exceptions and unfreeze PMC6 */
+    mtspr(SPRN_MMCR0, MMCR0_PMXE | MMCR0_PMCjCE | MMCR0_PMAO);
+
+    /* Now we need to refreeze and restore the PMCs */
+    mtspr(SPRN_MMCR0, MMCR0_FC | MMCR0_PMAO);
+
+    mtspr(SPRN_PMC1, pmcs[0]);
+    mtspr(SPRN_PMC2, pmcs[1]);
+    mtspr(SPRN_PMC3, pmcs[2]);
+    mtspr(SPRN_PMC4, pmcs[3]);
+    mtspr(SPRN_PMC5, pmcs[4]);
+    mtspr(SPRN_PMC6, pmcs[5]);
+}
+#endif /* CONFIG_PPC64 */
+
+static void perf_event_interrupt(struct pt_regs *regs);
+
 /*
  * Read one performance monitor counter (PMC).
  */
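The MMCR0 merging in ebb_switch_in() above can be followed with assumed values; a sketch, not part of the patch:

    static unsigned long example_ebb_merge(unsigned long mmcr0)
    {
        /* Assumed: saved user MMCR0 has PMAO set but PMXE cleared */
        unsigned long user = MMCR0_PMAO;

        mmcr0 |= MMCR0_EBE | MMCR0_BHRBA | MMCR0_PMCC_U6; /* EBB facilities */
        mmcr0 |= user;                     /* may add FC or PMAO, never clears */
        if (!(user & MMCR0_PMXE))
            mmcr0 &= ~MMCR0_PMXE;          /* respect userspace clearing PMXE */

        return mmcr0;
    }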
@@ -345,6 +739,57 @@ static void write_pmc(int idx, unsigned long val)
     }
 }
 
+/* Called from sysrq_handle_showregs() */
+void perf_event_print_debug(void)
+{
+    unsigned long sdar, sier, flags;
+    u32 pmcs[MAX_HWEVENTS];
+    int i;
+
+    if (!ppmu->n_counter)
+        return;
+
+    local_irq_save(flags);
+
+    pr_info("CPU: %d PMU registers, ppmu = %s n_counters = %d\n",
+        smp_processor_id(), ppmu->name, ppmu->n_counter);
+
+    for (i = 0; i < ppmu->n_counter; i++)
+        pmcs[i] = read_pmc(i + 1);
+
+    for (; i < MAX_HWEVENTS; i++)
+        pmcs[i] = 0xdeadbeef;
+
+    pr_info("PMC1: %08x PMC2: %08x PMC3: %08x PMC4: %08x\n",
+        pmcs[0], pmcs[1], pmcs[2], pmcs[3]);
+
+    if (ppmu->n_counter > 4)
+        pr_info("PMC5: %08x PMC6: %08x PMC7: %08x PMC8: %08x\n",
+            pmcs[4], pmcs[5], pmcs[6], pmcs[7]);
+
+    pr_info("MMCR0: %016lx MMCR1: %016lx MMCRA: %016lx\n",
+        mfspr(SPRN_MMCR0), mfspr(SPRN_MMCR1), mfspr(SPRN_MMCRA));
+
+    sdar = sier = 0;
+#ifdef CONFIG_PPC64
+    sdar = mfspr(SPRN_SDAR);
+
+    if (ppmu->flags & PPMU_HAS_SIER)
+        sier = mfspr(SPRN_SIER);
+
+    if (ppmu->flags & PPMU_ARCH_207S) {
+        pr_info("MMCR2: %016lx EBBHR: %016lx\n",
+            mfspr(SPRN_MMCR2), mfspr(SPRN_EBBHR));
+        pr_info("EBBRR: %016lx BESCR: %016lx\n",
+            mfspr(SPRN_EBBRR), mfspr(SPRN_BESCR));
+    }
+#endif
+    pr_info("SIAR: %016lx SDAR: %016lx SIER: %016lx\n",
+        mfspr(SPRN_SIAR), sdar, sier);
+
+    local_irq_restore(flags);
+}
+
 /*
  * Check if a set of events can all go on the PMU at once.
  * If they can't, this will look at alternative codes for the events
@@ -529,6 +974,13 @@ static void power_pmu_read(struct perf_event *event)
     if (!event->hw.idx)
         return;
+
+    if (is_ebb_event(event)) {
+        val = read_pmc(event->hw.idx);
+        local64_set(&event->hw.prev_count, val);
+        return;
+    }
+
     /*
      * Performance monitor interrupts come even when interrupts
      * are soft-disabled, as long as interrupts are hard-enabled.
@@ -544,7 +996,22 @@ static void power_pmu_read(struct perf_event *event)
     } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
 
     local64_add(delta, &event->count);
-    local64_sub(delta, &event->hw.period_left);
+
+    /*
+     * A number of places program the PMC with (0x80000000 - period_left).
+     * We never want period_left to be less than 1 because we will program
+     * the PMC with a value >= 0x80000000 and an edge detected PMC will
+     * roll around to 0 before taking an exception. We have seen this
+     * on POWER8.
+     *
+     * To fix this, clamp the minimum value of period_left to 1.
+     */
+    do {
+        prev = local64_read(&event->hw.period_left);
+        val = prev - delta;
+        if (val < 1)
+            val = 1;
+    } while (local64_cmpxchg(&event->hw.period_left, prev, val) != prev);
 }
 
 /*
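The new clamp matters because the enable path seeds a PMC as 0x80000000 - period_left. A worked failure case with assumed numbers (illustrative, not part of the patch):

    /* Assumed: period_left has decayed to 0 */
    s64 left = 0;
    unsigned long pmc = 0x80000000UL - left;  /* MSB already set when written */
    /*
     * An edge-detected PMC seeded like this rolls around to 0 before taking
     * an exception; clamping left to 1 seeds 0x7fffffff instead, which
     * overflows after one more event as intended.
     */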
@@ -649,7 +1116,7 @@ static void power_pmu_disable(struct pmu *pmu)
 {
     struct cpu_hw_events *cpuhw;
-    unsigned long flags;
+    unsigned long flags, mmcr0, val;
 
     if (!ppmu)
         return;
@@ -657,9 +1124,6 @@ static void power_pmu_disable(struct pmu *pmu)
     cpuhw = &__get_cpu_var(cpu_hw_events);
 
     if (!cpuhw->disabled) {
-        cpuhw->disabled = 1;
-        cpuhw->n_added = 0;
-
         /*
          * Check if we ever enabled the PMU on this cpu.
          */
@@ -669,6 +1133,22 @@ static void power_pmu_disable(struct pmu *pmu)
         }
 
         /*
+         * Set the 'freeze counters' bit, clear EBE/BHRBA/PMCC/PMAO/FC56
+         */
+        val  = mmcr0 = mfspr(SPRN_MMCR0);
+        val |= MMCR0_FC;
+        val &= ~(MMCR0_EBE | MMCR0_BHRBA | MMCR0_PMCC | MMCR0_PMAO |
+                 MMCR0_FC56);
+
+        /*
+         * The barrier is to make sure the mtspr has been
+         * executed and the PMU has frozen the events etc.
+         * before we return.
+         */
+        write_mmcr0(cpuhw, val);
+        mb();
+
+        /*
          * Disable instruction sampling if it was enabled
          */
         if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
@@ -677,15 +1157,12 @@ static void power_pmu_disable(struct pmu *pmu)
             mb();
         }
 
-        /*
-         * Set the 'freeze counters' bit.
-         * The barrier is to make sure the mtspr has been
-         * executed and the PMU has frozen the events
-         * before we return.
-         */
-        write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
-        mb();
+        cpuhw->disabled = 1;
+        cpuhw->n_added = 0;
+
+        ebb_switch_out(mmcr0);
     }
+
     local_irq_restore(flags);
 }
 
@@ -700,23 +1177,36 @@ static void power_pmu_enable(struct pmu *pmu)
     struct cpu_hw_events *cpuhw;
     unsigned long flags;
     long i;
-    unsigned long val;
+    unsigned long val, mmcr0;
     s64 left;
     unsigned int hwc_index[MAX_HWEVENTS];
     int n_lim;
     int idx;
+    bool ebb;
 
     if (!ppmu)
         return;
     local_irq_save(flags);
+
     cpuhw = &__get_cpu_var(cpu_hw_events);
-    if (!cpuhw->disabled) {
-        local_irq_restore(flags);
-        return;
+    if (!cpuhw->disabled)
+        goto out;
+
+    if (cpuhw->n_events == 0) {
+        ppc_set_pmu_inuse(0);
+        goto out;
     }
+
     cpuhw->disabled = 0;
 
     /*
+     * EBB requires an exclusive group and all events must have the EBB
+     * flag set, or not set, so we can just check a single event. Also we
+     * know we have at least one event.
+     */
+    ebb = is_ebb_event(cpuhw->event[0]);
+
+    /*
      * If we didn't change anything, or only removed events,
      * no need to recalculate MMCR* settings and reset the PMCs.
      * Just reenable the PMU with the current MMCR* settings
@@ -725,8 +1215,6 @@ static void power_pmu_enable(struct pmu *pmu)
     if (!cpuhw->n_added) {
         mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
         mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
-        if (cpuhw->n_events == 0)
-            ppc_set_pmu_inuse(0);
         goto out_enable;
     }
 
@@ -793,25 +1281,42 @@ static void power_pmu_enable(struct pmu *pmu)
             ++n_lim;
             continue;
         }
-        val = 0;
-        if (event->hw.sample_period) {
-            left = local64_read(&event->hw.period_left);
-            if (left < 0x80000000L)
-                val = 0x80000000L - left;
+
+        if (ebb)
+            val = local64_read(&event->hw.prev_count);
+        else {
+            val = 0;
+            if (event->hw.sample_period) {
+                left = local64_read(&event->hw.period_left);
+                if (left < 0x80000000L)
+                    val = 0x80000000L - left;
+            }
+            local64_set(&event->hw.prev_count, val);
         }
-        local64_set(&event->hw.prev_count, val);
+
         event->hw.idx = idx;
         if (event->hw.state & PERF_HES_STOPPED)
             val = 0;
         write_pmc(idx, val);
+
         perf_event_update_userpage(event);
     }
     cpuhw->n_limited = n_lim;
     cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;
 
  out_enable:
+    pmao_restore_workaround(ebb);
+
+    if (ppmu->flags & PPMU_ARCH_207S)
+        mtspr(SPRN_MMCR2, 0);
+
+    mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]);
+
     mb();
-    write_mmcr0(cpuhw, cpuhw->mmcr[0]);
+    if (cpuhw->bhrb_users)
+        ppmu->config_bhrb(cpuhw->bhrb_filter);
+
+    write_mmcr0(cpuhw, mmcr0);
 
     /*
      * Enable instruction sampling if necessary
@@ -822,6 +1327,7 @@ static void power_pmu_enable(struct pmu *pmu)
     }
 
  out:
+
     local_irq_restore(flags);
 }
 
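For the non-EBB path in the loop above, the PMC is seeded so it overflows after period_left further events, while an EBB event is simply reseeded with the raw value saved in hw.prev_count. A sketch with an assumed period (not part of the patch):

    s64 left = 0x10000;                     /* assumed events remaining */
    unsigned long val = 0x80000000L - left; /* 0x7fff0000 */
    /*
     * The counter counts up from val and sets its most significant bit,
     * i.e. overflows, after exactly 0x10000 further events, raising the
     * PMU exception.
     */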
@@ -880,8 +1386,16 @@ static int power_pmu_add(struct perf_event *event, int ef_flags)
     cpuhw->events[n0] = event->hw.config;
     cpuhw->flags[n0] = event->hw.event_base;
 
+    /*
+     * This event may have been disabled/stopped in record_and_restart()
+     * because we exceeded the ->event_limit. If re-starting the event,
+     * clear the ->hw.state (STOPPED and UPTODATE flags), so the user
+     * notification is re-enabled.
+     */
     if (!(ef_flags & PERF_EF_START))
         event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+    else
+        event->hw.state = 0;
 
     /*
      * If group events scheduling transaction was started,
@@ -898,11 +1412,19 @@ static int power_pmu_add(struct perf_event *event, int ef_flags)
     event->hw.config = cpuhw->events[n0];
 
 nocheck:
+    ebb_event_add(event);
+
     ++cpuhw->n_events;
     ++cpuhw->n_added;
 
     ret = 0;
 out:
+    if (has_branch_stack(event)) {
+        power_pmu_bhrb_enable(event);
+        cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
+                    event->attr.branch_sample_type);
+    }
+
     perf_pmu_enable(event->pmu);
     local_irq_restore(flags);
     return ret;
@@ -955,6 +1477,9 @@ static void power_pmu_del(struct perf_event *event, int ef_flags)
         cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
     }
 
+    if (has_branch_stack(event))
+        power_pmu_bhrb_disable(event);
+
     perf_pmu_enable(event->pmu);
     local_irq_restore(flags);
 }
@@ -1187,9 +1712,11 @@ static int power_pmu_event_init(struct perf_event *event)
     if (!ppmu)
         return -ENOENT;
 
-    /* does not support taken branch sampling */
-    if (has_branch_stack(event))
-        return -EOPNOTSUPP;
+    if (has_branch_stack(event)) {
+        /* PMU has BHRB enabled */
+        if (!(ppmu->flags & PPMU_ARCH_207S))
+            return -EOPNOTSUPP;
+    }
 
     switch (event->attr.type) {
     case PERF_TYPE_HARDWARE:
@@ -1250,6 +1777,11 @@ static int power_pmu_event_init(struct perf_event *event)
         }
     }
 
+    /* Extra checks for EBB */
+    err = ebb_event_check(event);
+    if (err)
+        return err;
+
     /*
      * If this is in a group, check if it can go on with all the
      * other hardware events in the group.  We assume the event
@@ -1270,6 +1802,15 @@ static int power_pmu_event_init(struct perf_event *event)
     cpuhw = &get_cpu_var(cpu_hw_events);
     err = power_check_constraints(cpuhw, events, cflags, n + 1);
+
+    if (has_branch_stack(event)) {
+        cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
+                    event->attr.branch_sample_type);
+
+        if (cpuhw->bhrb_filter == -1)
+            return -EOPNOTSUPP;
+    }
+
     put_cpu_var(cpu_hw_events);
     if (err)
         return -EINVAL;
@@ -1280,6 +1821,13 @@ static int power_pmu_event_init(struct perf_event *event)
     local64_set(&event->hw.period_left, event->hw.last_period);
 
     /*
+     * For EBB events we just context switch the PMC value, we don't do any
+     * of the sample_period logic. We use hw.prev_count for this.
+     */
+    if (is_ebb_event(event))
+        local64_set(&event->hw.prev_count, 0);
+
+    /*
      * See if we need to reserve the PMU.
      * If no events are currently in use, then we have to take a
      * mutex to ensure that we don't race with another task doing
@@ -1305,6 +1853,16 @@ static int power_pmu_event_idx(struct perf_event *event)
     return event->hw.idx;
 }
 
+ssize_t power_events_sysfs_show(struct device *dev,
+                struct device_attribute *attr, char *page)
+{
+    struct perf_pmu_events_attr *pmu_attr;
+
+    pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+
+    return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+}
+
 struct pmu power_pmu = {
     .pmu_enable     = power_pmu_enable,
     .pmu_disable    = power_pmu_disable,
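power_events_sysfs_show() above backs the per-event sysfs attribute files. A hedged sketch of how a PMU driver could wire it up using the generic PMU_EVENT_ATTR() helper from linux/perf_event.h, with a made-up event code:

    /* Hypothetical: expose a "cycles" event file with event code 0x1e */
    PMU_EVENT_ATTR(cycles, event_attr_cycles, 0x1e, power_events_sysfs_show);

    /* Reading the resulting sysfs file then returns: event=0x1e */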
@@ -1318,9 +1876,9 @@ struct pmu power_pmu = {
     .cancel_txn     = power_pmu_cancel_txn,
     .commit_txn     = power_pmu_commit_txn,
     .event_idx      = power_pmu_event_idx,
+    .flush_branch_stack = power_pmu_flush_branch_stack,
 };
 
-
 /*
  * A counter has overflowed; update its count and record
  * things if requested.  Note that interrupts are hard-disabled
@@ -1349,6 +1907,8 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
      */
     val = 0;
     left = local64_read(&event->hw.period_left) - delta;
+    if (delta == 0)
+        left++;
     if (period) {
         if (left <= 0) {
             left += period;
@@ -1377,6 +1937,13 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
         if (event->attr.sample_type & PERF_SAMPLE_ADDR)
             perf_get_data_addr(regs, &data.addr);
 
+        if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) {
+            struct cpu_hw_events *cpuhw;
+            cpuhw = &__get_cpu_var(cpu_hw_events);
+            power_pmu_bhrb_read(cpuhw);
+            data.br_stack = &cpuhw->bhrb_stack;
+        }
+
         if (perf_event_overflow(event, &data, regs))
             power_pmu_stop(event, 0);
     }
@@ -1402,7 +1969,7 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
  */
 unsigned long perf_instruction_pointer(struct pt_regs *regs)
 {
-    unsigned long use_siar = regs->result;
+    bool use_siar = regs_use_siar(regs);
 
     if (use_siar && siar_valid(regs))
         return mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
@@ -1412,11 +1979,8 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs)
     return regs->nip;
 }
 
-static bool pmc_overflow(unsigned long val)
+static bool pmc_overflow_power7(unsigned long val)
 {
-    if ((int)val < 0)
-        return true;
-
     /*
      * Events on POWER7 can roll back if a speculative event doesn't
      * eventually complete. Unfortunately in some rare cases they will
@@ -1428,7 +1992,15 @@ static bool pmc_overflow(unsigned long val)
      * PMCs because a user might set a period of less than 256 and we
      * don't want to mistakenly reset them.
      */
-    if (pvr_version_is(PVR_POWER7) && ((0x80000000 - val) <= 256))
+    if ((0x80000000 - val) <= 256)
+        return true;
+
+    return false;
+}
+
+static bool pmc_overflow(unsigned long val)
+{
+    if ((int)val < 0)
         return true;
 
     return false;
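The split above separates the ordinary sign-bit overflow test from the POWER7-specific rollback heuristic. Worked examples with assumed counter snapshots (illustrative only):

    pmc_overflow(0x80000001);        /* true: (int)val < 0, a real overflow */
    pmc_overflow(0x7fffff80);        /* false: sign bit clear */
    pmc_overflow_power7(0x7fffff80); /* true: 0x80000000 - val = 128 <= 256, a
                                      * speculative count that may roll back */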
@@ -1439,11 +2011,11 @@ static void perf_event_interrupt(struct pt_regs *regs)
 {
-    int i;
+    int i, j;
     struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
     struct perf_event *event;
-    unsigned long val;
-    int found = 0;
+    unsigned long val[8];
+    int found, active;
     int nmi;
 
     if (cpuhw->n_limited)
@@ -1458,33 +2030,53 @@ static void perf_event_interrupt(struct pt_regs *regs)
     else
         irq_enter();
 
-    for (i = 0; i < cpuhw->n_events; ++i) {
-        event = cpuhw->event[i];
-        if (!event->hw.idx || is_limited_pmc(event->hw.idx))
+    /* Read all the PMCs since we'll need them a bunch of times */
+    for (i = 0; i < ppmu->n_counter; ++i)
+        val[i] = read_pmc(i + 1);
+
+    /* Try to find what caused the IRQ */
+    found = 0;
+    for (i = 0; i < ppmu->n_counter; ++i) {
+        if (!pmc_overflow(val[i]))
             continue;
-        val = read_pmc(event->hw.idx);
-        if ((int)val < 0) {
-            /* event has overflowed */
-            found = 1;
-            record_and_restart(event, val, regs);
+        if (is_limited_pmc(i + 1))
+            continue; /* these won't generate IRQs */
+        /*
+         * We've found one that's overflowed.  For active
+         * counters we need to log this.  For inactive
+         * counters, we need to reset it anyway
+         */
+        found = 1;
+        active = 0;
+        for (j = 0; j < cpuhw->n_events; ++j) {
+            event = cpuhw->event[j];
+            if (event->hw.idx == (i + 1)) {
+                active = 1;
+                record_and_restart(event, val[i], regs);
+                break;
+            }
         }
+        if (!active)
+            /* reset non active counters that have overflowed */
+            write_pmc(i + 1, 0);
     }
-
-    /*
-     * In case we didn't find and reset the event that caused
-     * the interrupt, scan all events and reset any that are
-     * negative, to avoid getting continual interrupts.
-     * Any that we processed in the previous loop will not be negative.
-     */
-    if (!found) {
-        for (i = 0; i < ppmu->n_counter; ++i) {
-            if (is_limited_pmc(i + 1))
+    if (!found && pvr_version_is(PVR_POWER7)) {
+        /* check active counters for special buggy p7 overflow */
+        for (i = 0; i < cpuhw->n_events; ++i) {
+            event = cpuhw->event[i];
+            if (!event->hw.idx || is_limited_pmc(event->hw.idx))
                 continue;
-            val = read_pmc(i + 1);
-            if (pmc_overflow(val))
-                write_pmc(i + 1, 0);
+            if (pmc_overflow_power7(val[event->hw.idx - 1])) {
+                /* event has overflowed in a buggy way */
+                found = 1;
+                record_and_restart(event,
+                           val[event->hw.idx - 1],
+                           regs);
+            }
         }
     }
+    if (!found && !nmi && printk_ratelimit())
+        printk(KERN_WARNING "Can't find PMC that caused IRQ\n");
 
     /*
      * Reset MMCR0 to its normal value. This will set PMXE and
@@ -1511,7 +2103,7 @@ static void power_pmu_setup(int cpu)
     cpuhw->mmcr[0] = MMCR0_FC;
 }
 
-static int __cpuinit
+static int
 power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 {
     unsigned int cpu = (long)hcpu;
@@ -1528,7 +2120,7 @@ power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu
     return NOTIFY_OK;
 }
 
-int __cpuinit register_power_pmu(struct power_pmu *pmu)
+int register_power_pmu(struct power_pmu *pmu)
 {
     if (ppmu)
         return -EBUSY;      /* something's already registered */
@@ -1537,6 +2129,8 @@ int register_power_pmu(struct power_pmu *pmu)
     pr_info("%s performance monitor hardware support registered\n",
         pmu->name);
 
+    power_pmu.attr_groups = ppmu->attr_groups;
+
 #ifdef MSR_HV
     /*
      * Use FCHV to ignore kernel events if MSR.HV is set.
