path: root/arch/sparc/kernel/perf_event.c
author	Mauro Carvalho Chehab <mchehab@redhat.com>	2012-12-11 11:18:45 -0200
committer	Mauro Carvalho Chehab <mchehab@redhat.com>	2012-12-11 11:18:45 -0200
commit	9374020a78fce13a1cf2edf3d26f6dd7231b5c3d (patch)
tree	50c8629e6c6222c5b9681506b52afbde818c5e56 /arch/sparc/kernel/perf_event.c
parent	d2a0db1ee01aea154ccc460e45a16857e32c4427 (diff)
parent	29594404d7fe73cd80eaa4ee8c43dcc53970c60e (diff)
Merge tag 'v3.7' into v4l_for_linus
Linux 3.7

* tag 'v3.7': (1545 commits)
  Linux 3.7
  Input: matrix-keymap - provide proper module license
  Revert "revert "Revert "mm: remove __GFP_NO_KSWAPD""" and associated damage
  ipv4: ip_check_defrag must not modify skb before unsharing
  Revert "mm: avoid waking kswapd for THP allocations when compaction is deferred or contended"
  inet_diag: validate port comparison byte code to prevent unsafe reads
  inet_diag: avoid unsafe and nonsensical prefix matches in inet_diag_bc_run()
  inet_diag: validate byte code to prevent oops in inet_diag_bc_run()
  inet_diag: fix oops for IPv4 AF_INET6 TCP SYN-RECV state
  mm: vmscan: fix inappropriate zone congestion clearing
  vfs: fix O_DIRECT read past end of block device
  net: gro: fix possible panic in skb_gro_receive()
  tcp: bug fix Fast Open client retransmission
  tmpfs: fix shared mempolicy leak
  mm: vmscan: do not keep kswapd looping forever due to individual uncompactable zones
  mm: compaction: validate pfn range passed to isolate_freepages_block
  mmc: sh-mmcif: avoid oops on spurious interrupts (second try)
  Revert misapplied "mmc: sh-mmcif: avoid oops on spurious interrupts"
  mmc: sdhci-s3c: fix missing clock for gpio card-detect
  lib/Makefile: Fix oid_registry build dependency
  ...
Diffstat (limited to 'arch/sparc/kernel/perf_event.c')
-rw-r--r--	arch/sparc/kernel/perf_event.c	37
1 file changed, 25 insertions, 12 deletions
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index e48651dace1..b5c38faa4ea 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -817,15 +817,17 @@ static u64 nop_for_index(int idx)
 static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
 {
-	u64 val, mask = mask_for_index(idx);
+	u64 enc, val, mask = mask_for_index(idx);
 	int pcr_index = 0;
 
 	if (sparc_pmu->num_pcrs > 1)
 		pcr_index = idx;
 
+	enc = perf_event_get_enc(cpuc->events[idx]);
+
 	val = cpuc->pcr[pcr_index];
 	val &= ~mask;
-	val |= hwc->config;
+	val |= event_encoding(enc, idx);
 	cpuc->pcr[pcr_index] = val;
 
 	pcr_ops->write_pcr(pcr_index, cpuc->pcr[pcr_index]);
@@ -1738,8 +1740,6 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
 {
 	unsigned long ufp;
 
-	perf_callchain_store(entry, regs->tpc);
-
 	ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
 	do {
 		struct sparc_stackf *usf, sf;
@@ -1760,19 +1760,27 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
 {
 	unsigned long ufp;
 
-	perf_callchain_store(entry, regs->tpc);
-
 	ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
 	do {
-		struct sparc_stackf32 *usf, sf;
 		unsigned long pc;
 
-		usf = (struct sparc_stackf32 *) ufp;
-		if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
-			break;
+		if (thread32_stack_is_64bit(ufp)) {
+			struct sparc_stackf *usf, sf;
 
-		pc = sf.callers_pc;
-		ufp = (unsigned long)sf.fp;
+			ufp += STACK_BIAS;
+			usf = (struct sparc_stackf *) ufp;
+			if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
+				break;
+			pc = sf.callers_pc & 0xffffffff;
+			ufp = ((unsigned long) sf.fp) & 0xffffffff;
+		} else {
+			struct sparc_stackf32 *usf, sf;
+			usf = (struct sparc_stackf32 *) ufp;
+			if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
+				break;
+			pc = sf.callers_pc;
+			ufp = (unsigned long)sf.fp;
+		}
 		perf_callchain_store(entry, pc);
 	} while (entry->nr < PERF_MAX_STACK_DEPTH);
 }
@@ -1780,6 +1788,11 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
 void
 perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
+	perf_callchain_store(entry, regs->tpc);
+
+	if (!current->mm)
+		return;
+
 	flushw_user();
 	if (test_thread_flag(TIF_32BIT))
 		perf_callchain_user_32(entry, regs);