Diffstat (limited to 'arch/x86/kernel/cpu/perf_event.h')
-rw-r--r--	arch/x86/kernel/cpu/perf_event.h	42
1 file changed, 31 insertions, 11 deletions
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index cc16faae053..8ade93111e0 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -130,9 +130,11 @@ struct cpu_hw_events {
 	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
 	int			enabled;
 
-	int			n_events;
-	int			n_added;
-	int			n_txn;
+	int			n_events; /* the # of events in the below arrays */
+	int			n_added;  /* the # last events in the below arrays;
+					     they've never been enabled yet */
+	int			n_txn;    /* the # last events in the below arrays;
+					     added in the current transaction */
 	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
 	u64			tags[X86_PMC_IDX_MAX];
 	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
@@ -164,6 +166,11 @@ struct cpu_hw_events {
 	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];
 
 	/*
+	 * Intel checkpoint mask
+	 */
+	u64				intel_cp_status;
+
+	/*
 	 * manage shared (per-core, per-cpu) registers
 	 * used on Intel NHM/WSM/SNB
 	 */
@@ -257,11 +264,20 @@ struct cpu_hw_events {
 	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)
 
-#define EVENT_CONSTRAINT_END		\
-	EVENT_CONSTRAINT(0, 0, 0)
+/*
+ * We define the end marker as having a weight of -1
+ * to enable blacklisting of events using a counter bitmask
+ * of zero and thus a weight of zero.
+ * The end marker has a weight that cannot possibly be
+ * obtained from counting the bits in the bitmask.
+ */
+#define EVENT_CONSTRAINT_END { .weight = -1 }
 
+/*
+ * Check for end marker with weight == -1
+ */
 #define for_each_event_constraint(e, c)	\
-	for ((e) = (c); (e)->weight; (e)++)
+	for ((e) = (c); (e)->weight != -1; (e)++)
 
 /*
  * Extra registers for specific events.
@@ -279,14 +295,16 @@ struct extra_reg {
 	u64			config_mask;
 	u64			valid_mask;
 	int			idx;  /* per_xxx->regs[] reg index */
+	bool			extra_msr_access;
 };
 
 #define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
-	.event = (e),		\
-	.msr = (ms),		\
-	.config_mask = (m),	\
-	.valid_mask = (vm),	\
-	.idx = EXTRA_REG_##i,	\
+	.event = (e),			\
+	.msr = (ms),			\
+	.config_mask = (m),		\
+	.valid_mask = (vm),		\
+	.idx = EXTRA_REG_##i,		\
+	.extra_msr_access = true,	\
 	}
 
 #define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
@@ -395,6 +413,7 @@ struct x86_pmu {
 	/*
 	 * sysfs attrs
 	 */
+	int		attr_rdpmc_broken;
 	int		attr_rdpmc;
 	struct attribute **format_attrs;
 	struct attribute **event_attrs;
@@ -440,6 +459,7 @@ struct x86_pmu {
 	int		lbr_nr;			   /* hardware stack size */
 	u64		lbr_sel_mask;		   /* LBR_SELECT valid bits */
 	const int	*lbr_sel_map;		   /* lbr_select mappings */
+	bool		lbr_double_abort;	   /* duplicated lbr aborts */
 
 	/*
 	 * Extra registers for events
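
The EVENT_CONSTRAINT_END hunk above carries the key design point: once an event
can be blacklisted with an all-zero counter bitmask, a weight of zero becomes a
legitimate table entry, so for_each_event_constraint() must stop on a sentinel
weight of -1 rather than on weight zero. The following is a minimal, standalone
userspace sketch of that pattern; the struct and the EVENT_CONSTRAINT() macro
are simplified stand-ins for the kernel's struct event_constraint (the weight
is passed explicitly here instead of being derived with HWEIGHT()).

#include <stdio.h>

struct event_constraint {
	unsigned long	idxmsk;	/* allowed-counter bitmask */
	int		code;	/* event code this entry matches */
	int		weight;	/* popcount of idxmsk, or -1 for the end */
};

#define EVENT_CONSTRAINT(c, n, w)	{ .idxmsk = (n), .code = (c), .weight = (w) }
/* Unnamed members are zeroed, so only .weight needs spelling out. */
#define EVENT_CONSTRAINT_END		{ .weight = -1 }

/* Stop on weight == -1, so weight-0 (blacklist) entries are still visited. */
#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight != -1; (e)++)

static struct event_constraint table[] = {
	EVENT_CONSTRAINT(0x11, 0x3, 2),	/* event 0x11: counters 0-1 */
	EVENT_CONSTRAINT(0x22, 0x0, 0),	/* event 0x22: no counters = blacklisted */
	EVENT_CONSTRAINT_END,
};

int main(void)
{
	struct event_constraint *e;

	/* With the old "(e)->weight" test the loop would have stopped at
	 * the weight-0 blacklist entry instead of at the end marker. */
	for_each_event_constraint(e, table)
		printf("event 0x%x: mask 0x%lx, weight %d\n",
		       e->code, e->idxmsk, e->weight);
	return 0;
}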
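
The new extra_msr_access field follows a probe-once pattern: EVENT_EXTRA_REG()
initializes it to true through the designated initializer, and init code that
finds the extra MSR faulting (for example under a hypervisor that does not
emulate it) can clear the flag so later event setup skips the register. The
sketch below illustrates that flow under stated assumptions: msr_write_ok() is
a hypothetical stand-in for the kernel's fault-tolerant MSR probing, and the
event/MSR numbers are merely illustrative values.

#include <stdbool.h>
#include <stdio.h>

struct extra_reg {
	unsigned int	event;
	unsigned int	msr;
	unsigned long	config_mask;
	unsigned long	valid_mask;
	int		idx;
	bool		extra_msr_access;	/* false once the MSR is known bad */
};

#define EXTRA_REG_RSP_0	0

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
	.event = (e),			\
	.msr = (ms),			\
	.config_mask = (m),		\
	.valid_mask = (vm),		\
	.idx = EXTRA_REG_##i,		\
	.extra_msr_access = true,	\
	}

static struct extra_reg regs[] = {
	EVENT_EXTRA_REG(0x01b7, 0x01a6, 0xffff, 0x3fff, RSP_0),
};

/* Hypothetical probe: pretend the test write faulted, as it would under
 * a hypervisor that leaves the offcore-response MSR unimplemented. */
static bool msr_write_ok(unsigned int msr)
{
	(void)msr;
	return false;
}

int main(void)
{
	/* Probe once at init; consumers then test extra_msr_access
	 * instead of risking a fault on every event programming. */
	if (!msr_write_ok(regs[0].msr))
		regs[0].extra_msr_access = false;

	printf("MSR 0x%x accessible: %s\n", regs[0].msr,
	       regs[0].extra_msr_access ? "yes" : "no");
	return 0;
}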
