diff options
Diffstat (limited to 'arch/s390/kernel/perf_event.c')
-rw-r--r--  arch/s390/kernel/perf_event.c | 174
1 files changed, 162 insertions, 12 deletions
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c index 2343c218b8f..61595c1f0a0 100644 --- a/arch/s390/kernel/perf_event.c +++ b/arch/s390/kernel/perf_event.c @@ -1,7 +1,7 @@  /*   * Performance event support for s390x   * - *  Copyright IBM Corp. 2012 + *  Copyright IBM Corp. 2012, 2013   *  Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>   *   * This program is free software; you can redistribute it and/or modify @@ -16,15 +16,19 @@  #include <linux/kvm_host.h>  #include <linux/percpu.h>  #include <linux/export.h> +#include <linux/seq_file.h> +#include <linux/spinlock.h> +#include <linux/sysfs.h>  #include <asm/irq.h>  #include <asm/cpu_mf.h>  #include <asm/lowcore.h>  #include <asm/processor.h> +#include <asm/sysinfo.h>  const char *perf_pmu_name(void)  {  	if (cpum_cf_avail() || cpum_sf_avail()) -		return "CPU-measurement facilities (CPUMF)"; +		return "CPU-Measurement Facilities (CPU-MF)";  	return "pmu";  }  EXPORT_SYMBOL(perf_pmu_name); @@ -35,6 +39,8 @@ int perf_num_counters(void)  	if (cpum_cf_avail())  		num += PERF_CPUM_CF_MAX_CTR; +	if (cpum_sf_avail()) +		num += PERF_CPUM_SF_MAX_CTR;  	return num;  } @@ -54,7 +60,7 @@ static bool is_in_guest(struct pt_regs *regs)  {  	if (user_mode(regs))  		return false; -#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +#if IS_ENABLED(CONFIG_KVM)  	return instruction_pointer(regs) == (unsigned long) &sie_exit;  #else  	return false; @@ -83,8 +89,31 @@ static unsigned long perf_misc_guest_flags(struct pt_regs *regs)  					: PERF_RECORD_MISC_GUEST_KERNEL;  } +static unsigned long perf_misc_flags_sf(struct pt_regs *regs) +{ +	struct perf_sf_sde_regs *sde_regs; +	unsigned long flags; + +	sde_regs = (struct perf_sf_sde_regs *) &regs->int_parm_long; +	if (sde_regs->in_guest) +		flags = user_mode(regs) ? PERF_RECORD_MISC_GUEST_USER +					: PERF_RECORD_MISC_GUEST_KERNEL; +	else +		flags = user_mode(regs) ? 
PERF_RECORD_MISC_USER +					: PERF_RECORD_MISC_KERNEL; +	return flags; +} +  unsigned long perf_misc_flags(struct pt_regs *regs)  { +	/* Check if the cpum_sf PMU has created the pt_regs structure. +	 * In this case, perf misc flags can be easily extracted.  Otherwise, +	 * do regular checks on the pt_regs content. +	 */ +	if (regs->int_code == 0x1407 && regs->int_parm == CPU_MF_INT_SF_PRA) +		if (!regs->gprs[15]) +			return perf_misc_flags_sf(regs); +  	if (is_in_guest(regs))  		return perf_misc_guest_flags(regs); @@ -92,27 +121,107 @@ unsigned long perf_misc_flags(struct pt_regs *regs)  			       : PERF_RECORD_MISC_KERNEL;  } -void perf_event_print_debug(void) +static void print_debug_cf(void)  {  	struct cpumf_ctr_info cf_info; -	unsigned long flags; -	int cpu; - -	if (!cpum_cf_avail()) -		return; - -	local_irq_save(flags); +	int cpu = smp_processor_id(); -	cpu = smp_processor_id();  	memset(&cf_info, 0, sizeof(cf_info));  	if (!qctri(&cf_info))  		pr_info("CPU[%i] CPUM_CF: ver=%u.%u A=%04x E=%04x C=%04x\n",  			cpu, cf_info.cfvn, cf_info.csvn,  			cf_info.auth_ctl, cf_info.enable_ctl, cf_info.act_ctl); +} + +static void print_debug_sf(void) +{ +	struct hws_qsi_info_block si; +	int cpu = smp_processor_id(); +	memset(&si, 0, sizeof(si)); +	if (qsi(&si)) +		return; + +	pr_info("CPU[%i] CPUM_SF: basic=%i diag=%i min=%lu max=%lu cpu_speed=%u\n", +		cpu, si.as, si.ad, si.min_sampl_rate, si.max_sampl_rate, +		si.cpu_speed); + +	if (si.as) +		pr_info("CPU[%i] CPUM_SF: Basic-sampling: a=%i e=%i c=%i" +			" bsdes=%i tear=%016lx dear=%016lx\n", cpu, +			si.as, si.es, si.cs, si.bsdes, si.tear, si.dear); +	if (si.ad) +		pr_info("CPU[%i] CPUM_SF: Diagnostic-sampling: a=%i e=%i c=%i" +			" dsdes=%i tear=%016lx dear=%016lx\n", cpu, +			si.ad, si.ed, si.cd, si.dsdes, si.tear, si.dear); +} + +void perf_event_print_debug(void) +{ +	unsigned long flags; + +	local_irq_save(flags); +	if (cpum_cf_avail()) +		print_debug_cf(); +	if (cpum_sf_avail()) +		print_debug_sf();  	
local_irq_restore(flags);  } +/* Service level infrastructure */ +static void sl_print_counter(struct seq_file *m) +{ +	struct cpumf_ctr_info ci; + +	memset(&ci, 0, sizeof(ci)); +	if (qctri(&ci)) +		return; + +	seq_printf(m, "CPU-MF: Counter facility: version=%u.%u " +		   "authorization=%04x\n", ci.cfvn, ci.csvn, ci.auth_ctl); +} + +static void sl_print_sampling(struct seq_file *m) +{ +	struct hws_qsi_info_block si; + +	memset(&si, 0, sizeof(si)); +	if (qsi(&si)) +		return; + +	if (!si.as && !si.ad) +		return; + +	seq_printf(m, "CPU-MF: Sampling facility: min_rate=%lu max_rate=%lu" +		   " cpu_speed=%u\n", si.min_sampl_rate, si.max_sampl_rate, +		   si.cpu_speed); +	if (si.as) +		seq_printf(m, "CPU-MF: Sampling facility: mode=basic" +			   " sample_size=%u\n", si.bsdes); +	if (si.ad) +		seq_printf(m, "CPU-MF: Sampling facility: mode=diagnostic" +			   " sample_size=%u\n", si.dsdes); +} + +static void service_level_perf_print(struct seq_file *m, +				     struct service_level *sl) +{ +	if (cpum_cf_avail()) +		sl_print_counter(m); +	if (cpum_sf_avail()) +		sl_print_sampling(m); +} + +static struct service_level service_level_perf = { +	.seq_print = service_level_perf_print, +}; + +static int __init service_level_perf_register(void) +{ +	return register_service_level(&service_level_perf); +} +arch_initcall(service_level_perf_register); +  /* See also arch/s390/kernel/traps.c */  static unsigned long __store_trace(struct perf_callchain_entry *entry,  				   unsigned long sp, @@ -172,3 +281,44 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,  	__store_trace(entry, head, S390_lowcore.thread_info,  		      S390_lowcore.thread_info + THREAD_SIZE);  } + +/* Perf defintions for PMU event attributes in sysfs */ +ssize_t cpumf_events_sysfs_show(struct device *dev, +				struct device_attribute *attr, char *page) +{ +	struct perf_pmu_events_attr *pmu_attr; + +	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); +	return sprintf(page, 
"event=0x%04llx,name=%s\n", +		       pmu_attr->id, attr->attr.name); +} + +/* Reserve/release functions for sharing perf hardware */ +static DEFINE_SPINLOCK(perf_hw_owner_lock); +static void *perf_sampling_owner; + +int perf_reserve_sampling(void) +{ +	int err; + +	err = 0; +	spin_lock(&perf_hw_owner_lock); +	if (perf_sampling_owner) { +		pr_warn("The sampling facility is already reserved by %p\n", +			perf_sampling_owner); +		err = -EBUSY; +	} else +		perf_sampling_owner = __builtin_return_address(0); +	spin_unlock(&perf_hw_owner_lock); +	return err; +} +EXPORT_SYMBOL(perf_reserve_sampling); + +void perf_release_sampling(void) +{ +	spin_lock(&perf_hw_owner_lock); +	WARN_ON(!perf_sampling_owner); +	perf_sampling_owner = NULL; +	spin_unlock(&perf_hw_owner_lock); +} +EXPORT_SYMBOL(perf_release_sampling);
