Diffstat (limited to 'arch/mips/oprofile/op_model_mipsxx.c')
-rw-r--r--	arch/mips/oprofile/op_model_mipsxx.c	108
1 file changed, 86 insertions, 22 deletions
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 54759f1669d..42821ae2d77 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -14,25 +14,39 @@
 
 #include "op_impl.h"
 
-#define M_PERFCTL_EXL			(1UL      <<  0)
-#define M_PERFCTL_KERNEL		(1UL      <<  1)
-#define M_PERFCTL_SUPERVISOR		(1UL      <<  2)
-#define M_PERFCTL_USER			(1UL      <<  3)
-#define M_PERFCTL_INTERRUPT_ENABLE	(1UL      <<  4)
+#define M_PERFCTL_EXL			(1UL	  <<  0)
+#define M_PERFCTL_KERNEL		(1UL	  <<  1)
+#define M_PERFCTL_SUPERVISOR		(1UL	  <<  2)
+#define M_PERFCTL_USER			(1UL	  <<  3)
+#define M_PERFCTL_INTERRUPT_ENABLE	(1UL	  <<  4)
 #define M_PERFCTL_EVENT(event)		(((event) & 0x3ff)  << 5)
-#define M_PERFCTL_VPEID(vpe)		((vpe)    << 16)
+#define M_PERFCTL_VPEID(vpe)		((vpe)	  << 16)
 #define M_PERFCTL_MT_EN(filter)		((filter) << 20)
-#define    M_TC_EN_ALL			M_PERFCTL_MT_EN(0)
-#define    M_TC_EN_VPE			M_PERFCTL_MT_EN(1)
-#define    M_TC_EN_TC			M_PERFCTL_MT_EN(2)
-#define M_PERFCTL_TCID(tcid)		((tcid)   << 22)
-#define M_PERFCTL_WIDE			(1UL      << 30)
-#define M_PERFCTL_MORE			(1UL      << 31)
+#define	   M_TC_EN_ALL			M_PERFCTL_MT_EN(0)
+#define	   M_TC_EN_VPE			M_PERFCTL_MT_EN(1)
+#define	   M_TC_EN_TC			M_PERFCTL_MT_EN(2)
+#define M_PERFCTL_TCID(tcid)		((tcid)	  << 22)
+#define M_PERFCTL_WIDE			(1UL	  << 30)
+#define M_PERFCTL_MORE			(1UL	  << 31)
 
-#define M_COUNTER_OVERFLOW		(1UL      << 31)
+#define M_COUNTER_OVERFLOW		(1UL	  << 31)
+
+/* Netlogic XLR specific, count events in all threads in a core */
+#define M_PERFCTL_COUNT_ALL_THREADS	(1UL	  << 13)
 
 static int (*save_perf_irq)(void);
 
+/*
+ * XLR has only one set of counters per core. Designate the
+ * first hardware thread in the core for setup and init.
+ * Skip CPUs with non-zero hardware thread id (4 hwt per core)
+ */
+#if defined(CONFIG_CPU_XLR) && defined(CONFIG_SMP)
+#define oprofile_skip_cpu(c)	((cpu_logical_map(c) & 0x3) != 0)
+#else
+#define oprofile_skip_cpu(c)	0
+#endif
+
 #ifdef CONFIG_MIPS_MT_SMP
 static int cpu_has_mipsmt_pertccounters;
 #define WHAT		(M_TC_EN_VPE | \
@@ -129,7 +143,7 @@ static struct mipsxx_register_config {
 	unsigned int counter[4];
 } reg;
 
-/* Compute all of the registers in preparation for enabling profiling.  */
+/* Compute all of the registers in preparation for enabling profiling.	*/
 
 static void mipsxx_reg_setup(struct op_counter_config *ctr)
 {
@@ -145,23 +159,28 @@ static void mipsxx_reg_setup(struct op_counter_config *ctr)
 			continue;
 
 		reg.control[i] = M_PERFCTL_EVENT(ctr[i].event) |
-		                 M_PERFCTL_INTERRUPT_ENABLE;
+				 M_PERFCTL_INTERRUPT_ENABLE;
 		if (ctr[i].kernel)
 			reg.control[i] |= M_PERFCTL_KERNEL;
 		if (ctr[i].user)
 			reg.control[i] |= M_PERFCTL_USER;
 		if (ctr[i].exl)
 			reg.control[i] |= M_PERFCTL_EXL;
+		if (boot_cpu_type() == CPU_XLR)
+			reg.control[i] |= M_PERFCTL_COUNT_ALL_THREADS;
 		reg.counter[i] = 0x80000000 - ctr[i].count;
 	}
 }
 
-/* Program all of the registers in preparation for enabling profiling.  */
+/* Program all of the registers in preparation for enabling profiling.	*/
 
 static void mipsxx_cpu_setup(void *args)
 {
 	unsigned int counters = op_model_mipsxx_ops.num_counters;
 
+	if (oprofile_skip_cpu(smp_processor_id()))
+		return;
+
 	switch (counters) {
 	case 4:
 		w_c0_perfctrl3(0);
@@ -183,6 +202,9 @@ static void mipsxx_cpu_start(void *args)
 {
 	unsigned int counters = op_model_mipsxx_ops.num_counters;
 
+	if (oprofile_skip_cpu(smp_processor_id()))
+		return;
+
 	switch (counters) {
 	case 4:
 		w_c0_perfctrl3(WHAT | reg.control[3]);
@@ -200,6 +222,9 @@ static void mipsxx_cpu_stop(void *args)
 {
 	unsigned int counters = op_model_mipsxx_ops.num_counters;
 
+	if (oprofile_skip_cpu(smp_processor_id()))
+		return;
+
 	switch (counters) {
 	case 4:
 		w_c0_perfctrl3(0);
@@ -298,6 +323,11 @@ static void reset_counters(void *arg)
 	}
 }
 
+static irqreturn_t mipsxx_perfcount_int(int irq, void *dev_id)
+{
+	return mipsxx_perfcount_handler();
+}
+
 static int __init mipsxx_init(void)
 {
 	int counters;
@@ -317,6 +347,14 @@ static int __init mipsxx_init(void)
 	op_model_mipsxx_ops.num_counters = counters;
 
 	switch (current_cpu_type()) {
+	case CPU_M14KC:
+		op_model_mipsxx_ops.cpu_type = "mips/M14Kc";
+		break;
+
+	case CPU_M14KEC:
+		op_model_mipsxx_ops.cpu_type = "mips/M14KEc";
+		break;
+
 	case CPU_20KC:
 		op_model_mipsxx_ops.cpu_type = "mips/20K";
 		break;
@@ -330,20 +368,31 @@ static int __init mipsxx_init(void)
 		break;
 
 	case CPU_1004K:
-#if 0
-		/* FIXME: report as 34K for now */
-		op_model_mipsxx_ops.cpu_type = "mips/1004K";
-		break;
-#endif
-
 	case CPU_34K:
 		op_model_mipsxx_ops.cpu_type = "mips/34K";
 		break;
 
+	case CPU_1074K:
 	case CPU_74K:
 		op_model_mipsxx_ops.cpu_type = "mips/74K";
 		break;
 
+	case CPU_INTERAPTIV:
+		op_model_mipsxx_ops.cpu_type = "mips/interAptiv";
+		break;
+
+	case CPU_PROAPTIV:
+		op_model_mipsxx_ops.cpu_type = "mips/proAptiv";
+		break;
+
+	case CPU_P5600:
+		op_model_mipsxx_ops.cpu_type = "mips/P5600";
+		break;
+
+	case CPU_M5150:
+		op_model_mipsxx_ops.cpu_type = "mips/M5150";
+		break;
+
 	case CPU_5KC:
 		op_model_mipsxx_ops.cpu_type = "mips/5K";
 		break;
@@ -365,6 +414,14 @@ static int __init mipsxx_init(void)
 		op_model_mipsxx_ops.cpu_type = "mips/sb1";
 		break;
 
+	case CPU_LOONGSON1:
+		op_model_mipsxx_ops.cpu_type = "mips/loongson1";
+		break;
+
+	case CPU_XLR:
+		op_model_mipsxx_ops.cpu_type = "mips/xlr";
+		break;
+
 	default:
 		printk(KERN_ERR "Profiling unsupported for this CPU\n");
 
@@ -374,6 +431,10 @@ static int __init mipsxx_init(void)
 	save_perf_irq = perf_irq;
 	perf_irq = mipsxx_perfcount_handler;
 
+	if ((cp0_perfcount_irq >= 0) && (cp0_compare_irq != cp0_perfcount_irq))
+		return request_irq(cp0_perfcount_irq, mipsxx_perfcount_int,
+			0, "Perfcounter", save_perf_irq);
+
 	return 0;
 }
 
@@ -381,6 +442,9 @@ static void mipsxx_exit(void)
 {
 	int counters = op_model_mipsxx_ops.num_counters;
 
+	if ((cp0_perfcount_irq >= 0) && (cp0_compare_irq != cp0_perfcount_irq))
+		free_irq(cp0_perfcount_irq, save_perf_irq);
+
 	counters = counters_per_cpu_to_total(counters);
 	on_each_cpu(reset_counters, (void *)(long)counters, 1);
 
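For readers skimming the diff, the key XLR-specific idea is that each core exposes a single set of performance counters shared by its four hardware threads, so only the thread with hardware-thread id 0 programs them; every other logical CPU returns early from setup/start/stop via the new oprofile_skip_cpu() checks. The short userspace sketch below illustrates that filter in isolation. It is not kernel code: fake_cpu_logical_map[] is a hypothetical stand-in for the kernel's cpu_logical_map(), and the 0x3 mask simply mirrors the patch's "4 hwt per core" comment.

/*
 * Standalone sketch of the oprofile_skip_cpu() filter added for Netlogic
 * XLR in the diff above.  fake_cpu_logical_map[] is a made-up identity
 * mapping; in the kernel the physical id comes from cpu_logical_map(c).
 */
#include <stdio.h>

static const int fake_cpu_logical_map[] = { 0, 1, 2, 3, 4, 5, 6, 7 };

/* Skip any CPU whose hardware-thread id (low two bits) is non-zero. */
static int oprofile_skip_cpu(int c)
{
	return (fake_cpu_logical_map[c] & 0x3) != 0;
}

int main(void)
{
	int c;

	for (c = 0; c < 8; c++)
		printf("cpu %d -> %s\n", c,
		       oprofile_skip_cpu(c) ? "skip (secondary hw thread)"
					    : "program counters (hwt 0)");
	return 0;
}

With this mapping the sketch reports CPUs 0 and 4 as the only ones that touch the counters, which matches the intent of the oprofile_skip_cpu() guards inserted into mipsxx_cpu_setup(), mipsxx_cpu_start() and mipsxx_cpu_stop().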
