Diffstat (limited to 'arch/tile/kernel')
-rw-r--r--  arch/tile/kernel/Makefile             2
-rw-r--r--  arch/tile/kernel/compat.c             2
-rw-r--r--  arch/tile/kernel/compat_signal.c      2
-rw-r--r--  arch/tile/kernel/ftrace.c             4
-rw-r--r--  arch/tile/kernel/futex_64.S          55
-rw-r--r--  arch/tile/kernel/hardwall.c           6
-rw-r--r--  arch/tile/kernel/intvec_32.S         27
-rw-r--r--  arch/tile/kernel/intvec_64.S         27
-rw-r--r--  arch/tile/kernel/irq.c               50
-rw-r--r--  arch/tile/kernel/messaging.c          4
-rw-r--r--  arch/tile/kernel/pci.c                9
-rw-r--r--  arch/tile/kernel/pci_gx.c            29
-rw-r--r--  arch/tile/kernel/perf_event.c      1005
-rw-r--r--  arch/tile/kernel/pmc.c              121
-rw-r--r--  arch/tile/kernel/proc.c               4
-rw-r--r--  arch/tile/kernel/setup.c             15
-rw-r--r--  arch/tile/kernel/signal.c             7
-rw-r--r--  arch/tile/kernel/stack.c             12
-rw-r--r--  arch/tile/kernel/time.c              10
-rw-r--r--  arch/tile/kernel/traps.c              5
-rw-r--r--  arch/tile/kernel/unaligned.c         19
-rw-r--r--  arch/tile/kernel/vdso/Makefile        2
22 files changed, 1236 insertions(+), 181 deletions(-)
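Most of the change below is the new arch/tile/kernel/perf_event.c driver. One idiom in it worth noting before the raw diff: tile_perf_event_update() reconciles 32-bit hardware counters with 64-bit software counts by shifting both the previous and the new raw values to the top of a u64 before subtracting, so a counter wrap-around cancels out of the delta. A minimal stand-alone sketch of that idiom follows; the function name and test values are illustrative only, not taken from the patch.

#include <stdint.h>
#include <stdio.h>

/* Delta between two raw counter reads that are only cntval_bits wide,
 * computed modulo 2^cntval_bits by shifting both values to the top of
 * a 64-bit word (mirrors the shift trick in tile_perf_event_update()). */
static uint64_t counter_delta(uint64_t prev, uint64_t now, int cntval_bits)
{
	int shift = 64 - cntval_bits;

	return ((now << shift) - (prev << shift)) >> shift;
}

int main(void)
{
	/* Counter wrapped from near the top of 32 bits back to a small value. */
	printf("%llu\n",
	       (unsigned long long)counter_delta(0xfffffff0ULL, 0x10ULL, 32));
	return 0;	/* prints 32 */
}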
diff --git a/arch/tile/kernel/Makefile b/arch/tile/kernel/Makefile index 27a2bf39dae..21f77bf68c6 100644 --- a/arch/tile/kernel/Makefile +++ b/arch/tile/kernel/Makefile @@ -25,6 +25,8 @@ obj-$(CONFIG_PCI)		+= pci_gx.o  else  obj-$(CONFIG_PCI)		+= pci.o  endif +obj-$(CONFIG_PERF_EVENTS)	+= perf_event.o +obj-$(CONFIG_USE_PMC)		+= pmc.o  obj-$(CONFIG_TILE_USB)		+= usb.o  obj-$(CONFIG_TILE_HVGLUE_TRACE)	+= hvglue_trace.o  obj-$(CONFIG_FUNCTION_TRACER)	+= ftrace.o mcount_64.o diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c index ed378416b86..49120843ff9 100644 --- a/arch/tile/kernel/compat.c +++ b/arch/tile/kernel/compat.c @@ -84,7 +84,7 @@ COMPAT_SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned int, offset_high,  {  	return sys_llseek(fd, offset_high, offset_low, result, origin);  } -  +  /* Provide the compat syscall number to call mapping. */  #undef __SYSCALL  #define __SYSCALL(nr, call) [nr] = (call), diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c index 85e00b2f39b..19c04b5ce40 100644 --- a/arch/tile/kernel/compat_signal.c +++ b/arch/tile/kernel/compat_signal.c @@ -49,7 +49,7 @@ struct compat_rt_sigframe {  	struct compat_ucontext uc;  }; -int copy_siginfo_to_user32(struct compat_siginfo __user *to, siginfo_t *from) +int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from)  {  	int err; diff --git a/arch/tile/kernel/ftrace.c b/arch/tile/kernel/ftrace.c index f1c452092ee..8d52d83cc51 100644 --- a/arch/tile/kernel/ftrace.c +++ b/arch/tile/kernel/ftrace.c @@ -167,10 +167,8 @@ int ftrace_make_nop(struct module *mod,  	return ret;  } -int __init ftrace_dyn_arch_init(void *data) +int __init ftrace_dyn_arch_init(void)  { -	*(unsigned long *)data = 0; -  	return 0;  }  #endif /* CONFIG_DYNAMIC_FTRACE */ diff --git a/arch/tile/kernel/futex_64.S b/arch/tile/kernel/futex_64.S deleted file mode 100644 index f465d1eda20..00000000000 --- a/arch/tile/kernel/futex_64.S +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright 2011 Tilera Corporation. All Rights Reserved. - * - *   This program is free software; you can redistribute it and/or - *   modify it under the terms of the GNU General Public License - *   as published by the Free Software Foundation, version 2. - * - *   This program is distributed in the hope that it will be useful, but - *   WITHOUT ANY WARRANTY; without even the implied warranty of - *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - *   NON INFRINGEMENT.  See the GNU General Public License for - *   more details. - * - * Atomically access user memory, but use MMU to avoid propagating - * kernel exceptions. - */ - -#include <linux/linkage.h> -#include <asm/errno.h> -#include <asm/futex.h> -#include <asm/page.h> -#include <asm/processor.h> - -/* - * Provide a set of atomic memory operations supporting <asm/futex.h>. - * - * r0: user address to manipulate - * r1: new value to write, or for cmpxchg, old value to compare against - * r2: (cmpxchg only) new value to write - * - * Return __get_user struct, r0 with value, r1 with error. - */ -#define FUTEX_OP(name, ...) 
\ -STD_ENTRY(futex_##name)			\ -	__VA_ARGS__;			\ -	{				\ -	 move   r1, zero;		\ -	 jrp    lr			\ -	};				\ -	STD_ENDPROC(futex_##name);	\ -	.pushsection __ex_table,"a";	\ -	.quad 1b, get_user_fault;	\ -	.popsection - -	.pushsection .fixup,"ax" -get_user_fault: -	{ movei r1, -EFAULT; jrp lr } -	ENDPROC(get_user_fault) -	.popsection - -FUTEX_OP(cmpxchg, mtspr CMPEXCH_VALUE, r1; 1: cmpexch4 r0, r0, r2) -FUTEX_OP(set, 1: exch4 r0, r0, r1) -FUTEX_OP(add, 1: fetchadd4 r0, r0, r1) -FUTEX_OP(or, 1: fetchor4 r0, r0, r1) -FUTEX_OP(andn, nor r1, r1, zero; 1: fetchand4 r0, r0, r1) diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c index df27a1fd94a..531f4c36535 100644 --- a/arch/tile/kernel/hardwall.c +++ b/arch/tile/kernel/hardwall.c @@ -66,7 +66,7 @@ static struct hardwall_type hardwall_types[] = {  		0,  		"udn",  		LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list), -		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_UDN].lock), +		__SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_UDN].lock),  		NULL  	},  #ifndef __tilepro__ @@ -77,7 +77,7 @@ static struct hardwall_type hardwall_types[] = {  		1,  /* disabled pending hypervisor support */  		"idn",  		LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list), -		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IDN].lock), +		__SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IDN].lock),  		NULL  	},  	{  /* access to user-space IPI */ @@ -87,7 +87,7 @@ static struct hardwall_type hardwall_types[] = {  		0,  		"ipi",  		LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list), -		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IPI].lock), +		__SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IPI].lock),  		NULL  	},  #endif diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S index 088d5c141e6..cdbda45a4e4 100644 --- a/arch/tile/kernel/intvec_32.S +++ b/arch/tile/kernel/intvec_32.S @@ -313,13 +313,13 @@ intvec_\vecname:  	 movei  r3, 0  	}  	.else -	.ifc \c_routine, op_handle_perf_interrupt +	.ifc \c_routine, handle_perf_interrupt  	{  	 mfspr  r2, PERF_COUNT_STS  	 movei  r3, -1   /* not used, but set for consistency */  	}  	.else -	.ifc \c_routine, op_handle_aux_perf_interrupt +	.ifc \c_routine, handle_perf_interrupt  	{  	 mfspr  r2, AUX_PERF_COUNT_STS  	 movei  r3, -1   /* not used, but set for consistency */ @@ -815,6 +815,9 @@ STD_ENTRY(interrupt_return)  	}  	bzt     r28, 1f  	bnz     r29, 1f +	/* Disable interrupts explicitly for preemption. 
*/ +	IRQ_DISABLE(r20,r21) +	TRACE_IRQS_OFF  	jal     preempt_schedule_irq  	FEEDBACK_REENTER(interrupt_return)  1: @@ -943,6 +946,13 @@ STD_ENTRY(interrupt_return)  	bzt     r30, .Lrestore_regs  3: +	/* We are relying on INT_PERF_COUNT at 33, and AUX_PERF_COUNT at 48 */ +	{ +	 moveli r0, lo16(1 << (INT_PERF_COUNT - 32)) +	 bz     r31, .Lrestore_regs +	} +	auli    r0, r0, ha16(1 << (INT_AUX_PERF_COUNT - 32)) +	mtspr   SPR_INTERRUPT_MASK_RESET_K_1, r0  	/*  	 * We now commit to returning from this interrupt, since we will be @@ -1168,6 +1178,10 @@ handle_nmi:  	 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)  	}  	FEEDBACK_REENTER(handle_nmi) +	{ +	 movei  r30, 1 +	 seq    r31, r0, zero +	}  	j       interrupt_return  	STD_ENDPROC(handle_nmi) @@ -1832,8 +1846,9 @@ int_unalign:  /* Include .intrpt array of interrupt vectors */  	.section ".intrpt", "ax" -#define op_handle_perf_interrupt bad_intr -#define op_handle_aux_perf_interrupt bad_intr +#ifndef CONFIG_USE_PMC +#define handle_perf_interrupt bad_intr +#endif  #ifndef CONFIG_HARDWALL  #define do_hardwall_trap bad_intr @@ -1874,7 +1889,7 @@ int_unalign:  	int_hand     INT_IDN_AVAIL, IDN_AVAIL, bad_intr  	int_hand     INT_UDN_AVAIL, UDN_AVAIL, bad_intr  	int_hand     INT_PERF_COUNT, PERF_COUNT, \ -		     op_handle_perf_interrupt, handle_nmi +		     handle_perf_interrupt, handle_nmi  	int_hand     INT_INTCTRL_3, INTCTRL_3, bad_intr  #if CONFIG_KERNEL_PL == 2  	dc_dispatch  INT_INTCTRL_2, INTCTRL_2 @@ -1899,7 +1914,7 @@ int_unalign:  	int_hand     INT_SN_CPL, SN_CPL, bad_intr  	int_hand     INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap  	int_hand     INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \ -		     op_handle_aux_perf_interrupt, handle_nmi +		     handle_perf_interrupt, handle_nmi  	/* Synthetic interrupt delivered only by the simulator */  	int_hand     INT_BREAKPOINT, BREAKPOINT, do_breakpoint diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S index ec755d3f373..5b67efcecab 100644 --- a/arch/tile/kernel/intvec_64.S +++ b/arch/tile/kernel/intvec_64.S @@ -509,10 +509,10 @@ intvec_\vecname:  	.ifc \c_routine, do_trap  	mfspr   r2, GPV_REASON  	.else -	.ifc \c_routine, op_handle_perf_interrupt +	.ifc \c_routine, handle_perf_interrupt  	mfspr   r2, PERF_COUNT_STS  	.else -	.ifc \c_routine, op_handle_aux_perf_interrupt +	.ifc \c_routine, handle_perf_interrupt  	mfspr   r2, AUX_PERF_COUNT_STS  	.endif  	.endif @@ -841,6 +841,9 @@ STD_ENTRY(interrupt_return)  	}  	beqzt   r28, 1f  	bnez    r29, 1f +	/* Disable interrupts explicitly for preemption. 
*/ +	IRQ_DISABLE(r20,r21) +	TRACE_IRQS_OFF  	jal     preempt_schedule_irq  	FEEDBACK_REENTER(interrupt_return)  1: @@ -968,6 +971,15 @@ STD_ENTRY(interrupt_return)  	beqzt   r30, .Lrestore_regs  3: +#if INT_PERF_COUNT + 1 != INT_AUX_PERF_COUNT +# error Bad interrupt assumption +#endif +	{ +	 movei  r0, 3   /* two adjacent bits for the PERF_COUNT mask */ +	 beqz   r31, .Lrestore_regs +	} +	shli    r0, r0, INT_PERF_COUNT +	mtspr   SPR_INTERRUPT_MASK_RESET_K, r0  	/*  	 * We now commit to returning from this interrupt, since we will be @@ -1184,7 +1196,7 @@ handle_nmi:  	FEEDBACK_REENTER(handle_nmi)  	{  	 movei  r30, 1 -	 move   r31, r0 +	 cmpeq  r31, r0, zero  	}  	j       interrupt_return  	STD_ENDPROC(handle_nmi) @@ -1488,8 +1500,9 @@ STD_ENTRY(fill_ra_stack)  	.global intrpt_start  intrpt_start: -#define op_handle_perf_interrupt bad_intr -#define op_handle_aux_perf_interrupt bad_intr +#ifndef CONFIG_USE_PMC +#define handle_perf_interrupt bad_intr +#endif  #ifndef CONFIG_HARDWALL  #define do_hardwall_trap bad_intr @@ -1537,9 +1550,9 @@ intrpt_start:  #endif  	int_hand     INT_IPI_0, IPI_0, bad_intr  	int_hand     INT_PERF_COUNT, PERF_COUNT, \ -		     op_handle_perf_interrupt, handle_nmi +		     handle_perf_interrupt, handle_nmi  	int_hand     INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \ -		     op_handle_perf_interrupt, handle_nmi +		     handle_perf_interrupt, handle_nmi  	int_hand     INT_INTCTRL_3, INTCTRL_3, bad_intr  #if CONFIG_KERNEL_PL == 2  	dc_dispatch  INT_INTCTRL_2, INTCTRL_2 diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c index 0586fdb9352..637f2ffaa5f 100644 --- a/arch/tile/kernel/irq.c +++ b/arch/tile/kernel/irq.c @@ -21,6 +21,7 @@  #include <hv/drv_pcie_rc_intf.h>  #include <arch/spr_def.h>  #include <asm/traps.h> +#include <linux/perf_event.h>  /* Bit-flag stored in irq_desc->chip_data to indicate HW-cleared irqs. */  #define IS_HW_CLEARED 1 @@ -53,13 +54,6 @@ static DEFINE_PER_CPU(unsigned long, irq_disable_mask)   */  static DEFINE_PER_CPU(int, irq_depth); -/* State for allocating IRQs on Gx. */ -#if CHIP_HAS_IPI() -static unsigned long available_irqs = ((1UL << NR_IRQS) - 1) & -				      (~(1UL << IRQ_RESCHEDULE)); -static DEFINE_SPINLOCK(available_irqs_lock); -#endif -  #if CHIP_HAS_IPI()  /* Use SPRs to manipulate device interrupts. */  #define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_K, irq_mask) @@ -261,37 +255,27 @@ void ack_bad_irq(unsigned int irq)  }  /* - * Generic, controller-independent functions: + * /proc/interrupts printing:   */ - -#if CHIP_HAS_IPI() -int create_irq(void) +int arch_show_interrupts(struct seq_file *p, int prec)  { -	unsigned long flags; -	int result; - -	spin_lock_irqsave(&available_irqs_lock, flags); -	if (available_irqs == 0) -		result = -ENOMEM; -	else { -		result = __ffs(available_irqs); -		available_irqs &= ~(1UL << result); -		dynamic_irq_init(result); -	} -	spin_unlock_irqrestore(&available_irqs_lock, flags); +#ifdef CONFIG_PERF_EVENTS +	int i; -	return result; +	seq_printf(p, "%*s: ", prec, "PMI"); + +	for_each_online_cpu(i) +		seq_printf(p, "%10llu ", per_cpu(perf_irqs, i)); +	seq_puts(p, "  perf_events\n"); +#endif +	return 0;  } -EXPORT_SYMBOL(create_irq); -void destroy_irq(unsigned int irq) +#if CHIP_HAS_IPI() +int arch_setup_hwirq(unsigned int irq, int node)  { -	unsigned long flags; - -	spin_lock_irqsave(&available_irqs_lock, flags); -	available_irqs |= (1UL << irq); -	dynamic_irq_cleanup(irq); -	spin_unlock_irqrestore(&available_irqs_lock, flags); +	return irq >= NR_IRQS ? 
-EINVAL : 0;  } -EXPORT_SYMBOL(destroy_irq); + +void arch_teardown_hwirq(unsigned int irq) { }  #endif diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c index 00331af9525..7867266f971 100644 --- a/arch/tile/kernel/messaging.c +++ b/arch/tile/kernel/messaging.c @@ -68,8 +68,8 @@ void hv_message_intr(struct pt_regs *regs, int intnum)  #endif  	while (1) { -		rmi = hv_receive_message(__get_cpu_var(msg_state), -					 (HV_VirtAddr) message, +		HV_MsgState *state = this_cpu_ptr(&msg_state); +		rmi = hv_receive_message(*state, (HV_VirtAddr) message,  					 sizeof(message));  		if (rmi.msglen == 0)  			break; diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c index b7180e6e900..1f80a88c75a 100644 --- a/arch/tile/kernel/pci.c +++ b/arch/tile/kernel/pci.c @@ -250,16 +250,11 @@ static void fixup_read_and_payload_sizes(void)  	/* Scan for the smallest maximum payload size. */  	for_each_pci_dev(dev) { -		u32 devcap; -		int max_payload; -  		if (!pci_is_pcie(dev))  			continue; -		pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &devcap); -		max_payload = devcap & PCI_EXP_DEVCAP_PAYLOAD; -		if (max_payload < smallest_max_payload) -			smallest_max_payload = max_payload; +		if (dev->pcie_mpss < smallest_max_payload) +			smallest_max_payload = dev->pcie_mpss;  	}  	/* Now, set the max_payload_size for all devices to that value. */ diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c index a97a6452b81..e39f9c54280 100644 --- a/arch/tile/kernel/pci_gx.c +++ b/arch/tile/kernel/pci_gx.c @@ -350,10 +350,9 @@ static int tile_init_irqs(struct pci_controller *controller)  		int cpu;  		/* Ask the kernel to allocate an IRQ. */ -		irq = create_irq(); -		if (irq < 0) { +		irq = irq_alloc_hwirq(-1); +		if (!irq) {  			pr_err("PCI: no free irq vectors, failed for %d\n", i); -  			goto free_irqs;  		}  		controller->irq_intx_table[i] = irq; @@ -382,7 +381,7 @@ static int tile_init_irqs(struct pci_controller *controller)  free_irqs:  	for (j = 0; j < i; j++) -		destroy_irq(controller->irq_intx_table[j]); +		irq_free_hwirq(controller->irq_intx_table[j]);  	return -1;  } @@ -1065,18 +1064,6 @@ char *__init pcibios_setup(char *str)  }  /* - * Enable memory address decoding, as appropriate, for the - * device described by the 'dev' struct. - * - * This is called from the generic PCI layer, and can be called - * for bridges or endpoints. - */ -int pcibios_enable_device(struct pci_dev *dev, int mask) -{ -	return pci_enable_resources(dev, mask); -} - -/*   * Called for each device after PCI setup is done.   * We initialize the PCI device capabilities conservatively, assuming that   * all devices can only address the 32-bit DMA space. The exception here is @@ -1512,9 +1499,9 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)  	int irq;  	int ret; -	irq = create_irq(); -	if (irq < 0) -		return irq; +	irq = irq_alloc_hwirq(-1); +	if (!irq) +		return -ENOSPC;  	/*  	 * Since we use a 64-bit Mem-Map to accept the MSI write, we fail @@ -1613,11 +1600,11 @@ hv_msi_config_failure:  	/* Free mem-map */  msi_mem_map_alloc_failure:  is_64_failure: -	destroy_irq(irq); +	irq_free_hwirq(irq);  	return ret;  }  void arch_teardown_msi_irq(unsigned int irq)  { -	destroy_irq(irq); +	irq_free_hwirq(irq);  } diff --git a/arch/tile/kernel/perf_event.c b/arch/tile/kernel/perf_event.c new file mode 100644 index 00000000000..2bf6c9c135c --- /dev/null +++ b/arch/tile/kernel/perf_event.c @@ -0,0 +1,1005 @@ +/* + * Copyright 2014 Tilera Corporation. All Rights Reserved. 
+ * + *   This program is free software; you can redistribute it and/or + *   modify it under the terms of the GNU General Public License + *   as published by the Free Software Foundation, version 2. + * + *   This program is distributed in the hope that it will be useful, but + *   WITHOUT ANY WARRANTY; without even the implied warranty of + *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + *   NON INFRINGEMENT.  See the GNU General Public License for + *   more details. + * + * + * Perf_events support for Tile processor. + * + * This code is based upon the x86 perf event + * code, which is: + * + *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> + *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar + *  Copyright (C) 2009 Jaswinder Singh Rajput + *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter + *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> + *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com> + *  Copyright (C) 2009 Google, Inc., Stephane Eranian + */ + +#include <linux/kprobes.h> +#include <linux/kernel.h> +#include <linux/kdebug.h> +#include <linux/mutex.h> +#include <linux/bitmap.h> +#include <linux/irq.h> +#include <linux/interrupt.h> +#include <linux/perf_event.h> +#include <linux/atomic.h> +#include <asm/traps.h> +#include <asm/stack.h> +#include <asm/pmc.h> +#include <hv/hypervisor.h> + +#define TILE_MAX_COUNTERS	4 + +#define PERF_COUNT_0_IDX	0 +#define PERF_COUNT_1_IDX	1 +#define AUX_PERF_COUNT_0_IDX	2 +#define AUX_PERF_COUNT_1_IDX	3 + +struct cpu_hw_events { +	int			n_events; +	struct perf_event	*events[TILE_MAX_COUNTERS]; /* counter order */ +	struct perf_event	*event_list[TILE_MAX_COUNTERS]; /* enabled +								order */ +	int			assign[TILE_MAX_COUNTERS]; +	unsigned long		active_mask[BITS_TO_LONGS(TILE_MAX_COUNTERS)]; +	unsigned long		used_mask; +}; + +/* TILE arch specific performance monitor unit */ +struct tile_pmu { +	const char	*name; +	int		version; +	const int	*hw_events;	/* generic hw events table */ +	/* generic hw cache events table */ +	const int	(*cache_events)[PERF_COUNT_HW_CACHE_MAX] +				       [PERF_COUNT_HW_CACHE_OP_MAX] +				       [PERF_COUNT_HW_CACHE_RESULT_MAX]; +	int		(*map_hw_event)(u64);	 /*method used to map +						  hw events */ +	int		(*map_cache_event)(u64); /*method used to map +						  cache events */ + +	u64		max_period;		/* max sampling period */ +	u64		cntval_mask;		/* counter width mask */ +	int		cntval_bits;		/* counter width */ +	int		max_events;		/* max generic hw events +						in map */ +	int		num_counters;		/* number base + aux counters */ +	int		num_base_counters;	/* number base counters */ +}; + +DEFINE_PER_CPU(u64, perf_irqs); +static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); + +#define TILE_OP_UNSUPP		(-1) + +#ifndef __tilegx__ +/* TILEPro hardware events map */ +static const int tile_hw_event_map[] = { +	[PERF_COUNT_HW_CPU_CYCLES]		= 0x01, /* ONE */ +	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x06, /* MP_BUNDLE_RETIRED */ +	[PERF_COUNT_HW_CACHE_REFERENCES]	= TILE_OP_UNSUPP, +	[PERF_COUNT_HW_CACHE_MISSES]		= TILE_OP_UNSUPP, +	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x16, /* +					  MP_CONDITIONAL_BRANCH_ISSUED */ +	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x14, /* +					  MP_CONDITIONAL_BRANCH_MISSPREDICT */ +	[PERF_COUNT_HW_BUS_CYCLES]		= TILE_OP_UNSUPP, +}; +#else +/* TILEGx hardware events map */ +static const int tile_hw_event_map[] = { +	[PERF_COUNT_HW_CPU_CYCLES]		= 0x181, /* ONE */ +	[PERF_COUNT_HW_INSTRUCTIONS]		= 0xdb, /* 
INSTRUCTION_BUNDLE */ +	[PERF_COUNT_HW_CACHE_REFERENCES]	= TILE_OP_UNSUPP, +	[PERF_COUNT_HW_CACHE_MISSES]		= TILE_OP_UNSUPP, +	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0xd9, /* +						COND_BRANCH_PRED_CORRECT */ +	[PERF_COUNT_HW_BRANCH_MISSES]		= 0xda, /* +						COND_BRANCH_PRED_INCORRECT */ +	[PERF_COUNT_HW_BUS_CYCLES]		= TILE_OP_UNSUPP, +}; +#endif + +#define C(x) PERF_COUNT_HW_CACHE_##x + +/* + * Generalized hw caching related hw_event table, filled + * in on a per model basis. A value of -1 means + * 'not supported', any other value means the + * raw hw_event ID. + */ +#ifndef __tilegx__ +/* TILEPro hardware cache event map */ +static const int tile_cache_event_map[PERF_COUNT_HW_CACHE_MAX] +				     [PERF_COUNT_HW_CACHE_OP_MAX] +				     [PERF_COUNT_HW_CACHE_RESULT_MAX] = { +[C(L1D)] = { +	[C(OP_READ)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = 0x21, /* RD_MISS */ +	}, +	[C(OP_WRITE)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = 0x22, /* WR_MISS */ +	}, +	[C(OP_PREFETCH)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +}, +[C(L1I)] = { +	[C(OP_READ)] = { +		[C(RESULT_ACCESS)] = 0x12, /* MP_ICACHE_HIT_ISSUED */ +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +	[C(OP_WRITE)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +	[C(OP_PREFETCH)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +}, +[C(LL)] = { +	[C(OP_READ)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +	[C(OP_WRITE)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +	[C(OP_PREFETCH)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +}, +[C(DTLB)] = { +	[C(OP_READ)] = { +		[C(RESULT_ACCESS)] = 0x1d, /* TLB_CNT */ +		[C(RESULT_MISS)] = 0x20, /* TLB_EXCEPTION */ +	}, +	[C(OP_WRITE)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +	[C(OP_PREFETCH)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +}, +[C(ITLB)] = { +	[C(OP_READ)] = { +		[C(RESULT_ACCESS)] = 0x13, /* MP_ITLB_HIT_ISSUED */ +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +	[C(OP_WRITE)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +	[C(OP_PREFETCH)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +}, +[C(BPU)] = { +	[C(OP_READ)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +	[C(OP_WRITE)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +	[C(OP_PREFETCH)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +}, +}; +#else +/* TILEGx hardware events map */ +static const int tile_cache_event_map[PERF_COUNT_HW_CACHE_MAX] +				     [PERF_COUNT_HW_CACHE_OP_MAX] +				     [PERF_COUNT_HW_CACHE_RESULT_MAX] = { +[C(L1D)] = { +	/* +	 * Like some other architectures (e.g. ARM), the performance +	 * counters don't differentiate between read and write +	 * accesses/misses, so this isn't strictly correct, but it's the +	 * best we can do. Writes and reads get combined. 
+	 */ +	[C(OP_READ)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = 0x44, /* RD_MISS */ +	}, +	[C(OP_WRITE)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = 0x45, /* WR_MISS */ +	}, +	[C(OP_PREFETCH)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +}, +[C(L1I)] = { +	[C(OP_READ)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +	[C(OP_WRITE)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +	[C(OP_PREFETCH)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +}, +[C(LL)] = { +	[C(OP_READ)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +	[C(OP_WRITE)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +	[C(OP_PREFETCH)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +}, +[C(DTLB)] = { +	[C(OP_READ)] = { +		[C(RESULT_ACCESS)] = 0x40, /* TLB_CNT */ +		[C(RESULT_MISS)] = 0x43, /* TLB_EXCEPTION */ +	}, +	[C(OP_WRITE)] = { +		[C(RESULT_ACCESS)] = 0x40, /* TLB_CNT */ +		[C(RESULT_MISS)] = 0x43, /* TLB_EXCEPTION */ +	}, +	[C(OP_PREFETCH)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +}, +[C(ITLB)] = { +	[C(OP_READ)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = 0xd4, /* ITLB_MISS_INT */ +	}, +	[C(OP_WRITE)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = 0xd4, /* ITLB_MISS_INT */ +	}, +	[C(OP_PREFETCH)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +}, +[C(BPU)] = { +	[C(OP_READ)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +	[C(OP_WRITE)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +	[C(OP_PREFETCH)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +}, +}; +#endif + +static atomic_t tile_active_events; +static DEFINE_MUTEX(perf_intr_reserve_mutex); + +static int tile_map_hw_event(u64 config); +static int tile_map_cache_event(u64 config); + +static int tile_pmu_handle_irq(struct pt_regs *regs, int fault); + +/* + * To avoid new_raw_count getting larger then pre_raw_count + * in tile_perf_event_update(), we limit the value of max_period to 2^31 - 1. + */ +static const struct tile_pmu tilepmu = { +#ifndef __tilegx__ +	.name = "tilepro", +#else +	.name = "tilegx", +#endif +	.max_events = ARRAY_SIZE(tile_hw_event_map), +	.map_hw_event = tile_map_hw_event, +	.hw_events = tile_hw_event_map, +	.map_cache_event = tile_map_cache_event, +	.cache_events = &tile_cache_event_map, +	.cntval_bits = 32, +	.cntval_mask = (1ULL << 32) - 1, +	.max_period = (1ULL << 31) - 1, +	.num_counters = TILE_MAX_COUNTERS, +	.num_base_counters = TILE_BASE_COUNTERS, +}; + +static const struct tile_pmu *tile_pmu __read_mostly; + +/* + * Check whether perf event is enabled. + */ +int tile_perf_enabled(void) +{ +	return atomic_read(&tile_active_events) != 0; +} + +/* + * Read Performance Counters. 
+ */ +static inline u64 read_counter(int idx) +{ +	u64 val = 0; + +	/* __insn_mfspr() only takes an immediate argument */ +	switch (idx) { +	case PERF_COUNT_0_IDX: +		val = __insn_mfspr(SPR_PERF_COUNT_0); +		break; +	case PERF_COUNT_1_IDX: +		val = __insn_mfspr(SPR_PERF_COUNT_1); +		break; +	case AUX_PERF_COUNT_0_IDX: +		val = __insn_mfspr(SPR_AUX_PERF_COUNT_0); +		break; +	case AUX_PERF_COUNT_1_IDX: +		val = __insn_mfspr(SPR_AUX_PERF_COUNT_1); +		break; +	default: +		WARN_ON_ONCE(idx > AUX_PERF_COUNT_1_IDX || +				idx < PERF_COUNT_0_IDX); +	} + +	return val; +} + +/* + * Write Performance Counters. + */ +static inline void write_counter(int idx, u64 value) +{ +	/* __insn_mtspr() only takes an immediate argument */ +	switch (idx) { +	case PERF_COUNT_0_IDX: +		__insn_mtspr(SPR_PERF_COUNT_0, value); +		break; +	case PERF_COUNT_1_IDX: +		__insn_mtspr(SPR_PERF_COUNT_1, value); +		break; +	case AUX_PERF_COUNT_0_IDX: +		__insn_mtspr(SPR_AUX_PERF_COUNT_0, value); +		break; +	case AUX_PERF_COUNT_1_IDX: +		__insn_mtspr(SPR_AUX_PERF_COUNT_1, value); +		break; +	default: +		WARN_ON_ONCE(idx > AUX_PERF_COUNT_1_IDX || +				idx < PERF_COUNT_0_IDX); +	} +} + +/* + * Enable performance event by setting + * Performance Counter Control registers. + */ +static inline void tile_pmu_enable_event(struct perf_event *event) +{ +	struct hw_perf_event *hwc = &event->hw; +	unsigned long cfg, mask; +	int shift, idx = hwc->idx; + +	/* +	 * prevent early activation from tile_pmu_start() in hw_perf_enable +	 */ + +	if (WARN_ON_ONCE(idx == -1)) +		return; + +	if (idx < tile_pmu->num_base_counters) +		cfg = __insn_mfspr(SPR_PERF_COUNT_CTL); +	else +		cfg = __insn_mfspr(SPR_AUX_PERF_COUNT_CTL); + +	switch (idx) { +	case PERF_COUNT_0_IDX: +	case AUX_PERF_COUNT_0_IDX: +		mask = TILE_EVENT_MASK; +		shift = 0; +		break; +	case PERF_COUNT_1_IDX: +	case AUX_PERF_COUNT_1_IDX: +		mask = TILE_EVENT_MASK << 16; +		shift = 16; +		break; +	default: +		WARN_ON_ONCE(idx < PERF_COUNT_0_IDX || +			idx > AUX_PERF_COUNT_1_IDX); +		return; +	} + +	/* Clear mask bits to enable the event. */ +	cfg &= ~mask; +	cfg |= hwc->config << shift; + +	if (idx < tile_pmu->num_base_counters) +		__insn_mtspr(SPR_PERF_COUNT_CTL, cfg); +	else +		__insn_mtspr(SPR_AUX_PERF_COUNT_CTL, cfg); +} + +/* + * Disable performance event by clearing + * Performance Counter Control registers. + */ +static inline void tile_pmu_disable_event(struct perf_event *event) +{ +	struct hw_perf_event *hwc = &event->hw; +	unsigned long cfg, mask; +	int idx = hwc->idx; + +	if (idx == -1) +		return; + +	if (idx < tile_pmu->num_base_counters) +		cfg = __insn_mfspr(SPR_PERF_COUNT_CTL); +	else +		cfg = __insn_mfspr(SPR_AUX_PERF_COUNT_CTL); + +	switch (idx) { +	case PERF_COUNT_0_IDX: +	case AUX_PERF_COUNT_0_IDX: +		mask = TILE_PLM_MASK; +		break; +	case PERF_COUNT_1_IDX: +	case AUX_PERF_COUNT_1_IDX: +		mask = TILE_PLM_MASK << 16; +		break; +	default: +		WARN_ON_ONCE(idx < PERF_COUNT_0_IDX || +			idx > AUX_PERF_COUNT_1_IDX); +		return; +	} + +	/* Set mask bits to disable the event. */ +	cfg |= mask; + +	if (idx < tile_pmu->num_base_counters) +		__insn_mtspr(SPR_PERF_COUNT_CTL, cfg); +	else +		__insn_mtspr(SPR_AUX_PERF_COUNT_CTL, cfg); +} + +/* + * Propagate event elapsed time into the generic event. + * Can only be executed on the CPU where the event is active. + * Returns the delta events processed. 
+ */ +static u64 tile_perf_event_update(struct perf_event *event) +{ +	struct hw_perf_event *hwc = &event->hw; +	int shift = 64 - tile_pmu->cntval_bits; +	u64 prev_raw_count, new_raw_count; +	u64 oldval; +	int idx = hwc->idx; +	u64 delta; + +	/* +	 * Careful: an NMI might modify the previous event value. +	 * +	 * Our tactic to handle this is to first atomically read and +	 * exchange a new raw count - then add that new-prev delta +	 * count to the generic event atomically: +	 */ +again: +	prev_raw_count = local64_read(&hwc->prev_count); +	new_raw_count = read_counter(idx); + +	oldval = local64_cmpxchg(&hwc->prev_count, prev_raw_count, +				 new_raw_count); +	if (oldval != prev_raw_count) +		goto again; + +	/* +	 * Now we have the new raw value and have updated the prev +	 * timestamp already. We can now calculate the elapsed delta +	 * (event-)time and add that to the generic event. +	 * +	 * Careful, not all hw sign-extends above the physical width +	 * of the count. +	 */ +	delta = (new_raw_count << shift) - (prev_raw_count << shift); +	delta >>= shift; + +	local64_add(delta, &event->count); +	local64_sub(delta, &hwc->period_left); + +	return new_raw_count; +} + +/* + * Set the next IRQ period, based on the hwc->period_left value. + * To be called with the event disabled in hw: + */ +static int tile_event_set_period(struct perf_event *event) +{ +	struct hw_perf_event *hwc = &event->hw; +	int idx = hwc->idx; +	s64 left = local64_read(&hwc->period_left); +	s64 period = hwc->sample_period; +	int ret = 0; + +	/* +	 * If we are way outside a reasonable range then just skip forward: +	 */ +	if (unlikely(left <= -period)) { +		left = period; +		local64_set(&hwc->period_left, left); +		hwc->last_period = period; +		ret = 1; +	} + +	if (unlikely(left <= 0)) { +		left += period; +		local64_set(&hwc->period_left, left); +		hwc->last_period = period; +		ret = 1; +	} +	if (left > tile_pmu->max_period) +		left = tile_pmu->max_period; + +	/* +	 * The hw event starts counting from this event offset, +	 * mark it to be able to extra future deltas: +	 */ +	local64_set(&hwc->prev_count, (u64)-left); + +	write_counter(idx, (u64)(-left) & tile_pmu->cntval_mask); + +	perf_event_update_userpage(event); + +	return ret; +} + +/* + * Stop the event but do not release the PMU counter + */ +static void tile_pmu_stop(struct perf_event *event, int flags) +{ +	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); +	struct hw_perf_event *hwc = &event->hw; +	int idx = hwc->idx; + +	if (__test_and_clear_bit(idx, cpuc->active_mask)) { +		tile_pmu_disable_event(event); +		cpuc->events[hwc->idx] = NULL; +		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); +		hwc->state |= PERF_HES_STOPPED; +	} + +	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { +		/* +		 * Drain the remaining delta count out of a event +		 * that we are disabling: +		 */ +		tile_perf_event_update(event); +		hwc->state |= PERF_HES_UPTODATE; +	} +} + +/* + * Start an event (without re-assigning counter) + */ +static void tile_pmu_start(struct perf_event *event, int flags) +{ +	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); +	int idx = event->hw.idx; + +	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) +		return; + +	if (WARN_ON_ONCE(idx == -1)) +		return; + +	if (flags & PERF_EF_RELOAD) { +		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); +		tile_event_set_period(event); +	} + +	event->hw.state = 0; + +	cpuc->events[idx] = event; +	__set_bit(idx, cpuc->active_mask); + +	unmask_pmc_interrupts(); + +	
tile_pmu_enable_event(event); + +	perf_event_update_userpage(event); +} + +/* + * Add a single event to the PMU. + * + * The event is added to the group of enabled events + * but only if it can be scehduled with existing events. + */ +static int tile_pmu_add(struct perf_event *event, int flags) +{ +	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); +	struct hw_perf_event *hwc; +	unsigned long mask; +	int b, max_cnt; + +	hwc = &event->hw; + +	/* +	 * We are full. +	 */ +	if (cpuc->n_events == tile_pmu->num_counters) +		return -ENOSPC; + +	cpuc->event_list[cpuc->n_events] = event; +	cpuc->n_events++; + +	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; +	if (!(flags & PERF_EF_START)) +		hwc->state |= PERF_HES_ARCH; + +	/* +	 * Find first empty counter. +	 */ +	max_cnt = tile_pmu->num_counters; +	mask = ~cpuc->used_mask; + +	/* Find next free counter. */ +	b = find_next_bit(&mask, max_cnt, 0); + +	/* Should not happen. */ +	if (WARN_ON_ONCE(b == max_cnt)) +		return -ENOSPC; + +	/* +	 * Assign counter to event. +	 */ +	event->hw.idx = b; +	__set_bit(b, &cpuc->used_mask); + +	/* +	 * Start if requested. +	 */ +	if (flags & PERF_EF_START) +		tile_pmu_start(event, PERF_EF_RELOAD); + +	return 0; +} + +/* + * Delete a single event from the PMU. + * + * The event is deleted from the group of enabled events. + * If it is the last event, disable PMU interrupt. + */ +static void tile_pmu_del(struct perf_event *event, int flags) +{ +	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); +	int i; + +	/* +	 * Remove event from list, compact list if necessary. +	 */ +	for (i = 0; i < cpuc->n_events; i++) { +		if (cpuc->event_list[i] == event) { +			while (++i < cpuc->n_events) +				cpuc->event_list[i-1] = cpuc->event_list[i]; +			--cpuc->n_events; +			cpuc->events[event->hw.idx] = NULL; +			__clear_bit(event->hw.idx, &cpuc->used_mask); +			tile_pmu_stop(event, PERF_EF_UPDATE); +			break; +		} +	} +	/* +	 * If there are no events left, then mask PMU interrupt. +	 */ +	if (cpuc->n_events == 0) +		mask_pmc_interrupts(); +	perf_event_update_userpage(event); +} + +/* + * Propagate event elapsed time into the event. + */ +static inline void tile_pmu_read(struct perf_event *event) +{ +	tile_perf_event_update(event); +} + +/* + * Map generic events to Tile PMU. + */ +static int tile_map_hw_event(u64 config) +{ +	if (config >= tile_pmu->max_events) +		return -EINVAL; +	return tile_pmu->hw_events[config]; +} + +/* + * Map generic hardware cache events to Tile PMU. 
+ */ +static int tile_map_cache_event(u64 config) +{ +	unsigned int cache_type, cache_op, cache_result; +	int code; + +	if (!tile_pmu->cache_events) +		return -ENOENT; + +	cache_type = (config >>  0) & 0xff; +	if (cache_type >= PERF_COUNT_HW_CACHE_MAX) +		return -EINVAL; + +	cache_op = (config >>  8) & 0xff; +	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) +		return -EINVAL; + +	cache_result = (config >> 16) & 0xff; +	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) +		return -EINVAL; + +	code = (*tile_pmu->cache_events)[cache_type][cache_op][cache_result]; +	if (code == TILE_OP_UNSUPP) +		return -EINVAL; + +	return code; +} + +static void tile_event_destroy(struct perf_event *event) +{ +	if (atomic_dec_return(&tile_active_events) == 0) +		release_pmc_hardware(); +} + +static int __tile_event_init(struct perf_event *event) +{ +	struct perf_event_attr *attr = &event->attr; +	struct hw_perf_event *hwc = &event->hw; +	int code; + +	switch (attr->type) { +	case PERF_TYPE_HARDWARE: +		code = tile_pmu->map_hw_event(attr->config); +		break; +	case PERF_TYPE_HW_CACHE: +		code = tile_pmu->map_cache_event(attr->config); +		break; +	case PERF_TYPE_RAW: +		code = attr->config & TILE_EVENT_MASK; +		break; +	default: +		/* Should not happen. */ +		return -EOPNOTSUPP; +	} + +	if (code < 0) +		return code; + +	hwc->config = code; +	hwc->idx = -1; + +	if (attr->exclude_user) +		hwc->config |= TILE_CTL_EXCL_USER; + +	if (attr->exclude_kernel) +		hwc->config |= TILE_CTL_EXCL_KERNEL; + +	if (attr->exclude_hv) +		hwc->config |= TILE_CTL_EXCL_HV; + +	if (!hwc->sample_period) { +		hwc->sample_period = tile_pmu->max_period; +		hwc->last_period = hwc->sample_period; +		local64_set(&hwc->period_left, hwc->sample_period); +	} +	event->destroy = tile_event_destroy; +	return 0; +} + +static int tile_event_init(struct perf_event *event) +{ +	int err = 0; +	perf_irq_t old_irq_handler = NULL; + +	if (atomic_inc_return(&tile_active_events) == 1) +		old_irq_handler = reserve_pmc_hardware(tile_pmu_handle_irq); + +	if (old_irq_handler) { +		pr_warn("PMC hardware busy (reserved by oprofile)\n"); + +		atomic_dec(&tile_active_events); +		return -EBUSY; +	} + +	switch (event->attr.type) { +	case PERF_TYPE_RAW: +	case PERF_TYPE_HARDWARE: +	case PERF_TYPE_HW_CACHE: +		break; + +	default: +		return -ENOENT; +	} + +	err = __tile_event_init(event); +	if (err) { +		if (event->destroy) +			event->destroy(event); +	} +	return err; +} + +static struct pmu tilera_pmu = { +	.event_init	= tile_event_init, +	.add		= tile_pmu_add, +	.del		= tile_pmu_del, + +	.start		= tile_pmu_start, +	.stop		= tile_pmu_stop, + +	.read		= tile_pmu_read, +}; + +/* + * PMU's IRQ handler, PMU has 2 interrupts, they share the same handler. 
+ */ +int tile_pmu_handle_irq(struct pt_regs *regs, int fault) +{ +	struct perf_sample_data data; +	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); +	struct perf_event *event; +	struct hw_perf_event *hwc; +	u64 val; +	unsigned long status; +	int bit; + +	__get_cpu_var(perf_irqs)++; + +	if (!atomic_read(&tile_active_events)) +		return 0; + +	status = pmc_get_overflow(); +	pmc_ack_overflow(status); + +	for_each_set_bit(bit, &status, tile_pmu->num_counters) { + +		event = cpuc->events[bit]; + +		if (!event) +			continue; + +		if (!test_bit(bit, cpuc->active_mask)) +			continue; + +		hwc = &event->hw; + +		val = tile_perf_event_update(event); +		if (val & (1ULL << (tile_pmu->cntval_bits - 1))) +			continue; + +		perf_sample_data_init(&data, 0, event->hw.last_period); +		if (!tile_event_set_period(event)) +			continue; + +		if (perf_event_overflow(event, &data, regs)) +			tile_pmu_stop(event, 0); +	} + +	return 0; +} + +static bool __init supported_pmu(void) +{ +	tile_pmu = &tilepmu; +	return true; +} + +int __init init_hw_perf_events(void) +{ +	supported_pmu(); +	perf_pmu_register(&tilera_pmu, "cpu", PERF_TYPE_RAW); +	return 0; +} +arch_initcall(init_hw_perf_events); + +/* Callchain handling code. */ + +/* + * Tile specific backtracing code for perf_events. + */ +static inline void perf_callchain(struct perf_callchain_entry *entry, +		    struct pt_regs *regs) +{ +	struct KBacktraceIterator kbt; +	unsigned int i; + +	/* +	 * Get the address just after the "jalr" instruction that +	 * jumps to the handler for a syscall.  When we find this +	 * address in a backtrace, we silently ignore it, which gives +	 * us a one-step backtrace connection from the sys_xxx() +	 * function in the kernel to the xxx() function in libc. +	 * Otherwise, we lose the ability to properly attribute time +	 * from the libc calls to the kernel implementations, since +	 * oprofile only considers PCs from backtraces a pair at a time. +	 */ +	unsigned long handle_syscall_pc = handle_syscall_link_address(); + +	KBacktraceIterator_init(&kbt, NULL, regs); +	kbt.profile = 1; + +	/* +	 * The sample for the pc is already recorded.  Now we are adding the +	 * address of the callsites on the stack.  Our iterator starts +	 * with the frame of the (already sampled) call site.  If our +	 * iterator contained a "return address" field, we could have just +	 * used it and wouldn't have needed to skip the first +	 * frame.  That's in effect what the arm and x86 versions do. +	 * Instead we peel off the first iteration to get the equivalent +	 * behavior. +	 */ + +	if (KBacktraceIterator_end(&kbt)) +		return; +	KBacktraceIterator_next(&kbt); + +	/* +	 * Set stack depth to 16 for user and kernel space respectively, that +	 * is, total 32 stack frames. +	 */ +	for (i = 0; i < 16; ++i) { +		unsigned long pc; +		if (KBacktraceIterator_end(&kbt)) +			break; +		pc = kbt.it.pc; +		if (pc != handle_syscall_pc) +			perf_callchain_store(entry, pc); +		KBacktraceIterator_next(&kbt); +	} +} + +void perf_callchain_user(struct perf_callchain_entry *entry, +		    struct pt_regs *regs) +{ +	perf_callchain(entry, regs); +} + +void perf_callchain_kernel(struct perf_callchain_entry *entry, +		      struct pt_regs *regs) +{ +	perf_callchain(entry, regs); +} diff --git a/arch/tile/kernel/pmc.c b/arch/tile/kernel/pmc.c new file mode 100644 index 00000000000..db62cc34b95 --- /dev/null +++ b/arch/tile/kernel/pmc.c @@ -0,0 +1,121 @@ +/* + * Copyright 2014 Tilera Corporation. All Rights Reserved. 
+ * + *   This program is free software; you can redistribute it and/or + *   modify it under the terms of the GNU General Public License + *   as published by the Free Software Foundation, version 2. + * + *   This program is distributed in the hope that it will be useful, but + *   WITHOUT ANY WARRANTY; without even the implied warranty of + *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + *   NON INFRINGEMENT.  See the GNU General Public License for + *   more details. + */ + +#include <linux/errno.h> +#include <linux/spinlock.h> +#include <linux/module.h> +#include <linux/atomic.h> +#include <linux/interrupt.h> + +#include <asm/processor.h> +#include <asm/pmc.h> + +perf_irq_t perf_irq = NULL; +int handle_perf_interrupt(struct pt_regs *regs, int fault) +{ +	int retval; + +	if (!perf_irq) +		panic("Unexpected PERF_COUNT interrupt %d\n", fault); + +	nmi_enter(); +	retval = perf_irq(regs, fault); +	nmi_exit(); +	return retval; +} + +/* Reserve PMC hardware if it is available. */ +perf_irq_t reserve_pmc_hardware(perf_irq_t new_perf_irq) +{ +	return cmpxchg(&perf_irq, NULL, new_perf_irq); +} +EXPORT_SYMBOL(reserve_pmc_hardware); + +/* Release PMC hardware. */ +void release_pmc_hardware(void) +{ +	perf_irq = NULL; +} +EXPORT_SYMBOL(release_pmc_hardware); + + +/* + * Get current overflow status of each performance counter, + * and auxiliary performance counter. + */ +unsigned long +pmc_get_overflow(void) +{ +	unsigned long status; + +	/* +	 * merge base+aux into a single vector +	 */ +	status = __insn_mfspr(SPR_PERF_COUNT_STS); +	status |= __insn_mfspr(SPR_AUX_PERF_COUNT_STS) << TILE_BASE_COUNTERS; +	return status; +} + +/* + * Clear the status bit for the corresponding counter, if written + * with a one. + */ +void +pmc_ack_overflow(unsigned long status) +{ +	/* +	 * clear overflow status by writing ones +	 */ +	__insn_mtspr(SPR_PERF_COUNT_STS, status); +	__insn_mtspr(SPR_AUX_PERF_COUNT_STS, status >> TILE_BASE_COUNTERS); +} + +/* + * The perf count interrupts are masked and unmasked explicitly, + * and only here.  The normal irq_enable() does not enable them, + * and irq_disable() does not disable them.  That lets these + * routines drive the perf count interrupts orthogonally. + * + * We also mask the perf count interrupts on entry to the perf count + * interrupt handler in assembly code, and by default unmask them + * again (with interrupt critical section protection) just before + * returning from the interrupt.  If the perf count handler returns + * a non-zero error code, then we don't re-enable them before returning. + * + * For Pro, we rely on both interrupts being in the same word to update + * them atomically so we never have one enabled and one disabled. 
+ */ + +#if CHIP_HAS_SPLIT_INTR_MASK() +# if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 +#  error Fix assumptions about which word PERF_COUNT interrupts are in +# endif +#endif + +static inline unsigned long long pmc_mask(void) +{ +	unsigned long long mask = 1ULL << INT_PERF_COUNT; +	mask |= 1ULL << INT_AUX_PERF_COUNT; +	return mask; +} + +void unmask_pmc_interrupts(void) +{ +	interrupt_mask_reset_mask(pmc_mask()); +} + +void mask_pmc_interrupts(void) +{ +	interrupt_mask_set_mask(pmc_mask()); +} diff --git a/arch/tile/kernel/proc.c b/arch/tile/kernel/proc.c index 681100c59fd..6829a950864 100644 --- a/arch/tile/kernel/proc.c +++ b/arch/tile/kernel/proc.c @@ -113,7 +113,7 @@ arch_initcall(proc_tile_init);   * Support /proc/sys/tile directory   */ -static ctl_table unaligned_subtable[] = { +static struct ctl_table unaligned_subtable[] = {  	{  		.procname	= "enabled",  		.data		= &unaligned_fixup, @@ -138,7 +138,7 @@ static ctl_table unaligned_subtable[] = {  	{}  }; -static ctl_table unaligned_table[] = { +static struct ctl_table unaligned_table[] = {  	{  		.procname	= "unaligned_fixup",  		.mode		= 0555, diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index 4c34caea9dd..112ababa9e5 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c @@ -228,13 +228,10 @@ early_param("isolnodes", setup_isolnodes);  #if defined(CONFIG_PCI) && !defined(__tilegx__)  static int __init setup_pci_reserve(char* str)  { -	unsigned long mb; - -	if (str == NULL || strict_strtoul(str, 0, &mb) != 0 || -	    mb > 3 * 1024) +	if (str == NULL || kstrtouint(str, 0, &pci_reserve_mb) != 0 || +	    pci_reserve_mb > 3 * 1024)  		return -EINVAL; -	pci_reserve_mb = mb;  	pr_info("Reserving %dMB for PCIE root complex mappings\n",  		pci_reserve_mb);  	return 0; @@ -691,7 +688,7 @@ static void __init setup_bootmem_allocator(void)  	/* Reserve any memory excluded by "memmap" arguments. 
*/  	for (i = 0; i < memmap_nr; ++i) {  		struct memmap_entry *m = &memmap_map[i]; -		reserve_bootmem(m->addr, m->size, 0); +		reserve_bootmem(m->addr, m->size, BOOTMEM_DEFAULT);  	}  #ifdef CONFIG_BLK_DEV_INITRD @@ -715,7 +712,8 @@ static void __init setup_bootmem_allocator(void)  #ifdef CONFIG_KEXEC  	if (crashk_res.start != crashk_res.end) -		reserve_bootmem(crashk_res.start, resource_size(&crashk_res), 0); +		reserve_bootmem(crashk_res.start, resource_size(&crashk_res), +				BOOTMEM_DEFAULT);  #endif  } @@ -1268,8 +1266,7 @@ static void __init validate_va(void)  	if ((long)VMALLOC_START >= 0)  		early_panic(  			"Linux VMALLOC region below the 2GB line (%#lx)!\n" -			"Reconfigure the kernel with fewer NR_HUGE_VMAPS\n" -			"or smaller VMALLOC_RESERVE.\n", +			"Reconfigure the kernel with smaller VMALLOC_RESERVE.\n",  			VMALLOC_START);  #endif  } diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c index 2d1dbf38a9a..d1d026f0126 100644 --- a/arch/tile/kernel/signal.c +++ b/arch/tile/kernel/signal.c @@ -321,14 +321,13 @@ int show_unhandled_signals = 1;  static int __init crashinfo(char *str)  { -	unsigned long val;  	const char *word;  	if (*str == '\0') -		val = 2; -	else if (*str != '=' || strict_strtoul(++str, 0, &val) != 0) +		show_unhandled_signals = 2; +	else if (*str != '=' || kstrtoint(++str, 0, &show_unhandled_signals) != 0)  		return 0; -	show_unhandled_signals = val; +  	switch (show_unhandled_signals) {  	case 0:  		word = "No"; diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c index 362284af3af..c93977a6211 100644 --- a/arch/tile/kernel/stack.c +++ b/arch/tile/kernel/stack.c @@ -23,6 +23,7 @@  #include <linux/mmzone.h>  #include <linux/dcache.h>  #include <linux/fs.h> +#include <linux/string.h>  #include <asm/backtrace.h>  #include <asm/page.h>  #include <asm/ucontext.h> @@ -332,21 +333,18 @@ static void describe_addr(struct KBacktraceIterator *kbt,  	}  	if (vma->vm_file) { -		char *s;  		p = d_path(&vma->vm_file->f_path, buf, bufsize);  		if (IS_ERR(p))  			p = "?"; -		s = strrchr(p, '/'); -		if (s) -			p = s+1; +		name = kbasename(p);  	} else { -		p = "anon"; +		name = "anon";  	}  	/* Generate a string description of the vma info. */ -	namelen = strlen(p); +	namelen = strlen(name);  	remaining = (bufsize - 1) - namelen; -	memmove(buf, p, namelen); +	memmove(buf, name, namelen);  	snprintf(buf + namelen, remaining, "[%lx+%lx] ",  		 vma->vm_start, vma->vm_end - vma->vm_start);  } diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c index 5d10642db63..462dcd0c170 100644 --- a/arch/tile/kernel/time.c +++ b/arch/tile/kernel/time.c @@ -236,7 +236,15 @@ cycles_t ns2cycles(unsigned long nsecs)  	 * clock frequency.  	 */  	struct clock_event_device *dev = &__raw_get_cpu_var(tile_timer); -	return ((u64)nsecs * dev->mult) >> dev->shift; + +	/* +	 * as in clocksource.h and x86's timer.h, we split the calculation +	 * into 2 parts to avoid unecessary overflow of the intermediate +	 * value. This will not lead to any loss of precision. 
+	 */ +	u64 quot = (u64)nsecs >> dev->shift; +	u64 rem  = (u64)nsecs & ((1ULL << dev->shift) - 1); +	return quot * dev->mult + ((rem * dev->mult) >> dev->shift);  }  void update_vsyscall_tz(void) diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c index 6b603d556ca..f3ceb6308e4 100644 --- a/arch/tile/kernel/traps.c +++ b/arch/tile/kernel/traps.c @@ -42,10 +42,9 @@ static int __init setup_unaligned_fixup(char *str)  	 * will still parse the instruction, then fire a SIGBUS with  	 * the correct address from inside the single_step code.  	 */ -	long val; -	if (strict_strtol(str, 0, &val) != 0) +	if (kstrtoint(str, 0, &unaligned_fixup) != 0)  		return 0; -	unaligned_fixup = val; +  	pr_info("Fixups for unaligned data accesses are %s\n",  	       unaligned_fixup >= 0 ?  	       (unaligned_fixup ? "enabled" : "disabled") : diff --git a/arch/tile/kernel/unaligned.c b/arch/tile/kernel/unaligned.c index b425fb6a480..c02ea2a45f6 100644 --- a/arch/tile/kernel/unaligned.c +++ b/arch/tile/kernel/unaligned.c @@ -182,18 +182,7 @@ static void find_regs(tilegx_bundle_bits bundle, uint64_t *rd, uint64_t *ra,  	int i;  	uint64_t reg;  	uint64_t reg_map = 0, alias_reg_map = 0, map; -	bool alias; - -	*ra = -1; -	*rb = -1; - -	if (rd) -		*rd = -1; - -	*clob1 = -1; -	*clob2 = -1; -	*clob3 = -1; -	alias = false; +	bool alias = false;  	/*  	 * Parse fault bundle, find potential used registers and mark @@ -551,8 +540,8 @@ static tilegx_bundle_bits  jit_x1_bnezt(int ra, int broff)  /*   * This function generates unalign fixup JIT.   * - * We fist find unalign load/store instruction's destination, source - * reguisters: ra, rb and rd. and 3 scratch registers by calling + * We first find unalign load/store instruction's destination, source + * registers: ra, rb and rd. and 3 scratch registers by calling   * find_regs(...). 3 scratch clobbers should not alias with any register   * used in the fault bundle. Then analyze the fault bundle to determine   * if it's a load or store, operand width, branch or address increment etc. @@ -569,7 +558,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,  	tilegx_bundle_bits bundle_2 = 0;  	/* If bundle_2_enable = false, bundle_2 is fnop/nop operation. */  	bool     bundle_2_enable = true; -	uint64_t ra, rb, rd = -1, clob1, clob2, clob3; +	uint64_t ra = -1, rb = -1, rd = -1, clob1 = -1, clob2 = -1, clob3 = -1;  	/*  	 * Indicate if the unalign access  	 * instruction's registers hit with diff --git a/arch/tile/kernel/vdso/Makefile b/arch/tile/kernel/vdso/Makefile index e2b7a2f4ee4..a025f63d54c 100644 --- a/arch/tile/kernel/vdso/Makefile +++ b/arch/tile/kernel/vdso/Makefile @@ -104,7 +104,7 @@ $(obj-vdso32:%=%): KBUILD_AFLAGS = $(KBUILD_AFLAGS_32)  $(obj-vdso32:%=%): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)  $(obj)/vgettimeofday32.o: $(obj)/vgettimeofday.c -	$(call if_changed,cc_o_c) +	$(call if_changed_rule,cc_o_c)  $(obj)/vrt_sigreturn32.o: $(obj)/vrt_sigreturn.S  	$(call if_changed,as_o_S)  | 
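The arch/tile/kernel/time.c hunk above replaces a single ((u64)nsecs * dev->mult) >> dev->shift with a split computation. A small stand-alone sketch of why the split form avoids overflowing the 64-bit intermediate product for large nsecs while producing the identical result; the mult and shift arguments a caller would pass are clockevent parameters and are not reproduced here:

#include <stdint.h>

/* Overflow-safe scaling, as in the ns2cycles() change above:
 * nsecs is split by `shift` into quotient and remainder so that each
 * intermediate product stays well inside 64 bits for realistic
 * nanosecond values, and the result equals (nsecs * mult) >> shift
 * exactly (no precision is lost by the split). */
static uint64_t scale_ns(uint64_t nsecs, uint32_t mult, uint32_t shift)
{
	uint64_t quot = nsecs >> shift;
	uint64_t rem  = nsecs & ((1ULL << shift) - 1);

	return quot * mult + ((rem * mult) >> shift);
}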
