Diffstat (limited to 'arch/arm/kernel/arch_timer.c')
-rw-r--r--	arch/arm/kernel/arch_timer.c	383
1 file changed, 273 insertions(+), 110 deletions(-)
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
index cf258807160..c8ef20747ee 100644
--- a/arch/arm/kernel/arch_timer.c
+++ b/arch/arm/kernel/arch_timer.c
@@ -21,18 +21,28 @@
 #include <linux/io.h>
 
 #include <asm/cputype.h>
+#include <asm/delay.h>
 #include <asm/localtimer.h>
 #include <asm/arch_timer.h>
 #include <asm/system_info.h>
 #include <asm/sched_clock.h>
 
 static unsigned long arch_timer_rate;
-static int arch_timer_ppi;
-static int arch_timer_ppi2;
+
+enum ppi_nr {
+	PHYS_SECURE_PPI,
+	PHYS_NONSECURE_PPI,
+	VIRT_PPI,
+	HYP_PPI,
+	MAX_TIMER_PPI
+};
+
+static int arch_timer_ppi[MAX_TIMER_PPI];
 
 static struct clock_event_device __percpu **arch_timer_evt;
+static struct delay_timer arch_delay_timer;
 
-extern void init_current_timer_delay(unsigned long freq);
+static bool arch_timer_use_virtual = true;
 
 /*
  * Architected system timer support.
@@ -46,50 +56,104 @@ extern void init_current_timer_delay(unsigned long freq);
 #define ARCH_TIMER_REG_FREQ		1
 #define ARCH_TIMER_REG_TVAL		2
 
-static void arch_timer_reg_write(int reg, u32 val)
+#define ARCH_TIMER_PHYS_ACCESS		0
+#define ARCH_TIMER_VIRT_ACCESS		1
+
+/*
+ * These register accessors are marked inline so the compiler can
+ * nicely work out which register we want, and chuck away the rest of
+ * the code. At least it does so with a recent GCC (4.6.3).
+ */
+static inline void arch_timer_reg_write(const int access, const int reg, u32 val)
 {
-	switch (reg) {
-	case ARCH_TIMER_REG_CTRL:
-		asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
-		break;
-	case ARCH_TIMER_REG_TVAL:
-		asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
-		break;
+	if (access == ARCH_TIMER_PHYS_ACCESS) {
+		switch (reg) {
+		case ARCH_TIMER_REG_CTRL:
+			asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
+			break;
+		case ARCH_TIMER_REG_TVAL:
+			asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
+			break;
+		}
+	}
+
+	if (access == ARCH_TIMER_VIRT_ACCESS) {
+		switch (reg) {
+		case ARCH_TIMER_REG_CTRL:
+			asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
+			break;
+		case ARCH_TIMER_REG_TVAL:
+			asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
+			break;
+		}
 	}
 
 	isb();
 }
 
-static u32 arch_timer_reg_read(int reg)
+static inline u32 arch_timer_reg_read(const int access, const int reg)
 {
-	u32 val;
+	u32 val = 0;
+
+	if (access == ARCH_TIMER_PHYS_ACCESS) {
+		switch (reg) {
+		case ARCH_TIMER_REG_CTRL:
+			asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
+			break;
+		case ARCH_TIMER_REG_TVAL:
+			asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
+			break;
+		case ARCH_TIMER_REG_FREQ:
+			asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
+			break;
+		}
+	}
 
-	switch (reg) {
-	case ARCH_TIMER_REG_CTRL:
-		asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
-		break;
-	case ARCH_TIMER_REG_FREQ:
-		asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
-		break;
-	case ARCH_TIMER_REG_TVAL:
-		asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
-		break;
-	default:
-		BUG();
+	if (access == ARCH_TIMER_VIRT_ACCESS) {
+		switch (reg) {
+		case ARCH_TIMER_REG_CTRL:
+			asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
+			break;
+		case ARCH_TIMER_REG_TVAL:
+			asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
+			break;
+		}
 	}
 
 	return val;
 }
 
-static irqreturn_t arch_timer_handler(int irq, void *dev_id)
+static inline cycle_t arch_timer_counter_read(const int access)
 {
-	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
-	unsigned long ctrl;
+	cycle_t cval = 0;
+
+	if (access == ARCH_TIMER_PHYS_ACCESS)
+		asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
+
+	if (access == ARCH_TIMER_VIRT_ACCESS)
+		asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
+
+	return cval;
+}
+
+static inline cycle_t arch_counter_get_cntpct(void)
+{
+	return arch_timer_counter_read(ARCH_TIMER_PHYS_ACCESS);
+}
 
-	ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
+static inline cycle_t arch_counter_get_cntvct(void)
+{
+	return arch_timer_counter_read(ARCH_TIMER_VIRT_ACCESS);
+}
+
+static irqreturn_t inline timer_handler(const int access,
+					struct clock_event_device *evt)
+{
+	unsigned long ctrl;
+	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
 	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
 		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
-		arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
+		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
 		evt->event_handler(evt);
 		return IRQ_HANDLED;
 	}
@@ -97,63 +161,100 @@ static irqreturn_t arch_timer_handler(int irq, void *dev_id)
 	return IRQ_NONE;
 }
 
-static void arch_timer_disable(void)
+static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
 {
-	unsigned long ctrl;
+	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
 
-	ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
-	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
-	arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
+	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
 }
 
-static void arch_timer_set_mode(enum clock_event_mode mode,
-				struct clock_event_device *clk)
+static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
 {
+	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
+
+	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
+}
+
+static inline void timer_set_mode(const int access, int mode)
+{
+	unsigned long ctrl;
 	switch (mode) {
 	case CLOCK_EVT_MODE_UNUSED:
 	case CLOCK_EVT_MODE_SHUTDOWN:
-		arch_timer_disable();
+		ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
+		ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
+		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
 		break;
 	default:
 		break;
 	}
 }
 
-static int arch_timer_set_next_event(unsigned long evt,
-				     struct clock_event_device *unused)
+static void arch_timer_set_mode_virt(enum clock_event_mode mode,
+				     struct clock_event_device *clk)
 {
-	unsigned long ctrl;
+	timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode);
+}
 
-	ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
+static void arch_timer_set_mode_phys(enum clock_event_mode mode,
+				     struct clock_event_device *clk)
+{
+	timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode);
+}
+
+static inline void set_next_event(const int access, unsigned long evt)
+{
+	unsigned long ctrl;
+	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
 	ctrl |= ARCH_TIMER_CTRL_ENABLE;
 	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
+	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt);
+	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
+}
 
-	arch_timer_reg_write(ARCH_TIMER_REG_TVAL, evt);
-	arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
+static int arch_timer_set_next_event_virt(unsigned long evt,
+					  struct clock_event_device *unused)
+{
+	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt);
+	return 0;
+}
 
+static int arch_timer_set_next_event_phys(unsigned long evt,
+					  struct clock_event_device *unused)
+{
+	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt);
 	return 0;
 }
 
 static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
 {
-	/* Be safe... */
-	arch_timer_disable();
-
 	clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
 	clk->name = "arch_sys_timer";
 	clk->rating = 450;
-	clk->set_mode = arch_timer_set_mode;
-	clk->set_next_event = arch_timer_set_next_event;
-	clk->irq = arch_timer_ppi;
+	if (arch_timer_use_virtual) {
+		clk->irq = arch_timer_ppi[VIRT_PPI];
+		clk->set_mode = arch_timer_set_mode_virt;
+		clk->set_next_event = arch_timer_set_next_event_virt;
+	} else {
+		clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
+		clk->set_mode = arch_timer_set_mode_phys;
+		clk->set_next_event = arch_timer_set_next_event_phys;
+	}
+
+	clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL);
 
 	clockevents_config_and_register(clk, arch_timer_rate,
 					0xf, 0x7fffffff);
 
 	*__this_cpu_ptr(arch_timer_evt) = clk;
 
-	enable_percpu_irq(clk->irq, 0);
-	if (arch_timer_ppi2)
-		enable_percpu_irq(arch_timer_ppi2, 0);
+	if (arch_timer_use_virtual)
+		enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
+	else {
+		enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
+		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
+			enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
+	}
 
 	return 0;
 }
@@ -173,8 +274,8 @@ static int arch_timer_available(void)
 		return -ENXIO;
 
 	if (arch_timer_rate == 0) {
-		arch_timer_reg_write(ARCH_TIMER_REG_CTRL, 0);
-		freq = arch_timer_reg_read(ARCH_TIMER_REG_FREQ);
+		freq = arch_timer_reg_read(ARCH_TIMER_PHYS_ACCESS,
+					   ARCH_TIMER_REG_FREQ);
 
 		/* Check the timer frequency. */
 		if (freq == 0) {
@@ -185,52 +286,57 @@ static int arch_timer_available(void)
 		arch_timer_rate = freq;
 	}
 
-	pr_info_once("Architected local timer running at %lu.%02luMHz.\n",
-		     arch_timer_rate / 1000000, (arch_timer_rate / 10000) % 100);
+	pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n",
+		     arch_timer_rate / 1000000, (arch_timer_rate / 10000) % 100,
+		     arch_timer_use_virtual ? "virt" : "phys");
 	return 0;
 }
 
-static inline cycle_t arch_counter_get_cntpct(void)
+static u32 notrace arch_counter_get_cntpct32(void)
 {
-	u32 cvall, cvalh;
-
-	asm volatile("mrrc p15, 0, %0, %1, c14" : "=r" (cvall), "=r" (cvalh));
+	cycle_t cnt = arch_counter_get_cntpct();
 
-	return ((cycle_t) cvalh << 32) | cvall;
-}
-
-static inline cycle_t arch_counter_get_cntvct(void)
-{
-	u32 cvall, cvalh;
-
-	asm volatile("mrrc p15, 1, %0, %1, c14" : "=r" (cvall), "=r" (cvalh));
-
-	return ((cycle_t) cvalh << 32) | cvall;
+	/*
+	 * The sched_clock infrastructure only knows about counters
+	 * with at most 32bits. Forget about the upper 24 bits for the
+	 * time being...
+	 */
+	return (u32)cnt;
 }
 
 static u32 notrace arch_counter_get_cntvct32(void)
 {
-	cycle_t cntvct = arch_counter_get_cntvct();
+	cycle_t cnt = arch_counter_get_cntvct();
 
 	/*
 	 * The sched_clock infrastructure only knows about counters
 	 * with at most 32bits. Forget about the upper 24 bits for the
 	 * time being...
 	 */
-	return (u32)(cntvct & (u32)~0);
+	return (u32)cnt;
 }
 
 static cycle_t arch_counter_read(struct clocksource *cs)
 {
+	/*
+	 * Always use the physical counter for the clocksource.
+	 * CNTHCTL.PL1PCTEN must be set to 1.
+	 */
 	return arch_counter_get_cntpct();
 }
 
-int read_current_timer(unsigned long *timer_val)
+static unsigned long arch_timer_read_current_timer(void)
 {
-	if (!arch_timer_rate)
-		return -ENXIO;
-	*timer_val = arch_counter_get_cntpct();
-	return 0;
+	return arch_counter_get_cntpct();
+}
+
+static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
+{
+	/*
+	 * Always use the physical counter for the clocksource.
+	 * CNTHCTL.PL1PCTEN must be set to 1.
+	 */
+	return arch_counter_get_cntpct();
 }
 
 static struct clocksource clocksource_counter = {
@@ -241,14 +347,32 @@ static struct clocksource clocksource_counter = {
 	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
+static struct cyclecounter cyclecounter = {
+	.read	= arch_counter_read_cc,
+	.mask	= CLOCKSOURCE_MASK(56),
+};
+
+static struct timecounter timecounter;
+
+struct timecounter *arch_timer_get_timecounter(void)
+{
+	return &timecounter;
+}
+
 static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
 {
 	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
 		 clk->irq, smp_processor_id());
-	disable_percpu_irq(clk->irq);
-	if (arch_timer_ppi2)
-		disable_percpu_irq(arch_timer_ppi2);
-	arch_timer_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
+
+	if (arch_timer_use_virtual)
+		disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
+	else {
+		disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
+		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
+			disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
+	}
+
+	clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
 }
 
 static struct local_timer_ops arch_timer_ops __cpuinitdata = {
@@ -261,36 +385,48 @@ static struct clock_event_device arch_timer_global_evt;
 static int __init arch_timer_register(void)
 {
 	int err;
+	int ppi;
 
 	err = arch_timer_available();
 	if (err)
-		return err;
+		goto out;
 
 	arch_timer_evt = alloc_percpu(struct clock_event_device *);
-	if (!arch_timer_evt)
-		return -ENOMEM;
+	if (!arch_timer_evt) {
+		err = -ENOMEM;
+		goto out;
+	}
 
 	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
+	cyclecounter.mult = clocksource_counter.mult;
+	cyclecounter.shift = clocksource_counter.shift;
+	timecounter_init(&timecounter, &cyclecounter,
+			 arch_counter_get_cntpct());
+
+	if (arch_timer_use_virtual) {
+		ppi = arch_timer_ppi[VIRT_PPI];
+		err = request_percpu_irq(ppi, arch_timer_handler_virt,
+					 "arch_timer", arch_timer_evt);
+	} else {
+		ppi = arch_timer_ppi[PHYS_SECURE_PPI];
+		err = request_percpu_irq(ppi, arch_timer_handler_phys,
+					 "arch_timer", arch_timer_evt);
+		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
+			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
+			err = request_percpu_irq(ppi, arch_timer_handler_phys,
+						 "arch_timer", arch_timer_evt);
+			if (err)
+				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
						arch_timer_evt);
+		}
+	}
 
-	err = request_percpu_irq(arch_timer_ppi, arch_timer_handler,
-				 "arch_timer", arch_timer_evt);
 	if (err) {
 		pr_err("arch_timer: can't register interrupt %d (%d)\n",
-		       arch_timer_ppi, err);
+		       ppi, err);
 		goto out_free;
 	}
 
-	if (arch_timer_ppi2) {
-		err = request_percpu_irq(arch_timer_ppi2, arch_timer_handler,
-					 "arch_timer", arch_timer_evt);
-		if (err) {
-			pr_err("arch_timer: can't register interrupt %d (%d)\n",
-			       arch_timer_ppi2, err);
-			arch_timer_ppi2 = 0;
-			goto out_free_irq;
-		}
-	}
-
 	err = local_timer_register(&arch_timer_ops);
 	if (err) {
 		/*
@@ -302,21 +438,29 @@ static int __init arch_timer_register(void)
 		arch_timer_global_evt.cpumask = cpumask_of(0);
 		err = arch_timer_setup(&arch_timer_global_evt);
 	}
-
 	if (err)
 		goto out_free_irq;
 
-	init_current_timer_delay(arch_timer_rate);
+	/* Use the architected timer for the delay loop. */
+	arch_delay_timer.read_current_timer = &arch_timer_read_current_timer;
+	arch_delay_timer.freq = arch_timer_rate;
+	register_current_timer_delay(&arch_delay_timer);
 
 	return 0;
 
 out_free_irq:
-	free_percpu_irq(arch_timer_ppi, arch_timer_evt);
-	if (arch_timer_ppi2)
-		free_percpu_irq(arch_timer_ppi2, arch_timer_evt);
+	if (arch_timer_use_virtual)
+		free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
+	else {
+		free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
+				arch_timer_evt);
+		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
+			free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
+					arch_timer_evt);
+	}
 
 out_free:
 	free_percpu(arch_timer_evt);
-
+out:
 	return err;
 }
@@ -329,6 +473,7 @@ int __init arch_timer_of_register(void)
 {
 	struct device_node *np;
 	u32 freq;
+	int i;
 
 	np = of_find_matching_node(NULL, arch_timer_of_match);
 	if (!np) {
@@ -340,22 +485,40 @@ int __init arch_timer_of_register(void)
 	if (!of_property_read_u32(np, "clock-frequency", &freq))
 		arch_timer_rate = freq;
 
-	arch_timer_ppi = irq_of_parse_and_map(np, 0);
-	arch_timer_ppi2 = irq_of_parse_and_map(np, 1);
-	pr_info("arch_timer: found %s irqs %d %d\n",
-		np->name, arch_timer_ppi, arch_timer_ppi2);
+	for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
+		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
+
+	/*
+	 * If no interrupt provided for virtual timer, we'll have to
+	 * stick to the physical timer. It'd better be accessible...
+	 */
+	if (!arch_timer_ppi[VIRT_PPI]) {
+		arch_timer_use_virtual = false;
+
+		if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
+		    !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
+			pr_warn("arch_timer: No interrupt available, giving up\n");
+			return -EINVAL;
+		}
+	}
 
 	return arch_timer_register();
 }
 
 int __init arch_timer_sched_clock_init(void)
 {
+	u32 (*cnt32)(void);
 	int err;
 
 	err = arch_timer_available();
 	if (err)
 		return err;
 
-	setup_sched_clock(arch_counter_get_cntvct32, 32, arch_timer_rate);
+	if (arch_timer_use_virtual)
+		cnt32 = arch_counter_get_cntvct32;
+	else
+		cnt32 = arch_counter_get_cntpct32;
+
+	setup_sched_clock(cnt32, 32, arch_timer_rate);
 	return 0;
 }
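Note on the accessor refactoring above: arch_timer_reg_write()/arch_timer_reg_read() take an access selector that is a compile-time constant at every call site, so after inlining the compiler can discard the branch for the other timer bank, as the in-code comment about GCC 4.6.3 points out. A minimal standalone sketch of that pattern in hosted C (all names here are made up for illustration; this is not kernel code):

/*
 * Sketch of constant-folded dispatch through an inline helper.
 * TIMER_*_ACCESS, timer_ctrl_insn(), show_phys() and show_virt()
 * are hypothetical names, not from the patch above.
 */
#include <stdio.h>

#define TIMER_PHYS_ACCESS 0
#define TIMER_VIRT_ACCESS 1

/* With a constant 'access', only one branch survives dead-code elimination. */
static inline const char *timer_ctrl_insn(const int access)
{
	if (access == TIMER_PHYS_ACCESS)
		return "mcr p15, 0, Rt, c14, c2, 1";	/* CNTP_CTL */
	if (access == TIMER_VIRT_ACCESS)
		return "mcr p15, 0, Rt, c14, c3, 1";	/* CNTV_CTL */
	return "undefined";
}

/* Each wrapper bakes in the selector, like the _virt/_phys wrappers above. */
static void show_phys(void) { puts(timer_ctrl_insn(TIMER_PHYS_ACCESS)); }
static void show_virt(void) { puts(timer_ctrl_insn(TIMER_VIRT_ACCESS)); }

int main(void)
{
	show_phys();
	show_virt();
	return 0;
}

Compiled with optimization, each wrapper reduces to a single constant, which is the property the kernel accessors rely on to avoid any runtime dispatch between the physical and virtual register banks.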
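The cyclecounter/timecounter added above reuses the clocksource's precomputed mult/shift pair over a 56-bit counter mask, so a cycle delta converts to nanoseconds as (cycles * mult) >> shift. A self-contained sketch of that arithmetic, assuming an illustrative 10 MHz rate and hand-picked mult/shift values rather than the kernel's computed ones:

#include <stdint.h>
#include <stdio.h>

/* Mirrors CLOCKSOURCE_MASK(56): the architected counter is at least 56 bits. */
#define COUNTER_MASK ((1ULL << 56) - 1)

/* Same shape as the cyclecounter conversion: ns = (cycles * mult) >> shift. */
static uint64_t cycles_to_ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return ((cycles & COUNTER_MASK) * mult) >> shift;
}

int main(void)
{
	/* For a 10 MHz counter each cycle is 100 ns, so choose mult/shift
	 * such that (cycles * mult) >> shift == cycles * 100. */
	uint32_t mult = 100u << 8;	/* 25600 */
	uint32_t shift = 8;

	/* 10,000,000 cycles at 10 MHz is exactly one second. */
	printf("%llu ns\n",
	       (unsigned long long)cycles_to_ns(10000000ULL, mult, shift));
	return 0;
}

This framing also shows why the sched_clock path in the patch truncates to 32 bits: as the in-code comment says, the sched_clock infrastructure at this point only handles counters up to 32 bits wide, so the driver hands it the low word of the 56-bit counter.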
