Diffstat (limited to 'arch/arm/mach-vexpress/tc2_pm.c')
-rw-r--r--  arch/arm/mach-vexpress/tc2_pm.c | 132
1 file changed, 78 insertions(+), 54 deletions(-)
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c
index 7aeb5d60e48..b743a0ae02c 100644
--- a/arch/arm/mach-vexpress/tc2_pm.c
+++ b/arch/arm/mach-vexpress/tc2_pm.c
@@ -12,10 +12,12 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/spinlock.h>
 #include <linux/errno.h>
 #include <linux/irqchip/arm-gic.h>
@@ -31,11 +33,17 @@
 #include "spc.h"
 
 /* SCC conf registers */
+#define RESET_CTRL		0x018
+#define RESET_A15_NCORERESET(cpu)	(1 << (2 + (cpu)))
+#define RESET_A7_NCORERESET(cpu)	(1 << (16 + (cpu)))
+
 #define A15_CONF		0x400
 #define A7_CONF			0x500
 #define SYS_INFO		0x700
 #define SPC_BASE		0xb00
 
+static void __iomem *scc;
+
 /*
  * We can't use regular spinlocks. In the switcher case, it is possible
  * for an outbound CPU to call power_down() after its inbound counterpart
@@ -131,6 +139,16 @@ static void tc2_pm_down(u64 residency)
 	} else
 		BUG();
 
+	/*
+	 * If the CPU is committed to power down, make sure
+	 * the power controller will be in charge of waking it
+	 * up upon IRQ, i.e. IRQ lines are cut from the GIC CPU
+	 * interface to the CPU by disabling the GIC CPU IF, so
+	 * wfi cannot complete behind the power controller's back.
+	 */
+	if (!skip_wfi)
+		gic_cpu_if_down();
+
 	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
 		arch_spin_unlock(&tc2_pm_lock);
 
@@ -146,32 +164,7 @@ static void tc2_pm_down(u64 residency)
 			: : "r" (0x400) );
 		}
 
-		/*
-		 * We need to disable and flush the whole (L1 and L2) cache.
-		 * Let's do it in the safest possible way i.e. with
-		 * no memory access within the following sequence
-		 * including the stack.
-		 *
-		 * Note: fp is preserved to the stack explicitly prior doing
-		 * this since adding it to the clobber list is incompatible
-		 * with having CONFIG_FRAME_POINTER=y.
-		 */
-		asm volatile(
-		"str	fp, [sp, #-4]! \n\t"
-		"mrc	p15, 0, r0, c1, c0, 0	@ get CR \n\t"
-		"bic	r0, r0, #"__stringify(CR_C)" \n\t"
-		"mcr	p15, 0, r0, c1, c0, 0	@ set CR \n\t"
-		"isb	\n\t"
-		"bl	v7_flush_dcache_all \n\t"
-		"clrex	\n\t"
-		"mrc	p15, 0, r0, c1, c0, 1	@ get AUXCR \n\t"
-		"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t"
-		"mcr	p15, 0, r0, c1, c0, 1	@ set AUXCR \n\t"
-		"isb	\n\t"
-		"dsb	\n\t"
-		"ldr	fp, [sp], #4"
-		: : : "r0","r1","r2","r3","r4","r5","r6","r7",
-		      "r9","r10","lr","memory");
+		v7_exit_coherency_flush(all);
 
 		cci_disable_port_by_cpu(mpidr);
 
@@ -187,26 +180,7 @@
 
 		arch_spin_unlock(&tc2_pm_lock);
 
-		/*
-		 * We need to disable and flush only the L1 cache.
-		 * Let's do it in the safest possible way as above.
-		 */
-		asm volatile(
-		"str	fp, [sp, #-4]! \n\t"
-		"mrc	p15, 0, r0, c1, c0, 0	@ get CR \n\t"
-		"bic	r0, r0, #"__stringify(CR_C)" \n\t"
-		"mcr	p15, 0, r0, c1, c0, 0	@ set CR \n\t"
-		"isb	\n\t"
-		"bl	v7_flush_dcache_louis \n\t"
-		"clrex	\n\t"
-		"mrc	p15, 0, r0, c1, c0, 1	@ get AUXCR \n\t"
-		"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t"
-		"mcr	p15, 0, r0, c1, c0, 1	@ set AUXCR \n\t"
-		"isb	\n\t"
-		"dsb	\n\t"
-		"ldr	fp, [sp], #4"
-		: : : "r0","r1","r2","r3","r4","r5","r6","r7",
-		      "r9","r10","lr","memory");
+		v7_exit_coherency_flush(louis);
 	}
 
 	__mcpm_cpu_down(cpu, cluster);
@@ -223,6 +197,55 @@ static void tc2_pm_power_down(void)
 	tc2_pm_down(0);
 }
 
+static int tc2_core_in_reset(unsigned int cpu, unsigned int cluster)
+{
+	u32 mask = cluster ?
+		  RESET_A7_NCORERESET(cpu)
+		: RESET_A15_NCORERESET(cpu);
+
+	return !(readl_relaxed(scc + RESET_CTRL) & mask);
+}
+
+#define POLL_MSEC 10
+#define TIMEOUT_MSEC 1000
+
+static int tc2_pm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
+{
+	unsigned tries;
+
+	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
+
+	for (tries = 0; tries < TIMEOUT_MSEC / POLL_MSEC; ++tries) {
+		/*
+		 * Only examine the hardware state if the target CPU has
+		 * caught up at least as far as tc2_pm_down():
+		 */
+		if (ACCESS_ONCE(tc2_pm_use_count[cpu][cluster]) == 0) {
+			pr_debug("%s(cpu=%u, cluster=%u): RESET_CTRL = 0x%08X\n",
+				 __func__, cpu, cluster,
+				 readl_relaxed(scc + RESET_CTRL));
+
+			/*
+			 * We need the CPU to reach WFI, but the power
+			 * controller may put the cluster in reset and
+			 * power it off as soon as that happens, before
+			 * we have a chance to see STANDBYWFI.
+			 *
+			 * So we need to check for both conditions:
+			 */
+			if (tc2_core_in_reset(cpu, cluster) ||
+			    ve_spc_cpu_in_wfi(cpu, cluster))
+				return 0; /* success: the CPU is halted */
+		}
+
+		/* Otherwise, wait and retry: */
+		msleep(POLL_MSEC);
+	}
+
+	return -ETIMEDOUT; /* timeout */
+}
+
 static void tc2_pm_suspend(u64 residency)
 {
 	unsigned int mpidr, cpu, cluster;
@@ -231,7 +254,6 @@
 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 	ve_spc_set_resume_addr(cluster, cpu, virt_to_phys(mcpm_entry_point));
-	gic_cpu_if_down();
 	tc2_pm_down(residency);
 }
 
@@ -266,10 +288,11 @@ static void tc2_pm_powered_up(void)
 }
 
 static const struct mcpm_platform_ops tc2_pm_power_ops = {
-	.power_up	= tc2_pm_power_up,
-	.power_down	= tc2_pm_power_down,
-	.suspend	= tc2_pm_suspend,
-	.powered_up	= tc2_pm_powered_up,
+	.power_up		= tc2_pm_power_up,
+	.power_down		= tc2_pm_power_down,
+	.wait_for_powerdown	= tc2_pm_wait_for_powerdown,
+	.suspend		= tc2_pm_suspend,
+	.powered_up		= tc2_pm_powered_up,
 };
 
 static bool __init tc2_pm_usage_count_init(void)
@@ -302,8 +325,7 @@ static void __naked tc2_pm_power_up_setup(unsigned int affinity_level)
 
 static int __init tc2_pm_init(void)
 {
-	int ret;
-	void __iomem *scc;
+	int ret, irq;
 	u32 a15_cluster_id, a7_cluster_id, sys_info;
 	struct device_node *np;
 
@@ -327,13 +349,15 @@ static int __init tc2_pm_init(void)
 	tc2_nr_cpus[a15_cluster_id] = (sys_info >> 16) & 0xf;
 	tc2_nr_cpus[a7_cluster_id] = (sys_info >> 20) & 0xf;
 
+	irq = irq_of_parse_and_map(np, 0);
+
 	/*
 	 * A subset of the SCC registers is also used to communicate
 	 * with the SPC (power controller). We need to be able to
	 * drive it very early in the boot process to power up
 	 * processors, so we initialize the SPC driver here.
 	 */
-	ret = ve_spc_init(scc + SPC_BASE, a15_cluster_id);
+	ret = ve_spc_init(scc + SPC_BASE, a15_cluster_id, irq);
 	if (ret)
 		return ret;
 
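Note: the new tc2_core_in_reset() helper decodes the SCC RESET_CTRL register, whose nCORERESET bits are active-low: A15 cores sit at bits 2 + cpu, A7 cores at bits 16 + cpu, and a cleared bit means the core is held in reset. A standalone illustration of that decoding follows; the sample register value is made up and this is not kernel code (cluster 0 is taken to be the A15 cluster, as in the ternary above).

#include <stdio.h>
#include <stdint.h>

#define RESET_A15_NCORERESET(cpu)	(1u << (2 + (cpu)))
#define RESET_A7_NCORERESET(cpu)	(1u << (16 + (cpu)))

/* Mirrors tc2_core_in_reset(): a cleared (active-low) nCORERESET bit
 * means the core is currently held in reset. */
static int core_in_reset(uint32_t reset_ctrl, unsigned int cpu,
			 unsigned int cluster)
{
	uint32_t mask = cluster ? RESET_A7_NCORERESET(cpu)
				: RESET_A15_NCORERESET(cpu);

	return !(reset_ctrl & mask);
}

int main(void)
{
	/* Made-up value: A15 core 0 and A7 core 0 out of reset,
	 * every other core held in reset. */
	uint32_t reset_ctrl = RESET_A15_NCORERESET(0) | RESET_A7_NCORERESET(0);

	printf("A15 core 0 in reset: %d\n", core_in_reset(reset_ctrl, 0, 0));
	printf("A7  core 1 in reset: %d\n", core_in_reset(reset_ctrl, 1, 1));
	return 0;
}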
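Note: the new tc2_pm_wait_for_powerdown() callback is a bounded polling loop: sample the hardware every POLL_MSEC milliseconds and report -ETIMEDOUT if the target CPU has not halted within TIMEOUT_MSEC. A minimal user-space sketch of the same pattern follows; poll_halted() is a hypothetical stand-in for the RESET_CTRL/STANDBYWFI probes, not a kernel API.

#include <stdio.h>
#include <errno.h>
#include <unistd.h>

#define POLL_MSEC	10
#define TIMEOUT_MSEC	1000

/* Hypothetical probe: pretend the target halts on the 5th poll. */
static int poll_halted(void)
{
	static int countdown = 5;

	return --countdown == 0;
}

static int wait_for_powerdown(void)
{
	unsigned int tries;

	for (tries = 0; tries < TIMEOUT_MSEC / POLL_MSEC; ++tries) {
		if (poll_halted())
			return 0;		/* success: target halted */
		usleep(POLL_MSEC * 1000);	/* sleep between polls, don't spin */
	}
	return -ETIMEDOUT;			/* give up after TIMEOUT_MSEC */
}

int main(void)
{
	printf("wait_for_powerdown() = %d\n", wait_for_powerdown());
	return 0;
}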
