Diffstat (limited to 'arch/arm/vfp/vfpmodule.c')
 arch/arm/vfp/vfpmodule.c | 422 ++++++++++++++++++++++++++++++++++++--------
 1 file changed, 329 insertions(+), 93 deletions(-)
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 8063a322c79..2f37e1d6cb4 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -8,14 +8,23 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#include <linux/module.h>
 #include <linux/types.h>
+#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
+#include <linux/hardirq.h>
 #include <linux/kernel.h>
+#include <linux/notifier.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
+#include <linux/smp.h>
 #include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/user.h>
+#include <linux/export.h>
 
+#include <asm/cp15.h>
 #include <asm/cputype.h>
+#include <asm/system_info.h>
 #include <asm/thread_notify.h>
 #include <asm/vfp.h>
 
@@ -30,7 +39,6 @@ void vfp_support_entry(void);
 void vfp_null_entry(void);
 
 void (*vfp_vector)(void) = vfp_null_entry;
-union vfp_state *last_VFP_context[NR_CPUS];
 
 /*
  * Dual-use variable.
@@ -40,6 +48,46 @@ union vfp_state *last_VFP_context[NR_CPUS];
 unsigned int VFP_arch;
 
 /*
+ * The pointer to the vfpstate structure of the thread which currently
+ * owns the context held in the VFP hardware, or NULL if the hardware
+ * context is invalid.
+ *
+ * For UP, this is sufficient to tell which thread owns the VFP context.
+ * However, for SMP, we also need to check the CPU number stored in the
+ * saved state too to catch migrations.
+ */
+union vfp_state *vfp_current_hw_state[NR_CPUS];
+
+/*
+ * Is 'thread's most up to date state stored in this CPUs hardware?
+ * Must be called from non-preemptible context.
+ */
+static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread)
+{
+#ifdef CONFIG_SMP
+	if (thread->vfpstate.hard.cpu != cpu)
+		return false;
+#endif
+	return vfp_current_hw_state[cpu] == &thread->vfpstate;
+}
+
+/*
+ * Force a reload of the VFP context from the thread structure.  We do
+ * this by ensuring that access to the VFP hardware is disabled, and
+ * clear vfp_current_hw_state.  Must be called from non-preemptible context.
+ */
+static void vfp_force_reload(unsigned int cpu, struct thread_info *thread)
+{
+	if (vfp_state_in_hw(cpu, thread)) {
+		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+		vfp_current_hw_state[cpu] = NULL;
+	}
+#ifdef CONFIG_SMP
+	thread->vfpstate.hard.cpu = NR_CPUS;
+#endif
+}
+
+/*
  * Per-thread VFP initialization.
  */
 static void vfp_thread_flush(struct thread_info *thread)
@@ -47,21 +95,27 @@ static void vfp_thread_flush(struct thread_info *thread)
 	union vfp_state *vfp = &thread->vfpstate;
 	unsigned int cpu;
 
-	memset(vfp, 0, sizeof(union vfp_state));
-
-	vfp->hard.fpexc = FPEXC_EN;
-	vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
-
 	/*
 	 * Disable VFP to ensure we initialize it first.  We must ensure
-	 * that the modification of last_VFP_context[] and hardware disable
-	 * are done for the same CPU and without preemption.
+	 * that the modification of vfp_current_hw_state[] and hardware
+	 * disable are done for the same CPU and without preemption.
+	 *
+	 * Do this first to ensure that preemption won't overwrite our
	 * state saving should access to the VFP be enabled at this point.
 	 */
 	cpu = get_cpu();
-	if (last_VFP_context[cpu] == vfp)
-		last_VFP_context[cpu] = NULL;
+	if (vfp_current_hw_state[cpu] == vfp)
+		vfp_current_hw_state[cpu] = NULL;
 	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
 	put_cpu();
+
+	memset(vfp, 0, sizeof(union vfp_state));
+
+	vfp->hard.fpexc = FPEXC_EN;
+	vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
+#ifdef CONFIG_SMP
+	vfp->hard.cpu = NR_CPUS;
+#endif
 }
 
 static void vfp_thread_exit(struct thread_info *thread)
@@ -70,11 +124,22 @@ static void vfp_thread_exit(struct thread_info *thread)
 	union vfp_state *vfp = &thread->vfpstate;
 	unsigned int cpu = get_cpu();
 
-	if (last_VFP_context[cpu] == vfp)
-		last_VFP_context[cpu] = NULL;
+	if (vfp_current_hw_state[cpu] == vfp)
+		vfp_current_hw_state[cpu] = NULL;
 	put_cpu();
 }
 
+static void vfp_thread_copy(struct thread_info *thread)
+{
+	struct thread_info *parent = current_thread_info();
+
+	vfp_sync_hwstate(parent);
+	thread->vfpstate = parent->vfpstate;
+#ifdef CONFIG_SMP
+	thread->vfpstate.hard.cpu = NR_CPUS;
+#endif
+}
+
 /*
  * When this function is called with the following 'cmd's, the following
  * is true while this function is being run:
@@ -101,29 +166,25 @@ static void vfp_thread_exit(struct thread_info *thread)
 static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 {
 	struct thread_info *thread = v;
+	u32 fpexc;
+#ifdef CONFIG_SMP
+	unsigned int cpu;
+#endif
 
-	if (likely(cmd == THREAD_NOTIFY_SWITCH)) {
-		u32 fpexc = fmrx(FPEXC);
+	switch (cmd) {
+	case THREAD_NOTIFY_SWITCH:
+		fpexc = fmrx(FPEXC);
 
 #ifdef CONFIG_SMP
-		unsigned int cpu = thread->cpu;
+		cpu = thread->cpu;
 
 		/*
 		 * On SMP, if VFP is enabled, save the old state in
 		 * case the thread migrates to a different CPU. The
 		 * restoring is done lazily.
 		 */
-		if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {
-			vfp_save_state(last_VFP_context[cpu], fpexc);
-			last_VFP_context[cpu]->hard.cpu = cpu;
-		}
-		/*
-		 * Thread migration, just force the reloading of the
-		 * state on the new CPU in case the VFP registers
-		 * contain stale data.
-		 */
-		if (thread->vfpstate.hard.cpu != cpu)
-			last_VFP_context[cpu] = NULL;
+		if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu])
+			vfp_save_state(vfp_current_hw_state[cpu], fpexc);
 #endif
 
 		/*
@@ -131,13 +192,20 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 		 * old state.
 		 */
 		fmxr(FPEXC, fpexc & ~FPEXC_EN);
-		return NOTIFY_DONE;
-	}
+		break;
 
-	if (cmd == THREAD_NOTIFY_FLUSH)
+	case THREAD_NOTIFY_FLUSH:
 		vfp_thread_flush(thread);
-	else
+		break;
+
+	case THREAD_NOTIFY_EXIT:
 		vfp_thread_exit(thread);
+		break;
+
+	case THREAD_NOTIFY_COPY:
+		vfp_thread_copy(thread);
+		break;
+	}
 
 	return NOTIFY_DONE;
 }
@@ -150,7 +218,7 @@ static struct notifier_block vfp_notifier_block = {
  * Raise a SIGFPE for the current process.
  * sicode describes the signal being raised.
  */
-void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
+static void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
 {
 	siginfo_t info;
 
@@ -174,11 +242,11 @@ static void vfp_panic(char *reason, u32 inst)
 {
 	int i;
 
-	printk(KERN_ERR "VFP: Error: %s\n", reason);
-	printk(KERN_ERR "VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n",
+	pr_err("VFP: Error: %s\n", reason);
+	pr_err("VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n",
 		fmrx(FPEXC), fmrx(FPSCR), inst);
 	for (i = 0; i < 32; i += 2)
-		printk(KERN_ERR "VFP: s%2u: 0x%08x s%2u: 0x%08x\n",
+		pr_err("VFP: s%2u: 0x%08x s%2u: 0x%08x\n",
 		       i, vfp_get_float(i), i+1, vfp_get_float(i+1));
 }
 
@@ -346,7 +414,7 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
 	 * If there isn't a second FP instruction, exit now. Note that
 	 * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
 	 */
-	if (fpexc ^ (FPEXC_EX | FPEXC_FP2V))
+	if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
 		goto exit;
 
 	/*
@@ -366,7 +434,10 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
 
 static void vfp_enable(void *unused)
 {
-	u32 access = get_copro_access();
+	u32 access;
+
+	BUG_ON(preemptible());
+	access = get_copro_access();
 
 	/*
 	 * Enable full access to VFP (cp10 and cp11)
@@ -374,70 +445,79 @@ static void vfp_enable(void *unused)
 	set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
 }
 
-#ifdef CONFIG_PM
-#include <linux/sysdev.h>
-
-static int vfp_pm_suspend(struct sys_device *dev, pm_message_t state)
+#ifdef CONFIG_CPU_PM
+static int vfp_pm_suspend(void)
 {
 	struct thread_info *ti = current_thread_info();
 	u32 fpexc = fmrx(FPEXC);
 
 	/* if vfp is on, then save state for resumption */
 	if (fpexc & FPEXC_EN) {
-		printk(KERN_DEBUG "%s: saving vfp state\n", __func__);
+		pr_debug("%s: saving vfp state\n", __func__);
 		vfp_save_state(&ti->vfpstate, fpexc);
 
 		/* disable, just in case */
 		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+	} else if (vfp_current_hw_state[ti->cpu]) {
+#ifndef CONFIG_SMP
+		fmxr(FPEXC, fpexc | FPEXC_EN);
+		vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
+		fmxr(FPEXC, fpexc);
+#endif
 	}
 
 	/* clear any information we had about last context state */
-	memset(last_VFP_context, 0, sizeof(last_VFP_context));
+	vfp_current_hw_state[ti->cpu] = NULL;
 
 	return 0;
 }
 
-static int vfp_pm_resume(struct sys_device *dev)
+static void vfp_pm_resume(void)
 {
 	/* ensure we have access to the vfp */
 	vfp_enable(NULL);
 
 	/* and disable it to ensure the next usage restores the state */
 	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
-
-	return 0;
 }
 
-static struct sysdev_class vfp_pm_sysclass = {
-	.name		= "vfp",
-	.suspend	= vfp_pm_suspend,
-	.resume		= vfp_pm_resume,
-};
+static int vfp_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
+	void *v)
+{
+	switch (cmd) {
+	case CPU_PM_ENTER:
+		vfp_pm_suspend();
+		break;
+	case CPU_PM_ENTER_FAILED:
+	case CPU_PM_EXIT:
+		vfp_pm_resume();
+		break;
+	}
+	return NOTIFY_OK;
+}
 
-static struct sys_device vfp_pm_sysdev = {
-	.cls	= &vfp_pm_sysclass,
+static struct notifier_block vfp_cpu_pm_notifier_block = {
+	.notifier_call = vfp_cpu_pm_notifier,
 };
 
 static void vfp_pm_init(void)
 {
-	sysdev_class_register(&vfp_pm_sysclass);
-	sysdev_register(&vfp_pm_sysdev);
+	cpu_pm_register_notifier(&vfp_cpu_pm_notifier_block);
 }
 
-
 #else
 static inline void vfp_pm_init(void) { }
-#endif /* CONFIG_PM */
+#endif /* CONFIG_CPU_PM */
 
+/*
+ * Ensure that the VFP state stored in 'thread->vfpstate' is up to date
+ * with the hardware state.
+ */
 void vfp_sync_hwstate(struct thread_info *thread)
 {
 	unsigned int cpu = get_cpu();
 
-	/*
-	 * If the thread we're interested in is the current owner of the
-	 * hardware VFP state, then we need to save its state.
-	 */
-	if (last_VFP_context[cpu] == &thread->vfpstate) {
+	if (vfp_state_in_hw(cpu, thread)) {
 		u32 fpexc = fmrx(FPEXC);
 
 		/*
@@ -451,40 +531,189 @@ void vfp_sync_hwstate(struct thread_info *thread)
 	put_cpu();
 }
 
+/* Ensure that the thread reloads the hardware VFP state on the next use. */
 void vfp_flush_hwstate(struct thread_info *thread)
 {
 	unsigned int cpu = get_cpu();
 
+	vfp_force_reload(cpu, thread);
+
+	put_cpu();
+}
+
+/*
+ * Save the current VFP state into the provided structures and prepare
+ * for entry into a new function (signal handler).
+ */
+int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
+				    struct user_vfp_exc __user *ufp_exc)
+{
+	struct thread_info *thread = current_thread_info();
+	struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
+	int err = 0;
+
+	/* Ensure that the saved hwstate is up-to-date. */
+	vfp_sync_hwstate(thread);
+
 	/*
-	 * If the thread we're interested in is the current owner of the
-	 * hardware VFP state, then we need to save its state.
+	 * Copy the floating point registers. There can be unused
+	 * registers see asm/hwcap.h for details.
 	 */
-	if (last_VFP_context[cpu] == &thread->vfpstate) {
-		u32 fpexc = fmrx(FPEXC);
+	err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs,
+			      sizeof(hwstate->fpregs));
+	/*
+	 * Copy the status and control register.
+	 */
+	__put_user_error(hwstate->fpscr, &ufp->fpscr, err);
 
-		fmxr(FPEXC, fpexc & ~FPEXC_EN);
+	/*
+	 * Copy the exception registers.
+	 */
+	__put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err);
+	__put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
+	__put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
 
-		/*
-		 * Set the context to NULL to force a reload the next time
-		 * the thread uses the VFP.
-		 */
-		last_VFP_context[cpu] = NULL;
-	}
+	if (err)
+		return -EFAULT;
+
+	/* Ensure that VFP is disabled. */
+	vfp_flush_hwstate(thread);
 
-#ifdef CONFIG_SMP
 	/*
-	 * For SMP we still have to take care of the case where the thread
-	 * migrates to another CPU and then back to the original CPU on which
-	 * the last VFP user is still the same thread. Mark the thread VFP
-	 * state as belonging to a non-existent CPU so that the saved one will
-	 * be reloaded in the above case.
+	 * As per the PCS, clear the length and stride bits for function
+	 * entry.
 	 */
-	thread->vfpstate.hard.cpu = NR_CPUS;
+	hwstate->fpscr &= ~(FPSCR_LENGTH_MASK | FPSCR_STRIDE_MASK);
+	return 0;
+}
+
+/* Sanitise and restore the current VFP state from the provided structures. */
+int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
+			     struct user_vfp_exc __user *ufp_exc)
+{
+	struct thread_info *thread = current_thread_info();
+	struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
+	unsigned long fpexc;
+	int err = 0;
+
+	/* Disable VFP to avoid corrupting the new thread state. */
+	vfp_flush_hwstate(thread);
+
+	/*
+	 * Copy the floating point registers. There can be unused
+	 * registers see asm/hwcap.h for details.
+	 */
+	err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
+				sizeof(hwstate->fpregs));
+	/*
+	 * Copy the status and control register.
+	 */
+	__get_user_error(hwstate->fpscr, &ufp->fpscr, err);
+
+	/*
+	 * Sanitise and restore the exception registers.
+	 */
+	__get_user_error(fpexc, &ufp_exc->fpexc, err);
+
+	/* Ensure the VFP is enabled. */
+	fpexc |= FPEXC_EN;
+
+	/* Ensure FPINST2 is invalid and the exception flag is cleared. */
+	fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
+	hwstate->fpexc = fpexc;
+
+	__get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
+	__get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
+
+	return err ? -EFAULT : 0;
+}
+
+/*
+ * VFP hardware can lose all context when a CPU goes offline.
+ * As we will be running in SMP mode with CPU hotplug, we will save the
+ * hardware state at every thread switch.  We clear our held state when
+ * a CPU has been killed, indicating that the VFP hardware doesn't contain
+ * a threads VFP state.  When a CPU starts up, we re-enable access to the
+ * VFP hardware.
+ *
+ * Both CPU_DYING and CPU_STARTING are called on the CPU which
+ * is being offlined/onlined.
+ */
+static int vfp_hotplug(struct notifier_block *b, unsigned long action,
+	void *hcpu)
+{
+	if (action == CPU_DYING || action == CPU_DYING_FROZEN)
+		vfp_current_hw_state[(long)hcpu] = NULL;
+	else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
+		vfp_enable(NULL);
+	return NOTIFY_OK;
+}
+
+void vfp_kmode_exception(void)
+{
+	/*
+	 * If we reach this point, a floating point exception has been raised
+	 * while running in kernel mode. If the NEON/VFP unit was enabled at the
+	 * time, it means a VFP instruction has been issued that requires
+	 * software assistance to complete, something which is not currently
+	 * supported in kernel mode.
+	 * If the NEON/VFP unit was disabled, and the location pointed to below
+	 * is properly preceded by a call to kernel_neon_begin(), something has
+	 * caused the task to be scheduled out and back in again. In this case,
+	 * rebuilding and running with CONFIG_DEBUG_ATOMIC_SLEEP enabled should
+	 * be helpful in localizing the problem.
+	 */
+	if (fmrx(FPEXC) & FPEXC_EN)
+		pr_crit("BUG: unsupported FP instruction in kernel mode\n");
+	else
+		pr_crit("BUG: FP instruction issued in kernel mode with FP unit disabled\n");
+}
+
+#ifdef CONFIG_KERNEL_MODE_NEON
+
+/*
+ * Kernel-side NEON support functions
+ */
+void kernel_neon_begin(void)
+{
+	struct thread_info *thread = current_thread_info();
+	unsigned int cpu;
+	u32 fpexc;
+
+	/*
+	 * Kernel mode NEON is only allowed outside of interrupt context
+	 * with preemption disabled. This will make sure that the kernel
+	 * mode NEON register contents never need to be preserved.
+	 */
+	BUG_ON(in_interrupt());
+	cpu = get_cpu();
+
+	fpexc = fmrx(FPEXC) | FPEXC_EN;
+	fmxr(FPEXC, fpexc);
+
+	/*
+	 * Save the userland NEON/VFP state. Under UP,
+	 * the owner could be a task other than 'current'
+	 */
+	if (vfp_state_in_hw(cpu, thread))
+		vfp_save_state(&thread->vfpstate, fpexc);
+#ifndef CONFIG_SMP
+	else if (vfp_current_hw_state[cpu] != NULL)
+		vfp_save_state(vfp_current_hw_state[cpu], fpexc);
 #endif
+	vfp_current_hw_state[cpu] = NULL;
+}
+EXPORT_SYMBOL(kernel_neon_begin);
+
+void kernel_neon_end(void)
+{
+	/* Disable the NEON/VFP unit. */
+	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
 	put_cpu();
 }
+EXPORT_SYMBOL(kernel_neon_end);
 
-#include <linux/smp.h>
+#endif /* CONFIG_KERNEL_MODE_NEON */
 
 /*
  * VFP support code initialisation.
@@ -495,7 +724,7 @@ static int __init vfp_init(void)
 	unsigned int cpu_arch = cpu_architecture();
 
 	if (cpu_arch >= CPU_ARCH_ARMv6)
-		vfp_enable(NULL);
+		on_each_cpu(vfp_enable, NULL, 1);
 
 	/*
	 * First check that there is a VFP that we can use.
@@ -508,16 +737,16 @@ static int __init vfp_init(void)
 	barrier();
 	vfp_vector = vfp_null_entry;
 
-	printk(KERN_INFO "VFP support v0.3: ");
+	pr_info("VFP support v0.3: ");
 	if (VFP_arch)
-		printk("not present\n");
+		pr_cont("not present\n");
 	else if (vfpsid & FPSID_NODOUBLE) {
-		printk("no double precision support\n");
+		pr_cont("no double precision support\n");
 	} else {
-		smp_call_function(vfp_enable, NULL, 1);
+		hotcpu_notifier(vfp_hotplug, 0);
 
 		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;  /* Extract the architecture version */
-		printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
+		pr_cont("implementor %02x architecture %d part %02x variant %x rev %x\n",
 			(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
 			(vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
 			(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
@@ -539,14 +768,16 @@ static int __init vfp_init(void)
 			elf_hwcap |= HWCAP_VFPv3;
 
 			/*
-			 * Check for VFPv3 D16. CPUs in this configuration
-			 * only have 16 x 64bit registers.
+			 * Check for VFPv3 D16 and VFPv4 D16.  CPUs in
+			 * this configuration only have 16 x 64bit
+			 * registers.
 			 */
 			if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
-				elf_hwcap |= HWCAP_VFPv3D16;
+				elf_hwcap |= HWCAP_VFPv3D16; /* also v4-D16 */
+			else
+				elf_hwcap |= HWCAP_VFPD32;
 		}
 #endif
-#ifdef CONFIG_NEON
 		/*
 		 * Check for the presence of the Advanced SIMD
 		 * load/store instructions, integer and single
@@ -554,12 +785,17 @@ static int __init vfp_init(void)
 		 * for NEON if the hardware has the MVFR registers.
 		 */
 		if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
+#ifdef CONFIG_NEON
 			if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
 				elf_hwcap |= HWCAP_NEON;
 #endif
+#ifdef CONFIG_VFPv3
+			if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
+				elf_hwcap |= HWCAP_VFPv4;
+#endif
+		}
 	}
 	return 0;
 }
 
-late_initcall(vfp_init);
+core_initcall(vfp_init);
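
The heart of the patch is replacing last_VFP_context[] with vfp_current_hw_state[] plus a CPU number kept in the saved state itself: on SMP, a per-CPU owner pointer alone cannot tell that the thread ran on another CPU in the meantime. The following stand-alone user-space model (all names here are stand-ins for this sketch, not kernel code) walks through the migrate-away-and-back case that the two-part vfp_state_in_hw() check catches:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

/* stands in for vfp_hard_struct's 'cpu': the CPU that last loaded it */
struct vfp_state {
	unsigned int cpu;
};

/* stands in for the per-CPU pointer to the state owning the registers */
static struct vfp_state *vfp_current_hw_state[NR_CPUS];

/* mirrors vfp_state_in_hw(): pointer AND CPU number must both match */
static bool vfp_state_in_hw(unsigned int cpu, struct vfp_state *st)
{
	if (st->cpu != cpu)		/* the CONFIG_SMP-only check */
		return false;
	return vfp_current_hw_state[cpu] == st;
}

int main(void)
{
	struct vfp_state t = { .cpu = 0 };

	/* thread t uses VFP on CPU 0: its registers are live there */
	vfp_current_hw_state[0] = &t;
	printf("on CPU 0:      %d\n", vfp_state_in_hw(0, &t));	/* 1 */

	/* t migrates to CPU 1 and loads its state there */
	t.cpu = 1;
	vfp_current_hw_state[1] = &t;

	/*
	 * Back on CPU 0, the stale owner pointer still names t, but the
	 * CPU number says the registers there can no longer be trusted.
	 */
	printf("back on CPU 0: %d\n", vfp_state_in_hw(0, &t));	/* 0 */
	return 0;
}

The pointer test alone would report true in the second case; that stale-register hazard is what the removed "Thread migration" workaround in vfp_notifier() used to patch over.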

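The one-line change in VFP_bounce() is a genuine predicate fix, not a cleanup: the old XOR test is non-zero whenever FPEXC differs from exactly EX|FP2V, and since FPEXC_EN is also set while bouncing, it always branched to exit and the second bounced instruction was never handled. A minimal stand-alone demonstration (bit positions as defined in arch/arm/include/asm/vfp.h; nothing else from the kernel is needed):

#include <stdio.h>

#define FPEXC_EX	(1u << 31)
#define FPEXC_EN	(1u << 30)
#define FPEXC_FP2V	(1u << 28)

int main(void)
{
	/* a bounced exception with a valid second instruction pending */
	unsigned int fpexc = FPEXC_EN | FPEXC_EX | FPEXC_FP2V;

	/* old test: non-zero because EN is set, so the insn was skipped */
	printf("old test skips: %d\n",
	       (fpexc ^ (FPEXC_EX | FPEXC_FP2V)) != 0);

	/* new test: both bits are set, so the second insn gets emulated */
	printf("new test skips: %d\n",
	       (fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V));
	return 0;
}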
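The new kernel_neon_begin()/kernel_neon_end() pair is meant to bracket every kernel-mode NEON region. A sketch of a typical caller under CONFIG_KERNEL_MODE_NEON, assuming the declarations live in asm/neon.h as in mainline; my_neon_memcpy is a hypothetical NEON routine, and the constraints in the comments follow the code in the diff above:

#include <asm/neon.h>		/* kernel_neon_begin()/kernel_neon_end() */
#include <linux/types.h>

/* hypothetical NEON-accelerated routine, e.g. implemented in assembly */
void my_neon_memcpy(void *dst, const void *src, size_t len);

static void my_copy(void *dst, const void *src, size_t len)
{
	/*
	 * Saves any userland NEON/VFP state, enables the unit, and holds
	 * the CPU via get_cpu(); must not be called from interrupt
	 * context, and nothing in the region may sleep.
	 */
	kernel_neon_begin();

	my_neon_memcpy(dst, src, len);

	/* disables the unit; the next userland VFP use traps and reloads */
	kernel_neon_end();
}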