Diffstat (limited to 'arch/arm/vfp')
 -rw-r--r--  arch/arm/vfp/Makefile      6
 -rw-r--r--  arch/arm/vfp/entry.S      44
 -rw-r--r--  arch/arm/vfp/vfpdouble.c   2
 -rw-r--r--  arch/arm/vfp/vfphw.S     144
 -rw-r--r--  arch/arm/vfp/vfpmodule.c 422
 -rw-r--r--  arch/arm/vfp/vfpsingle.c   2
 6 files changed, 441 insertions(+), 179 deletions(-)
diff --git a/arch/arm/vfp/Makefile b/arch/arm/vfp/Makefile
index 39f6d8e1af7..a81404c09d5 100644
--- a/arch/arm/vfp/Makefile
+++ b/arch/arm/vfp/Makefile
@@ -4,10 +4,10 @@
# Copyright (C) 2001 ARM Limited
#
-# EXTRA_CFLAGS := -DDEBUG
-# EXTRA_AFLAGS := -DDEBUG
+# ccflags-y := -DDEBUG
+# asflags-y := -DDEBUG
-KBUILD_AFLAGS :=$(KBUILD_AFLAGS:-msoft-float=-Wa,-mfpu=softvfp+vfp)
+KBUILD_AFLAGS :=$(KBUILD_AFLAGS:-msoft-float=-Wa,-mfpu=softvfp+vfp -mfloat-abi=soft)
LDFLAGS +=--no-warn-mismatch
obj-y += vfp.o
diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
index 4fa9903b83c..fe6ca574d09 100644
--- a/arch/arm/vfp/entry.S
+++ b/arch/arm/vfp/entry.S
@@ -7,25 +7,25 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * Basic entry code, called from the kernel's undefined instruction trap.
- * r0 = faulted instruction
- * r5 = faulted PC+4
- * r9 = successful return
- * r10 = thread_info structure
- * lr = failure return
*/
+#include <linux/init.h>
+#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/vfpmacros.h>
-#include "../kernel/entry-header.S"
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
+@ VFP entry point.
+@
+@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
+@ r2 = PC value to resume execution after successful emulation
+@ r9 = normal "successful" return address
+@ r10 = this thread's thread_info structure
+@ lr = unrecognised instruction return address
+@ IRQs enabled.
+@
ENTRY(do_vfp)
-#ifdef CONFIG_PREEMPT
- ldr r4, [r10, #TI_PREEMPT] @ get preempt count
- add r11, r4, #1 @ increment it
- str r11, [r10, #TI_PREEMPT]
-#endif
- enable_irq
+ inc_preempt_count r10, r4
ldr r4, .LCvfp
ldr r11, [r10, #TI_CPU] @ CPU number
add r10, r10, #TI_VFPSTATE @ r10 = workspace
@@ -33,12 +33,7 @@ ENTRY(do_vfp)
ENDPROC(do_vfp)
ENTRY(vfp_null_entry)
-#ifdef CONFIG_PREEMPT
- get_thread_info r10
- ldr r4, [r10, #TI_PREEMPT] @ get preempt count
- sub r11, r4, #1 @ decrement it
- str r11, [r10, #TI_PREEMPT]
-#endif
+ dec_preempt_count_ti r10, r4
mov pc, lr
ENDPROC(vfp_null_entry)
@@ -51,14 +46,9 @@ ENDPROC(vfp_null_entry)
__INIT
ENTRY(vfp_testing_entry)
-#ifdef CONFIG_PREEMPT
- get_thread_info r10
- ldr r4, [r10, #TI_PREEMPT] @ get preempt count
- sub r11, r4, #1 @ decrement it
- str r11, [r10, #TI_PREEMPT]
-#endif
+ dec_preempt_count_ti r10, r4
ldr r0, VFP_arch_address
- str r5, [r0] @ known non-zero value
+ str r0, [r0] @ set to non-zero value
mov pc, r9 @ we have handled the fault
ENDPROC(vfp_testing_entry)
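The open-coded TI_PREEMPT load/modify/store sequences removed above are replaced by the inc_preempt_count and dec_preempt_count_ti assembler macros. As a hedged C sketch of what that bookkeeping amounts to (the real macros live in asm/assembler.h; the helper names here are made up):

    #include <asm/thread_info.h>

    /*
     * Sketch only, not the macro definitions. The VFP entry path runs
     * with IRQs enabled, so it bumps the preempt count to stay
     * non-preemptible until the hardware state is consistent again.
     */
    static inline void vfp_preempt_disable(struct thread_info *ti)
    {
            ti->preempt_count++;    /* inc_preempt_count r10, r4 */
    }

    static inline void vfp_preempt_enable(struct thread_info *ti)
    {
            ti->preempt_count--;    /* dec_preempt_count_ti r10, r4 */
    }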
diff --git a/arch/arm/vfp/vfpdouble.c b/arch/arm/vfp/vfpdouble.c
index 6cac43bd1d8..423f56dd402 100644
--- a/arch/arm/vfp/vfpdouble.c
+++ b/arch/arm/vfp/vfpdouble.c
@@ -866,6 +866,8 @@ vfp_double_multiply_accumulate(int dd, int dn, int dm, u32 fpscr, u32 negate, ch
vdp.sign = vfp_sign_negate(vdp.sign);
vfp_double_unpack(&vdn, vfp_get_double(dd));
+ if (vdn.exponent == 0 && vdn.significand)
+ vfp_double_normalise_denormal(&vdn);
if (negate & NEG_SUBTRACT)
vdn.sign = vfp_sign_negate(vdn.sign);
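The two-line fix above (mirrored for single precision at the end of this series) normalises a subnormal operand, exponent == 0 with a non-zero significand, before the multiply-accumulate uses it, matching what the other unpack sites already do. A minimal sketch of what normalising means, assuming a hypothetical unpacked layout with the unit bit at bit 62 (the kernel's struct vfp_double differs in detail):

    #include <stdint.h>

    /* Hypothetical unpacked format, for illustration only. */
    struct fp_unpacked {
            int      sign;
            int      exponent;     /* 0 marks a subnormal */
            uint64_t significand;  /* unit bit assumed at bit 62 */
    };

    /* Shift the significand up until its unit bit is set, compensating
     * in the exponent, so later code can treat the value as normal. */
    static void normalise_subnormal(struct fp_unpacked *v)
    {
            while (!(v->significand & (1ULL << 62))) {
                    v->significand <<= 1;
                    v->exponent--;
            }
    }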
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index d66cead97d2..be807625ed8 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -14,19 +14,25 @@
* r10 points at the start of the private FP workspace in the thread structure
* sp points to a struct pt_regs (as defined in include/asm/proc/ptrace.h)
*/
+#include <linux/init.h>
+#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/vfpmacros.h>
-#include "../kernel/entry-header.S"
+#include <linux/kern_levels.h>
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
.macro DBGSTR, str
#ifdef DEBUG
stmfd sp!, {r0-r3, ip, lr}
- add r0, pc, #4
+ ldr r0, =1f
bl printk
- b 1f
- .asciz "<7>VFP: \str\n"
- .balign 4
-1: ldmfd sp!, {r0-r3, ip, lr}
+ ldmfd sp!, {r0-r3, ip, lr}
+
+ .pushsection .rodata, "a"
+1: .ascii KERN_DEBUG "VFP: \str\n"
+ .byte 0
+ .previous
#endif
.endm
@@ -34,12 +40,14 @@
#ifdef DEBUG
stmfd sp!, {r0-r3, ip, lr}
mov r1, \arg
- add r0, pc, #4
+ ldr r0, =1f
bl printk
- b 1f
- .asciz "<7>VFP: \str\n"
- .balign 4
-1: ldmfd sp!, {r0-r3, ip, lr}
+ ldmfd sp!, {r0-r3, ip, lr}
+
+ .pushsection .rodata, "a"
+1: .ascii KERN_DEBUG "VFP: \str\n"
+ .byte 0
+ .previous
#endif
.endm
@@ -49,55 +57,62 @@
mov r3, \arg3
mov r2, \arg2
mov r1, \arg1
- add r0, pc, #4
+ ldr r0, =1f
bl printk
- b 1f
- .asciz "<7>VFP: \str\n"
- .balign 4
-1: ldmfd sp!, {r0-r3, ip, lr}
+ ldmfd sp!, {r0-r3, ip, lr}
+
+ .pushsection .rodata, "a"
+1: .ascii KERN_DEBUG "VFP: \str\n"
+ .byte 0
+ .previous
#endif
.endm
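Two things change in these debug macros: the format strings move into .rodata via .pushsection instead of being branched around in the text section, and the address is now taken with an ldr r0, =1f literal-pool load rather than the fragile add r0, pc, #4, which depended on the string sitting at a fixed offset from the load. The hardcoded "<7>" prefix also becomes the KERN_DEBUG macro from <linux/kern_levels.h> (historically "<7>"), so the strings stay correct if the loglevel encoding changes. The C-side shape of such a call, as a sketch:

    #include <linux/kern_levels.h>
    #include <linux/printk.h>
    #include <linux/types.h>

    /* The loglevel macro is a string literal pasted onto the format at
     * compile time, which is exactly what the .ascii directive does. */
    static void dbg_fpexc(u32 fpexc)
    {
            printk(KERN_DEBUG "VFP: fpexc %08x\n", fpexc);
    }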
@ VFP hardware support entry point.
@
-@ r0 = faulted instruction
-@ r2 = faulted PC+4
-@ r9 = successful return
+@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
+@ r2 = PC value to resume execution after successful emulation
+@ r9 = normal "successful" return address
@ r10 = vfp_state union
@ r11 = CPU number
-@ lr = failure return
-
+@ lr = unrecognised instruction return address
+@ IRQs enabled.
ENTRY(vfp_support_entry)
DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10
+ ldr r3, [sp, #S_PSR] @ Neither lazy restore nor FP exceptions
+ and r3, r3, #MODE_MASK @ are supported in kernel mode
+ teq r3, #USR_MODE
+ bne vfp_kmode_exception @ Returns through lr
+
VFPFMRX r1, FPEXC @ Is the VFP enabled?
DBGSTR1 "fpexc %08x", r1
tst r1, #FPEXC_EN
bne look_for_VFP_exceptions @ VFP is already enabled
DBGSTR1 "enable %x", r10
- ldr r3, last_VFP_context_address
+ ldr r3, vfp_current_hw_state_address
orr r1, r1, #FPEXC_EN @ user FPEXC has the enable bit set
- ldr r4, [r3, r11, lsl #2] @ last_VFP_context pointer
+ ldr r4, [r3, r11, lsl #2] @ vfp_current_hw_state pointer
bic r5, r1, #FPEXC_EX @ make sure exceptions are disabled
- cmp r4, r10
- beq check_for_exception @ we are returning to the same
- @ process, so the registers are
- @ still there. In this case, we do
- @ not want to drop a pending exception.
+ cmp r4, r10 @ this thread owns the hw context?
+#ifndef CONFIG_SMP
+ @ For UP, checking that this thread owns the hw context is
+ @ sufficient to determine that the hardware state is valid.
+ beq vfp_hw_state_valid
+
+ @ On UP, we lazily save the VFP context. As a different
+ @ thread wants ownership of the VFP hardware, save the old
+ @ state if there was a previous (valid) owner.
VFPFMXR FPEXC, r5 @ enable VFP, disable any pending
@ exceptions, so we can get at the
@ rest of it
-#ifndef CONFIG_SMP
- @ Save out the current registers to the old thread state
- @ No need for SMP since this is not done lazily
-
DBGSTR1 "save old state %p", r4
- cmp r4, #0
- beq no_old_VFP_process
+ cmp r4, #0 @ if the vfp_current_hw_state is NULL
+ beq vfp_reload_hw @ then the hw state needs reloading
VFPFSTMIA r4, r5 @ save the working registers
VFPFMRX r5, FPSCR @ current status
#ifndef CONFIG_CPU_FEROCEON
@@ -110,13 +125,35 @@ ENTRY(vfp_support_entry)
1:
#endif
stmia r4, {r1, r5, r6, r8} @ save FPEXC, FPSCR, FPINST, FPINST2
- @ and point r4 at the word at the
- @ start of the register dump
+vfp_reload_hw:
+
+#else
+ @ For SMP, if this thread does not own the hw context, then we
+ @ need to reload it. No need to save the old state as on SMP,
+ @ we always save the state when we switch away from a thread.
+ bne vfp_reload_hw
+
+ @ This thread has ownership of the current hardware context.
+ @ However, it may have been migrated to another CPU, in which
+ @ case the saved state is newer than the hardware context.
+ @ Check this by looking at the CPU number which the state was
+ @ last loaded onto.
+ ldr ip, [r10, #VFP_CPU]
+ teq ip, r11
+ beq vfp_hw_state_valid
+
+vfp_reload_hw:
+ @ We're loading this thread's state into the VFP hardware. Update
+ @ the CPU number which contains the most up-to-date VFP context.
+ str r11, [r10, #VFP_CPU]
+
+ VFPFMXR FPEXC, r5 @ enable VFP, disable any pending
+ @ exceptions, so we can get at the
+ @ rest of it
#endif
-no_old_VFP_process:
DBGSTR1 "load state %p", r10
- str r10, [r3, r11, lsl #2] @ update the last_VFP_context pointer
+ str r10, [r3, r11, lsl #2] @ update the vfp_current_hw_state pointer
@ Load the saved state back into the VFP
VFPFLDMIA r10, r5 @ reload the working registers while
@ FPEXC is in a safe state
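The rewritten block above separates the UP case (context is saved lazily, so owning the hardware is proof enough that it is valid) from SMP (state is always saved at switch-out, but the owner may have migrated between CPUs). The same test shows up in C later in this series as vfp_state_in_hw(); a condensed sketch of the decision, with a hypothetical helper name:

    #include <linux/sched.h>

    /* Sketch only; the real fast path is the assembly above. */
    static bool hw_state_usable(unsigned int cpu, struct thread_info *thread)
    {
            if (vfp_current_hw_state[cpu] != &thread->vfpstate)
                    return false;   /* another thread owns the registers */
    #ifdef CONFIG_SMP
            /* the owner may have migrated; its saved copy is then newer */
            if (thread->vfpstate.hard.cpu != cpu)
                    return false;
    #endif
            return true;
    }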
@@ -132,21 +169,20 @@ no_old_VFP_process:
#endif
VFPFMXR FPSCR, r5 @ restore status
-check_for_exception:
+@ The context stored in the VFP hardware is up to date with this thread
+vfp_hw_state_valid:
tst r1, #FPEXC_EX
bne process_exception @ might as well handle the pending
@ exception before retrying branch
@ out before setting an FPEXC that
@ stops us reading stuff
- VFPFMXR FPEXC, r1 @ restore FPEXC last
- sub r2, r2, #4
- str r2, [sp, #S_PC] @ retry the instruction
-#ifdef CONFIG_PREEMPT
- get_thread_info r10
- ldr r4, [r10, #TI_PREEMPT] @ get preempt count
- sub r11, r4, #1 @ decrement it
- str r11, [r10, #TI_PREEMPT]
-#endif
+ VFPFMXR FPEXC, r1 @ Restore FPEXC last
+ sub r2, r2, #4 @ Retry current instruction - if Thumb
+ str r2, [sp, #S_PC] @ mode it's two 16-bit instructions,
+ @ else it's one 32-bit instruction, so
+ @ always subtract 4 from the following
+ @ instruction address.
+ dec_preempt_count_ti r10, r4
mov pc, r9 @ we think we have handled things
@@ -165,12 +201,7 @@ look_for_VFP_exceptions:
@ not recognised by VFP
DBGSTR "not VFP"
-#ifdef CONFIG_PREEMPT
- get_thread_info r10
- ldr r4, [r10, #TI_PREEMPT] @ get preempt count
- sub r11, r4, #1 @ decrement it
- str r11, [r10, #TI_PREEMPT]
-#endif
+ dec_preempt_count_ti r10, r4
mov pc, lr
process_exception:
@@ -206,8 +237,9 @@ ENTRY(vfp_save_state)
mov pc, lr
ENDPROC(vfp_save_state)
-last_VFP_context_address:
- .word last_VFP_context
+ .align
+vfp_current_hw_state_address:
+ .word vfp_current_hw_state
.macro tbl_branch, base, tmp, shift
#ifdef CONFIG_THUMB2_KERNEL
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 8063a322c79..2f37e1d6cb4 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -8,14 +8,23 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
#include <linux/types.h>
+#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
+#include <linux/hardirq.h>
#include <linux/kernel.h>
+#include <linux/notifier.h>
#include <linux/signal.h>
#include <linux/sched.h>
+#include <linux/smp.h>
#include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/user.h>
+#include <linux/export.h>
+#include <asm/cp15.h>
#include <asm/cputype.h>
+#include <asm/system_info.h>
#include <asm/thread_notify.h>
#include <asm/vfp.h>
@@ -30,7 +39,6 @@ void vfp_support_entry(void);
void vfp_null_entry(void);
void (*vfp_vector)(void) = vfp_null_entry;
-union vfp_state *last_VFP_context[NR_CPUS];
/*
* Dual-use variable.
@@ -40,6 +48,46 @@ union vfp_state *last_VFP_context[NR_CPUS];
unsigned int VFP_arch;
/*
+ * The pointer to the vfpstate structure of the thread which currently
+ * owns the context held in the VFP hardware, or NULL if the hardware
+ * context is invalid.
+ *
+ * For UP, this is sufficient to tell which thread owns the VFP context.
+ * However, for SMP, we also need to check the CPU number stored in the
+ * saved state too to catch migrations.
+ */
+union vfp_state *vfp_current_hw_state[NR_CPUS];
+
+/*
+ * Is 'thread's most up-to-date state stored in this CPU's hardware?
+ * Must be called from non-preemptible context.
+ */
+static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread)
+{
+#ifdef CONFIG_SMP
+ if (thread->vfpstate.hard.cpu != cpu)
+ return false;
+#endif
+ return vfp_current_hw_state[cpu] == &thread->vfpstate;
+}
+
+/*
+ * Force a reload of the VFP context from the thread structure. We do
+ * this by ensuring that access to the VFP hardware is disabled and by
+ * clearing vfp_current_hw_state. Must be called from non-preemptible context.
+ */
+static void vfp_force_reload(unsigned int cpu, struct thread_info *thread)
+{
+ if (vfp_state_in_hw(cpu, thread)) {
+ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+ vfp_current_hw_state[cpu] = NULL;
+ }
+#ifdef CONFIG_SMP
+ thread->vfpstate.hard.cpu = NR_CPUS;
+#endif
+}
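A usage sketch for the helper above, mirroring what vfp_flush_hwstate() further down does; callers must pin themselves to a CPU first, since the helper demands a non-preemptible context (the wrapper name is made up):

    static void example_discard_hw_copy(void)
    {
            unsigned int cpu = get_cpu();   /* disable preemption */

            vfp_force_reload(cpu, current_thread_info());
            put_cpu();
    }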
+
+/*
* Per-thread VFP initialization.
*/
static void vfp_thread_flush(struct thread_info *thread)
@@ -47,21 +95,27 @@ static void vfp_thread_flush(struct thread_info *thread)
union vfp_state *vfp = &thread->vfpstate;
unsigned int cpu;
- memset(vfp, 0, sizeof(union vfp_state));
-
- vfp->hard.fpexc = FPEXC_EN;
- vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
-
/*
* Disable VFP to ensure we initialize it first. We must ensure
- * that the modification of last_VFP_context[] and hardware disable
- * are done for the same CPU and without preemption.
+ * that the modification of vfp_current_hw_state[] and hardware
+ * disable are done for the same CPU and without preemption.
+ *
+ * Do this first so that, if access to the VFP is still enabled at this
+ * point, a preempting context switch cannot save stale hardware state
+ * over the structure we are about to initialise.
*/
cpu = get_cpu();
- if (last_VFP_context[cpu] == vfp)
- last_VFP_context[cpu] = NULL;
+ if (vfp_current_hw_state[cpu] == vfp)
+ vfp_current_hw_state[cpu] = NULL;
fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
put_cpu();
+
+ memset(vfp, 0, sizeof(union vfp_state));
+
+ vfp->hard.fpexc = FPEXC_EN;
+ vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
+#ifdef CONFIG_SMP
+ vfp->hard.cpu = NR_CPUS;
+#endif
}
static void vfp_thread_exit(struct thread_info *thread)
@@ -70,11 +124,22 @@ static void vfp_thread_exit(struct thread_info *thread)
union vfp_state *vfp = &thread->vfpstate;
unsigned int cpu = get_cpu();
- if (last_VFP_context[cpu] == vfp)
- last_VFP_context[cpu] = NULL;
+ if (vfp_current_hw_state[cpu] == vfp)
+ vfp_current_hw_state[cpu] = NULL;
put_cpu();
}
+static void vfp_thread_copy(struct thread_info *thread)
+{
+ struct thread_info *parent = current_thread_info();
+
+ vfp_sync_hwstate(parent);
+ thread->vfpstate = parent->vfpstate;
+#ifdef CONFIG_SMP
+ thread->vfpstate.hard.cpu = NR_CPUS;
+#endif
+}
+
/*
* When this function is called with the following 'cmd's, the following
* is true while this function is being run:
@@ -101,29 +166,25 @@ static void vfp_thread_exit(struct thread_info *thread)
static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
struct thread_info *thread = v;
+ u32 fpexc;
+#ifdef CONFIG_SMP
+ unsigned int cpu;
+#endif
- if (likely(cmd == THREAD_NOTIFY_SWITCH)) {
- u32 fpexc = fmrx(FPEXC);
+ switch (cmd) {
+ case THREAD_NOTIFY_SWITCH:
+ fpexc = fmrx(FPEXC);
#ifdef CONFIG_SMP
- unsigned int cpu = thread->cpu;
+ cpu = thread->cpu;
/*
* On SMP, if VFP is enabled, save the old state in
* case the thread migrates to a different CPU. The
* restoring is done lazily.
*/
- if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {
- vfp_save_state(last_VFP_context[cpu], fpexc);
- last_VFP_context[cpu]->hard.cpu = cpu;
- }
- /*
- * Thread migration, just force the reloading of the
- * state on the new CPU in case the VFP registers
- * contain stale data.
- */
- if (thread->vfpstate.hard.cpu != cpu)
- last_VFP_context[cpu] = NULL;
+ if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu])
+ vfp_save_state(vfp_current_hw_state[cpu], fpexc);
#endif
/*
@@ -131,13 +192,20 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
* old state.
*/
fmxr(FPEXC, fpexc & ~FPEXC_EN);
- return NOTIFY_DONE;
- }
+ break;
- if (cmd == THREAD_NOTIFY_FLUSH)
+ case THREAD_NOTIFY_FLUSH:
vfp_thread_flush(thread);
- else
+ break;
+
+ case THREAD_NOTIFY_EXIT:
vfp_thread_exit(thread);
+ break;
+
+ case THREAD_NOTIFY_COPY:
+ vfp_thread_copy(thread);
+ break;
+ }
return NOTIFY_DONE;
}
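The switch-based notifier above is attached to the context-switch path once at boot; the registration, done in vfp_init() outside the hunks shown here, is simply:

    thread_register_notifier(&vfp_notifier_block);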
@@ -150,7 +218,7 @@ static struct notifier_block vfp_notifier_block = {
* Raise a SIGFPE for the current process.
* sicode describes the signal being raised.
*/
-void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
+static void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
{
siginfo_t info;
@@ -174,11 +242,11 @@ static void vfp_panic(char *reason, u32 inst)
{
int i;
- printk(KERN_ERR "VFP: Error: %s\n", reason);
- printk(KERN_ERR "VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n",
+ pr_err("VFP: Error: %s\n", reason);
+ pr_err("VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n",
fmrx(FPEXC), fmrx(FPSCR), inst);
for (i = 0; i < 32; i += 2)
- printk(KERN_ERR "VFP: s%2u: 0x%08x s%2u: 0x%08x\n",
+ pr_err("VFP: s%2u: 0x%08x s%2u: 0x%08x\n",
i, vfp_get_float(i), i+1, vfp_get_float(i+1));
}
@@ -346,7 +414,7 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
* If there isn't a second FP instruction, exit now. Note that
* the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
*/
- if (fpexc ^ (FPEXC_EX | FPEXC_FP2V))
+ if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
goto exit;
/*
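The predicate fix above closes a real bug: the old XOR form is zero only when fpexc equals FPEXC_EX | FPEXC_FP2V exactly, yet FPEXC carries other bits (at minimum FPEXC_EN) while a bounce is being handled, so the old test always took the exit path even with a valid second instruction pending. A standalone demonstration, using the bit positions from asm/vfp.h:

    #include <stdio.h>

    #define FPEXC_FP2V      (1u << 28)
    #define FPEXC_EN        (1u << 30)
    #define FPEXC_EX        (1u << 31)

    int main(void)
    {
            unsigned int fpexc = FPEXC_EX | FPEXC_FP2V | FPEXC_EN;

            /* old: non-zero ("exit") because FPEXC_EN is also set */
            printf("old exits: %s\n",
                   (fpexc ^ (FPEXC_EX | FPEXC_FP2V)) ? "yes" : "no");

            /* new: exits only if EX and FP2V are not both set */
            printf("new exits: %s\n",
                   ((fpexc & (FPEXC_EX | FPEXC_FP2V)) !=
                    (FPEXC_EX | FPEXC_FP2V)) ? "yes" : "no");
            return 0;
    }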
@@ -366,7 +434,10 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
static void vfp_enable(void *unused)
{
- u32 access = get_copro_access();
+ u32 access;
+
+ BUG_ON(preemptible());
+ access = get_copro_access();
/*
* Enable full access to VFP (cp10 and cp11)
@@ -374,70 +445,79 @@ static void vfp_enable(void *unused)
set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
}
-#ifdef CONFIG_PM
-#include <linux/sysdev.h>
-
-static int vfp_pm_suspend(struct sys_device *dev, pm_message_t state)
+#ifdef CONFIG_CPU_PM
+static int vfp_pm_suspend(void)
{
struct thread_info *ti = current_thread_info();
u32 fpexc = fmrx(FPEXC);
/* if vfp is on, then save state for resumption */
if (fpexc & FPEXC_EN) {
- printk(KERN_DEBUG "%s: saving vfp state\n", __func__);
+ pr_debug("%s: saving vfp state\n", __func__);
vfp_save_state(&ti->vfpstate, fpexc);
/* disable, just in case */
fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+ } else if (vfp_current_hw_state[ti->cpu]) {
+#ifndef CONFIG_SMP
+ fmxr(FPEXC, fpexc | FPEXC_EN);
+ vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
+ fmxr(FPEXC, fpexc);
+#endif
}
/* clear any information we had about last context state */
- memset(last_VFP_context, 0, sizeof(last_VFP_context));
+ vfp_current_hw_state[ti->cpu] = NULL;
return 0;
}
-static int vfp_pm_resume(struct sys_device *dev)
+static void vfp_pm_resume(void)
{
/* ensure we have access to the vfp */
vfp_enable(NULL);
/* and disable it to ensure the next usage restores the state */
fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
-
- return 0;
}
-static struct sysdev_class vfp_pm_sysclass = {
- .name = "vfp",
- .suspend = vfp_pm_suspend,
- .resume = vfp_pm_resume,
-};
+static int vfp_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
+ void *v)
+{
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ vfp_pm_suspend();
+ break;
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ vfp_pm_resume();
+ break;
+ }
+ return NOTIFY_OK;
+}
-static struct sys_device vfp_pm_sysdev = {
- .cls = &vfp_pm_sysclass,
+static struct notifier_block vfp_cpu_pm_notifier_block = {
+ .notifier_call = vfp_cpu_pm_notifier,
};
static void vfp_pm_init(void)
{
- sysdev_class_register(&vfp_pm_sysclass);
- sysdev_register(&vfp_pm_sysdev);
+ cpu_pm_register_notifier(&vfp_cpu_pm_notifier_block);
}
-
#else
static inline void vfp_pm_init(void) { }
-#endif /* CONFIG_PM */
+#endif /* CONFIG_CPU_PM */
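Context for the CONFIG_CPU_PM conversion above: instead of a sysdev suspend/resume pair, the save/restore now hangs off the generic CPU power-management notifier chain, which also fires for cpuidle-driven core power-down, not only system suspend. The driving side, sketched as a hypothetical platform idle hook (cpu_pm_enter()/cpu_pm_exit() are the standard API):

    #include <linux/cpu_pm.h>

    /* Hypothetical idle hook on a platform that loses VFP context. */
    static void platform_enter_deep_idle(void)
    {
            if (cpu_pm_enter())     /* CPU_PM_ENTER -> vfp_pm_suspend() */
                    return;         /* a notifier vetoed the transition */

            /* ... core powers down, register context is lost ... */

            cpu_pm_exit();          /* CPU_PM_EXIT -> vfp_pm_resume() */
    }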
+/*
+ * Ensure that the VFP state stored in 'thread->vfpstate' is up to date
+ * with the hardware state.
+ */
void vfp_sync_hwstate(struct thread_info *thread)
{
unsigned int cpu = get_cpu();
- /*
- * If the thread we're interested in is the current owner of the
- * hardware VFP state, then we need to save its state.
- */
- if (last_VFP_context[cpu] == &thread->vfpstate) {
+ if (vfp_state_in_hw(cpu, thread)) {
u32 fpexc = fmrx(FPEXC);
/*
@@ -451,40 +531,189 @@ void vfp_sync_hwstate(struct thread_info *thread)
put_cpu();
}
+/* Ensure that the thread reloads the hardware VFP state on the next use. */
void vfp_flush_hwstate(struct thread_info *thread)
{
unsigned int cpu = get_cpu();
+ vfp_force_reload(cpu, thread);
+
+ put_cpu();
+}
+
+/*
+ * Save the current VFP state into the provided structures and prepare
+ * for entry into a new function (signal handler).
+ */
+int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
+ struct user_vfp_exc __user *ufp_exc)
+{
+ struct thread_info *thread = current_thread_info();
+ struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
+ int err = 0;
+
+ /* Ensure that the saved hwstate is up-to-date. */
+ vfp_sync_hwstate(thread);
+
/*
- * If the thread we're interested in is the current owner of the
- * hardware VFP state, then we need to save its state.
+ * Copy the floating point registers. There can be unused
+ * registers; see asm/hwcap.h for details.
*/
- if (last_VFP_context[cpu] == &thread->vfpstate) {
- u32 fpexc = fmrx(FPEXC);
+ err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs,
+ sizeof(hwstate->fpregs));
+ /*
+ * Copy the status and control register.
+ */
+ __put_user_error(hwstate->fpscr, &ufp->fpscr, err);
- fmxr(FPEXC, fpexc & ~FPEXC_EN);
+ /*
+ * Copy the exception registers.
+ */
+ __put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err);
+ __put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
+ __put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
- /*
- * Set the context to NULL to force a reload the next time
- * the thread uses the VFP.
- */
- last_VFP_context[cpu] = NULL;
- }
+ if (err)
+ return -EFAULT;
+
+ /* Ensure that VFP is disabled. */
+ vfp_flush_hwstate(thread);
-#ifdef CONFIG_SMP
/*
- * For SMP we still have to take care of the case where the thread
- * migrates to another CPU and then back to the original CPU on which
- * the last VFP user is still the same thread. Mark the thread VFP
- * state as belonging to a non-existent CPU so that the saved one will
- * be reloaded in the above case.
+ * As per the PCS, clear the length and stride bits for function
+ * entry.
*/
- thread->vfpstate.hard.cpu = NR_CPUS;
+ hwstate->fpscr &= ~(FPSCR_LENGTH_MASK | FPSCR_STRIDE_MASK);
+ return 0;
+}
+
+/* Sanitise and restore the current VFP state from the provided structures. */
+int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
+ struct user_vfp_exc __user *ufp_exc)
+{
+ struct thread_info *thread = current_thread_info();
+ struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
+ unsigned long fpexc;
+ int err = 0;
+
+ /* Disable VFP to avoid corrupting the new thread state. */
+ vfp_flush_hwstate(thread);
+
+ /*
+ * Copy the floating point registers. There can be unused
+ * registers; see asm/hwcap.h for details.
+ */
+ err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
+ sizeof(hwstate->fpregs));
+ /*
+ * Copy the status and control register.
+ */
+ __get_user_error(hwstate->fpscr, &ufp->fpscr, err);
+
+ /*
+ * Sanitise and restore the exception registers.
+ */
+ __get_user_error(fpexc, &ufp_exc->fpexc, err);
+
+ /* Ensure the VFP is enabled. */
+ fpexc |= FPEXC_EN;
+
+ /* Ensure FPINST2 is invalid and the exception flag is cleared. */
+ fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
+ hwstate->fpexc = fpexc;
+
+ __get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
+ __get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
+
+ return err ? -EFAULT : 0;
+}
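The two functions above form a save/restore pair for signal handling; the real callers live in arch/arm/kernel/signal.c. A round-trip sketch with hypothetical user buffers:

    /* Sketch only; the buffers would sit inside the user sigframe. */
    static int example_signal_roundtrip(struct user_vfp __user *ufp,
                                        struct user_vfp_exc __user *ufp_exc)
    {
            int err;

            /* delivery: dump state to the sigframe, then disable the VFP */
            err = vfp_preserve_user_clear_hwstate(ufp, ufp_exc);
            if (err)
                    return err;

            /* sigreturn: sanitise and reload the (possibly edited) state */
            return vfp_restore_user_hwstate(ufp, ufp_exc);
    }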
+
+/*
+ * VFP hardware can lose all context when a CPU goes offline.
+ * As we will be running in SMP mode with CPU hotplug, we will save the
+ * hardware state at every thread switch. We clear our held state when
+ * a CPU has been killed, indicating that the VFP hardware doesn't contain
+ * a thread's VFP state. When a CPU starts up, we re-enable access to the
+ * VFP hardware.
+ *
+ * Both CPU_DYING and CPU_STARTING are called on the CPU which
+ * is being offlined/onlined.
+ */
+static int vfp_hotplug(struct notifier_block *b, unsigned long action,
+ void *hcpu)
+{
+ if (action == CPU_DYING || action == CPU_DYING_FROZEN)
+ vfp_current_hw_state[(long)hcpu] = NULL;
+ else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
+ vfp_enable(NULL);
+ return NOTIFY_OK;
+}
+
+void vfp_kmode_exception(void)
+{
+ /*
+ * If we reach this point, a floating point exception has been raised
+ * while running in kernel mode. If the NEON/VFP unit was enabled at the
+ * time, it means a VFP instruction has been issued that requires
+ * software assistance to complete, something which is not currently
+ * supported in kernel mode.
+ * If the NEON/VFP unit was disabled, and the location pointed to below
+ * is properly preceded by a call to kernel_neon_begin(), something has
+ * caused the task to be scheduled out and back in again. In this case,
+ * rebuilding and running with CONFIG_DEBUG_ATOMIC_SLEEP enabled should
+ * be helpful in localizing the problem.
+ */
+ if (fmrx(FPEXC) & FPEXC_EN)
+ pr_crit("BUG: unsupported FP instruction in kernel mode\n");
+ else
+ pr_crit("BUG: FP instruction issued in kernel mode with FP unit disabled\n");
+}
+
+#ifdef CONFIG_KERNEL_MODE_NEON
+
+/*
+ * Kernel-side NEON support functions
+ */
+void kernel_neon_begin(void)
+{
+ struct thread_info *thread = current_thread_info();
+ unsigned int cpu;
+ u32 fpexc;
+
+ /*
+ * Kernel mode NEON is only allowed outside of interrupt context
+ * with preemption disabled. This will make sure that the kernel
+ * mode NEON register contents never need to be preserved.
+ */
+ BUG_ON(in_interrupt());
+ cpu = get_cpu();
+
+ fpexc = fmrx(FPEXC) | FPEXC_EN;
+ fmxr(FPEXC, fpexc);
+
+ /*
+ * Save the userland NEON/VFP state. Under UP,
+ * the owner could be a task other than 'current'.
+ */
+ if (vfp_state_in_hw(cpu, thread))
+ vfp_save_state(&thread->vfpstate, fpexc);
+#ifndef CONFIG_SMP
+ else if (vfp_current_hw_state[cpu] != NULL)
+ vfp_save_state(vfp_current_hw_state[cpu], fpexc);
#endif
+ vfp_current_hw_state[cpu] = NULL;
+}
+EXPORT_SYMBOL(kernel_neon_begin);
+
+void kernel_neon_end(void)
+{
+ /* Disable the NEON/VFP unit. */
+ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
put_cpu();
}
+EXPORT_SYMBOL(kernel_neon_end);
-#include <linux/smp.h>
+#endif /* CONFIG_KERNEL_MODE_NEON */
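Usage of the pair above from a hypothetical in-kernel consumer; the contract is that all NEON/VFP use sits inside the bracket, in process context, without sleeping (kernel_neon_begin() itself disables preemption via get_cpu()):

    static void example_neon_user(void)
    {
            kernel_neon_begin();    /* enable the unit, save user state */
            /* ... NEON instructions / intrinsics may run here ... */
            kernel_neon_end();      /* disable the unit, allow preemption */
    }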
/*
* VFP support code initialisation.
@@ -495,7 +724,7 @@ static int __init vfp_init(void)
unsigned int cpu_arch = cpu_architecture();
if (cpu_arch >= CPU_ARCH_ARMv6)
- vfp_enable(NULL);
+ on_each_cpu(vfp_enable, NULL, 1);
/*
* First check that there is a VFP that we can use.
@@ -508,16 +737,16 @@ static int __init vfp_init(void)
barrier();
vfp_vector = vfp_null_entry;
- printk(KERN_INFO "VFP support v0.3: ");
+ pr_info("VFP support v0.3: ");
if (VFP_arch)
- printk("not present\n");
+ pr_cont("not present\n");
else if (vfpsid & FPSID_NODOUBLE) {
- printk("no double precision support\n");
+ pr_cont("no double precision support\n");
} else {
- smp_call_function(vfp_enable, NULL, 1);
+ hotcpu_notifier(vfp_hotplug, 0);
VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; /* Extract the architecture version */
- printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
+ pr_cont("implementor %02x architecture %d part %02x variant %x rev %x\n",
(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
(vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
@@ -539,14 +768,16 @@ static int __init vfp_init(void)
elf_hwcap |= HWCAP_VFPv3;
/*
- * Check for VFPv3 D16. CPUs in this configuration
- * only have 16 x 64bit registers.
+ * Check for VFPv3 D16 and VFPv4 D16. CPUs in
+ * this configuration only have 16 x 64bit
+ * registers.
*/
if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
- elf_hwcap |= HWCAP_VFPv3D16;
+ elf_hwcap |= HWCAP_VFPv3D16; /* also v4-D16 */
+ else
+ elf_hwcap |= HWCAP_VFPD32;
}
#endif
-#ifdef CONFIG_NEON
/*
* Check for the presence of the Advanced SIMD
* load/store instructions, integer and single
@@ -554,12 +785,17 @@ static int __init vfp_init(void)
* for NEON if the hardware has the MVFR registers.
*/
if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
+#ifdef CONFIG_NEON
if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
elf_hwcap |= HWCAP_NEON;
- }
#endif
+#ifdef CONFIG_VFPv3
+ if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
+ elf_hwcap |= HWCAP_VFPv4;
+#endif
+ }
}
return 0;
}
-late_initcall(vfp_init);
+core_initcall(vfp_init);
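The hwcap bits published by vfp_init() above are what userspace keys off. A consumer-side sketch using the standard glibc getauxval() interface on ARM, with names as in the uapi asm/hwcap.h:

    #include <stdio.h>
    #include <sys/auxv.h>
    #include <asm/hwcap.h>

    int main(void)
    {
            unsigned long hwcap = getauxval(AT_HWCAP);

            if (hwcap & HWCAP_VFPv4)
                    printf("VFPv4 (fused multiply-accumulate) present\n");
            else if (hwcap & HWCAP_VFPv3)
                    printf("VFPv3 present\n");
            if (hwcap & HWCAP_NEON)
                    printf("Advanced SIMD (NEON) present\n");
            return 0;
    }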
diff --git a/arch/arm/vfp/vfpsingle.c b/arch/arm/vfp/vfpsingle.c
index b252631b406..4f96c1617aa 100644
--- a/arch/arm/vfp/vfpsingle.c
+++ b/arch/arm/vfp/vfpsingle.c
@@ -915,6 +915,8 @@ vfp_single_multiply_accumulate(int sd, int sn, s32 m, u32 fpscr, u32 negate, cha
v = vfp_get_float(sd);
pr_debug("VFP: s%u = %08x\n", sd, v);
vfp_single_unpack(&vsn, v);
+ if (vsn.exponent == 0 && vsn.significand)
+ vfp_single_normalise_denormal(&vsn);
if (negate & NEG_SUBTRACT)
vsn.sign = vfp_sign_negate(vsn.sign);