Diffstat (limited to 'arch/arm/vfp')
-rw-r--r--  arch/arm/vfp/Makefile    |   6
-rw-r--r--  arch/arm/vfp/entry.S     |  44
-rw-r--r--  arch/arm/vfp/vfp.h       |   8
-rw-r--r--  arch/arm/vfp/vfpdouble.c |  27
-rw-r--r--  arch/arm/vfp/vfphw.S     | 280
-rw-r--r--  arch/arm/vfp/vfpinstr.h  |   6
-rw-r--r--  arch/arm/vfp/vfpmodule.c | 621
-rw-r--r--  arch/arm/vfp/vfpsingle.c |   5
8 files changed, 818 insertions, 179 deletions
diff --git a/arch/arm/vfp/Makefile b/arch/arm/vfp/Makefile
index 7e136e77971..a81404c09d5 100644
--- a/arch/arm/vfp/Makefile
+++ b/arch/arm/vfp/Makefile
@@ -4,10 +4,10 @@
# Copyright (C) 2001 ARM Limited
#
-# EXTRA_CFLAGS := -DDEBUG
-# EXTRA_AFLAGS := -DDEBUG
+# ccflags-y := -DDEBUG
+# asflags-y := -DDEBUG
-AFLAGS :=$(AFLAGS:-msoft-float=-Wa,-mfpu=softvfp+vfp)
+KBUILD_AFLAGS :=$(KBUILD_AFLAGS:-msoft-float=-Wa,-mfpu=softvfp+vfp -mfloat-abi=soft)
LDFLAGS +=--no-warn-mismatch
obj-y += vfp.o
diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
index 7b595547c1c..fe6ca574d09 100644
--- a/arch/arm/vfp/entry.S
+++ b/arch/arm/vfp/entry.S
@@ -7,27 +7,37 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * Basic entry code, called from the kernel's undefined instruction trap.
- * r0 = faulted instruction
- * r5 = faulted PC+4
- * r9 = successful return
- * r10 = thread_info structure
- * lr = failure return
*/
-#include <linux/linkage.h>
#include <linux/init.h>
-#include <asm/asm-offsets.h>
-#include <asm/assembler.h>
+#include <linux/linkage.h>
+#include <asm/thread_info.h>
#include <asm/vfpmacros.h>
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
- .globl do_vfp
-do_vfp:
- enable_irq
+@ VFP entry point.
+@
+@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
+@ r2 = PC value to resume execution after successful emulation
+@ r9 = normal "successful" return address
+@ r10 = this thread's thread_info structure
+@ lr = unrecognised instruction return address
+@ IRQs enabled.
+@
+ENTRY(do_vfp)
+ inc_preempt_count r10, r4
ldr r4, .LCvfp
+ ldr r11, [r10, #TI_CPU] @ CPU number
add r10, r10, #TI_VFPSTATE @ r10 = workspace
ldr pc, [r4] @ call VFP entry point
+ENDPROC(do_vfp)
+
+ENTRY(vfp_null_entry)
+ dec_preempt_count_ti r10, r4
+ mov pc, lr
+ENDPROC(vfp_null_entry)
+ .align 2
.LCvfp:
.word vfp_vector
@@ -35,12 +45,14 @@ do_vfp:
@ failure to the VFP initialisation code.
__INIT
- .globl vfp_testing_entry
-vfp_testing_entry:
+ENTRY(vfp_testing_entry)
+ dec_preempt_count_ti r10, r4
ldr r0, VFP_arch_address
- str r5, [r0] @ known non-zero value
+ str r0, [r0] @ set to non-zero value
mov pc, r9 @ we have handled the fault
+ENDPROC(vfp_testing_entry)
+ .align 2
VFP_arch_address:
.word VFP_arch
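
A note on the preempt-count bracketing introduced here: do_vfp raises the count before the indirect call, and every handler reachable through vfp_vector (vfp_null_entry, vfp_testing_entry, and the exit paths of vfp_support_entry) must drop it again before returning. A minimal C model of that contract, with local stand-ins for the inc_preempt_count/dec_preempt_count_ti macros (nothing below is kernel API):

    #include <stdio.h>

    static int preempt_count;                  /* stand-in for thread_info's count */
    static void inc_preempt(void) { preempt_count++; }
    static void dec_preempt(void) { preempt_count--; }

    /* Handler installed while no VFP support is registered; like
     * vfp_null_entry above, its only job is to rebalance the count. */
    static void null_entry(void)
    {
        dec_preempt();
    }

    static void (*vector)(void) = null_entry;  /* models vfp_vector */

    static void do_vfp_model(void)
    {
        inc_preempt();                         /* taken before the dispatch */
        vector();                              /* 'ldr pc, [r4]' in the asm */
    }

    int main(void)
    {
        do_vfp_model();
        printf("preempt_count = %d\n", preempt_count);  /* balanced: 0 */
        return 0;
    }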
diff --git a/arch/arm/vfp/vfp.h b/arch/arm/vfp/vfp.h
index f2797896e6d..c8c98dd44ad 100644
--- a/arch/arm/vfp/vfp.h
+++ b/arch/arm/vfp/vfp.h
@@ -265,7 +265,11 @@ struct vfp_double {
* which returns (double)0.0. This is useful for the compare with
* zero instructions.
*/
+#ifdef CONFIG_VFPv3
+#define VFP_REG_ZERO 32
+#else
#define VFP_REG_ZERO 16
+#endif
extern u64 vfp_get_double(unsigned int reg);
extern void vfp_put_double(u64 val, unsigned int reg);
@@ -361,12 +365,16 @@ u32 vfp_estimate_sqrt_significand(u32 exponent, u32 significand);
* OP_SCALAR - this operation always operates in scalar mode
* OP_SD - the instruction exceptionally writes to a single precision result.
* OP_DD - the instruction exceptionally writes to a double precision result.
+ * OP_SM - the instruction exceptionally reads from a single precision operand.
*/
#define OP_SCALAR (1 << 0)
#define OP_SD (1 << 1)
#define OP_DD (1 << 1)
+#define OP_SM (1 << 2)
struct op {
u32 (* const fn)(int dd, int dn, int dm, u32 fpscr);
u32 flags;
};
+
+extern void vfp_save_state(void *location, u32 fpexc);
diff --git a/arch/arm/vfp/vfpdouble.c b/arch/arm/vfp/vfpdouble.c
index 4fc05ee0a2e..423f56dd402 100644
--- a/arch/arm/vfp/vfpdouble.c
+++ b/arch/arm/vfp/vfpdouble.c
@@ -34,7 +34,6 @@
#include <linux/bitops.h>
#include <asm/div64.h>
-#include <asm/ptrace.h>
#include <asm/vfp.h>
#include "vfpinstr.h"
@@ -56,7 +55,7 @@ static void vfp_double_normalise_denormal(struct vfp_double *vd)
{
int bits = 31 - fls(vd->significand >> 32);
if (bits == 31)
- bits = 62 - fls(vd->significand);
+ bits = 63 - fls(vd->significand);
vfp_double_dump("normalise_denormal: in", vd);
@@ -669,8 +668,8 @@ static struct op fops_ext[32] = {
[FEXT_TO_IDX(FEXT_FCMPZ)] = { vfp_double_fcmpz, OP_SCALAR },
[FEXT_TO_IDX(FEXT_FCMPEZ)] = { vfp_double_fcmpez, OP_SCALAR },
[FEXT_TO_IDX(FEXT_FCVT)] = { vfp_double_fcvts, OP_SCALAR|OP_SD },
- [FEXT_TO_IDX(FEXT_FUITO)] = { vfp_double_fuito, OP_SCALAR },
- [FEXT_TO_IDX(FEXT_FSITO)] = { vfp_double_fsito, OP_SCALAR },
+ [FEXT_TO_IDX(FEXT_FUITO)] = { vfp_double_fuito, OP_SCALAR|OP_SM },
+ [FEXT_TO_IDX(FEXT_FSITO)] = { vfp_double_fsito, OP_SCALAR|OP_SM },
[FEXT_TO_IDX(FEXT_FTOUI)] = { vfp_double_ftoui, OP_SCALAR|OP_SD },
[FEXT_TO_IDX(FEXT_FTOUIZ)] = { vfp_double_ftouiz, OP_SCALAR|OP_SD },
[FEXT_TO_IDX(FEXT_FTOSI)] = { vfp_double_ftosi, OP_SCALAR|OP_SD },
@@ -867,6 +866,8 @@ vfp_double_multiply_accumulate(int dd, int dn, int dm, u32 fpscr, u32 negate, ch
vdp.sign = vfp_sign_negate(vdp.sign);
vfp_double_unpack(&vdn, vfp_get_double(dd));
+ if (vdn.exponent == 0 && vdn.significand)
+ vfp_double_normalise_denormal(&vdn);
if (negate & NEG_SUBTRACT)
vdn.sign = vfp_sign_negate(vdn.sign);
@@ -1129,11 +1130,11 @@ u32 vfp_double_cpdo(u32 inst, u32 fpscr)
u32 exceptions = 0;
unsigned int dest;
unsigned int dn = vfp_get_dn(inst);
- unsigned int dm = vfp_get_dm(inst);
+ unsigned int dm;
unsigned int vecitr, veclen, vecstride;
struct op *fop;
- vecstride = (1 + ((fpscr & FPSCR_STRIDE_MASK) == FPSCR_STRIDE_MASK)) * 2;
+ vecstride = (1 + ((fpscr & FPSCR_STRIDE_MASK) == FPSCR_STRIDE_MASK));
fop = (op == FOP_EXT) ? &fops_ext[FEXT_TO_IDX(inst)] : &fops[FOP_TO_IDX(op)];
@@ -1147,6 +1148,14 @@ u32 vfp_double_cpdo(u32 inst, u32 fpscr)
dest = vfp_get_dd(inst);
/*
+ * f[us]ito takes a sN operand, not a dN operand.
+ */
+ if (fop->flags & OP_SM)
+ dm = vfp_get_sm(inst);
+ else
+ dm = vfp_get_dm(inst);
+
+ /*
* If destination bank is zero, vector length is always '1'.
* ARM DDI0100F C5.1.3, C5.3.2.
*/
@@ -1185,10 +1194,10 @@ u32 vfp_double_cpdo(u32 inst, u32 fpscr)
* CHECK: It appears to be undefined whether we stop when
* we encounter an exception. We continue.
*/
- dest = FREG_BANK(dest) + ((FREG_IDX(dest) + vecstride) & 6);
- dn = FREG_BANK(dn) + ((FREG_IDX(dn) + vecstride) & 6);
+ dest = FREG_BANK(dest) + ((FREG_IDX(dest) + vecstride) & 3);
+ dn = FREG_BANK(dn) + ((FREG_IDX(dn) + vecstride) & 3);
if (FREG_BANK(dm) != 0)
- dm = FREG_BANK(dm) + ((FREG_IDX(dm) + vecstride) & 6);
+ dm = FREG_BANK(dm) + ((FREG_IDX(dm) + vecstride) & 3);
}
return exceptions;
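
The vecstride change above drops the old single-precision scaling (stride doubled, wrap mask 6) in favour of native d-register stepping: doubles sit in banks of four, so vector iteration wraps the low two bits of the index and leaves the bank bits alone. A standalone sketch of the corrected stepping; the FREG_BANK/FREG_IDX definitions below are simplified stand-ins for the kernel's own macros:

    #include <stdio.h>

    /* Illustrative split of a d-register number into bank + index. */
    #define FREG_BANK(x)    ((x) & ~3u)
    #define FREG_IDX(x)     ((x) & 3u)

    int main(void)
    {
        unsigned int dest = 5;          /* d5: bank d4-d7, index 1 */
        unsigned int vecstride = 1;     /* FPSCR stride bits clear */
        unsigned int i;

        for (i = 0; i < 4; i++) {
            printf("d%u ", dest);
            /* '& 3' wraps within the bank, exactly as in the patch */
            dest = FREG_BANK(dest) + ((FREG_IDX(dest) + vecstride) & 3);
        }
        printf("\n");                   /* prints: d5 d6 d7 d4 */
        return 0;
    }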
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index e51e6679c40..be807625ed8 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -14,19 +14,25 @@
* r10 points at the start of the private FP workspace in the thread structure
* sp points to a struct pt_regs (as defined in include/asm/proc/ptrace.h)
*/
+#include <linux/init.h>
+#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/vfpmacros.h>
-#include "../kernel/entry-header.S"
+#include <linux/kern_levels.h>
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
.macro DBGSTR, str
#ifdef DEBUG
stmfd sp!, {r0-r3, ip, lr}
- add r0, pc, #4
+ ldr r0, =1f
bl printk
- b 1f
- .asciz "<7>VFP: \str\n"
- .balign 4
-1: ldmfd sp!, {r0-r3, ip, lr}
+ ldmfd sp!, {r0-r3, ip, lr}
+
+ .pushsection .rodata, "a"
+1: .ascii KERN_DEBUG "VFP: \str\n"
+ .byte 0
+ .previous
#endif
.endm
@@ -34,12 +40,14 @@
#ifdef DEBUG
stmfd sp!, {r0-r3, ip, lr}
mov r1, \arg
- add r0, pc, #4
+ ldr r0, =1f
bl printk
- b 1f
- .asciz "<7>VFP: \str\n"
- .balign 4
-1: ldmfd sp!, {r0-r3, ip, lr}
+ ldmfd sp!, {r0-r3, ip, lr}
+
+ .pushsection .rodata, "a"
+1: .ascii KERN_DEBUG "VFP: \str\n"
+ .byte 0
+ .previous
#endif
.endm
@@ -49,107 +57,155 @@
mov r3, \arg3
mov r2, \arg2
mov r1, \arg1
- add r0, pc, #4
+ ldr r0, =1f
bl printk
- b 1f
- .asciz "<7>VFP: \str\n"
- .balign 4
-1: ldmfd sp!, {r0-r3, ip, lr}
+ ldmfd sp!, {r0-r3, ip, lr}
+
+ .pushsection .rodata, "a"
+1: .ascii KERN_DEBUG "VFP: \str\n"
+ .byte 0
+ .previous
#endif
.endm
@ VFP hardware support entry point.
@
-@ r0 = faulted instruction
-@ r2 = faulted PC+4
-@ r9 = successful return
+@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
+@ r2 = PC value to resume execution after successful emulation
+@ r9 = normal "successful" return address
@ r10 = vfp_state union
-@ lr = failure return
-
- .globl vfp_support_entry
-vfp_support_entry:
+@ r11 = CPU number
+@ lr = unrecognised instruction return address
+@ IRQs enabled.
+ENTRY(vfp_support_entry)
DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10
+ ldr r3, [sp, #S_PSR] @ Neither lazy restore nor FP exceptions
+ and r3, r3, #MODE_MASK @ are supported in kernel mode
+ teq r3, #USR_MODE
+ bne vfp_kmode_exception @ Returns through lr
+
VFPFMRX r1, FPEXC @ Is the VFP enabled?
DBGSTR1 "fpexc %08x", r1
- tst r1, #FPEXC_ENABLE
+ tst r1, #FPEXC_EN
bne look_for_VFP_exceptions @ VFP is already enabled
DBGSTR1 "enable %x", r10
- ldr r3, last_VFP_context_address
- orr r1, r1, #FPEXC_ENABLE @ user FPEXC has the enable bit set
- ldr r4, [r3] @ last_VFP_context pointer
- bic r5, r1, #FPEXC_EXCEPTION @ make sure exceptions are disabled
- cmp r4, r10
- beq check_for_exception @ we are returning to the same
- @ process, so the registers are
- @ still there. In this case, we do
- @ not want to drop a pending exception.
+ ldr r3, vfp_current_hw_state_address
+ orr r1, r1, #FPEXC_EN @ user FPEXC has the enable bit set
+ ldr r4, [r3, r11, lsl #2] @ vfp_current_hw_state pointer
+ bic r5, r1, #FPEXC_EX @ make sure exceptions are disabled
+ cmp r4, r10 @ this thread owns the hw context?
+#ifndef CONFIG_SMP
+ @ For UP, checking that this thread owns the hw context is
+ @ sufficient to determine that the hardware state is valid.
+ beq vfp_hw_state_valid
+
+ @ On UP, we lazily save the VFP context. As a different
+ @ thread wants ownership of the VFP hardware, save the old
+ @ state if there was a previous (valid) owner.
VFPFMXR FPEXC, r5 @ enable VFP, disable any pending
@ exceptions, so we can get at the
@ rest of it
- @ Save out the current registers to the old thread state
-
DBGSTR1 "save old state %p", r4
- cmp r4, #0
- beq no_old_VFP_process
+ cmp r4, #0 @ if the vfp_current_hw_state is NULL
+ beq vfp_reload_hw @ then the hw state needs reloading
+ VFPFSTMIA r4, r5 @ save the working registers
VFPFMRX r5, FPSCR @ current status
- VFPFMRX r6, FPINST @ FPINST (always there, rev0 onwards)
- tst r1, #FPEXC_FPV2 @ is there an FPINST2 to read?
- VFPFMRX r8, FPINST2, NE @ FPINST2 if needed - avoids reading
- @ nonexistant reg on rev0
- VFPFSTMIA r4 @ save the working registers
+#ifndef CONFIG_CPU_FEROCEON
+ tst r1, #FPEXC_EX @ is there additional state to save?
+ beq 1f
+ VFPFMRX r6, FPINST @ FPINST (only if FPEXC.EX is set)
+ tst r1, #FPEXC_FP2V @ is there an FPINST2 to read?
+ beq 1f
+ VFPFMRX r8, FPINST2 @ FPINST2 if needed (and present)
+1:
+#endif
stmia r4, {r1, r5, r6, r8} @ save FPEXC, FPSCR, FPINST, FPINST2
- @ and point r4 at the word at the
- @ start of the register dump
+vfp_reload_hw:
+
+#else
+ @ For SMP, if this thread does not own the hw context, then we
+ @ need to reload it. No need to save the old state as on SMP,
+ @ we always save the state when we switch away from a thread.
+ bne vfp_reload_hw
+
+ @ This thread has ownership of the current hardware context.
+ @ However, it may have been migrated to another CPU, in which
+ @ case the saved state is newer than the hardware context.
+ @ Check this by looking at the CPU number which the state was
+ @ last loaded onto.
+ ldr ip, [r10, #VFP_CPU]
+ teq ip, r11
+ beq vfp_hw_state_valid
+
+vfp_reload_hw:
+ @ We're loading this thread's state into the VFP hardware. Update
+ @ the CPU number which contains the most up-to-date VFP context.
+ str r11, [r10, #VFP_CPU]
+
+ VFPFMXR FPEXC, r5 @ enable VFP, disable any pending
+ @ exceptions, so we can get at the
+ @ rest of it
+#endif
-no_old_VFP_process:
DBGSTR1 "load state %p", r10
- str r10, [r3] @ update the last_VFP_context pointer
+ str r10, [r3, r11, lsl #2] @ update the vfp_current_hw_state pointer
@ Load the saved state back into the VFP
- VFPFLDMIA r10 @ reload the working registers while
+ VFPFLDMIA r10, r5 @ reload the working registers while
@ FPEXC is in a safe state
ldmia r10, {r1, r5, r6, r8} @ load FPEXC, FPSCR, FPINST, FPINST2
- tst r1, #FPEXC_FPV2 @ is there an FPINST2 to write?
- VFPFMXR FPINST2, r8, NE @ FPINST2 if needed - avoids writing
- @ nonexistant reg on rev0
- VFPFMXR FPINST, r6
+#ifndef CONFIG_CPU_FEROCEON
+ tst r1, #FPEXC_EX @ is there additional state to restore?
+ beq 1f
+ VFPFMXR FPINST, r6 @ restore FPINST (only if FPEXC.EX is set)
+ tst r1, #FPEXC_FP2V @ is there an FPINST2 to write?
+ beq 1f
+ VFPFMXR FPINST2, r8 @ FPINST2 if needed (and present)
+1:
+#endif
VFPFMXR FPSCR, r5 @ restore status
-check_for_exception:
- tst r1, #FPEXC_EXCEPTION
+@ The context stored in the VFP hardware is up to date with this thread
+vfp_hw_state_valid:
+ tst r1, #FPEXC_EX
bne process_exception @ might as well handle the pending
@ exception before retrying branch
@ out before setting an FPEXC that
@ stops us reading stuff
- VFPFMXR FPEXC, r1 @ restore FPEXC last
- sub r2, r2, #4
- str r2, [sp, #S_PC] @ retry the instruction
+ VFPFMXR FPEXC, r1 @ Restore FPEXC last
+ sub r2, r2, #4 @ Retry current instruction - if Thumb
+ str r2, [sp, #S_PC] @ mode it's two 16-bit instructions,
+ @ else it's one 32-bit instruction, so
+ @ always subtract 4 from the following
+ @ instruction address.
+ dec_preempt_count_ti r10, r4
mov pc, r9 @ we think we have handled things
look_for_VFP_exceptions:
- tst r1, #FPEXC_EXCEPTION
+ @ Check for synchronous or asynchronous exception
+ tst r1, #FPEXC_EX | FPEXC_DEX
bne process_exception
+ @ On some implementations of the VFP subarch 1, setting FPSCR.IXE
+ @ causes all the CDP instructions to be bounced synchronously without
+ @ setting the FPEXC.EX bit
VFPFMRX r5, FPSCR
- tst r5, #FPSCR_IXE @ IXE doesn't set FPEXC_EXCEPTION !
+ tst r5, #FPSCR_IXE
bne process_exception
@ Fall through to hand on to the next handler - the coprocessor
@ instruction was not recognised by VFP
DBGSTR "not VFP"
+ dec_preempt_count_ti r10, r4
mov pc, lr
process_exception:
DBGSTR "bounce"
- sub r2, r2, #4
- str r2, [sp, #S_PC] @ retry the instruction on exit from
- @ the imprecise exception handling in
- @ the support code
mov r2, sp @ nothing stacked - regdump is at TOS
mov lr, r9 @ setup for a return to the user code.
@@ -157,55 +213,105 @@ process_exception:
@ r0 holds the trigger instruction
@ r1 holds the FPEXC value
@ r2 pointer to register dump
- b VFP9_bounce @ we have handled this - the support
+ b VFP_bounce @ we have handled this - the support
@ code will raise an exception if
@ required. If not, the user code will
@ retry the faulted instruction
+ENDPROC(vfp_support_entry)
-last_VFP_context_address:
- .word last_VFP_context
+ENTRY(vfp_save_state)
+ @ Save the current VFP state
+ @ r0 - save location
+ @ r1 - FPEXC
+ DBGSTR1 "save VFP state %p", r0
+ VFPFSTMIA r0, r2 @ save the working registers
+ VFPFMRX r2, FPSCR @ current status
+ tst r1, #FPEXC_EX @ is there additional state to save?
+ beq 1f
+ VFPFMRX r3, FPINST @ FPINST (only if FPEXC.EX is set)
+ tst r1, #FPEXC_FP2V @ is there an FPINST2 to read?
+ beq 1f
+ VFPFMRX r12, FPINST2 @ FPINST2 if needed (and present)
+1:
+ stmia r0, {r1, r2, r3, r12} @ save FPEXC, FPSCR, FPINST, FPINST2
+ mov pc, lr
+ENDPROC(vfp_save_state)
+
+ .align
+vfp_current_hw_state_address:
+ .word vfp_current_hw_state
- .globl vfp_get_float
-vfp_get_float:
- add pc, pc, r0, lsl #3
+ .macro tbl_branch, base, tmp, shift
+#ifdef CONFIG_THUMB2_KERNEL
+ adr \tmp, 1f
+ add \tmp, \tmp, \base, lsl \shift
+ mov pc, \tmp
+#else
+ add pc, pc, \base, lsl \shift
mov r0, r0
+#endif
+1:
+ .endm
+
+ENTRY(vfp_get_float)
+ tbl_branch r0, r3, #3
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
- mrc p10, 0, r0, c\dr, c0, 0 @ fmrs r0, s0
+1: mrc p10, 0, r0, c\dr, c0, 0 @ fmrs r0, s0
mov pc, lr
- mrc p10, 0, r0, c\dr, c0, 4 @ fmrs r0, s1
+ .org 1b + 8
+1: mrc p10, 0, r0, c\dr, c0, 4 @ fmrs r0, s1
mov pc, lr
+ .org 1b + 8
.endr
+ENDPROC(vfp_get_float)
- .globl vfp_put_float
-vfp_put_float:
- add pc, pc, r1, lsl #3
- mov r0, r0
+ENTRY(vfp_put_float)
+ tbl_branch r1, r3, #3
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
- mcr p10, 0, r0, c\dr, c0, 0 @ fmsr r0, s0
+1: mcr p10, 0, r0, c\dr, c0, 0 @ fmsr r0, s0
mov pc, lr
- mcr p10, 0, r0, c\dr, c0, 4 @ fmsr r0, s1
+ .org 1b + 8
+1: mcr p10, 0, r0, c\dr, c0, 4 @ fmsr r0, s1
mov pc, lr
+ .org 1b + 8
.endr
+ENDPROC(vfp_put_float)
- .globl vfp_get_double
-vfp_get_double:
- add pc, pc, r0, lsl #3
- mov r0, r0
+ENTRY(vfp_get_double)
+ tbl_branch r0, r3, #3
+ .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+1: fmrrd r0, r1, d\dr
+ mov pc, lr
+ .org 1b + 8
+ .endr
+#ifdef CONFIG_VFPv3
+ @ d16 - d31 registers
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
- fmrrd r0, r1, d\dr
+1: mrrc p11, 3, r0, r1, c\dr @ fmrrd r0, r1, d\dr
mov pc, lr
+ .org 1b + 8
.endr
+#endif
- @ virtual register 16 for compare with zero
+ @ virtual register 16 (or 32 if VFPv3) for compare with zero
mov r0, #0
mov r1, #0
mov pc, lr
+ENDPROC(vfp_get_double)
- .globl vfp_put_double
-vfp_put_double:
- add pc, pc, r2, lsl #3
- mov r0, r0
+ENTRY(vfp_put_double)
+ tbl_branch r2, r3, #3
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
- fmdrr d\dr, r0, r1
+1: fmdrr d\dr, r0, r1
mov pc, lr
+ .org 1b + 8
.endr
+#ifdef CONFIG_VFPv3
+ @ d16 - d31 registers
+ .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+1: mcrr p11, 3, r0, r1, c\dr @ fmdrr r0, r1, d\dr
+ mov pc, lr
+ .org 1b + 8
+ .endr
+#endif
+ENDPROC(vfp_put_double)
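
The tbl_branch macro and the `.org 1b + 8` padding above turn each accessor sequence into a fixed 8-byte slot, so a register number shifted left by 3 indexes straight into the table (with an address-computing variant for Thumb-2, where a plain add-to-PC table is not usable). A rough C analogue, using an array of function pointers for the same O(1) dispatch; everything below is illustrative only:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t s_regs[32];         /* stands in for s0..s31 */

    /* One fixed-size "slot" per register, like the padded asm entries. */
    #define GETTER(n) static uint32_t get_s##n(void) { return s_regs[n]; }
    GETTER(0) GETTER(1) GETTER(2) GETTER(3)   /* ...would continue to 31 */

    static uint32_t (*const get_float[])(void) = {
        get_s0, get_s1, get_s2, get_s3,
    };

    int main(void)
    {
        s_regs[2] = 0x40490fdb;         /* bit pattern of the float 3.14159 */
        printf("s2 = 0x%08x\n", (unsigned int)get_float[2]());
        return 0;
    }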
diff --git a/arch/arm/vfp/vfpinstr.h b/arch/arm/vfp/vfpinstr.h
index 7f343a4beca..15b95b5ab97 100644
--- a/arch/arm/vfp/vfpinstr.h
+++ b/arch/arm/vfp/vfpinstr.h
@@ -52,11 +52,11 @@
#define FEXT_TO_IDX(inst) ((inst & 0x000f0000) >> 15 | (inst & (1 << 7)) >> 7)
#define vfp_get_sd(inst) ((inst & 0x0000f000) >> 11 | (inst & (1 << 22)) >> 22)
-#define vfp_get_dd(inst) ((inst & 0x0000f000) >> 12)
+#define vfp_get_dd(inst) ((inst & 0x0000f000) >> 12 | (inst & (1 << 22)) >> 18)
#define vfp_get_sm(inst) ((inst & 0x0000000f) << 1 | (inst & (1 << 5)) >> 5)
-#define vfp_get_dm(inst) ((inst & 0x0000000f))
+#define vfp_get_dm(inst) ((inst & 0x0000000f) | (inst & (1 << 5)) >> 1)
#define vfp_get_sn(inst) ((inst & 0x000f0000) >> 15 | (inst & (1 << 7)) >> 7)
-#define vfp_get_dn(inst) ((inst & 0x000f0000) >> 16)
+#define vfp_get_dn(inst) ((inst & 0x000f0000) >> 16 | (inst & (1 << 7)) >> 3)
#define vfp_single(inst) (((inst) & 0x0000f00) == 0xa00)
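
The widened macros above fold the extra D/N/M bit into bit 4 of the decoded register number, so VFPv3's d16-d31 become addressable (the old forms silently aliased them onto d0-d15). A self-contained demo using the macros exactly as patched; the instruction word is a made-up value that sets only the D and M bits:

    #include <stdio.h>
    #include <stdint.h>

    #define vfp_get_dd(inst) (((inst) & 0x0000f000) >> 12 | ((inst) & (1 << 22)) >> 18)
    #define vfp_get_dn(inst) (((inst) & 0x000f0000) >> 16 | ((inst) & (1 << 7)) >> 3)
    #define vfp_get_dm(inst) (((inst) & 0x0000000f) | ((inst) & (1 << 5)) >> 1)

    int main(void)
    {
        uint32_t inst = (1u << 22) | (1u << 5);   /* D=1, M=1, low fields 0 */

        /* With the old macros this would decode as d0/d0; now the high
         * bit reaches the register number and yields d16. */
        printf("dd=%u dn=%u dm=%u\n",
               vfp_get_dd(inst), vfp_get_dn(inst), vfp_get_dm(inst));
        return 0;
    }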
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index dedbb449632..2f37e1d6cb4 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -8,13 +8,23 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
#include <linux/types.h>
+#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
+#include <linux/hardirq.h>
#include <linux/kernel.h>
+#include <linux/notifier.h>
#include <linux/signal.h>
#include <linux/sched.h>
+#include <linux/smp.h>
#include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/user.h>
+#include <linux/export.h>
+#include <asm/cp15.h>
+#include <asm/cputype.h>
+#include <asm/system_info.h>
#include <asm/thread_notify.h>
#include <asm/vfp.h>
@@ -26,9 +36,9 @@
*/
void vfp_testing_entry(void);
void vfp_support_entry(void);
+void vfp_null_entry(void);
-void (*vfp_vector)(void) = vfp_testing_entry;
-union vfp_state *last_VFP_context;
+void (*vfp_vector)(void) = vfp_null_entry;
/*
* Dual-use variable.
@@ -37,39 +47,165 @@ union vfp_state *last_VFP_context;
*/
unsigned int VFP_arch;
+/*
+ * The pointer to the vfpstate structure of the thread which currently
+ * owns the context held in the VFP hardware, or NULL if the hardware
+ * context is invalid.
+ *
+ * For UP, this is sufficient to tell which thread owns the VFP context.
+ * However, for SMP, we also need to check the CPU number stored in
+ * the saved state to catch migrations.
+ */
+union vfp_state *vfp_current_hw_state[NR_CPUS];
+
+/*
+ * Is this thread's most up-to-date state stored in this CPU's hardware?
+ * Must be called from non-preemptible context.
+ */
+static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread)
+{
+#ifdef CONFIG_SMP
+ if (thread->vfpstate.hard.cpu != cpu)
+ return false;
+#endif
+ return vfp_current_hw_state[cpu] == &thread->vfpstate;
+}
+
+/*
+ * Force a reload of the VFP context from the thread structure. We do
+ * this by ensuring that access to the VFP hardware is disabled, and
+ * clear vfp_current_hw_state. Must be called from non-preemptible context.
+ */
+static void vfp_force_reload(unsigned int cpu, struct thread_info *thread)
+{
+ if (vfp_state_in_hw(cpu, thread)) {
+ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+ vfp_current_hw_state[cpu] = NULL;
+ }
+#ifdef CONFIG_SMP
+ thread->vfpstate.hard.cpu = NR_CPUS;
+#endif
+}
+
+/*
+ * Per-thread VFP initialization.
+ */
+static void vfp_thread_flush(struct thread_info *thread)
+{
+ union vfp_state *vfp = &thread->vfpstate;
+ unsigned int cpu;
+
+ /*
+ * Disable VFP to ensure we initialize it first. We must ensure
+ * that the modification of vfp_current_hw_state[] and hardware
+ * disable are done for the same CPU and without preemption.
+ *
+ * Do this first to ensure that preemption won't overwrite our
+ * state saving should access to the VFP be enabled at this point.
+ */
+ cpu = get_cpu();
+ if (vfp_current_hw_state[cpu] == vfp)
+ vfp_current_hw_state[cpu] = NULL;
+ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+ put_cpu();
+
+ memset(vfp, 0, sizeof(union vfp_state));
+
+ vfp->hard.fpexc = FPEXC_EN;
+ vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
+#ifdef CONFIG_SMP
+ vfp->hard.cpu = NR_CPUS;
+#endif
+}
+
+static void vfp_thread_exit(struct thread_info *thread)
+{
+ /* release case: Per-thread VFP cleanup. */
+ union vfp_state *vfp = &thread->vfpstate;
+ unsigned int cpu = get_cpu();
+
+ if (vfp_current_hw_state[cpu] == vfp)
+ vfp_current_hw_state[cpu] = NULL;
+ put_cpu();
+}
+
+static void vfp_thread_copy(struct thread_info *thread)
+{
+ struct thread_info *parent = current_thread_info();
+
+ vfp_sync_hwstate(parent);
+ thread->vfpstate = parent->vfpstate;
+#ifdef CONFIG_SMP
+ thread->vfpstate.hard.cpu = NR_CPUS;
+#endif
+}
+
+/*
+ * When this function is called with the following 'cmd's, the following
+ * is true while this function is being run:
+ * THREAD_NOTIFY_SWITCH:
+ * - the previously running thread will not be scheduled onto another CPU.
+ * - the next thread to be run (v) will not be running on another CPU.
+ * - thread->cpu is the local CPU number
+ * - not preemptible as we're called in the middle of a thread switch
+ * THREAD_NOTIFY_FLUSH:
+ * - the thread (v) will be running on the local CPU, so
+ * v === current_thread_info()
+ * - thread->cpu is the local CPU number at the time it is accessed,
+ * but may change at any time.
+ * - we could be preempted if tree preempt rcu is enabled, so
+ * it is unsafe to use thread->cpu.
+ * THREAD_NOTIFY_EXIT:
+ * - the thread (v) will be running on the local CPU, so
+ * v === current_thread_info()
+ * - thread->cpu is the local CPU number at the time it is accessed,
+ * but may change at any time.
+ * - we could be preempted if tree preempt rcu is enabled, so
+ * it is unsafe to use thread->cpu.
+ */
static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
struct thread_info *thread = v;
- union vfp_state *vfp;
+ u32 fpexc;
+#ifdef CONFIG_SMP
+ unsigned int cpu;
+#endif
+
+ switch (cmd) {
+ case THREAD_NOTIFY_SWITCH:
+ fpexc = fmrx(FPEXC);
+
+#ifdef CONFIG_SMP
+ cpu = thread->cpu;
- if (likely(cmd == THREAD_NOTIFY_SWITCH)) {
/*
- * Always disable VFP so we can lazily save/restore the
- * old state.
+ * On SMP, if VFP is enabled, save the old state in
+ * case the thread migrates to a different CPU. The
+ * restoring is done lazily.
*/
- fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_ENABLE);
- return NOTIFY_DONE;
- }
+ if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu])
+ vfp_save_state(vfp_current_hw_state[cpu], fpexc);
+#endif
- vfp = &thread->vfpstate;
- if (cmd == THREAD_NOTIFY_FLUSH) {
/*
- * Per-thread VFP initialisation.
+ * Always disable VFP so we can lazily save/restore the
+ * old state.
*/
- memset(vfp, 0, sizeof(union vfp_state));
+ fmxr(FPEXC, fpexc & ~FPEXC_EN);
+ break;
- vfp->hard.fpexc = FPEXC_ENABLE;
- vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
+ case THREAD_NOTIFY_FLUSH:
+ vfp_thread_flush(thread);
+ break;
- /*
- * Disable VFP to ensure we initialise it first.
- */
- fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_ENABLE);
- }
+ case THREAD_NOTIFY_EXIT:
+ vfp_thread_exit(thread);
+ break;
- /* flush and release case: Per-thread VFP cleanup. */
- if (last_VFP_context == vfp)
- last_VFP_context = NULL;
+ case THREAD_NOTIFY_COPY:
+ vfp_thread_copy(thread);
+ break;
+ }
return NOTIFY_DONE;
}
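
vfp_state_in_hw() above captures the two-part ownership test described in the comment block: the thread must be the recorded owner of the hardware context, and on SMP its state must also have been last loaded on this CPU. A compilable toy model of just that predicate; the structs and the runtime `smp` flag are local stand-ins, not kernel API:

    #include <stdio.h>
    #include <stdbool.h>

    #define NR_CPUS 4

    struct vfp_state { unsigned int cpu; };          /* .hard.cpu in the patch */
    struct thread    { struct vfp_state vfpstate; };

    static struct vfp_state *current_hw_state[NR_CPUS];

    static bool state_in_hw(unsigned int cpu, struct thread *t, bool smp)
    {
        if (smp && t->vfpstate.cpu != cpu)
            return false;       /* owner migrated: hw context is stale */
        return current_hw_state[cpu] == &t->vfpstate;
    }

    int main(void)
    {
        struct thread t = { .vfpstate = { .cpu = 1 } };

        current_hw_state[0] = &t.vfpstate;  /* owner, but last loaded on cpu 1 */
        printf("SMP: %d\n", state_in_hw(0, &t, true));   /* 0: must reload */
        printf("UP:  %d\n", state_in_hw(0, &t, false));  /* 1: ownership enough */
        return 0;
    }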
@@ -82,7 +218,7 @@ static struct notifier_block vfp_notifier_block = {
* Raise a SIGFPE for the current process.
* sicode describes the signal being raised.
*/
-void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
+static void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
{
siginfo_t info;
@@ -90,7 +226,7 @@ void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
info.si_signo = SIGFPE;
info.si_code = sicode;
- info.si_addr = (void *)(instruction_pointer(regs) - 4);
+ info.si_addr = (void __user *)(instruction_pointer(regs) - 4);
/*
* This is the same as NWFPE, because it's not clear what
@@ -102,15 +238,15 @@ void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
send_sig_info(SIGFPE, &info, current);
}
-static void vfp_panic(char *reason)
+static void vfp_panic(char *reason, u32 inst)
{
int i;
- printk(KERN_ERR "VFP: Error: %s\n", reason);
- printk(KERN_ERR "VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n",
- fmrx(FPEXC), fmrx(FPSCR), fmrx(FPINST));
+ pr_err("VFP: Error: %s\n", reason);
+ pr_err("VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n",
+ fmrx(FPEXC), fmrx(FPSCR), inst);
for (i = 0; i < 32; i += 2)
- printk(KERN_ERR "VFP: s%2u: 0x%08x s%2u: 0x%08x\n",
+ pr_err("VFP: s%2u: 0x%08x s%2u: 0x%08x\n",
i, vfp_get_float(i), i+1, vfp_get_float(i+1));
}
@@ -124,7 +260,7 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_
pr_debug("VFP: raising exceptions %08x\n", exceptions);
if (exceptions == VFP_EXCEPTION_ERROR) {
- vfp_panic("unhandled bounce");
+ vfp_panic("unhandled bounce", inst);
vfp_raise_sigfpe(0, regs);
return;
}
@@ -148,6 +284,7 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_
/*
* These are arranged in priority order, least to highest.
*/
+ RAISE(FPSCR_DZC, FPSCR_DZE, FPE_FLTDIV);
RAISE(FPSCR_IXC, FPSCR_IXE, FPE_FLTRES);
RAISE(FPSCR_UFC, FPSCR_UFE, FPE_FLTUND);
RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF);
@@ -196,35 +333,66 @@ static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs)
/*
* Package up a bounce condition.
*/
-void VFP9_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
+void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
{
- u32 fpscr, orig_fpscr, exceptions, inst;
+ u32 fpscr, orig_fpscr, fpsid, exceptions;
pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);
/*
- * Enable access to the VFP so we can handle the bounce.
+ * At this point, FPEXC can have the following configuration:
+ *
+ * EX DEX IXE
+ * 0 1 x - synchronous exception
+ * 1 x 0 - asynchronous exception
+ * 1 x 1 - synchronous on VFP subarch 1 and asynchronous on later
+ * 0 0 1 - synchronous on VFP9 (non-standard subarch 1
+ * implementation), undefined otherwise
+ *
+ * Clear various bits and enable access to the VFP so we can
+ * handle the bounce.
*/
- fmxr(FPEXC, fpexc & ~(FPEXC_EXCEPTION|FPEXC_INV|FPEXC_UFC|FPEXC_IOC));
+ fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_DEX|FPEXC_FP2V|FPEXC_VV|FPEXC_TRAP_MASK));
+ fpsid = fmrx(FPSID);
orig_fpscr = fpscr = fmrx(FPSCR);
/*
- * If we are running with inexact exceptions enabled, we need to
- * emulate the trigger instruction. Note that as we're emulating
- * the trigger instruction, we need to increment PC.
+ * Check for the special VFP subarch 1 and FPSCR.IXE bit case
*/
- if (fpscr & FPSCR_IXE) {
- regs->ARM_pc += 4;
+ if ((fpsid & FPSID_ARCH_MASK) == (1 << FPSID_ARCH_BIT)
+ && (fpscr & FPSCR_IXE)) {
+ /*
+ * Synchronous exception, emulate the trigger instruction
+ */
goto emulate;
}
- barrier();
+ if (fpexc & FPEXC_EX) {
+#ifndef CONFIG_CPU_FEROCEON
+ /*
+ * Asynchronous exception. The instruction is read from FPINST
+ * and the interrupted instruction has to be restarted.
+ */
+ trigger = fmrx(FPINST);
+ regs->ARM_pc -= 4;
+#endif
+ } else if (!(fpexc & FPEXC_DEX)) {
+ /*
+ * Illegal combination of bits. It can be caused by an
+ * unallocated VFP instruction but with FPSCR.IXE set and not
+ * on VFP subarch 1.
+ */
+ vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
+ goto exit;
+ }
/*
- * Modify fpscr to indicate the number of iterations remaining
+ * Modify fpscr to indicate the number of iterations remaining.
+ * If FPEXC.EX is 0, FPEXC.DEX is 1 and the FPEXC.VV bit indicates
+ * whether FPEXC.VECITR or FPSCR.LEN is used.
*/
- if (fpexc & FPEXC_EXCEPTION) {
+ if (fpexc & (FPEXC_EX | FPEXC_VV)) {
u32 len;
len = fpexc + (1 << FPEXC_LENGTH_BIT);
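
The EX/DEX/IXE table in the comment above boils down to a three-way triage before any emulation happens. A compact sketch of the same decision; the bit positions are assumed to mirror the kernel's asm/vfp.h definitions and should be checked there:

    #include <stdio.h>

    #define FPEXC_EX    (1u << 31)      /* assumed to mirror asm/vfp.h */
    #define FPEXC_DEX   (1u << 26)      /* assumed to mirror asm/vfp.h */

    static const char *classify(unsigned int fpexc, int subarch1_ixe)
    {
        if (subarch1_ixe)
            return "synchronous: emulate the trigger instruction directly";
        if (fpexc & FPEXC_EX)
            return "asynchronous: reread trigger from FPINST, restart PC";
        if (fpexc & FPEXC_DEX)
            return "synchronous: the bounced opcode itself is the trigger";
        return "illegal bit combination: raise VFP_EXCEPTION_ERROR";
    }

    int main(void)
    {
        printf("%s\n", classify(FPEXC_DEX, 0));
        printf("%s\n", classify(0, 0));
        return 0;
    }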
@@ -238,16 +406,16 @@ void VFP9_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
* FPEXC bounce reason, but this appears to be unreliable.
* Emulate the bounced instruction instead.
*/
- inst = fmrx(FPINST);
- exceptions = vfp_emulate_instruction(inst, fpscr, regs);
+ exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
if (exceptions)
- vfp_raise_exceptions(exceptions, inst, orig_fpscr, regs);
+ vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
/*
- * If there isn't a second FP instruction, exit now.
+ * If there isn't a second FP instruction, exit now. Note that
+ * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
*/
- if (!(fpexc & FPEXC_FPV2))
- return;
+ if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
+ goto exit;
/*
* The barrier() here prevents fpinst2 being read
@@ -255,46 +423,379 @@ void VFP9_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
*/
barrier();
trigger = fmrx(FPINST2);
- orig_fpscr = fpscr = fmrx(FPSCR);
emulate:
- exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
+ exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
if (exceptions)
vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
+ exit:
+ preempt_enable();
}
-
+
+static void vfp_enable(void *unused)
+{
+ u32 access;
+
+ BUG_ON(preemptible());
+ access = get_copro_access();
+
+ /*
+ * Enable full access to VFP (cp10 and cp11)
+ */
+ set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
+}
+
+#ifdef CONFIG_CPU_PM
+static int vfp_pm_suspend(void)
+{
+ struct thread_info *ti = current_thread_info();
+ u32 fpexc = fmrx(FPEXC);
+
+ /* if vfp is on, then save state for resumption */
+ if (fpexc & FPEXC_EN) {
+ pr_debug("%s: saving vfp state\n", __func__);
+ vfp_save_state(&ti->vfpstate, fpexc);
+
+ /* disable, just in case */
+ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+ } else if (vfp_current_hw_state[ti->cpu]) {
+#ifndef CONFIG_SMP
+ fmxr(FPEXC, fpexc | FPEXC_EN);
+ vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
+ fmxr(FPEXC, fpexc);
+#endif
+ }
+
+ /* clear any information we had about last context state */
+ vfp_current_hw_state[ti->cpu] = NULL;
+
+ return 0;
+}
+
+static void vfp_pm_resume(void)
+{
+ /* ensure we have access to the vfp */
+ vfp_enable(NULL);
+
+ /* and disable it to ensure the next usage restores the state */
+ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+}
+
+static int vfp_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
+ void *v)
+{
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ vfp_pm_suspend();
+ break;
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ vfp_pm_resume();
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block vfp_cpu_pm_notifier_block = {
+ .notifier_call = vfp_cpu_pm_notifier,
+};
+
+static void vfp_pm_init(void)
+{
+ cpu_pm_register_notifier(&vfp_cpu_pm_notifier_block);
+}
+
+#else
+static inline void vfp_pm_init(void) { }
+#endif /* CONFIG_CPU_PM */
+
+/*
+ * Ensure that the VFP state stored in 'thread->vfpstate' is up to date
+ * with the hardware state.
+ */
+void vfp_sync_hwstate(struct thread_info *thread)
+{
+ unsigned int cpu = get_cpu();
+
+ if (vfp_state_in_hw(cpu, thread)) {
+ u32 fpexc = fmrx(FPEXC);
+
+ /*
+ * Save the last VFP state on this CPU.
+ */
+ fmxr(FPEXC, fpexc | FPEXC_EN);
+ vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN);
+ fmxr(FPEXC, fpexc);
+ }
+
+ put_cpu();
+}
+
+/* Ensure that the thread reloads the hardware VFP state on the next use. */
+void vfp_flush_hwstate(struct thread_info *thread)
+{
+ unsigned int cpu = get_cpu();
+
+ vfp_force_reload(cpu, thread);
+
+ put_cpu();
+}
+
+/*
+ * Save the current VFP state into the provided structures and prepare
+ * for entry into a new function (signal handler).
+ */
+int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
+ struct user_vfp_exc __user *ufp_exc)
+{
+ struct thread_info *thread = current_thread_info();
+ struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
+ int err = 0;
+
+ /* Ensure that the saved hwstate is up-to-date. */
+ vfp_sync_hwstate(thread);
+
+ /*
+ * Copy the floating point registers. There can be unused
+ * registers; see asm/hwcap.h for details.
+ */
+ err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs,
+ sizeof(hwstate->fpregs));
+ /*
+ * Copy the status and control register.
+ */
+ __put_user_error(hwstate->fpscr, &ufp->fpscr, err);
+
+ /*
+ * Copy the exception registers.
+ */
+ __put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err);
+ __put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
+ __put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
+
+ if (err)
+ return -EFAULT;
+
+ /* Ensure that VFP is disabled. */
+ vfp_flush_hwstate(thread);
+
+ /*
+ * As per the PCS, clear the length and stride bits for function
+ * entry.
+ */
+ hwstate->fpscr &= ~(FPSCR_LENGTH_MASK | FPSCR_STRIDE_MASK);
+ return 0;
+}
+
+/* Sanitise and restore the current VFP state from the provided structures. */
+int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
+ struct user_vfp_exc __user *ufp_exc)
+{
+ struct thread_info *thread = current_thread_info();
+ struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
+ unsigned long fpexc;
+ int err = 0;
+
+ /* Disable VFP to avoid corrupting the new thread state. */
+ vfp_flush_hwstate(thread);
+
+ /*
+ * Copy the floating point registers. There can be unused
+ * registers; see asm/hwcap.h for details.
+ */
+ err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
+ sizeof(hwstate->fpregs));
+ /*
+ * Copy the status and control register.
+ */
+ __get_user_error(hwstate->fpscr, &ufp->fpscr, err);
+
+ /*
+ * Sanitise and restore the exception registers.
+ */
+ __get_user_error(fpexc, &ufp_exc->fpexc, err);
+
+ /* Ensure the VFP is enabled. */
+ fpexc |= FPEXC_EN;
+
+ /* Ensure FPINST2 is invalid and the exception flag is cleared. */
+ fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
+ hwstate->fpexc = fpexc;
+
+ __get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
+ __get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
+
+ return err ? -EFAULT : 0;
+}
+
+/*
+ * VFP hardware can lose all context when a CPU goes offline.
+ * As we will be running in SMP mode with CPU hotplug, we will save the
+ * hardware state at every thread switch. We clear our held state when
+ * a CPU has been killed, indicating that the VFP hardware doesn't contain
+ * a thread's VFP state. When a CPU starts up, we re-enable access to the
+ * VFP hardware.
+ *
+ * Both CPU_DYING and CPU_STARTING are called on the CPU which
+ * is being offlined/onlined.
+ */
+static int vfp_hotplug(struct notifier_block *b, unsigned long action,
+ void *hcpu)
+{
+ if (action == CPU_DYING || action == CPU_DYING_FROZEN)
+ vfp_current_hw_state[(long)hcpu] = NULL;
+ else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
+ vfp_enable(NULL);
+ return NOTIFY_OK;
+}
+
+void vfp_kmode_exception(void)
+{
+ /*
+ * If we reach this point, a floating point exception has been raised
+ * while running in kernel mode. If the NEON/VFP unit was enabled at the
+ * time, it means a VFP instruction has been issued that requires
+ * software assistance to complete, something which is not currently
+ * supported in kernel mode.
+ * If the NEON/VFP unit was disabled, and the location pointed to below
+ * is properly preceded by a call to kernel_neon_begin(), something has
+ * caused the task to be scheduled out and back in again. In this case,
+ * rebuilding and running with CONFIG_DEBUG_ATOMIC_SLEEP enabled should
+ * be helpful in localizing the problem.
+ */
+ if (fmrx(FPEXC) & FPEXC_EN)
+ pr_crit("BUG: unsupported FP instruction in kernel mode\n");
+ else
+ pr_crit("BUG: FP instruction issued in kernel mode with FP unit disabled\n");
+}
+
+#ifdef CONFIG_KERNEL_MODE_NEON
+
+/*
+ * Kernel-side NEON support functions
+ */
+void kernel_neon_begin(void)
+{
+ struct thread_info *thread = current_thread_info();
+ unsigned int cpu;
+ u32 fpexc;
+
+ /*
+ * Kernel mode NEON is only allowed outside of interrupt context
+ * with preemption disabled. This will make sure that the kernel
+ * mode NEON register contents never need to be preserved.
+ */
+ BUG_ON(in_interrupt());
+ cpu = get_cpu();
+
+ fpexc = fmrx(FPEXC) | FPEXC_EN;
+ fmxr(FPEXC, fpexc);
+
+ /*
+ * Save the userland NEON/VFP state. Under UP,
+ * the owner could be a task other than 'current'
+ */
+ if (vfp_state_in_hw(cpu, thread))
+ vfp_save_state(&thread->vfpstate, fpexc);
+#ifndef CONFIG_SMP
+ else if (vfp_current_hw_state[cpu] != NULL)
+ vfp_save_state(vfp_current_hw_state[cpu], fpexc);
+#endif
+ vfp_current_hw_state[cpu] = NULL;
+}
+EXPORT_SYMBOL(kernel_neon_begin);
+
+void kernel_neon_end(void)
+{
+ /* Disable the NEON/VFP unit. */
+ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+ put_cpu();
+}
+EXPORT_SYMBOL(kernel_neon_end);
+
+#endif /* CONFIG_KERNEL_MODE_NEON */
+
/*
* VFP support code initialisation.
*/
static int __init vfp_init(void)
{
unsigned int vfpsid;
+ unsigned int cpu_arch = cpu_architecture();
+
+ if (cpu_arch >= CPU_ARCH_ARMv6)
+ on_each_cpu(vfp_enable, NULL, 1);
/*
* First check that there is a VFP that we can use.
* The handler is already setup to just log calls, so
* we just need to read the VFPSID register.
*/
+ vfp_vector = vfp_testing_entry;
+ barrier();
vfpsid = fmrx(FPSID);
+ barrier();
+ vfp_vector = vfp_null_entry;
- printk(KERN_INFO "VFP support v0.3: ");
- if (VFP_arch) {
- printk("not present\n");
- } else if (vfpsid & FPSID_NODOUBLE) {
- printk("no double precision support\n");
+ pr_info("VFP support v0.3: ");
+ if (VFP_arch)
+ pr_cont("not present\n");
+ else if (vfpsid & FPSID_NODOUBLE) {
+ pr_cont("no double precision support\n");
} else {
+ hotcpu_notifier(vfp_hotplug, 0);
+
VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; /* Extract the architecture version */
- printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
+ pr_cont("implementor %02x architecture %d part %02x variant %x rev %x\n",
(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
(vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
(vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
(vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);
+
vfp_vector = vfp_support_entry;
thread_register_notifier(&vfp_notifier_block);
+ vfp_pm_init();
+
+ /*
+ * We detected VFP, and the support code is
+ * in place; report VFP support to userspace.
+ */
+ elf_hwcap |= HWCAP_VFP;
+#ifdef CONFIG_VFPv3
+ if (VFP_arch >= 2) {
+ elf_hwcap |= HWCAP_VFPv3;
+
+ /*
+ * Check for VFPv3 D16 and VFPv4 D16. CPUs in
+ * this configuration only have 16 x 64bit
+ * registers.
+ */
+ if ((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) == 1)
+ elf_hwcap |= HWCAP_VFPv3D16; /* also v4-D16 */
+ else
+ elf_hwcap |= HWCAP_VFPD32;
+ }
+#endif
+ /*
+ * Check for the presence of the Advanced SIMD
+ * load/store instructions, integer and single
+ * precision floating point operations. Only check
+ * for NEON if the hardware has the MVFR registers.
+ */
+ if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
+#ifdef CONFIG_NEON
+ if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
+ elf_hwcap |= HWCAP_NEON;
+#endif
+#ifdef CONFIG_VFPv3
+ if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
+ elf_hwcap |= HWCAP_VFPv4;
+#endif
+ }
}
return 0;
}
-late_initcall(vfp_init);
+core_initcall(vfp_init);
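
The hwcap block at the end of vfp_init() gates the NEON and VFPv4 capability bits on MVFR register fields, which exist only when the CPUID architecture nibble reads 0xf. A sketch of the field tests with a canned value standing in for the fmrx(MVFR1) read; the mask/value pairs are copied from the patch, while the hwcap bit numbers here are placeholders:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t mvfr1 = 0x11111111;    /* canned probe value, full support */
        unsigned int hwcap = 0;

        if ((mvfr1 & 0x000fff00) == 0x00011100)
            hwcap |= 1u << 0;           /* placeholder for HWCAP_NEON */
        if ((mvfr1 & 0xf0000000) == 0x10000000)
            hwcap |= 1u << 1;           /* placeholder for HWCAP_VFPv4 */

        printf("model hwcap = %#x\n", hwcap);   /* 0x3 */
        return 0;
    }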
diff --git a/arch/arm/vfp/vfpsingle.c b/arch/arm/vfp/vfpsingle.c
index ab5e9503bae..4f96c1617aa 100644
--- a/arch/arm/vfp/vfpsingle.c
+++ b/arch/arm/vfp/vfpsingle.c
@@ -34,7 +34,6 @@
#include <linux/bitops.h>
#include <asm/div64.h>
-#include <asm/ptrace.h>
#include <asm/vfp.h>
#include "vfpinstr.h"
@@ -198,8 +197,10 @@ u32 vfp_single_normaliseround(int sd, struct vfp_single *vs, u32 fpscr, u32 exce
vfp_single_dump("pack: final", vs);
{
s32 d = vfp_single_pack(vs);
+#ifdef DEBUG
pr_debug("VFP: %s: d(s%d)=%08x exceptions=%08x\n", func,
sd, d, exceptions);
+#endif
vfp_put_float(d, sd);
}
@@ -914,6 +915,8 @@ vfp_single_multiply_accumulate(int sd, int sn, s32 m, u32 fpscr, u32 negate, cha
v = vfp_get_float(sd);
pr_debug("VFP: s%u = %08x\n", sd, v);
vfp_single_unpack(&vsn, v);
+ if (vsn.exponent == 0 && vsn.significand)
+ vfp_single_normalise_denormal(&vsn);
if (negate & NEG_SUBTRACT)
vsn.sign = vfp_sign_negate(vsn.sign);
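
Both multiply-accumulate fixes in this series (here and in vfpdouble.c above) normalise a denormal accumulator before unpacked arithmetic, and vfpdouble.c additionally corrects the shift count from 62 - fls(...) to 63 - fls(...). A standalone check of that arithmetic with a portable fls() stand-in (1-based index of the highest set bit, 0 for 0):

    #include <stdio.h>
    #include <stdint.h>

    static int fls_model(uint32_t x)    /* stand-in for the kernel's fls() */
    {
        int r = 0;
        while (x) { r++; x >>= 1; }
        return r;
    }

    int main(void)
    {
        uint64_t significand = 1;       /* worst-case denormal: only bit 0 set */
        int bits = 31 - fls_model((uint32_t)(significand >> 32));

        if (bits == 31)                 /* high word empty: count the low word */
            bits = 63 - fls_model((uint32_t)significand);

        /* 62, not 61: the unit bit of this internal format sits at bit 62 */
        printf("shift = %d -> %#llx\n", bits,
               (unsigned long long)(significand << bits));
        return 0;
    }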