Diffstat (limited to 'arch/arm/vfp')
-rw-r--r--  arch/arm/vfp/Makefile      9
-rw-r--r--  arch/arm/vfp/entry.S      42
-rw-r--r--  arch/arm/vfp/vfp.h        41
-rw-r--r--  arch/arm/vfp/vfpdouble.c 157
-rw-r--r--  arch/arm/vfp/vfphw.S     286
-rw-r--r--  arch/arm/vfp/vfpinstr.h   14
-rw-r--r--  arch/arm/vfp/vfpmodule.c 633
-rw-r--r--  arch/arm/vfp/vfpsingle.c 140
8 files changed, 1003 insertions, 319 deletions
diff --git a/arch/arm/vfp/Makefile b/arch/arm/vfp/Makefile index afabac31dd1..a81404c09d5 100644 --- a/arch/arm/vfp/Makefile +++ b/arch/arm/vfp/Makefile @@ -4,9 +4,12 @@ # Copyright (C) 2001 ARM Limited # -# EXTRA_CFLAGS := -DDEBUG -# EXTRA_AFLAGS := -DDEBUG +# ccflags-y := -DDEBUG +# asflags-y := -DDEBUG + +KBUILD_AFLAGS :=$(KBUILD_AFLAGS:-msoft-float=-Wa,-mfpu=softvfp+vfp -mfloat-abi=soft) +LDFLAGS +=--no-warn-mismatch obj-y += vfp.o -vfp-$(CONFIG_VFP) += entry.o vfpmodule.o vfphw.o vfpsingle.o vfpdouble.o +vfp-$(CONFIG_VFP) += vfpmodule.o entry.o vfphw.o vfpsingle.o vfpdouble.o diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S index 6f17187ab32..fe6ca574d09 100644 --- a/arch/arm/vfp/entry.S +++ b/arch/arm/vfp/entry.S @@ -7,25 +7,37 @@ * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. - * - * Basic entry code, called from the kernel's undefined instruction trap. - * r0 = faulted instruction - * r5 = faulted PC+4 - * r9 = successful return - * r10 = thread_info structure - * lr = failure return */ -#include <linux/linkage.h> #include <linux/init.h> -#include <asm/asm-offsets.h> +#include <linux/linkage.h> +#include <asm/thread_info.h> #include <asm/vfpmacros.h> +#include <asm/assembler.h> +#include <asm/asm-offsets.h> - .globl do_vfp -do_vfp: +@ VFP entry point. +@ +@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb) +@ r2 = PC value to resume execution after successful emulation +@ r9 = normal "successful" return address +@ r10 = this threads thread_info structure +@ lr = unrecognised instruction return address +@ IRQs enabled. +@ +ENTRY(do_vfp) + inc_preempt_count r10, r4 ldr r4, .LCvfp + ldr r11, [r10, #TI_CPU] @ CPU number add r10, r10, #TI_VFPSTATE @ r10 = workspace ldr pc, [r4] @ call VFP entry point +ENDPROC(do_vfp) + +ENTRY(vfp_null_entry) + dec_preempt_count_ti r10, r4 + mov pc, lr +ENDPROC(vfp_null_entry) + .align 2 .LCvfp: .word vfp_vector @@ -33,12 +45,14 @@ do_vfp: @ failure to the VFP initialisation code. __INIT - .globl vfp_testing_entry -vfp_testing_entry: +ENTRY(vfp_testing_entry) + dec_preempt_count_ti r10, r4 ldr r0, VFP_arch_address - str r5, [r0] @ known non-zero value + str r0, [r0] @ set to non-zero value mov pc, r9 @ we have handled the fault +ENDPROC(vfp_testing_entry) + .align 2 VFP_arch_address: .word VFP_arch diff --git a/arch/arm/vfp/vfp.h b/arch/arm/vfp/vfp.h index 4b97950984e..c8c98dd44ad 100644 --- a/arch/arm/vfp/vfp.h +++ b/arch/arm/vfp/vfp.h @@ -156,7 +156,7 @@ struct vfp_single { }; extern s32 vfp_get_float(unsigned int reg); -extern void vfp_put_float(unsigned int reg, s32 val); +extern void vfp_put_float(s32 val, unsigned int reg); /* * VFP_SINGLE_MANTISSA_BITS - number of bits in the mantissa @@ -265,9 +265,13 @@ struct vfp_double { * which returns (double)0.0. This is useful for the compare with * zero instructions. 
*/ +#ifdef CONFIG_VFPv3 +#define VFP_REG_ZERO 32 +#else #define VFP_REG_ZERO 16 +#endif extern u64 vfp_get_double(unsigned int reg); -extern void vfp_put_double(unsigned int reg, u64 val); +extern void vfp_put_double(u64 val, unsigned int reg); #define VFP_DOUBLE_MANTISSA_BITS (52) #define VFP_DOUBLE_EXPONENT_BITS (11) @@ -341,15 +345,36 @@ static inline int vfp_double_type(struct vfp_double *s) u32 vfp_double_normaliseround(int dd, struct vfp_double *vd, u32 fpscr, u32 exceptions, const char *func); -/* - * System registers - */ -extern u32 vfp_get_sys(unsigned int reg); -extern void vfp_put_sys(unsigned int reg, u32 val); - u32 vfp_estimate_sqrt_significand(u32 exponent, u32 significand); /* * A special flag to tell the normalisation code not to normalise. */ #define VFP_NAN_FLAG 0x100 + +/* + * A bit pattern used to indicate the initial (unset) value of the + * exception mask, in case nothing handles an instruction. This + * doesn't include the NAN flag, which get masked out before + * we check for an error. + */ +#define VFP_EXCEPTION_ERROR ((u32)-1 & ~VFP_NAN_FLAG) + +/* + * A flag to tell vfp instruction type. + * OP_SCALAR - this operation always operates in scalar mode + * OP_SD - the instruction exceptionally writes to a single precision result. + * OP_DD - the instruction exceptionally writes to a double precision result. + * OP_SM - the instruction exceptionally reads from a single precision operand. + */ +#define OP_SCALAR (1 << 0) +#define OP_SD (1 << 1) +#define OP_DD (1 << 1) +#define OP_SM (1 << 2) + +struct op { + u32 (* const fn)(int dd, int dn, int dm, u32 fpscr); + u32 flags; +}; + +extern void vfp_save_state(void *location, u32 fpexc); diff --git a/arch/arm/vfp/vfpdouble.c b/arch/arm/vfp/vfpdouble.c index 9b367a65cb4..423f56dd402 100644 --- a/arch/arm/vfp/vfpdouble.c +++ b/arch/arm/vfp/vfpdouble.c @@ -34,7 +34,6 @@ #include <linux/bitops.h> #include <asm/div64.h> -#include <asm/ptrace.h> #include <asm/vfp.h> #include "vfpinstr.h" @@ -56,7 +55,7 @@ static void vfp_double_normalise_denormal(struct vfp_double *vd) { int bits = 31 - fls(vd->significand >> 32); if (bits == 31) - bits = 62 - fls(vd->significand); + bits = 63 - fls(vd->significand); vfp_double_dump("normalise_denormal: in", vd); @@ -195,9 +194,9 @@ u32 vfp_double_normaliseround(int dd, struct vfp_double *vd, u32 fpscr, u32 exce s64 d = vfp_double_pack(vd); pr_debug("VFP: %s: d(d%d)=%016llx exceptions=%08x\n", func, dd, d, exceptions); - vfp_put_double(dd, d); + vfp_put_double(d, dd); } - return exceptions & ~VFP_NAN_FLAG; + return exceptions; } /* @@ -250,19 +249,19 @@ vfp_propagate_nan(struct vfp_double *vdd, struct vfp_double *vdn, */ static u32 vfp_double_fabs(int dd, int unused, int dm, u32 fpscr) { - vfp_put_double(dd, vfp_double_packed_abs(vfp_get_double(dm))); + vfp_put_double(vfp_double_packed_abs(vfp_get_double(dm)), dd); return 0; } static u32 vfp_double_fcpy(int dd, int unused, int dm, u32 fpscr) { - vfp_put_double(dd, vfp_get_double(dm)); + vfp_put_double(vfp_get_double(dm), dd); return 0; } static u32 vfp_double_fneg(int dd, int unused, int dm, u32 fpscr) { - vfp_put_double(dd, vfp_double_packed_negate(vfp_get_double(dm))); + vfp_put_double(vfp_double_packed_negate(vfp_get_double(dm)), dd); return 0; } @@ -287,7 +286,7 @@ static u32 vfp_double_fsqrt(int dd, int unused, int dm, u32 fpscr) vdp = &vfp_double_default_qnan; ret = FPSCR_IOC; } - vfp_put_double(dd, vfp_double_pack(vdp)); + vfp_put_double(vfp_double_pack(vdp), dd); return ret; } @@ -465,7 +464,7 @@ static u32 vfp_double_fcvts(int sd, int 
unused, int dm, u32 fpscr) */ if (tm & (VFP_INFINITY|VFP_NAN)) { vsd.exponent = 255; - if (tm & VFP_NAN) + if (tm == VFP_QNAN) vsd.significand |= VFP_SINGLE_SIGNIFICAND_QNAN; goto pack_nan; } else if (tm & VFP_ZERO) @@ -476,7 +475,7 @@ static u32 vfp_double_fcvts(int sd, int unused, int dm, u32 fpscr) return vfp_single_normaliseround(sd, &vsd, fpscr, exceptions, "fcvts"); pack_nan: - vfp_put_float(sd, vfp_single_pack(&vsd)); + vfp_put_float(vfp_single_pack(&vsd), sd); return exceptions; } @@ -573,7 +572,7 @@ static u32 vfp_double_ftoui(int sd, int unused, int dm, u32 fpscr) pr_debug("VFP: ftoui: d(s%d)=%08x exceptions=%08x\n", sd, d, exceptions); - vfp_put_float(sd, d); + vfp_put_float(d, sd); return exceptions; } @@ -588,6 +587,7 @@ static u32 vfp_double_ftosi(int sd, int unused, int dm, u32 fpscr) struct vfp_double vdm; u32 d, exceptions = 0; int rmode = fpscr & FPSCR_RMODE_MASK; + int tm; vfp_double_unpack(&vdm, vfp_get_double(dm)); vfp_double_dump("VDM", &vdm); @@ -595,10 +595,14 @@ static u32 vfp_double_ftosi(int sd, int unused, int dm, u32 fpscr) /* * Do we have denormalised number? */ - if (vfp_double_type(&vdm) & VFP_DENORMAL) + tm = vfp_double_type(&vdm); + if (tm & VFP_DENORMAL) exceptions |= FPSCR_IDC; - if (vdm.exponent >= 1023 + 32) { + if (tm & VFP_NAN) { + d = 0; + exceptions |= FPSCR_IOC; + } else if (vdm.exponent >= 1023 + 32) { d = 0x7fffffff; if (vdm.sign) d = ~d; @@ -643,7 +647,7 @@ static u32 vfp_double_ftosi(int sd, int unused, int dm, u32 fpscr) pr_debug("VFP: ftosi: d(s%d)=%08x exceptions=%08x\n", sd, d, exceptions); - vfp_put_float(sd, (s32)d); + vfp_put_float((s32)d, sd); return exceptions; } @@ -654,22 +658,22 @@ static u32 vfp_double_ftosiz(int dd, int unused, int dm, u32 fpscr) } -static u32 (* const fop_extfns[32])(int dd, int unused, int dm, u32 fpscr) = { - [FEXT_TO_IDX(FEXT_FCPY)] = vfp_double_fcpy, - [FEXT_TO_IDX(FEXT_FABS)] = vfp_double_fabs, - [FEXT_TO_IDX(FEXT_FNEG)] = vfp_double_fneg, - [FEXT_TO_IDX(FEXT_FSQRT)] = vfp_double_fsqrt, - [FEXT_TO_IDX(FEXT_FCMP)] = vfp_double_fcmp, - [FEXT_TO_IDX(FEXT_FCMPE)] = vfp_double_fcmpe, - [FEXT_TO_IDX(FEXT_FCMPZ)] = vfp_double_fcmpz, - [FEXT_TO_IDX(FEXT_FCMPEZ)] = vfp_double_fcmpez, - [FEXT_TO_IDX(FEXT_FCVT)] = vfp_double_fcvts, - [FEXT_TO_IDX(FEXT_FUITO)] = vfp_double_fuito, - [FEXT_TO_IDX(FEXT_FSITO)] = vfp_double_fsito, - [FEXT_TO_IDX(FEXT_FTOUI)] = vfp_double_ftoui, - [FEXT_TO_IDX(FEXT_FTOUIZ)] = vfp_double_ftouiz, - [FEXT_TO_IDX(FEXT_FTOSI)] = vfp_double_ftosi, - [FEXT_TO_IDX(FEXT_FTOSIZ)] = vfp_double_ftosiz, +static struct op fops_ext[32] = { + [FEXT_TO_IDX(FEXT_FCPY)] = { vfp_double_fcpy, 0 }, + [FEXT_TO_IDX(FEXT_FABS)] = { vfp_double_fabs, 0 }, + [FEXT_TO_IDX(FEXT_FNEG)] = { vfp_double_fneg, 0 }, + [FEXT_TO_IDX(FEXT_FSQRT)] = { vfp_double_fsqrt, 0 }, + [FEXT_TO_IDX(FEXT_FCMP)] = { vfp_double_fcmp, OP_SCALAR }, + [FEXT_TO_IDX(FEXT_FCMPE)] = { vfp_double_fcmpe, OP_SCALAR }, + [FEXT_TO_IDX(FEXT_FCMPZ)] = { vfp_double_fcmpz, OP_SCALAR }, + [FEXT_TO_IDX(FEXT_FCMPEZ)] = { vfp_double_fcmpez, OP_SCALAR }, + [FEXT_TO_IDX(FEXT_FCVT)] = { vfp_double_fcvts, OP_SCALAR|OP_SD }, + [FEXT_TO_IDX(FEXT_FUITO)] = { vfp_double_fuito, OP_SCALAR|OP_SM }, + [FEXT_TO_IDX(FEXT_FSITO)] = { vfp_double_fsito, OP_SCALAR|OP_SM }, + [FEXT_TO_IDX(FEXT_FTOUI)] = { vfp_double_ftoui, OP_SCALAR|OP_SD }, + [FEXT_TO_IDX(FEXT_FTOUIZ)] = { vfp_double_ftouiz, OP_SCALAR|OP_SD }, + [FEXT_TO_IDX(FEXT_FTOSI)] = { vfp_double_ftosi, OP_SCALAR|OP_SD }, + [FEXT_TO_IDX(FEXT_FTOSIZ)] = { vfp_double_ftosiz, OP_SCALAR|OP_SD }, }; @@ -862,6 +866,8 @@ 
vfp_double_multiply_accumulate(int dd, int dn, int dm, u32 fpscr, u32 negate, ch vdp.sign = vfp_sign_negate(vdp.sign); vfp_double_unpack(&vdn, vfp_get_double(dd)); + if (vdn.exponent == 0 && vdn.significand) + vfp_double_normalise_denormal(&vdn); if (negate & NEG_SUBTRACT) vdn.sign = vfp_sign_negate(vdn.sign); @@ -1079,7 +1085,7 @@ static u32 vfp_double_fdiv(int dd, int dn, int dm, u32 fpscr) vdn_nan: exceptions = vfp_propagate_nan(&vdd, &vdn, &vdm, fpscr); pack: - vfp_put_double(dd, vfp_double_pack(&vdd)); + vfp_put_double(vfp_double_pack(&vdd), dd); return exceptions; vdm_nan: @@ -1099,20 +1105,20 @@ static u32 vfp_double_fdiv(int dd, int dn, int dm, u32 fpscr) goto pack; invalid: - vfp_put_double(dd, vfp_double_pack(&vfp_double_default_qnan)); + vfp_put_double(vfp_double_pack(&vfp_double_default_qnan), dd); return FPSCR_IOC; } -static u32 (* const fop_fns[16])(int dd, int dn, int dm, u32 fpscr) = { - [FOP_TO_IDX(FOP_FMAC)] = vfp_double_fmac, - [FOP_TO_IDX(FOP_FNMAC)] = vfp_double_fnmac, - [FOP_TO_IDX(FOP_FMSC)] = vfp_double_fmsc, - [FOP_TO_IDX(FOP_FNMSC)] = vfp_double_fnmsc, - [FOP_TO_IDX(FOP_FMUL)] = vfp_double_fmul, - [FOP_TO_IDX(FOP_FNMUL)] = vfp_double_fnmul, - [FOP_TO_IDX(FOP_FADD)] = vfp_double_fadd, - [FOP_TO_IDX(FOP_FSUB)] = vfp_double_fsub, - [FOP_TO_IDX(FOP_FDIV)] = vfp_double_fdiv, +static struct op fops[16] = { + [FOP_TO_IDX(FOP_FMAC)] = { vfp_double_fmac, 0 }, + [FOP_TO_IDX(FOP_FNMAC)] = { vfp_double_fnmac, 0 }, + [FOP_TO_IDX(FOP_FMSC)] = { vfp_double_fmsc, 0 }, + [FOP_TO_IDX(FOP_FNMSC)] = { vfp_double_fnmsc, 0 }, + [FOP_TO_IDX(FOP_FMUL)] = { vfp_double_fmul, 0 }, + [FOP_TO_IDX(FOP_FNMUL)] = { vfp_double_fnmul, 0 }, + [FOP_TO_IDX(FOP_FADD)] = { vfp_double_fadd, 0 }, + [FOP_TO_IDX(FOP_FSUB)] = { vfp_double_fsub, 0 }, + [FOP_TO_IDX(FOP_FDIV)] = { vfp_double_fdiv, 0 }, }; #define FREG_BANK(x) ((x) & 0x0c) @@ -1122,67 +1128,76 @@ u32 vfp_double_cpdo(u32 inst, u32 fpscr) { u32 op = inst & FOP_MASK; u32 exceptions = 0; - unsigned int dd = vfp_get_sd(inst); - unsigned int dn = vfp_get_sn(inst); - unsigned int dm = vfp_get_sm(inst); + unsigned int dest; + unsigned int dn = vfp_get_dn(inst); + unsigned int dm; unsigned int vecitr, veclen, vecstride; - u32 (*fop)(int, int, s32, u32); + struct op *fop; + + vecstride = (1 + ((fpscr & FPSCR_STRIDE_MASK) == FPSCR_STRIDE_MASK)); + + fop = (op == FOP_EXT) ? &fops_ext[FEXT_TO_IDX(inst)] : &fops[FOP_TO_IDX(op)]; - veclen = fpscr & FPSCR_LENGTH_MASK; - vecstride = (1 + ((fpscr & FPSCR_STRIDE_MASK) == FPSCR_STRIDE_MASK)) * 2; + /* + * fcvtds takes an sN register number as destination, not dN. + * It also always operates on scalars. + */ + if (fop->flags & OP_SD) + dest = vfp_get_sd(inst); + else + dest = vfp_get_dd(inst); + + /* + * f[us]ito takes a sN operand, not a dN operand. + */ + if (fop->flags & OP_SM) + dm = vfp_get_sm(inst); + else + dm = vfp_get_dm(inst); /* * If destination bank is zero, vector length is always '1'. * ARM DDI0100F C5.1.3, C5.3.2. */ - if (FREG_BANK(dd) == 0) + if ((fop->flags & OP_SCALAR) || (FREG_BANK(dest) == 0)) veclen = 0; + else + veclen = fpscr & FPSCR_LENGTH_MASK; pr_debug("VFP: vecstride=%u veclen=%u\n", vecstride, (veclen >> FPSCR_LENGTH_BIT) + 1); - fop = (op == FOP_EXT) ? fop_extfns[dn] : fop_fns[FOP_TO_IDX(op)]; - if (!fop) + if (!fop->fn) goto invalid; for (vecitr = 0; vecitr <= veclen; vecitr += 1 << FPSCR_LENGTH_BIT) { u32 except; + char type; + type = fop->flags & OP_SD ? 
's' : 'd'; if (op == FOP_EXT) - pr_debug("VFP: itr%d (d%u.%u) = op[%u] (d%u.%u)\n", + pr_debug("VFP: itr%d (%c%u) = op[%u] (d%u)\n", vecitr >> FPSCR_LENGTH_BIT, - dd >> 1, dd & 1, dn, - dm >> 1, dm & 1); + type, dest, dn, dm); else - pr_debug("VFP: itr%d (d%u.%u) = (d%u.%u) op[%u] (d%u.%u)\n", + pr_debug("VFP: itr%d (%c%u) = (d%u) op[%u] (d%u)\n", vecitr >> FPSCR_LENGTH_BIT, - dd >> 1, dd & 1, - dn >> 1, dn & 1, - FOP_TO_IDX(op), - dm >> 1, dm & 1); + type, dest, dn, FOP_TO_IDX(op), dm); - except = fop(dd, dn, dm, fpscr); + except = fop->fn(dest, dn, dm, fpscr); pr_debug("VFP: itr%d: exceptions=%08x\n", vecitr >> FPSCR_LENGTH_BIT, except); exceptions |= except; /* - * This ensures that comparisons only operate on scalars; - * comparisons always return with one FPSCR status bit set. - */ - if (except & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V)) - break; - - /* * CHECK: It appears to be undefined whether we stop when * we encounter an exception. We continue. */ - - dd = FREG_BANK(dd) + ((FREG_IDX(dd) + vecstride) & 6); - dn = FREG_BANK(dn) + ((FREG_IDX(dn) + vecstride) & 6); + dest = FREG_BANK(dest) + ((FREG_IDX(dest) + vecstride) & 3); + dn = FREG_BANK(dn) + ((FREG_IDX(dn) + vecstride) & 3); if (FREG_BANK(dm) != 0) - dm = FREG_BANK(dm) + ((FREG_IDX(dm) + vecstride) & 6); + dm = FREG_BANK(dm) + ((FREG_IDX(dm) + vecstride) & 3); } return exceptions; diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S index de4ca1223c5..be807625ed8 100644 --- a/arch/arm/vfp/vfphw.S +++ b/arch/arm/vfp/vfphw.S @@ -14,19 +14,25 @@ * r10 points at the start of the private FP workspace in the thread structure * sp points to a struct pt_regs (as defined in include/asm/proc/ptrace.h) */ +#include <linux/init.h> +#include <linux/linkage.h> #include <asm/thread_info.h> #include <asm/vfpmacros.h> -#include "../kernel/entry-header.S" +#include <linux/kern_levels.h> +#include <asm/assembler.h> +#include <asm/asm-offsets.h> .macro DBGSTR, str #ifdef DEBUG stmfd sp!, {r0-r3, ip, lr} - add r0, pc, #4 + ldr r0, =1f bl printk - b 1f - .asciz "<7>VFP: \str\n" - .balign 4 -1: ldmfd sp!, {r0-r3, ip, lr} + ldmfd sp!, {r0-r3, ip, lr} + + .pushsection .rodata, "a" +1: .ascii KERN_DEBUG "VFP: \str\n" + .byte 0 + .previous #endif .endm @@ -34,12 +40,14 @@ #ifdef DEBUG stmfd sp!, {r0-r3, ip, lr} mov r1, \arg - add r0, pc, #4 + ldr r0, =1f bl printk - b 1f - .asciz "<7>VFP: \str\n" - .balign 4 -1: ldmfd sp!, {r0-r3, ip, lr} + ldmfd sp!, {r0-r3, ip, lr} + + .pushsection .rodata, "a" +1: .ascii KERN_DEBUG "VFP: \str\n" + .byte 0 + .previous #endif .endm @@ -49,109 +57,155 @@ mov r3, \arg3 mov r2, \arg2 mov r1, \arg1 - add r0, pc, #4 + ldr r0, =1f bl printk - b 1f - .asciz "<7>VFP: \str\n" - .balign 4 -1: ldmfd sp!, {r0-r3, ip, lr} + ldmfd sp!, {r0-r3, ip, lr} + + .pushsection .rodata, "a" +1: .ascii KERN_DEBUG "VFP: \str\n" + .byte 0 + .previous #endif .endm @ VFP hardware support entry point. @ -@ r0 = faulted instruction -@ r2 = faulted PC+4 -@ r9 = successful return +@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb) +@ r2 = PC value to resume execution after successful emulation +@ r9 = normal "successful" return address @ r10 = vfp_state union -@ lr = failure return - - .globl vfp_support_entry -vfp_support_entry: +@ r11 = CPU number +@ lr = unrecognised instruction return address +@ IRQs enabled. 
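The vfp_support_entry header comment above describes the register interface; the assembly that follows implements the hardware-state ownership test. A minimal C sketch of that test, mirroring the vfp_state_in_hw() helper this same patch adds in vfpmodule.c further down (the NR_CPUS value, struct layout and field names here are illustrative, not the kernel's):

	#include <stdbool.h>

	#define NR_CPUS 4			/* illustrative value */

	union vfp_state;			/* per-thread VFP register file */
	union vfp_state *vfp_current_hw_state[NR_CPUS];

	struct thread_vfp {
		union vfp_state *vfpstate;
		unsigned int cpu;		/* CPU the state was last loaded on */
	};

	static bool vfp_state_in_hw(unsigned int cpu, const struct thread_vfp *t)
	{
	#ifdef CONFIG_SMP
		if (t->cpu != cpu)		/* migrated since last load: stale */
			return false;
	#endif
		return vfp_current_hw_state[cpu] == t->vfpstate;
	}

On UP a pointer match is sufficient; on SMP the saved state also records the CPU it was last loaded onto, so a thread that migrated forces a reload even if it still nominally owns the context.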
+ENTRY(vfp_support_entry) DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10 + ldr r3, [sp, #S_PSR] @ Neither lazy restore nor FP exceptions + and r3, r3, #MODE_MASK @ are supported in kernel mode + teq r3, #USR_MODE + bne vfp_kmode_exception @ Returns through lr + VFPFMRX r1, FPEXC @ Is the VFP enabled? DBGSTR1 "fpexc %08x", r1 - tst r1, #FPEXC_ENABLE + tst r1, #FPEXC_EN bne look_for_VFP_exceptions @ VFP is already enabled DBGSTR1 "enable %x", r10 - ldr r3, last_VFP_context_address - orr r1, r1, #FPEXC_ENABLE @ user FPEXC has the enable bit set - ldr r4, [r3] @ last_VFP_context pointer - bic r5, r1, #FPEXC_EXCEPTION @ make sure exceptions are disabled - cmp r4, r10 - beq check_for_exception @ we are returning to the same - @ process, so the registers are - @ still there. In this case, we do - @ not want to drop a pending exception. + ldr r3, vfp_current_hw_state_address + orr r1, r1, #FPEXC_EN @ user FPEXC has the enable bit set + ldr r4, [r3, r11, lsl #2] @ vfp_current_hw_state pointer + bic r5, r1, #FPEXC_EX @ make sure exceptions are disabled + cmp r4, r10 @ this thread owns the hw context? +#ifndef CONFIG_SMP + @ For UP, checking that this thread owns the hw context is + @ sufficient to determine that the hardware state is valid. + beq vfp_hw_state_valid + + @ On UP, we lazily save the VFP context. As a different + @ thread wants ownership of the VFP hardware, save the old + @ state if there was a previous (valid) owner. VFPFMXR FPEXC, r5 @ enable VFP, disable any pending @ exceptions, so we can get at the @ rest of it - @ Save out the current registers to the old thread state - DBGSTR1 "save old state %p", r4 - cmp r4, #0 - beq no_old_VFP_process + cmp r4, #0 @ if the vfp_current_hw_state is NULL + beq vfp_reload_hw @ then the hw state needs reloading + VFPFSTMIA r4, r5 @ save the working registers VFPFMRX r5, FPSCR @ current status - VFPFMRX r6, FPINST @ FPINST (always there, rev0 onwards) - tst r1, #FPEXC_FPV2 @ is there an FPINST2 to read? - VFPFMRX r8, FPINST2, NE @ FPINST2 if needed - avoids reading - @ nonexistant reg on rev0 - VFPFSTMIA r4 @ save the working registers - add r4, r4, #8*16+4 +#ifndef CONFIG_CPU_FEROCEON + tst r1, #FPEXC_EX @ is there additional state to save? + beq 1f + VFPFMRX r6, FPINST @ FPINST (only if FPEXC.EX is set) + tst r1, #FPEXC_FP2V @ is there an FPINST2 to read? + beq 1f + VFPFMRX r8, FPINST2 @ FPINST2 if needed (and present) +1: +#endif stmia r4, {r1, r5, r6, r8} @ save FPEXC, FPSCR, FPINST, FPINST2 - @ and point r4 at the word at the - @ start of the register dump +vfp_reload_hw: + +#else + @ For SMP, if this thread does not own the hw context, then we + @ need to reload it. No need to save the old state as on SMP, + @ we always save the state when we switch away from a thread. + bne vfp_reload_hw + + @ This thread has ownership of the current hardware context. + @ However, it may have been migrated to another CPU, in which + @ case the saved state is newer than the hardware context. + @ Check this by looking at the CPU number which the state was + @ last loaded onto. + ldr ip, [r10, #VFP_CPU] + teq ip, r11 + beq vfp_hw_state_valid + +vfp_reload_hw: + @ We're loading this threads state into the VFP hardware. Update + @ the CPU number which contains the most up to date VFP context. 
+ str r11, [r10, #VFP_CPU] + + VFPFMXR FPEXC, r5 @ enable VFP, disable any pending + @ exceptions, so we can get at the + @ rest of it +#endif -no_old_VFP_process: DBGSTR1 "load state %p", r10 - str r10, [r3] @ update the last_VFP_context pointer + str r10, [r3, r11, lsl #2] @ update the vfp_current_hw_state pointer @ Load the saved state back into the VFP - add r4, r10, #8*16+4 - ldmia r4, {r1, r5, r6, r8} @ load FPEXC, FPSCR, FPINST, FPINST2 - VFPFLDMIA r10 @ reload the working registers while + VFPFLDMIA r10, r5 @ reload the working registers while @ FPEXC is in a safe state - tst r1, #FPEXC_FPV2 @ is there an FPINST2 to write? - VFPFMXR FPINST2, r8, NE @ FPINST2 if needed - avoids writing - @ nonexistant reg on rev0 - VFPFMXR FPINST, r6 + ldmia r10, {r1, r5, r6, r8} @ load FPEXC, FPSCR, FPINST, FPINST2 +#ifndef CONFIG_CPU_FEROCEON + tst r1, #FPEXC_EX @ is there additional state to restore? + beq 1f + VFPFMXR FPINST, r6 @ restore FPINST (only if FPEXC.EX is set) + tst r1, #FPEXC_FP2V @ is there an FPINST2 to write? + beq 1f + VFPFMXR FPINST2, r8 @ FPINST2 if needed (and present) +1: +#endif VFPFMXR FPSCR, r5 @ restore status -check_for_exception: - tst r1, #FPEXC_EXCEPTION +@ The context stored in the VFP hardware is up to date with this thread +vfp_hw_state_valid: + tst r1, #FPEXC_EX bne process_exception @ might as well handle the pending @ exception before retrying branch @ out before setting an FPEXC that @ stops us reading stuff - VFPFMXR FPEXC, r1 @ restore FPEXC last - sub r2, r2, #4 - str r2, [sp, #S_PC] @ retry the instruction + VFPFMXR FPEXC, r1 @ Restore FPEXC last + sub r2, r2, #4 @ Retry current instruction - if Thumb + str r2, [sp, #S_PC] @ mode it's two 16-bit instructions, + @ else it's one 32-bit instruction, so + @ always subtract 4 from the following + @ instruction address. + dec_preempt_count_ti r10, r4 mov pc, r9 @ we think we have handled things look_for_VFP_exceptions: - tst r1, #FPEXC_EXCEPTION + @ Check for synchronous or asynchronous exception + tst r1, #FPEXC_EX | FPEXC_DEX bne process_exception + @ On some implementations of the VFP subarch 1, setting FPSCR.IXE + @ causes all the CDP instructions to be bounced synchronously without + @ setting the FPEXC.EX bit VFPFMRX r5, FPSCR - tst r5, #FPSCR_IXE @ IXE doesn't set FPEXC_EXCEPTION ! + tst r5, #FPSCR_IXE bne process_exception @ Fall into hand on to next handler - appropriate coproc instr @ not recognised by VFP DBGSTR "not VFP" + dec_preempt_count_ti r10, r4 mov pc, lr process_exception: DBGSTR "bounce" - sub r2, r2, #4 - str r2, [sp, #S_PC] @ retry the instruction on exit from - @ the imprecise exception handling in - @ the support code mov r2, sp @ nothing stacked - regdump is at TOS mov lr, r9 @ setup for a return to the user code. @@ -159,57 +213,105 @@ process_exception: @ r0 holds the trigger instruction @ r1 holds the FPEXC value @ r2 pointer to register dump - b VFP9_bounce @ we have handled this - the support + b VFP_bounce @ we have handled this - the support @ code will raise an exception if @ required. If not, the user code will @ retry the faulted instruction +ENDPROC(vfp_support_entry) -last_VFP_context_address: - .word last_VFP_context +ENTRY(vfp_save_state) + @ Save the current VFP state + @ r0 - save location + @ r1 - FPEXC + DBGSTR1 "save VFP state %p", r0 + VFPFSTMIA r0, r2 @ save the working registers + VFPFMRX r2, FPSCR @ current status + tst r1, #FPEXC_EX @ is there additional state to save? 
+ beq 1f + VFPFMRX r3, FPINST @ FPINST (only if FPEXC.EX is set) + tst r1, #FPEXC_FP2V @ is there an FPINST2 to read? + beq 1f + VFPFMRX r12, FPINST2 @ FPINST2 if needed (and present) +1: + stmia r0, {r1, r2, r3, r12} @ save FPEXC, FPSCR, FPINST, FPINST2 + mov pc, lr +ENDPROC(vfp_save_state) + + .align +vfp_current_hw_state_address: + .word vfp_current_hw_state - .globl vfp_get_float -vfp_get_float: - add pc, pc, r0, lsl #3 + .macro tbl_branch, base, tmp, shift +#ifdef CONFIG_THUMB2_KERNEL + adr \tmp, 1f + add \tmp, \tmp, \base, lsl \shift + mov pc, \tmp +#else + add pc, pc, \base, lsl \shift mov r0, r0 +#endif +1: + .endm + +ENTRY(vfp_get_float) + tbl_branch r0, r3, #3 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 - mrc p10, 0, r0, c\dr, c0, 0 @ fmrs r0, s0 +1: mrc p10, 0, r0, c\dr, c0, 0 @ fmrs r0, s0 mov pc, lr - mrc p10, 0, r0, c\dr, c0, 4 @ fmrs r0, s1 + .org 1b + 8 +1: mrc p10, 0, r0, c\dr, c0, 4 @ fmrs r0, s1 mov pc, lr + .org 1b + 8 .endr +ENDPROC(vfp_get_float) - .globl vfp_put_float -vfp_put_float: - add pc, pc, r0, lsl #3 - mov r0, r0 +ENTRY(vfp_put_float) + tbl_branch r1, r3, #3 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 - mcr p10, 0, r1, c\dr, c0, 0 @ fmsr r0, s0 +1: mcr p10, 0, r0, c\dr, c0, 0 @ fmsr r0, s0 mov pc, lr - mcr p10, 0, r1, c\dr, c0, 4 @ fmsr r0, s1 + .org 1b + 8 +1: mcr p10, 0, r0, c\dr, c0, 4 @ fmsr r0, s1 mov pc, lr + .org 1b + 8 .endr +ENDPROC(vfp_put_float) - .globl vfp_get_double -vfp_get_double: - mov r0, r0, lsr #1 - add pc, pc, r0, lsl #3 - mov r0, r0 +ENTRY(vfp_get_double) + tbl_branch r0, r3, #3 + .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 +1: fmrrd r0, r1, d\dr + mov pc, lr + .org 1b + 8 + .endr +#ifdef CONFIG_VFPv3 + @ d16 - d31 registers .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 - mrrc p10, 1, r0, r1, c\dr @ fmrrd r0, r1, d\dr +1: mrrc p11, 3, r0, r1, c\dr @ fmrrd r0, r1, d\dr mov pc, lr + .org 1b + 8 .endr +#endif - @ virtual register 16 for compare with zero + @ virtual register 16 (or 32 if VFPv3) for compare with zero mov r0, #0 mov r1, #0 mov pc, lr +ENDPROC(vfp_get_double) - .globl vfp_put_double -vfp_put_double: - mov r0, r0, lsr #1 - add pc, pc, r0, lsl #3 - mov r0, r0 +ENTRY(vfp_put_double) + tbl_branch r2, r3, #3 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 - mcrr p10, 1, r1, r2, c\dr @ fmrrd r1, r2, d\dr +1: fmdrr d\dr, r0, r1 mov pc, lr + .org 1b + 8 .endr +#ifdef CONFIG_VFPv3 + @ d16 - d31 registers + .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 +1: mcrr p11, 3, r0, r1, c\dr @ fmdrr r0, r1, d\dr + mov pc, lr + .org 1b + 8 + .endr +#endif +ENDPROC(vfp_put_double) diff --git a/arch/arm/vfp/vfpinstr.h b/arch/arm/vfp/vfpinstr.h index 6c819aeae00..15b95b5ab97 100644 --- a/arch/arm/vfp/vfpinstr.h +++ b/arch/arm/vfp/vfpinstr.h @@ -52,11 +52,11 @@ #define FEXT_TO_IDX(inst) ((inst & 0x000f0000) >> 15 | (inst & (1 << 7)) >> 7) #define vfp_get_sd(inst) ((inst & 0x0000f000) >> 11 | (inst & (1 << 22)) >> 22) -#define vfp_get_dd(inst) ((inst & 0x0000f000) >> 12) +#define vfp_get_dd(inst) ((inst & 0x0000f000) >> 12 | (inst & (1 << 22)) >> 18) #define vfp_get_sm(inst) ((inst & 0x0000000f) << 1 | (inst & (1 << 5)) >> 5) -#define vfp_get_dm(inst) ((inst & 0x0000000f)) +#define vfp_get_dm(inst) ((inst & 0x0000000f) | (inst & (1 << 5)) >> 1) #define vfp_get_sn(inst) ((inst & 0x000f0000) >> 15 | (inst & (1 << 7)) >> 7) -#define vfp_get_dn(inst) ((inst & 0x000f0000) >> 16) +#define vfp_get_dn(inst) ((inst & 0x000f0000) >> 16 | (inst & (1 << 7)) >> 3) #define vfp_single(inst) (((inst) & 0x0000f00) == 0xa00) @@ -73,14 +73,14 @@ #define fmrx(_vfp_) 
({ \ u32 __v; \ - asm("mrc%? p10, 7, %0, " vfpreg(_vfp_) ", cr0, 0 @ fmrx %0, " #_vfp_ \ - : "=r" (__v)); \ + asm("mrc p10, 7, %0, " vfpreg(_vfp_) ", cr0, 0 @ fmrx %0, " #_vfp_ \ + : "=r" (__v) : : "cc"); \ __v; \ }) #define fmxr(_vfp_,_var_) \ - asm("mcr%? p10, 7, %0, " vfpreg(_vfp_) ", cr0, 0 @ fmxr " #_vfp_ ", %0" \ - : : "r" (_var_)) + asm("mcr p10, 7, %0, " vfpreg(_vfp_) ", cr0, 0 @ fmxr " #_vfp_ ", %0" \ + : : "r" (_var_) : "cc") u32 vfp_single_cpdo(u32 inst, u32 fpscr); u32 vfp_single_cprt(u32 inst, u32 fpscr, struct pt_regs *regs); diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index 22f3da4e082..2f37e1d6cb4 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -8,13 +8,24 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ -#include <linux/module.h> -#include <linux/config.h> #include <linux/types.h> +#include <linux/cpu.h> +#include <linux/cpu_pm.h> +#include <linux/hardirq.h> #include <linux/kernel.h> +#include <linux/notifier.h> #include <linux/signal.h> #include <linux/sched.h> +#include <linux/smp.h> #include <linux/init.h> +#include <linux/uaccess.h> +#include <linux/user.h> +#include <linux/export.h> + +#include <asm/cp15.h> +#include <asm/cputype.h> +#include <asm/system_info.h> +#include <asm/thread_notify.h> #include <asm/vfp.h> #include "vfpinstr.h" @@ -25,9 +36,9 @@ */ void vfp_testing_entry(void); void vfp_support_entry(void); +void vfp_null_entry(void); -void (*vfp_vector)(void) = vfp_testing_entry; -union vfp_state *last_VFP_context; +void (*vfp_vector)(void) = vfp_null_entry; /* * Dual-use variable. @@ -37,42 +48,177 @@ union vfp_state *last_VFP_context; unsigned int VFP_arch; /* - * Per-thread VFP initialisation. + * The pointer to the vfpstate structure of the thread which currently + * owns the context held in the VFP hardware, or NULL if the hardware + * context is invalid. + * + * For UP, this is sufficient to tell which thread owns the VFP context. + * However, for SMP, we also need to check the CPU number stored in the + * saved state too to catch migrations. + */ +union vfp_state *vfp_current_hw_state[NR_CPUS]; + +/* + * Is 'thread's most up to date state stored in this CPUs hardware? + * Must be called from non-preemptible context. */ -void vfp_flush_thread(union vfp_state *vfp) +static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread) { - memset(vfp, 0, sizeof(union vfp_state)); +#ifdef CONFIG_SMP + if (thread->vfpstate.hard.cpu != cpu) + return false; +#endif + return vfp_current_hw_state[cpu] == &thread->vfpstate; +} - vfp->hard.fpexc = FPEXC_ENABLE; - vfp->hard.fpscr = FPSCR_ROUND_NEAREST; +/* + * Force a reload of the VFP context from the thread structure. We do + * this by ensuring that access to the VFP hardware is disabled, and + * clear vfp_current_hw_state. Must be called from non-preemptible context. + */ +static void vfp_force_reload(unsigned int cpu, struct thread_info *thread) +{ + if (vfp_state_in_hw(cpu, thread)) { + fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); + vfp_current_hw_state[cpu] = NULL; + } +#ifdef CONFIG_SMP + thread->vfpstate.hard.cpu = NR_CPUS; +#endif +} - /* - * Disable VFP to ensure we initialise it first. - */ - fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_ENABLE); +/* + * Per-thread VFP initialization. 
+ */ +static void vfp_thread_flush(struct thread_info *thread) +{ + union vfp_state *vfp = &thread->vfpstate; + unsigned int cpu; /* - * Ensure we don't try to overwrite our newly initialised - * state information on the first fault. + * Disable VFP to ensure we initialize it first. We must ensure + * that the modification of vfp_current_hw_state[] and hardware + * disable are done for the same CPU and without preemption. + * + * Do this first to ensure that preemption won't overwrite our + * state saving should access to the VFP be enabled at this point. */ - if (last_VFP_context == vfp) - last_VFP_context = NULL; + cpu = get_cpu(); + if (vfp_current_hw_state[cpu] == vfp) + vfp_current_hw_state[cpu] = NULL; + fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); + put_cpu(); + + memset(vfp, 0, sizeof(union vfp_state)); + + vfp->hard.fpexc = FPEXC_EN; + vfp->hard.fpscr = FPSCR_ROUND_NEAREST; +#ifdef CONFIG_SMP + vfp->hard.cpu = NR_CPUS; +#endif +} + +static void vfp_thread_exit(struct thread_info *thread) +{ + /* release case: Per-thread VFP cleanup. */ + union vfp_state *vfp = &thread->vfpstate; + unsigned int cpu = get_cpu(); + + if (vfp_current_hw_state[cpu] == vfp) + vfp_current_hw_state[cpu] = NULL; + put_cpu(); +} + +static void vfp_thread_copy(struct thread_info *thread) +{ + struct thread_info *parent = current_thread_info(); + + vfp_sync_hwstate(parent); + thread->vfpstate = parent->vfpstate; +#ifdef CONFIG_SMP + thread->vfpstate.hard.cpu = NR_CPUS; +#endif } /* - * Per-thread VFP cleanup. + * When this function is called with the following 'cmd's, the following + * is true while this function is being run: + * THREAD_NOFTIFY_SWTICH: + * - the previously running thread will not be scheduled onto another CPU. + * - the next thread to be run (v) will not be running on another CPU. + * - thread->cpu is the local CPU number + * - not preemptible as we're called in the middle of a thread switch + * THREAD_NOTIFY_FLUSH: + * - the thread (v) will be running on the local CPU, so + * v === current_thread_info() + * - thread->cpu is the local CPU number at the time it is accessed, + * but may change at any time. + * - we could be preempted if tree preempt rcu is enabled, so + * it is unsafe to use thread->cpu. + * THREAD_NOTIFY_EXIT + * - the thread (v) will be running on the local CPU, so + * v === current_thread_info() + * - thread->cpu is the local CPU number at the time it is accessed, + * but may change at any time. + * - we could be preempted if tree preempt rcu is enabled, so + * it is unsafe to use thread->cpu. */ -void vfp_release_thread(union vfp_state *vfp) +static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v) { - if (last_VFP_context == vfp) - last_VFP_context = NULL; + struct thread_info *thread = v; + u32 fpexc; +#ifdef CONFIG_SMP + unsigned int cpu; +#endif + + switch (cmd) { + case THREAD_NOTIFY_SWITCH: + fpexc = fmrx(FPEXC); + +#ifdef CONFIG_SMP + cpu = thread->cpu; + + /* + * On SMP, if VFP is enabled, save the old state in + * case the thread migrates to a different CPU. The + * restoring is done lazily. + */ + if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu]) + vfp_save_state(vfp_current_hw_state[cpu], fpexc); +#endif + + /* + * Always disable VFP so we can lazily save/restore the + * old state. 
+ */ + fmxr(FPEXC, fpexc & ~FPEXC_EN); + break; + + case THREAD_NOTIFY_FLUSH: + vfp_thread_flush(thread); + break; + + case THREAD_NOTIFY_EXIT: + vfp_thread_exit(thread); + break; + + case THREAD_NOTIFY_COPY: + vfp_thread_copy(thread); + break; + } + + return NOTIFY_DONE; } +static struct notifier_block vfp_notifier_block = { + .notifier_call = vfp_notifier, +}; + /* * Raise a SIGFPE for the current process. * sicode describes the signal being raised. */ -void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs) +static void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs) { siginfo_t info; @@ -80,7 +226,7 @@ void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs) info.si_signo = SIGFPE; info.si_code = sicode; - info.si_addr = (void *)(instruction_pointer(regs) - 4); + info.si_addr = (void __user *)(instruction_pointer(regs) - 4); /* * This is the same as NWFPE, because it's not clear what @@ -92,15 +238,15 @@ void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs) send_sig_info(SIGFPE, &info, current); } -static void vfp_panic(char *reason) +static void vfp_panic(char *reason, u32 inst) { int i; - printk(KERN_ERR "VFP: Error: %s\n", reason); - printk(KERN_ERR "VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n", - fmrx(FPEXC), fmrx(FPSCR), fmrx(FPINST)); + pr_err("VFP: Error: %s\n", reason); + pr_err("VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n", + fmrx(FPEXC), fmrx(FPSCR), inst); for (i = 0; i < 32; i += 2) - printk(KERN_ERR "VFP: s%2u: 0x%08x s%2u: 0x%08x\n", + pr_err("VFP: s%2u: 0x%08x s%2u: 0x%08x\n", i, vfp_get_float(i), i+1, vfp_get_float(i+1)); } @@ -113,8 +259,8 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_ pr_debug("VFP: raising exceptions %08x\n", exceptions); - if (exceptions == (u32)-1) { - vfp_panic("unhandled bounce"); + if (exceptions == VFP_EXCEPTION_ERROR) { + vfp_panic("unhandled bounce", inst); vfp_raise_sigfpe(0, regs); return; } @@ -138,6 +284,7 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_ /* * These are arranged in priority order, least to highest. */ + RAISE(FPSCR_DZC, FPSCR_DZE, FPE_FLTDIV); RAISE(FPSCR_IXC, FPSCR_IXE, FPE_FLTRES); RAISE(FPSCR_UFC, FPSCR_UFE, FPE_FLTUND); RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF); @@ -152,7 +299,7 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_ */ static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs) { - u32 exceptions = (u32)-1; + u32 exceptions = VFP_EXCEPTION_ERROR; pr_debug("VFP: emulate: INST=0x%08x SCR=0x%08x\n", inst, fpscr); @@ -180,41 +327,72 @@ static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs) * emulate it. */ } - return exceptions; + return exceptions & ~VFP_NAN_FLAG; } /* * Package up a bounce condition. */ -void VFP9_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs) +void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs) { - u32 fpscr, orig_fpscr, exceptions, inst; + u32 fpscr, orig_fpscr, fpsid, exceptions; pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc); /* - * Enable access to the VFP so we can handle the bounce. 
+ * At this point, FPEXC can have the following configuration: + * + * EX DEX IXE + * 0 1 x - synchronous exception + * 1 x 0 - asynchronous exception + * 1 x 1 - sychronous on VFP subarch 1 and asynchronous on later + * 0 0 1 - synchronous on VFP9 (non-standard subarch 1 + * implementation), undefined otherwise + * + * Clear various bits and enable access to the VFP so we can + * handle the bounce. */ - fmxr(FPEXC, fpexc & ~(FPEXC_EXCEPTION|FPEXC_INV|FPEXC_UFC|FPEXC_IOC)); + fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_DEX|FPEXC_FP2V|FPEXC_VV|FPEXC_TRAP_MASK)); + fpsid = fmrx(FPSID); orig_fpscr = fpscr = fmrx(FPSCR); /* - * If we are running with inexact exceptions enabled, we need to - * emulate the trigger instruction. Note that as we're emulating - * the trigger instruction, we need to increment PC. + * Check for the special VFP subarch 1 and FPSCR.IXE bit case */ - if (fpscr & FPSCR_IXE) { - regs->ARM_pc += 4; + if ((fpsid & FPSID_ARCH_MASK) == (1 << FPSID_ARCH_BIT) + && (fpscr & FPSCR_IXE)) { + /* + * Synchronous exception, emulate the trigger instruction + */ goto emulate; } - barrier(); + if (fpexc & FPEXC_EX) { +#ifndef CONFIG_CPU_FEROCEON + /* + * Asynchronous exception. The instruction is read from FPINST + * and the interrupted instruction has to be restarted. + */ + trigger = fmrx(FPINST); + regs->ARM_pc -= 4; +#endif + } else if (!(fpexc & FPEXC_DEX)) { + /* + * Illegal combination of bits. It can be caused by an + * unallocated VFP instruction but with FPSCR.IXE set and not + * on VFP subarch 1. + */ + vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs); + goto exit; + } /* - * Modify fpscr to indicate the number of iterations remaining + * Modify fpscr to indicate the number of iterations remaining. + * If FPEXC.EX is 0, FPEXC.DEX is 1 and the FPEXC.VV bit indicates + * whether FPEXC.VECITR or FPSCR.LEN is used. */ - if (fpexc & FPEXC_EXCEPTION) { + if (fpexc & (FPEXC_EX | FPEXC_VV)) { u32 len; len = fpexc + (1 << FPEXC_LENGTH_BIT); @@ -228,16 +406,16 @@ void VFP9_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs) * FPEXC bounce reason, but this appears to be unreliable. * Emulate the bounced instruction instead. */ - inst = fmrx(FPINST); - exceptions = vfp_emulate_instruction(inst, fpscr, regs); + exceptions = vfp_emulate_instruction(trigger, fpscr, regs); if (exceptions) - vfp_raise_exceptions(exceptions, inst, orig_fpscr, regs); + vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs); /* - * If there isn't a second FP instruction, exit now. + * If there isn't a second FP instruction, exit now. Note that + * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1. 
*/ - if (!(fpexc & FPEXC_FPV2)) - return; + if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V)) + goto exit; /* * The barrier() here prevents fpinst2 being read @@ -245,44 +423,379 @@ void VFP9_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs) */ barrier(); trigger = fmrx(FPINST2); - fpscr = fmrx(FPSCR); emulate: - exceptions = vfp_emulate_instruction(trigger, fpscr, regs); + exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs); if (exceptions) vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs); + exit: + preempt_enable(); } - + +static void vfp_enable(void *unused) +{ + u32 access; + + BUG_ON(preemptible()); + access = get_copro_access(); + + /* + * Enable full access to VFP (cp10 and cp11) + */ + set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11)); +} + +#ifdef CONFIG_CPU_PM +static int vfp_pm_suspend(void) +{ + struct thread_info *ti = current_thread_info(); + u32 fpexc = fmrx(FPEXC); + + /* if vfp is on, then save state for resumption */ + if (fpexc & FPEXC_EN) { + pr_debug("%s: saving vfp state\n", __func__); + vfp_save_state(&ti->vfpstate, fpexc); + + /* disable, just in case */ + fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); + } else if (vfp_current_hw_state[ti->cpu]) { +#ifndef CONFIG_SMP + fmxr(FPEXC, fpexc | FPEXC_EN); + vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc); + fmxr(FPEXC, fpexc); +#endif + } + + /* clear any information we had about last context state */ + vfp_current_hw_state[ti->cpu] = NULL; + + return 0; +} + +static void vfp_pm_resume(void) +{ + /* ensure we have access to the vfp */ + vfp_enable(NULL); + + /* and disable it to ensure the next usage restores the state */ + fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); +} + +static int vfp_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd, + void *v) +{ + switch (cmd) { + case CPU_PM_ENTER: + vfp_pm_suspend(); + break; + case CPU_PM_ENTER_FAILED: + case CPU_PM_EXIT: + vfp_pm_resume(); + break; + } + return NOTIFY_OK; +} + +static struct notifier_block vfp_cpu_pm_notifier_block = { + .notifier_call = vfp_cpu_pm_notifier, +}; + +static void vfp_pm_init(void) +{ + cpu_pm_register_notifier(&vfp_cpu_pm_notifier_block); +} + +#else +static inline void vfp_pm_init(void) { } +#endif /* CONFIG_CPU_PM */ + +/* + * Ensure that the VFP state stored in 'thread->vfpstate' is up to date + * with the hardware state. + */ +void vfp_sync_hwstate(struct thread_info *thread) +{ + unsigned int cpu = get_cpu(); + + if (vfp_state_in_hw(cpu, thread)) { + u32 fpexc = fmrx(FPEXC); + + /* + * Save the last VFP state on this CPU. + */ + fmxr(FPEXC, fpexc | FPEXC_EN); + vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN); + fmxr(FPEXC, fpexc); + } + + put_cpu(); +} + +/* Ensure that the thread reloads the hardware VFP state on the next use. */ +void vfp_flush_hwstate(struct thread_info *thread) +{ + unsigned int cpu = get_cpu(); + + vfp_force_reload(cpu, thread); + + put_cpu(); +} + +/* + * Save the current VFP state into the provided structures and prepare + * for entry into a new function (signal handler). + */ +int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp, + struct user_vfp_exc __user *ufp_exc) +{ + struct thread_info *thread = current_thread_info(); + struct vfp_hard_struct *hwstate = &thread->vfpstate.hard; + int err = 0; + + /* Ensure that the saved hwstate is up-to-date. */ + vfp_sync_hwstate(thread); + + /* + * Copy the floating point registers. There can be unused + * registers see asm/hwcap.h for details. 
+ */ + err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs, + sizeof(hwstate->fpregs)); + /* + * Copy the status and control register. + */ + __put_user_error(hwstate->fpscr, &ufp->fpscr, err); + + /* + * Copy the exception registers. + */ + __put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err); + __put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err); + __put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err); + + if (err) + return -EFAULT; + + /* Ensure that VFP is disabled. */ + vfp_flush_hwstate(thread); + + /* + * As per the PCS, clear the length and stride bits for function + * entry. + */ + hwstate->fpscr &= ~(FPSCR_LENGTH_MASK | FPSCR_STRIDE_MASK); + return 0; +} + +/* Sanitise and restore the current VFP state from the provided structures. */ +int vfp_restore_user_hwstate(struct user_vfp __user *ufp, + struct user_vfp_exc __user *ufp_exc) +{ + struct thread_info *thread = current_thread_info(); + struct vfp_hard_struct *hwstate = &thread->vfpstate.hard; + unsigned long fpexc; + int err = 0; + + /* Disable VFP to avoid corrupting the new thread state. */ + vfp_flush_hwstate(thread); + + /* + * Copy the floating point registers. There can be unused + * registers see asm/hwcap.h for details. + */ + err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs, + sizeof(hwstate->fpregs)); + /* + * Copy the status and control register. + */ + __get_user_error(hwstate->fpscr, &ufp->fpscr, err); + + /* + * Sanitise and restore the exception registers. + */ + __get_user_error(fpexc, &ufp_exc->fpexc, err); + + /* Ensure the VFP is enabled. */ + fpexc |= FPEXC_EN; + + /* Ensure FPINST2 is invalid and the exception flag is cleared. */ + fpexc &= ~(FPEXC_EX | FPEXC_FP2V); + hwstate->fpexc = fpexc; + + __get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err); + __get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err); + + return err ? -EFAULT : 0; +} + +/* + * VFP hardware can lose all context when a CPU goes offline. + * As we will be running in SMP mode with CPU hotplug, we will save the + * hardware state at every thread switch. We clear our held state when + * a CPU has been killed, indicating that the VFP hardware doesn't contain + * a threads VFP state. When a CPU starts up, we re-enable access to the + * VFP hardware. + * + * Both CPU_DYING and CPU_STARTING are called on the CPU which + * is being offlined/onlined. + */ +static int vfp_hotplug(struct notifier_block *b, unsigned long action, + void *hcpu) +{ + if (action == CPU_DYING || action == CPU_DYING_FROZEN) + vfp_current_hw_state[(long)hcpu] = NULL; + else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) + vfp_enable(NULL); + return NOTIFY_OK; +} + +void vfp_kmode_exception(void) +{ + /* + * If we reach this point, a floating point exception has been raised + * while running in kernel mode. If the NEON/VFP unit was enabled at the + * time, it means a VFP instruction has been issued that requires + * software assistance to complete, something which is not currently + * supported in kernel mode. + * If the NEON/VFP unit was disabled, and the location pointed to below + * is properly preceded by a call to kernel_neon_begin(), something has + * caused the task to be scheduled out and back in again. In this case, + * rebuilding and running with CONFIG_DEBUG_ATOMIC_SLEEP enabled should + * be helpful in localizing the problem. 
+ */ + if (fmrx(FPEXC) & FPEXC_EN) + pr_crit("BUG: unsupported FP instruction in kernel mode\n"); + else + pr_crit("BUG: FP instruction issued in kernel mode with FP unit disabled\n"); +} + +#ifdef CONFIG_KERNEL_MODE_NEON + +/* + * Kernel-side NEON support functions + */ +void kernel_neon_begin(void) +{ + struct thread_info *thread = current_thread_info(); + unsigned int cpu; + u32 fpexc; + + /* + * Kernel mode NEON is only allowed outside of interrupt context + * with preemption disabled. This will make sure that the kernel + * mode NEON register contents never need to be preserved. + */ + BUG_ON(in_interrupt()); + cpu = get_cpu(); + + fpexc = fmrx(FPEXC) | FPEXC_EN; + fmxr(FPEXC, fpexc); + + /* + * Save the userland NEON/VFP state. Under UP, + * the owner could be a task other than 'current' + */ + if (vfp_state_in_hw(cpu, thread)) + vfp_save_state(&thread->vfpstate, fpexc); +#ifndef CONFIG_SMP + else if (vfp_current_hw_state[cpu] != NULL) + vfp_save_state(vfp_current_hw_state[cpu], fpexc); +#endif + vfp_current_hw_state[cpu] = NULL; +} +EXPORT_SYMBOL(kernel_neon_begin); + +void kernel_neon_end(void) +{ + /* Disable the NEON/VFP unit. */ + fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); + put_cpu(); +} +EXPORT_SYMBOL(kernel_neon_end); + +#endif /* CONFIG_KERNEL_MODE_NEON */ + /* * VFP support code initialisation. */ static int __init vfp_init(void) { unsigned int vfpsid; + unsigned int cpu_arch = cpu_architecture(); + + if (cpu_arch >= CPU_ARCH_ARMv6) + on_each_cpu(vfp_enable, NULL, 1); /* * First check that there is a VFP that we can use. * The handler is already setup to just log calls, so * we just need to read the VFPSID register. */ + vfp_vector = vfp_testing_entry; + barrier(); vfpsid = fmrx(FPSID); + barrier(); + vfp_vector = vfp_null_entry; - printk(KERN_INFO "VFP support v0.3: "); - if (VFP_arch) { - printk("not present\n"); - } else if (vfpsid & FPSID_NODOUBLE) { - printk("no double precision support\n"); + pr_info("VFP support v0.3: "); + if (VFP_arch) + pr_cont("not present\n"); + else if (vfpsid & FPSID_NODOUBLE) { + pr_cont("no double precision support\n"); } else { + hotcpu_notifier(vfp_hotplug, 0); + VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; /* Extract the architecture version */ - printk("implementor %02x architecture %d part %02x variant %x rev %x\n", + pr_cont("implementor %02x architecture %d part %02x variant %x rev %x\n", (vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT, (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT, (vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT, (vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT, (vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT); + vfp_vector = vfp_support_entry; + + thread_register_notifier(&vfp_notifier_block); + vfp_pm_init(); + + /* + * We detected VFP, and the support code is + * in place; report VFP support to userspace. + */ + elf_hwcap |= HWCAP_VFP; +#ifdef CONFIG_VFPv3 + if (VFP_arch >= 2) { + elf_hwcap |= HWCAP_VFPv3; + + /* + * Check for VFPv3 D16 and VFPv4 D16. CPUs in + * this configuration only have 16 x 64bit + * registers. + */ + if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1) + elf_hwcap |= HWCAP_VFPv3D16; /* also v4-D16 */ + else + elf_hwcap |= HWCAP_VFPD32; + } +#endif + /* + * Check for the presence of the Advanced SIMD + * load/store instructions, integer and single + * precision floating point operations. Only check + * for NEON if the hardware has the MVFR registers. 
+ */ + if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) { +#ifdef CONFIG_NEON + if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100) + elf_hwcap |= HWCAP_NEON; +#endif +#ifdef CONFIG_VFPv3 + if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000) + elf_hwcap |= HWCAP_VFPv4; +#endif + } } return 0; } -late_initcall(vfp_init); +core_initcall(vfp_init); diff --git a/arch/arm/vfp/vfpsingle.c b/arch/arm/vfp/vfpsingle.c index 14dd696ddeb..4f96c1617aa 100644 --- a/arch/arm/vfp/vfpsingle.c +++ b/arch/arm/vfp/vfpsingle.c @@ -34,7 +34,6 @@ #include <linux/bitops.h> #include <asm/div64.h> -#include <asm/ptrace.h> #include <asm/vfp.h> #include "vfpinstr.h" @@ -198,12 +197,14 @@ u32 vfp_single_normaliseround(int sd, struct vfp_single *vs, u32 fpscr, u32 exce vfp_single_dump("pack: final", vs); { s32 d = vfp_single_pack(vs); +#ifdef DEBUG pr_debug("VFP: %s: d(s%d)=%08x exceptions=%08x\n", func, sd, d, exceptions); - vfp_put_float(sd, d); +#endif + vfp_put_float(d, sd); } - return exceptions & ~VFP_NAN_FLAG; + return exceptions; } /* @@ -257,19 +258,19 @@ vfp_propagate_nan(struct vfp_single *vsd, struct vfp_single *vsn, */ static u32 vfp_single_fabs(int sd, int unused, s32 m, u32 fpscr) { - vfp_put_float(sd, vfp_single_packed_abs(m)); + vfp_put_float(vfp_single_packed_abs(m), sd); return 0; } static u32 vfp_single_fcpy(int sd, int unused, s32 m, u32 fpscr) { - vfp_put_float(sd, m); + vfp_put_float(m, sd); return 0; } static u32 vfp_single_fneg(int sd, int unused, s32 m, u32 fpscr) { - vfp_put_float(sd, vfp_single_packed_negate(m)); + vfp_put_float(vfp_single_packed_negate(m), sd); return 0; } @@ -333,7 +334,7 @@ static u32 vfp_single_fsqrt(int sd, int unused, s32 m, u32 fpscr) vsp = &vfp_single_default_qnan; ret = FPSCR_IOC; } - vfp_put_float(sd, vfp_single_pack(vsp)); + vfp_put_float(vfp_single_pack(vsp), sd); return ret; } @@ -506,7 +507,7 @@ static u32 vfp_single_fcvtd(int dd, int unused, s32 m, u32 fpscr) */ if (tm & (VFP_INFINITY|VFP_NAN)) { vdd.exponent = 2047; - if (tm & VFP_NAN) + if (tm == VFP_QNAN) vdd.significand |= VFP_DOUBLE_SIGNIFICAND_QNAN; goto pack_nan; } else if (tm & VFP_ZERO) @@ -514,14 +515,10 @@ static u32 vfp_single_fcvtd(int dd, int unused, s32 m, u32 fpscr) else vdd.exponent = vsm.exponent + (1023 - 127); - /* - * Technically, if bit 0 of dd is set, this is an invalid - * instruction. However, we ignore this for efficiency. - */ return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fcvtd"); pack_nan: - vfp_put_double(dd, vfp_double_pack(&vdd)); + vfp_put_double(vfp_double_pack(&vdd), dd); return exceptions; } @@ -617,7 +614,7 @@ static u32 vfp_single_ftoui(int sd, int unused, s32 m, u32 fpscr) pr_debug("VFP: ftoui: d(s%d)=%08x exceptions=%08x\n", sd, d, exceptions); - vfp_put_float(sd, d); + vfp_put_float(d, sd); return exceptions; } @@ -632,6 +629,7 @@ static u32 vfp_single_ftosi(int sd, int unused, s32 m, u32 fpscr) struct vfp_single vsm; u32 d, exceptions = 0; int rmode = fpscr & FPSCR_RMODE_MASK; + int tm; vfp_single_unpack(&vsm, m); vfp_single_dump("VSM", &vsm); @@ -639,10 +637,14 @@ static u32 vfp_single_ftosi(int sd, int unused, s32 m, u32 fpscr) /* * Do we have a denormalised number? 
*/ + tm = vfp_single_type(&vsm); if (vfp_single_type(&vsm) & VFP_DENORMAL) exceptions |= FPSCR_IDC; - if (vsm.exponent >= 127 + 32) { + if (tm & VFP_NAN) { + d = 0; + exceptions |= FPSCR_IOC; + } else if (vsm.exponent >= 127 + 32) { /* * m >= 2^31-2^7: invalid */ @@ -691,7 +693,7 @@ static u32 vfp_single_ftosi(int sd, int unused, s32 m, u32 fpscr) pr_debug("VFP: ftosi: d(s%d)=%08x exceptions=%08x\n", sd, d, exceptions); - vfp_put_float(sd, (s32)d); + vfp_put_float((s32)d, sd); return exceptions; } @@ -701,22 +703,22 @@ static u32 vfp_single_ftosiz(int sd, int unused, s32 m, u32 fpscr) return vfp_single_ftosi(sd, unused, m, FPSCR_ROUND_TOZERO); } -static u32 (* const fop_extfns[32])(int sd, int unused, s32 m, u32 fpscr) = { - [FEXT_TO_IDX(FEXT_FCPY)] = vfp_single_fcpy, - [FEXT_TO_IDX(FEXT_FABS)] = vfp_single_fabs, - [FEXT_TO_IDX(FEXT_FNEG)] = vfp_single_fneg, - [FEXT_TO_IDX(FEXT_FSQRT)] = vfp_single_fsqrt, - [FEXT_TO_IDX(FEXT_FCMP)] = vfp_single_fcmp, - [FEXT_TO_IDX(FEXT_FCMPE)] = vfp_single_fcmpe, - [FEXT_TO_IDX(FEXT_FCMPZ)] = vfp_single_fcmpz, - [FEXT_TO_IDX(FEXT_FCMPEZ)] = vfp_single_fcmpez, - [FEXT_TO_IDX(FEXT_FCVT)] = vfp_single_fcvtd, - [FEXT_TO_IDX(FEXT_FUITO)] = vfp_single_fuito, - [FEXT_TO_IDX(FEXT_FSITO)] = vfp_single_fsito, - [FEXT_TO_IDX(FEXT_FTOUI)] = vfp_single_ftoui, - [FEXT_TO_IDX(FEXT_FTOUIZ)] = vfp_single_ftouiz, - [FEXT_TO_IDX(FEXT_FTOSI)] = vfp_single_ftosi, - [FEXT_TO_IDX(FEXT_FTOSIZ)] = vfp_single_ftosiz, +static struct op fops_ext[32] = { + [FEXT_TO_IDX(FEXT_FCPY)] = { vfp_single_fcpy, 0 }, + [FEXT_TO_IDX(FEXT_FABS)] = { vfp_single_fabs, 0 }, + [FEXT_TO_IDX(FEXT_FNEG)] = { vfp_single_fneg, 0 }, + [FEXT_TO_IDX(FEXT_FSQRT)] = { vfp_single_fsqrt, 0 }, + [FEXT_TO_IDX(FEXT_FCMP)] = { vfp_single_fcmp, OP_SCALAR }, + [FEXT_TO_IDX(FEXT_FCMPE)] = { vfp_single_fcmpe, OP_SCALAR }, + [FEXT_TO_IDX(FEXT_FCMPZ)] = { vfp_single_fcmpz, OP_SCALAR }, + [FEXT_TO_IDX(FEXT_FCMPEZ)] = { vfp_single_fcmpez, OP_SCALAR }, + [FEXT_TO_IDX(FEXT_FCVT)] = { vfp_single_fcvtd, OP_SCALAR|OP_DD }, + [FEXT_TO_IDX(FEXT_FUITO)] = { vfp_single_fuito, OP_SCALAR }, + [FEXT_TO_IDX(FEXT_FSITO)] = { vfp_single_fsito, OP_SCALAR }, + [FEXT_TO_IDX(FEXT_FTOUI)] = { vfp_single_ftoui, OP_SCALAR }, + [FEXT_TO_IDX(FEXT_FTOUIZ)] = { vfp_single_ftouiz, OP_SCALAR }, + [FEXT_TO_IDX(FEXT_FTOSI)] = { vfp_single_ftosi, OP_SCALAR }, + [FEXT_TO_IDX(FEXT_FTOSIZ)] = { vfp_single_ftosiz, OP_SCALAR }, }; @@ -913,6 +915,8 @@ vfp_single_multiply_accumulate(int sd, int sn, s32 m, u32 fpscr, u32 negate, cha v = vfp_get_float(sd); pr_debug("VFP: s%u = %08x\n", sd, v); vfp_single_unpack(&vsn, v); + if (vsn.exponent == 0 && vsn.significand) + vfp_single_normalise_denormal(&vsn); if (negate & NEG_SUBTRACT) vsn.sign = vfp_sign_negate(vsn.sign); @@ -1126,7 +1130,7 @@ static u32 vfp_single_fdiv(int sd, int sn, s32 m, u32 fpscr) vsn_nan: exceptions = vfp_propagate_nan(&vsd, &vsn, &vsm, fpscr); pack: - vfp_put_float(sd, vfp_single_pack(&vsd)); + vfp_put_float(vfp_single_pack(&vsd), sd); return exceptions; vsm_nan: @@ -1146,20 +1150,20 @@ static u32 vfp_single_fdiv(int sd, int sn, s32 m, u32 fpscr) goto pack; invalid: - vfp_put_float(sd, vfp_single_pack(&vfp_single_default_qnan)); + vfp_put_float(vfp_single_pack(&vfp_single_default_qnan), sd); return FPSCR_IOC; } -static u32 (* const fop_fns[16])(int sd, int sn, s32 m, u32 fpscr) = { - [FOP_TO_IDX(FOP_FMAC)] = vfp_single_fmac, - [FOP_TO_IDX(FOP_FNMAC)] = vfp_single_fnmac, - [FOP_TO_IDX(FOP_FMSC)] = vfp_single_fmsc, - [FOP_TO_IDX(FOP_FNMSC)] = vfp_single_fnmsc, - [FOP_TO_IDX(FOP_FMUL)] = 
vfp_single_fmul, - [FOP_TO_IDX(FOP_FNMUL)] = vfp_single_fnmul, - [FOP_TO_IDX(FOP_FADD)] = vfp_single_fadd, - [FOP_TO_IDX(FOP_FSUB)] = vfp_single_fsub, - [FOP_TO_IDX(FOP_FDIV)] = vfp_single_fdiv, +static struct op fops[16] = { + [FOP_TO_IDX(FOP_FMAC)] = { vfp_single_fmac, 0 }, + [FOP_TO_IDX(FOP_FNMAC)] = { vfp_single_fnmac, 0 }, + [FOP_TO_IDX(FOP_FMSC)] = { vfp_single_fmsc, 0 }, + [FOP_TO_IDX(FOP_FNMSC)] = { vfp_single_fnmsc, 0 }, + [FOP_TO_IDX(FOP_FMUL)] = { vfp_single_fmul, 0 }, + [FOP_TO_IDX(FOP_FNMUL)] = { vfp_single_fnmul, 0 }, + [FOP_TO_IDX(FOP_FADD)] = { vfp_single_fadd, 0 }, + [FOP_TO_IDX(FOP_FSUB)] = { vfp_single_fsub, 0 }, + [FOP_TO_IDX(FOP_FDIV)] = { vfp_single_fdiv, 0 }, }; #define FREG_BANK(x) ((x) & 0x18) @@ -1169,60 +1173,68 @@ u32 vfp_single_cpdo(u32 inst, u32 fpscr) { u32 op = inst & FOP_MASK; u32 exceptions = 0; - unsigned int sd = vfp_get_sd(inst); + unsigned int dest; unsigned int sn = vfp_get_sn(inst); unsigned int sm = vfp_get_sm(inst); unsigned int vecitr, veclen, vecstride; - u32 (*fop)(int, int, s32, u32); + struct op *fop; - veclen = fpscr & FPSCR_LENGTH_MASK; vecstride = 1 + ((fpscr & FPSCR_STRIDE_MASK) == FPSCR_STRIDE_MASK); + fop = (op == FOP_EXT) ? &fops_ext[FEXT_TO_IDX(inst)] : &fops[FOP_TO_IDX(op)]; + + /* + * fcvtsd takes a dN register number as destination, not sN. + * Technically, if bit 0 of dd is set, this is an invalid + * instruction. However, we ignore this for efficiency. + * It also only operates on scalars. + */ + if (fop->flags & OP_DD) + dest = vfp_get_dd(inst); + else + dest = vfp_get_sd(inst); + /* * If destination bank is zero, vector length is always '1'. * ARM DDI0100F C5.1.3, C5.3.2. */ - if (FREG_BANK(sd) == 0) + if ((fop->flags & OP_SCALAR) || FREG_BANK(dest) == 0) veclen = 0; + else + veclen = fpscr & FPSCR_LENGTH_MASK; pr_debug("VFP: vecstride=%u veclen=%u\n", vecstride, (veclen >> FPSCR_LENGTH_BIT) + 1); - fop = (op == FOP_EXT) ? fop_extfns[sn] : fop_fns[FOP_TO_IDX(op)]; - if (!fop) + if (!fop->fn) goto invalid; for (vecitr = 0; vecitr <= veclen; vecitr += 1 << FPSCR_LENGTH_BIT) { s32 m = vfp_get_float(sm); u32 except; + char type; + type = fop->flags & OP_DD ? 'd' : 's'; if (op == FOP_EXT) - pr_debug("VFP: itr%d (s%u) = op[%u] (s%u=%08x)\n", - vecitr >> FPSCR_LENGTH_BIT, sd, sn, sm, m); + pr_debug("VFP: itr%d (%c%u) = op[%u] (s%u=%08x)\n", + vecitr >> FPSCR_LENGTH_BIT, type, dest, sn, + sm, m); else - pr_debug("VFP: itr%d (s%u) = (s%u) op[%u] (s%u=%08x)\n", - vecitr >> FPSCR_LENGTH_BIT, sd, sn, + pr_debug("VFP: itr%d (%c%u) = (s%u) op[%u] (s%u=%08x)\n", + vecitr >> FPSCR_LENGTH_BIT, type, dest, sn, FOP_TO_IDX(op), sm, m); - except = fop(sd, sn, m, fpscr); + except = fop->fn(dest, sn, m, fpscr); pr_debug("VFP: itr%d: exceptions=%08x\n", vecitr >> FPSCR_LENGTH_BIT, except); exceptions |= except; /* - * This ensures that comparisons only operate on scalars; - * comparisons always return with one FPSCR status bit set. - */ - if (except & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V)) - break; - - /* * CHECK: It appears to be undefined whether we stop when * we encounter an exception. We continue. */ - - sd = FREG_BANK(sd) + ((FREG_IDX(sd) + vecstride) & 7); + dest = FREG_BANK(dest) + ((FREG_IDX(dest) + vecstride) & 7); sn = FREG_BANK(sn) + ((FREG_IDX(sn) + vecstride) & 7); if (FREG_BANK(sm) != 0) sm = FREG_BANK(sm) + ((FREG_IDX(sm) + vecstride) & 7); |
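The loop at the end of vfp_single_cpdo() above steps vector operands between iterations. A small self-contained sketch of that stepping, with the FREG_BANK/FREG_IDX macros copied from the file (the helper name vfp_step_sreg is made up for illustration):

	#define FREG_BANK(x)	((x) & 0x18)
	#define FREG_IDX(x)	((x) & 7)

	/* Advance a single-precision register by the vector stride,
	 * wrapping within its 8-register bank. */
	static unsigned int vfp_step_sreg(unsigned int reg, unsigned int stride)
	{
		return FREG_BANK(reg) + ((FREG_IDX(reg) + stride) & 7);
	}

For example, stepping s10 with stride 1 walks s11, s12, ..., s15 and then wraps to s8. An sm operand in bank 0 (s0-s7) is never stepped, thanks to the "if (FREG_BANK(sm) != 0)" guard in the loop above, which is what makes bank-0 operands behave as scalars.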
