Diffstat (limited to 'arch/arm/vfp')
-rw-r--r--  arch/arm/vfp/vfp.h        18
-rw-r--r--  arch/arm/vfp/vfpdouble.c  50
-rw-r--r--  arch/arm/vfp/vfphw.S      10
-rw-r--r--  arch/arm/vfp/vfpmodule.c   4
-rw-r--r--  arch/arm/vfp/vfpsingle.c  55
5 files changed, 82 insertions, 55 deletions
diff --git a/arch/arm/vfp/vfp.h b/arch/arm/vfp/vfp.h
index 4b97950984e..96fdf30f6a3 100644
--- a/arch/arm/vfp/vfp.h
+++ b/arch/arm/vfp/vfp.h
@@ -156,7 +156,7 @@ struct vfp_single {
};
extern s32 vfp_get_float(unsigned int reg);
-extern void vfp_put_float(unsigned int reg, s32 val);
+extern void vfp_put_float(s32 val, unsigned int reg);
/*
* VFP_SINGLE_MANTISSA_BITS - number of bits in the mantissa
@@ -267,7 +267,7 @@ struct vfp_double {
*/
#define VFP_REG_ZERO 16
extern u64 vfp_get_double(unsigned int reg);
-extern void vfp_put_double(unsigned int reg, u64 val);
+extern void vfp_put_double(u64 val, unsigned int reg);
#define VFP_DOUBLE_MANTISSA_BITS (52)
#define VFP_DOUBLE_EXPONENT_BITS (11)
@@ -341,15 +341,17 @@ static inline int vfp_double_type(struct vfp_double *s)
u32 vfp_double_normaliseround(int dd, struct vfp_double *vd, u32 fpscr, u32 exceptions, const char *func);
-/*
- * System registers
- */
-extern u32 vfp_get_sys(unsigned int reg);
-extern void vfp_put_sys(unsigned int reg, u32 val);
-
u32 vfp_estimate_sqrt_significand(u32 exponent, u32 significand);
/*
* A special flag to tell the normalisation code not to normalise.
*/
#define VFP_NAN_FLAG 0x100
+
+/*
+ * A bit pattern used to indicate the initial (unset) value of the
+ * exception mask, in case nothing handles an instruction. This
+ * doesn't include the NAN flag, which gets masked out before
+ * we check for an error.
+ */
+#define VFP_EXCEPTION_ERROR ((u32)-1 & ~VFP_NAN_FLAG)
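
With VFP_NAN_FLAG at 0x100, the new sentinel works out to ((u32)-1 & ~0x100) = 0xfffffeff, a value no handler ever returns as a genuine FPSCR exception mask. The vfpmodule.c hunks below use it exactly that way. A minimal standalone sketch of the pattern (illustrative only, with made-up helper names, not kernel code):

    #include <stdint.h>

    #define VFP_NAN_FLAG        0x100
    #define VFP_EXCEPTION_ERROR ((uint32_t)-1 & ~VFP_NAN_FLAG)

    /* stand-in for vfp_emulate_instruction(): start from the sentinel and
     * let a handler overwrite it with a real exception mask */
    static uint32_t emulate_one(uint32_t inst)
    {
        uint32_t exceptions = VFP_EXCEPTION_ERROR;
        /* ... dispatch on 'inst'; every handled case assigns a real mask ... */
        return exceptions;
    }

    /* stand-in for vfp_raise_exceptions(): if the sentinel survived, the
     * instruction was never handled ("unhandled bounce" in the real code) */
    static void report(uint32_t exceptions)
    {
        if (exceptions == VFP_EXCEPTION_ERROR) {
            /* panic / deliver SIGFPE */
        }
    }
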
diff --git a/arch/arm/vfp/vfpdouble.c b/arch/arm/vfp/vfpdouble.c
index 009038c8113..add48e36c2d 100644
--- a/arch/arm/vfp/vfpdouble.c
+++ b/arch/arm/vfp/vfpdouble.c
@@ -195,7 +195,7 @@ u32 vfp_double_normaliseround(int dd, struct vfp_double *vd, u32 fpscr, u32 exce
s64 d = vfp_double_pack(vd);
pr_debug("VFP: %s: d(d%d)=%016llx exceptions=%08x\n", func,
dd, d, exceptions);
- vfp_put_double(dd, d);
+ vfp_put_double(d, dd);
}
return exceptions;
}
@@ -250,19 +250,19 @@ vfp_propagate_nan(struct vfp_double *vdd, struct vfp_double *vdn,
*/
static u32 vfp_double_fabs(int dd, int unused, int dm, u32 fpscr)
{
- vfp_put_double(dd, vfp_double_packed_abs(vfp_get_double(dm)));
+ vfp_put_double(vfp_double_packed_abs(vfp_get_double(dm)), dd);
return 0;
}
static u32 vfp_double_fcpy(int dd, int unused, int dm, u32 fpscr)
{
- vfp_put_double(dd, vfp_get_double(dm));
+ vfp_put_double(vfp_get_double(dm), dd);
return 0;
}
static u32 vfp_double_fneg(int dd, int unused, int dm, u32 fpscr)
{
- vfp_put_double(dd, vfp_double_packed_negate(vfp_get_double(dm)));
+ vfp_put_double(vfp_double_packed_negate(vfp_get_double(dm)), dd);
return 0;
}
@@ -287,7 +287,7 @@ static u32 vfp_double_fsqrt(int dd, int unused, int dm, u32 fpscr)
vdp = &vfp_double_default_qnan;
ret = FPSCR_IOC;
}
- vfp_put_double(dd, vfp_double_pack(vdp));
+ vfp_put_double(vfp_double_pack(vdp), dd);
return ret;
}
@@ -465,7 +465,7 @@ static u32 vfp_double_fcvts(int sd, int unused, int dm, u32 fpscr)
*/
if (tm & (VFP_INFINITY|VFP_NAN)) {
vsd.exponent = 255;
- if (tm & VFP_NAN)
+ if (tm == VFP_QNAN)
vsd.significand |= VFP_SINGLE_SIGNIFICAND_QNAN;
goto pack_nan;
} else if (tm & VFP_ZERO)
@@ -476,7 +476,7 @@ static u32 vfp_double_fcvts(int sd, int unused, int dm, u32 fpscr)
return vfp_single_normaliseround(sd, &vsd, fpscr, exceptions, "fcvts");
pack_nan:
- vfp_put_float(sd, vfp_single_pack(&vsd));
+ vfp_put_float(vfp_single_pack(&vsd), sd);
return exceptions;
}
@@ -573,7 +573,7 @@ static u32 vfp_double_ftoui(int sd, int unused, int dm, u32 fpscr)
pr_debug("VFP: ftoui: d(s%d)=%08x exceptions=%08x\n", sd, d, exceptions);
- vfp_put_float(sd, d);
+ vfp_put_float(d, sd);
return exceptions;
}
@@ -648,7 +648,7 @@ static u32 vfp_double_ftosi(int sd, int unused, int dm, u32 fpscr)
pr_debug("VFP: ftosi: d(s%d)=%08x exceptions=%08x\n", sd, d, exceptions);
- vfp_put_float(sd, (s32)d);
+ vfp_put_float((s32)d, sd);
return exceptions;
}
@@ -1084,7 +1084,7 @@ static u32 vfp_double_fdiv(int dd, int dn, int dm, u32 fpscr)
vdn_nan:
exceptions = vfp_propagate_nan(&vdd, &vdn, &vdm, fpscr);
pack:
- vfp_put_double(dd, vfp_double_pack(&vdd));
+ vfp_put_double(vfp_double_pack(&vdd), dd);
return exceptions;
vdm_nan:
@@ -1104,7 +1104,7 @@ static u32 vfp_double_fdiv(int dd, int dn, int dm, u32 fpscr)
goto pack;
invalid:
- vfp_put_double(dd, vfp_double_pack(&vfp_double_default_qnan));
+ vfp_put_double(vfp_double_pack(&vfp_double_default_qnan), dd);
return FPSCR_IOC;
}
@@ -1127,7 +1127,7 @@ u32 vfp_double_cpdo(u32 inst, u32 fpscr)
{
u32 op = inst & FOP_MASK;
u32 exceptions = 0;
- unsigned int dd = vfp_get_dd(inst);
+ unsigned int dest;
unsigned int dn = vfp_get_dn(inst);
unsigned int dm = vfp_get_dm(inst);
unsigned int vecitr, veclen, vecstride;
@@ -1137,10 +1137,20 @@ u32 vfp_double_cpdo(u32 inst, u32 fpscr)
vecstride = (1 + ((fpscr & FPSCR_STRIDE_MASK) == FPSCR_STRIDE_MASK)) * 2;
/*
+ * fcvtsd takes an sN register number as destination, not dN.
+ * It also always operates on scalars.
+ */
+ if ((inst & FEXT_MASK) == FEXT_FCVT) {
+ veclen = 0;
+ dest = vfp_get_sd(inst);
+ } else
+ dest = vfp_get_dd(inst);
+
+ /*
* If destination bank is zero, vector length is always '1'.
* ARM DDI0100F C5.1.3, C5.3.2.
*/
- if (FREG_BANK(dd) == 0)
+ if (FREG_BANK(dest) == 0)
veclen = 0;
pr_debug("VFP: vecstride=%u veclen=%u\n", vecstride,
@@ -1153,16 +1163,20 @@ u32 vfp_double_cpdo(u32 inst, u32 fpscr)
for (vecitr = 0; vecitr <= veclen; vecitr += 1 << FPSCR_LENGTH_BIT) {
u32 except;
- if (op == FOP_EXT)
+ if (op == FOP_EXT && (inst & FEXT_MASK) == FEXT_FCVT)
+ pr_debug("VFP: itr%d (s%u) = op[%u] (d%u)\n",
+ vecitr >> FPSCR_LENGTH_BIT,
+ dest, dn, dm);
+ else if (op == FOP_EXT)
pr_debug("VFP: itr%d (d%u) = op[%u] (d%u)\n",
vecitr >> FPSCR_LENGTH_BIT,
- dd, dn, dm);
+ dest, dn, dm);
else
pr_debug("VFP: itr%d (d%u) = (d%u) op[%u] (d%u)\n",
vecitr >> FPSCR_LENGTH_BIT,
- dd, dn, FOP_TO_IDX(op), dm);
+ dest, dn, FOP_TO_IDX(op), dm);
- except = fop(dd, dn, dm, fpscr);
+ except = fop(dest, dn, dm, fpscr);
pr_debug("VFP: itr%d: exceptions=%08x\n",
vecitr >> FPSCR_LENGTH_BIT, except);
@@ -1180,7 +1194,7 @@ u32 vfp_double_cpdo(u32 inst, u32 fpscr)
* we encounter an exception. We continue.
*/
- dd = FREG_BANK(dd) + ((FREG_IDX(dd) + vecstride) & 6);
+ dest = FREG_BANK(dest) + ((FREG_IDX(dest) + vecstride) & 6);
dn = FREG_BANK(dn) + ((FREG_IDX(dn) + vecstride) & 6);
if (FREG_BANK(dm) != 0)
dm = FREG_BANK(dm) + ((FREG_IDX(dm) + vecstride) & 6);
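
The double-to-single convert (FEXT_FCVT) writes a single-precision register and is defined to operate on scalars only, so decoding its destination with vfp_get_dd() would pick the wrong register and let the FPSCR vector length apply to it. Condensing the vfp_double_cpdo() hunks above, the new dispatch amounts to this (a sketch of the logic, not a drop-in replacement):

    unsigned int dest;

    if ((inst & FEXT_MASK) == FEXT_FCVT) {
        veclen = 0;                  /* FCVT always operates on scalars */
        dest = vfp_get_sd(inst);     /* destination is an sN register */
    } else {
        dest = vfp_get_dd(inst);     /* other CPDO ops write a dN register */
    }

    if (FREG_BANK(dest) == 0)        /* bank 0 destinations are scalar too */
        veclen = 0;

The same register number then feeds the fop(dest, dn, dm, fpscr) call and the per-iteration advance at the bottom of the loop.
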
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index eb683cd7716..e51e6679c40 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -178,12 +178,12 @@ vfp_get_float:
.globl vfp_put_float
vfp_put_float:
- add pc, pc, r0, lsl #3
+ add pc, pc, r1, lsl #3
mov r0, r0
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
- mcr p10, 0, r1, c\dr, c0, 0 @ fmsr r0, s0
+ mcr p10, 0, r0, c\dr, c0, 0 @ fmsr r0, s0
mov pc, lr
- mcr p10, 0, r1, c\dr, c0, 4 @ fmsr r0, s1
+ mcr p10, 0, r0, c\dr, c0, 4 @ fmsr r0, s1
mov pc, lr
.endr
@@ -203,9 +203,9 @@ vfp_get_double:
.globl vfp_put_double
vfp_put_double:
- add pc, pc, r0, lsl #3
+ add pc, pc, r2, lsl #3
mov r0, r0
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
- fmdrr d\dr, r1, r2
+ fmdrr d\dr, r0, r1
mov pc, lr
.endr
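
The assembly change follows directly from the new prototypes in vfp.h and the ARM procedure call standard: with the value as the first argument, vfp_put_float() receives it in r0 and the register number in r1, while vfp_put_double()'s 64-bit value occupies the r0/r1 pair and its register number lands in r2. Hence the computed jump now indexes on r1 (single) or r2 (double), and the mcr / fmdrr templates read the value from r0 or r0/r1. In C terms, with the register mapping per the calling convention shown as comments:

    extern void vfp_put_float(s32 val, unsigned int reg);   /* val -> r0,    reg -> r1 */
    extern void vfp_put_double(u64 val, unsigned int reg);  /* val -> r0/r1, reg -> r2 */
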
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 9d265d5e748..4178f6cc3d3 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -131,7 +131,7 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_
pr_debug("VFP: raising exceptions %08x\n", exceptions);
- if (exceptions == (u32)-1) {
+ if (exceptions == VFP_EXCEPTION_ERROR) {
vfp_panic("unhandled bounce");
vfp_raise_sigfpe(0, regs);
return;
@@ -170,7 +170,7 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_
*/
static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs)
{
- u32 exceptions = (u32)-1;
+ u32 exceptions = VFP_EXCEPTION_ERROR;
pr_debug("VFP: emulate: INST=0x%08x SCR=0x%08x\n", inst, fpscr);
diff --git a/arch/arm/vfp/vfpsingle.c b/arch/arm/vfp/vfpsingle.c
index dae2c2f4605..8f6c179cafb 100644
--- a/arch/arm/vfp/vfpsingle.c
+++ b/arch/arm/vfp/vfpsingle.c
@@ -200,7 +200,7 @@ u32 vfp_single_normaliseround(int sd, struct vfp_single *vs, u32 fpscr, u32 exce
s32 d = vfp_single_pack(vs);
pr_debug("VFP: %s: d(s%d)=%08x exceptions=%08x\n", func,
sd, d, exceptions);
- vfp_put_float(sd, d);
+ vfp_put_float(d, sd);
}
return exceptions;
@@ -257,19 +257,19 @@ vfp_propagate_nan(struct vfp_single *vsd, struct vfp_single *vsn,
*/
static u32 vfp_single_fabs(int sd, int unused, s32 m, u32 fpscr)
{
- vfp_put_float(sd, vfp_single_packed_abs(m));
+ vfp_put_float(vfp_single_packed_abs(m), sd);
return 0;
}
static u32 vfp_single_fcpy(int sd, int unused, s32 m, u32 fpscr)
{
- vfp_put_float(sd, m);
+ vfp_put_float(m, sd);
return 0;
}
static u32 vfp_single_fneg(int sd, int unused, s32 m, u32 fpscr)
{
- vfp_put_float(sd, vfp_single_packed_negate(m));
+ vfp_put_float(vfp_single_packed_negate(m), sd);
return 0;
}
@@ -333,7 +333,7 @@ static u32 vfp_single_fsqrt(int sd, int unused, s32 m, u32 fpscr)
vsp = &vfp_single_default_qnan;
ret = FPSCR_IOC;
}
- vfp_put_float(sd, vfp_single_pack(vsp));
+ vfp_put_float(vfp_single_pack(vsp), sd);
return ret;
}
@@ -506,7 +506,7 @@ static u32 vfp_single_fcvtd(int dd, int unused, s32 m, u32 fpscr)
*/
if (tm & (VFP_INFINITY|VFP_NAN)) {
vdd.exponent = 2047;
- if (tm & VFP_NAN)
+ if (tm == VFP_QNAN)
vdd.significand |= VFP_DOUBLE_SIGNIFICAND_QNAN;
goto pack_nan;
} else if (tm & VFP_ZERO)
@@ -514,14 +514,10 @@ static u32 vfp_single_fcvtd(int dd, int unused, s32 m, u32 fpscr)
else
vdd.exponent = vsm.exponent + (1023 - 127);
- /*
- * Technically, if bit 0 of dd is set, this is an invalid
- * instruction. However, we ignore this for efficiency.
- */
return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fcvtd");
pack_nan:
- vfp_put_double(dd, vfp_double_pack(&vdd));
+ vfp_put_double(vfp_double_pack(&vdd), dd);
return exceptions;
}
@@ -617,7 +613,7 @@ static u32 vfp_single_ftoui(int sd, int unused, s32 m, u32 fpscr)
pr_debug("VFP: ftoui: d(s%d)=%08x exceptions=%08x\n", sd, d, exceptions);
- vfp_put_float(sd, d);
+ vfp_put_float(d, sd);
return exceptions;
}
@@ -696,7 +692,7 @@ static u32 vfp_single_ftosi(int sd, int unused, s32 m, u32 fpscr)
pr_debug("VFP: ftosi: d(s%d)=%08x exceptions=%08x\n", sd, d, exceptions);
- vfp_put_float(sd, (s32)d);
+ vfp_put_float((s32)d, sd);
return exceptions;
}
@@ -1131,7 +1127,7 @@ static u32 vfp_single_fdiv(int sd, int sn, s32 m, u32 fpscr)
vsn_nan:
exceptions = vfp_propagate_nan(&vsd, &vsn, &vsm, fpscr);
pack:
- vfp_put_float(sd, vfp_single_pack(&vsd));
+ vfp_put_float(vfp_single_pack(&vsd), sd);
return exceptions;
vsm_nan:
@@ -1151,7 +1147,7 @@ static u32 vfp_single_fdiv(int sd, int sn, s32 m, u32 fpscr)
goto pack;
invalid:
- vfp_put_float(sd, vfp_single_pack(&vfp_single_default_qnan));
+ vfp_put_float(vfp_single_pack(&vfp_single_default_qnan), sd);
return FPSCR_IOC;
}
@@ -1174,7 +1170,7 @@ u32 vfp_single_cpdo(u32 inst, u32 fpscr)
{
u32 op = inst & FOP_MASK;
u32 exceptions = 0;
- unsigned int sd = vfp_get_sd(inst);
+ unsigned int dest;
unsigned int sn = vfp_get_sn(inst);
unsigned int sm = vfp_get_sm(inst);
unsigned int vecitr, veclen, vecstride;
@@ -1184,10 +1180,22 @@ u32 vfp_single_cpdo(u32 inst, u32 fpscr)
vecstride = 1 + ((fpscr & FPSCR_STRIDE_MASK) == FPSCR_STRIDE_MASK);
/*
+ * fcvtds takes a dN register number as destination, not sN.
+ * Technically, if bit 0 of dd is set, this is an invalid
+ * instruction. However, we ignore this for efficiency.
+ * It also only operates on scalars.
+ */
+ if ((inst & FEXT_MASK) == FEXT_FCVT) {
+ veclen = 0;
+ dest = vfp_get_dd(inst);
+ } else
+ dest = vfp_get_sd(inst);
+
+ /*
* If destination bank is zero, vector length is always '1'.
* ARM DDI0100F C5.1.3, C5.3.2.
*/
- if (FREG_BANK(sd) == 0)
+ if (FREG_BANK(dest) == 0)
veclen = 0;
pr_debug("VFP: vecstride=%u veclen=%u\n", vecstride,
@@ -1201,15 +1209,18 @@ u32 vfp_single_cpdo(u32 inst, u32 fpscr)
s32 m = vfp_get_float(sm);
u32 except;
- if (op == FOP_EXT)
+ if (op == FOP_EXT && (inst & FEXT_MASK) == FEXT_FCVT)
+ pr_debug("VFP: itr%d (d%u) = op[%u] (s%u=%08x)\n",
+ vecitr >> FPSCR_LENGTH_BIT, dest, sn, sm, m);
+ else if (op == FOP_EXT)
pr_debug("VFP: itr%d (s%u) = op[%u] (s%u=%08x)\n",
- vecitr >> FPSCR_LENGTH_BIT, sd, sn, sm, m);
+ vecitr >> FPSCR_LENGTH_BIT, dest, sn, sm, m);
else
pr_debug("VFP: itr%d (s%u) = (s%u) op[%u] (s%u=%08x)\n",
- vecitr >> FPSCR_LENGTH_BIT, sd, sn,
+ vecitr >> FPSCR_LENGTH_BIT, dest, sn,
FOP_TO_IDX(op), sm, m);
- except = fop(sd, sn, m, fpscr);
+ except = fop(dest, sn, m, fpscr);
pr_debug("VFP: itr%d: exceptions=%08x\n",
vecitr >> FPSCR_LENGTH_BIT, except);
@@ -1227,7 +1238,7 @@ u32 vfp_single_cpdo(u32 inst, u32 fpscr)
* we encounter an exception. We continue.
*/
- sd = FREG_BANK(sd) + ((FREG_IDX(sd) + vecstride) & 7);
+ dest = FREG_BANK(dest) + ((FREG_IDX(dest) + vecstride) & 7);
sn = FREG_BANK(sn) + ((FREG_IDX(sn) + vecstride) & 7);
if (FREG_BANK(sm) != 0)
sm = FREG_BANK(sm) + ((FREG_IDX(sm) + vecstride) & 7);
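
Finally, the fcvts/fcvtd hunks above tighten the quiet-bit test from (tm & VFP_NAN) to (tm == VFP_QNAN). Assuming the usual vfp.h encoding in which the signalling-NaN type is the quiet-NaN type plus an extra flag bit (an assumption; check vfp.h), the old test also matched signalling NaNs and forced the quiet-significand bit onto them, whereas the new test leaves a converted signalling NaN's significand alone. A tiny self-contained illustration with hypothetical flag values:

    #include <stdio.h>

    #define VFP_NAN        (1 << 3)               /* hypothetical bit positions */
    #define VFP_NAN_SIGNAL (1 << 4)
    #define VFP_QNAN       (VFP_NAN)
    #define VFP_SNAN       (VFP_NAN | VFP_NAN_SIGNAL)

    int main(void)
    {
        unsigned int tm = VFP_SNAN;                /* a signalling-NaN operand */

        printf("old test (tm & VFP_NAN):   %d\n", (tm & VFP_NAN) != 0);  /* 1 */
        printf("new test (tm == VFP_QNAN): %d\n", tm == VFP_QNAN);       /* 0 */
        return 0;
    }
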