Diffstat (limited to 'arch/m68k/fpsp040/res_func.S')
-rw-r--r--  arch/m68k/fpsp040/res_func.S  2040
1 file changed, 2040 insertions, 0 deletions
diff --git a/arch/m68k/fpsp040/res_func.S b/arch/m68k/fpsp040/res_func.S
new file mode 100644
index 00000000000..8f6b9521786
--- /dev/null
+++ b/arch/m68k/fpsp040/res_func.S
@@ -0,0 +1,2040 @@
+|
+| res_func.sa 3.9 7/29/91
+|
+| Normalizes denormalized numbers if necessary and updates the
+| stack frame. The frame is then restored to the 040, which
+| completes the operation. This routine
+| is only used by the unsupported data type/format handler.
+| (Exception vector 55).
+|
+| For packed move out (fmove.p fpm,<ea>) the operation is
+| completed here; data is packed and moved to user memory.
+| The stack is restored to the 040 only in the case of a
+| reportable exception in the conversion.
+|
+|
+| Copyright (C) Motorola, Inc. 1990
+| All Rights Reserved
+|
+| THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+| The copyright notice above does not evidence any
+| actual or intended publication of such source code.
+
+RES_FUNC: |idnt 2,1 | Motorola 040 Floating Point Software Package
+
+ |section 8
+
+#include "fpsp.h"
+
+sp_bnds: .short 0x3f81,0x407e
+ .short 0x3f6a,0x0000
+dp_bnds: .short 0x3c01,0x43fe
+ .short 0x3bcd,0x0000
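+|
+| sp_bnds/dp_bnds hold the single- and double-precision exponent
+| boundaries as biased extended-precision exponents (bias $3fff):
+| $3f81/$407e and $3c01/$43fe are the smallest and largest normal
+| exponents for single and double. The third entries, $3f6a and
+| $3bcd, appear to be the denormalized limits, 23 and 52 below the
+| respective minimums.
+|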
+
+ |xref mem_write
+ |xref bindec
+ |xref get_fline
+ |xref round
+ |xref denorm
+ |xref dest_ext
+ |xref dest_dbl
+ |xref dest_sgl
+ |xref unf_sub
+ |xref nrm_set
+ |xref dnrm_lp
+ |xref ovf_res
+ |xref reg_dest
+ |xref t_ovfl
+ |xref t_unfl
+
+ .global res_func
+ .global p_move
+
+res_func:
+ clrb DNRM_FLG(%a6)
+ clrb RES_FLG(%a6)
+ clrb CU_ONLY(%a6)
+ tstb DY_MO_FLG(%a6)
+ beqs monadic
+dyadic:
+ btstb #7,DTAG(%a6) |if dop = norm=000, zero=001,
+| ;inf=010 or nan=011
+ beqs monadic |then branch
+| ;else denorm
+| HANDLE DESTINATION DENORM HERE
+| ;set dtag to norm
+| ;write the tag & fpte15 to the fstack
+ leal FPTEMP(%a6),%a0
+
+ bclrb #sign_bit,LOCAL_EX(%a0)
+ sne LOCAL_SGN(%a0)
+
+ bsr nrm_set |normalize number (exp will go negative)
+ bclrb #sign_bit,LOCAL_EX(%a0) |get rid of false sign
+ bfclr LOCAL_SGN(%a0){#0:#8} |change back to IEEE ext format
+ beqs dpos
+ bsetb #sign_bit,LOCAL_EX(%a0)
+dpos:
+ bfclr DTAG(%a6){#0:#4} |set tag to normalized, FPTE15 = 0
+ bsetb #4,DTAG(%a6) |set FPTE15
+ orb #0x0f,DNRM_FLG(%a6)
+monadic:
+ leal ETEMP(%a6),%a0
+ btstb #direction_bit,CMDREG1B(%a6) |check direction
+ bne opclass3 |it is a mv out
+|
+| At this point, only opclass 0 and 2 possible
+|
+ btstb #7,STAG(%a6) |if sop = norm=000, zero=001,
+| ;inf=010 or nan=011
+ bne mon_dnrm |else denorm
+ tstb DY_MO_FLG(%a6) |all cases of dyadic instructions would
+ bne normal |require normalization of denorm
+
+| At this point:
+| monadic instructions: fabs = $18 fneg = $1a ftst = $3a
+| fmove = $00 fsmove = $40 fdmove = $44
+| fsqrt = $05* fssqrt = $41 fdsqrt = $45
+| (*fsqrt reencoded to $05)
+|
+ movew CMDREG1B(%a6),%d0 |get command register
+ andil #0x7f,%d0 |strip to only command word
+|
+| At this point, fabs, fneg, fsmove, fdmove, ftst, fsqrt, fssqrt, and
+| fdsqrt are possible.
+| For cases fabs, fneg, fsmove, and fdmove fall through to cu_norm (do not normalize)
+| For cases fsqrt, fssqrt, and fdsqrt goto normal (do normalize)
+|
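+| (The three sqrt encodings, $05, $41, and $45, are the only odd ones,
+| so bit 0 of the command word is enough to separate them from the
+| non-sqrt instructions listed above.)
+|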
+ btstl #0,%d0
+ bne normal |weed out fsqrt instructions
+|
+| cu_norm handles fmove in instructions with normalized inputs.
+| The routine round is used to correctly round the input for the
+| destination precision and mode.
+|
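+| (As the calls in this file set it up, round expects a0 to point to
+| the operand in internal format with the sign split out into
+| LOCAL_SGN, d1 to hold the rounding mode in its low word and the
+| precision code (0 extended, 1 single, 2 double) in its high word,
+| and d0 to hold the guard/round/sticky bits.)
+|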
+cu_norm:
+ st CU_ONLY(%a6) |set cu-only inst flag
+ movew CMDREG1B(%a6),%d0
+ andib #0x3b,%d0 |isolate bits to select inst
+ tstb %d0
+ beql cu_nmove |if zero, it is an fmove
+ cmpib #0x18,%d0
+ beql cu_nabs |if $18, it is fabs
+ cmpib #0x1a,%d0
+ beql cu_nneg |if $1a, it is fneg
+|
+| Inst is ftst. Check the source operand and set the cc's accordingly.
+| No write is done, so simply rts.
+|
+cu_ntst:
+ movew LOCAL_EX(%a0),%d0
+ bclrl #15,%d0
+ sne LOCAL_SGN(%a0)
+ beqs cu_ntpo
+ orl #neg_mask,USER_FPSR(%a6) |set N
+cu_ntpo:
+ cmpiw #0x7fff,%d0 |test for inf/nan
+ bnes cu_ntcz
+ tstl LOCAL_HI(%a0)
+ bnes cu_ntn
+ tstl LOCAL_LO(%a0)
+ bnes cu_ntn
+ orl #inf_mask,USER_FPSR(%a6)
+ rts
+cu_ntn:
+ orl #nan_mask,USER_FPSR(%a6)
+ movel ETEMP_EX(%a6),FPTEMP_EX(%a6) |set up fptemp sign for
+| ;snan handler
+
+ rts
+cu_ntcz:
+ tstl LOCAL_HI(%a0)
+ bnel cu_ntsx
+ tstl LOCAL_LO(%a0)
+ bnel cu_ntsx
+ orl #z_mask,USER_FPSR(%a6)
+cu_ntsx:
+ rts
+|
+| Inst is fabs. Execute the absolute value function on the input.
+| Branch to the fmove code. If the operand is NaN, do nothing.
+|
+cu_nabs:
+ moveb STAG(%a6),%d0
+ btstl #5,%d0 |test for NaN or zero
+ bne wr_etemp |if either, simply write it
+ bclrb #7,LOCAL_EX(%a0) |do abs
+ bras cu_nmove |fmove code will finish
+|
+| Inst is fneg. Execute the negate value function on the input.
+| Fall through to the fmove code. If the operand is NaN, do nothing.
+|
+cu_nneg:
+ moveb STAG(%a6),%d0
+ btstl #5,%d0 |test for NaN or zero
+ bne wr_etemp |if either, simply write it
+ bchgb #7,LOCAL_EX(%a0) |do neg
+|
+| Inst is fmove. This code also handles all result writes.
+| If bit 2 is set, round is forced to double. If it is clear,
+| and bit 6 is set, round is forced to single. If both are clear,
+| the round precision is found in the fpcr. If the rounding precision
+| is double or single, round the result before the write.
+|
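+| (For example, fsmove is encoded as $40, so bit 6 of the low command
+| byte is set and the result is rounded to single regardless of the
+| FPCR precision; fdmove is $44, so bit 2 forces double.)
+|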
+cu_nmove:
+ moveb STAG(%a6),%d0
+ andib #0xe0,%d0 |isolate stag bits
+ bne wr_etemp |if not norm, simply write it
+ btstb #2,CMDREG1B+1(%a6) |check for rd
+ bne cu_nmrd
+ btstb #6,CMDREG1B+1(%a6) |check for rs
+ bne cu_nmrs
+|
+| The move or operation is not with forced precision. Test for
+| nan or inf as the input; if so, simply write it to FPn. Use the
+| FPCR_MODE byte to get rounding on norms and zeros.
+|
+cu_nmnr:
+ bfextu FPCR_MODE(%a6){#0:#2},%d0
+ tstb %d0 |check for extended
+ beq cu_wrexn |if so, just write result
+ cmpib #1,%d0 |check for single
+ beq cu_nmrs |fall through to double
+|
+| The move is fdmove or round precision is double.
+|
+cu_nmrd:
+ movel #2,%d0 |set up the size for denorm
+ movew LOCAL_EX(%a0),%d1 |compare exponent to double threshold
+ andw #0x7fff,%d1
+ cmpw #0x3c01,%d1
+ bls cu_nunfl
+ bfextu FPCR_MODE(%a6){#2:#2},%d1 |get rmode
+ orl #0x00020000,%d1 |or in rprec (double)
+ clrl %d0 |clear g,r,s for round
+ bclrb #sign_bit,LOCAL_EX(%a0) |convert to internal format
+ sne LOCAL_SGN(%a0)
+ bsrl round
+ bfclr LOCAL_SGN(%a0){#0:#8}
+ beqs cu_nmrdc
+ bsetb #sign_bit,LOCAL_EX(%a0)
+cu_nmrdc:
+ movew LOCAL_EX(%a0),%d1 |check for overflow
+ andw #0x7fff,%d1
+ cmpw #0x43ff,%d1
+ bge cu_novfl |take care of overflow case
+ bra cu_wrexn
+|
+| The move is fsmove or round precision is single.
+|
+cu_nmrs:
+ movel #1,%d0
+ movew LOCAL_EX(%a0),%d1
+ andw #0x7fff,%d1
+ cmpw #0x3f81,%d1
+ bls cu_nunfl
+ bfextu FPCR_MODE(%a6){#2:#2},%d1
+ orl #0x00010000,%d1
+ clrl %d0
+ bclrb #sign_bit,LOCAL_EX(%a0)
+ sne LOCAL_SGN(%a0)
+ bsrl round
+ bfclr LOCAL_SGN(%a0){#0:#8}
+ beqs cu_nmrsc
+ bsetb #sign_bit,LOCAL_EX(%a0)
+cu_nmrsc:
+ movew LOCAL_EX(%a0),%d1
+ andw #0x7FFF,%d1
+ cmpw #0x407f,%d1
+ blt cu_wrexn
+|
+| The operand is above precision boundaries. Use t_ovfl to
+| generate the correct value.
+|
+cu_novfl:
+ bsr t_ovfl
+ bra cu_wrexn
+|
+| The operand is below precision boundaries. Use denorm to
+| generate the correct value.
+|
+cu_nunfl:
+ bclrb #sign_bit,LOCAL_EX(%a0)
+ sne LOCAL_SGN(%a0)
+ bsr denorm
+ bfclr LOCAL_SGN(%a0){#0:#8} |change back to IEEE ext format
+ beqs cu_nucont
+ bsetb #sign_bit,LOCAL_EX(%a0)
+cu_nucont:
+ bfextu FPCR_MODE(%a6){#2:#2},%d1
+ btstb #2,CMDREG1B+1(%a6) |check for rd
+ bne inst_d
+ btstb #6,CMDREG1B+1(%a6) |check for rs
+ bne inst_s
+ swap %d1
+ moveb FPCR_MODE(%a6),%d1
+ lsrb #6,%d1
+ swap %d1
+ bra inst_sd
+inst_d:
+ orl #0x00020000,%d1
+ bra inst_sd
+inst_s:
+ orl #0x00010000,%d1
+inst_sd:
+ bclrb #sign_bit,LOCAL_EX(%a0)
+ sne LOCAL_SGN(%a0)
+ bsrl round
+ bfclr LOCAL_SGN(%a0){#0:#8}
+ beqs cu_nuflp
+ bsetb #sign_bit,LOCAL_EX(%a0)
+cu_nuflp:
+ btstb #inex2_bit,FPSR_EXCEPT(%a6)
+ beqs cu_nuninx
+ orl #aunfl_mask,USER_FPSR(%a6) |if the round was inex, set AUNFL
+cu_nuninx:
+ tstl LOCAL_HI(%a0) |test for zero
+ bnes cu_nunzro
+ tstl LOCAL_LO(%a0)
+ bnes cu_nunzro
+|
+| The mantissa is zero from the denorm loop. Check sign and rmode
+| to see if rounding should have occurred which would leave the lsb.
+|
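+| (The rounding-mode bits isolated below are $00 = RN, $10 = RZ,
+| $20 = RM, and $30 = RP: RN and RZ leave a true zero, RM sets the
+| lsb only for a negative result, and RP only for a positive one.)
+|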
+ movel USER_FPCR(%a6),%d0
+ andil #0x30,%d0 |isolate rmode
+ cmpil #0x20,%d0
+ blts cu_nzro
+ bnes cu_nrp
+cu_nrm:
+ tstw LOCAL_EX(%a0) |if positive, set lsb
+ bges cu_nzro
+ btstb #7,FPCR_MODE(%a6) |check for double
+ beqs cu_nincs
+ bras cu_nincd
+cu_nrp:
+ tstw LOCAL_EX(%a0) |if positive, set lsb
+ blts cu_nzro
+ btstb #7,FPCR_MODE(%a6) |check for double
+ beqs cu_nincs
+cu_nincd:
+ orl #0x800,LOCAL_LO(%a0) |inc for double
+ bra cu_nunzro
+cu_nincs:
+ orl #0x100,LOCAL_HI(%a0) |inc for single
+ bra cu_nunzro
+cu_nzro:
+ orl #z_mask,USER_FPSR(%a6)
+ moveb STAG(%a6),%d0
+ andib #0xe0,%d0
+ cmpib #0x40,%d0 |check if input was tagged zero
+ beqs cu_numv
+cu_nunzro:
+ orl #unfl_mask,USER_FPSR(%a6) |set unfl
+cu_numv:
+ movel (%a0),ETEMP(%a6)
+ movel 4(%a0),ETEMP_HI(%a6)
+ movel 8(%a0),ETEMP_LO(%a6)
+|
+| Write the result to memory, setting the fpsr cc bits. NaN and Inf
+| bypass cu_wrexn.
+|
+cu_wrexn:
+ tstw LOCAL_EX(%a0) |test for zero
+ beqs cu_wrzero
+ cmpw #0x8000,LOCAL_EX(%a0) |test for zero
+ bnes cu_wreon
+cu_wrzero:
+ orl #z_mask,USER_FPSR(%a6) |set Z bit
+cu_wreon:
+ tstw LOCAL_EX(%a0)
+ bpl wr_etemp
+ orl #neg_mask,USER_FPSR(%a6)
+ bra wr_etemp
+
+|
+| HANDLE SOURCE DENORM HERE
+|
+| ;clear denorm stag to norm
+| ;write the new tag & ete15 to the fstack
+mon_dnrm:
+|
+| At this point, check for the cases in which normalizing the
+| denorm produces incorrect results.
+|
+ tstb DY_MO_FLG(%a6) |all cases of dyadic instructions would
+ bnes nrm_src |require normalization of denorm
+
+| At this point:
+| monadic instructions: fabs = $18 fneg = $1a ftst = $3a
+| fmove = $00 fsmove = $40 fdmove = $44
+| fsqrt = $05* fssqrt = $41 fdsqrt = $45
+| (*fsqrt reencoded to $05)
+|
+ movew CMDREG1B(%a6),%d0 |get command register
+ andil #0x7f,%d0 |strip to only command word
+|
+| At this point, fabs, fneg, fsmove, fdmove, ftst, fsqrt, fssqrt, and
+| fdsqrt are possible.
+| For cases fabs, fneg, fsmove, and fdmove goto cu_dnrm (do not normalize)
+| For cases fsqrt, fssqrt, and fdsqrt goto nrm_src (do normalize)
+|
+ btstl #0,%d0
+ bnes nrm_src |weed out fsqrt instructions
+ st CU_ONLY(%a6) |set cu-only inst flag
+ bra cu_dnrm |fmove, fabs, fneg, ftst
+| ;cases go to cu_dnrm
+nrm_src:
+ bclrb #sign_bit,LOCAL_EX(%a0)
+ sne LOCAL_SGN(%a0)
+ bsr nrm_set |normalize number (exponent will go
+| ; negative)
+ bclrb #sign_bit,LOCAL_EX(%a0) |get rid of false sign
+
+ bfclr LOCAL_SGN(%a0){#0:#8} |change back to IEEE ext format
+ beqs spos
+ bsetb #sign_bit,LOCAL_EX(%a0)
+spos:
+ bfclr STAG(%a6){#0:#4} |set tag to normalized, FPTE15 = 0
+ bsetb #4,STAG(%a6) |set ETE15
+ orb #0xf0,DNRM_FLG(%a6)
+normal:
+ tstb DNRM_FLG(%a6) |check if any of the ops were denorms
+ bne ck_wrap |if so, check if it is a potential
+| ;wrap-around case
+fix_stk:
+ moveb #0xfe,CU_SAVEPC(%a6)
+ bclrb #E1,E_BYTE(%a6)
+
+ clrw NMNEXC(%a6)
+
+ st RES_FLG(%a6) |indicate that a restore is needed
+ rts
+
+|
+| cu_dnrm handles all cu-only instructions (fmove, fabs, fneg, and
+| ftst) completely in software without an frestore to the 040.
+|
+cu_dnrm:
+ st CU_ONLY(%a6)
+ movew CMDREG1B(%a6),%d0
+ andib #0x3b,%d0 |isolate bits to select inst
+ tstb %d0
+ beql cu_dmove |if zero, it is an fmove
+ cmpib #0x18,%d0
+ beql cu_dabs |if $18, it is fabs
+ cmpib #0x1a,%d0
+ beql cu_dneg |if $1a, it is fneg
+|
+| Inst is ftst. Check the source operand and set the cc's accordingly.
+| No write is done, so simply rts.
+|
+cu_dtst:
+ movew LOCAL_EX(%a0),%d0
+ bclrl #15,%d0
+ sne LOCAL_SGN(%a0)
+ beqs cu_dtpo
+ orl #neg_mask,USER_FPSR(%a6) |set N
+cu_dtpo:
+ cmpiw #0x7fff,%d0 |test for inf/nan
+ bnes cu_dtcz
+ tstl LOCAL_HI(%a0)
+ bnes cu_dtn
+ tstl LOCAL_LO(%a0)
+ bnes cu_dtn
+ orl #inf_mask,USER_FPSR(%a6)
+ rts
+cu_dtn:
+ orl #nan_mask,USER_FPSR(%a6)
+ movel ETEMP_EX(%a6),FPTEMP_EX(%a6) |set up fptemp sign for
+| ;snan handler
+ rts
+cu_dtcz:
+ tstl LOCAL_HI(%a0)
+ bnel cu_dtsx
+ tstl LOCAL_LO(%a0)
+ bnel cu_dtsx
+ orl #z_mask,USER_FPSR(%a6)
+cu_dtsx:
+ rts
+|
+| Inst is fabs. Execute the absolute value function on the input.
+| Branch to the fmove code.
+|
+cu_dabs:
+ bclrb #7,LOCAL_EX(%a0) |do abs
+ bras cu_dmove |fmove code will finish
+|
+| Inst is fneg. Execute the negate value function on the input.
+| Fall through to the fmove code.
+|
+cu_dneg:
+ bchgb #7,LOCAL_EX(%a0) |do neg
+|
+| Inst is fmove. This code also handles all result writes.
+| If bit 2 is set, round is forced to double. If it is clear,
+| and bit 6 is set, round is forced to single. If both are clear,
+| the round precision is found in the fpcr. If the rounding precision
+| is double or single, the result is zero, and the mode is checked
+| to determine if the lsb of the result should be set.
+|
+cu_dmove:
+ btstb #2,CMDREG1B+1(%a6) |check for rd
+ bne cu_dmrd
+ btstb #6,CMDREG1B+1(%a6) |check for rs
+ bne cu_dmrs
+|
+| The move or operation is not with forced precision. Use the
+| FPCR_MODE byte to get rounding.
+|
+cu_dmnr:
+ bfextu FPCR_MODE(%a6){#0:#2},%d0
+ tstb %d0 |check for extended
+ beq cu_wrexd |if so, just write result
+ cmpib #1,%d0 |check for single
+ beq cu_dmrs |fall through to double
+|
+| The move is fdmove or round precision is double. Result is zero.
+| Check rmode for rp or rm and set lsb accordingly.
+|
+cu_dmrd:
+ bfextu FPCR_MODE(%a6){#2:#2},%d1 |get rmode
+ tstw LOCAL_EX(%a0) |check sign
+ blts cu_dmdn
+ cmpib #3,%d1 |check for rp
+ bne cu_dpd |load double pos zero
+ bra cu_dpdr |load double pos zero w/lsb
+cu_dmdn:
+ cmpib #2,%d1 |check for rm
+ bne cu_dnd |load double neg zero
+ bra cu_dndr |load double neg zero w/lsb
+|
+| The move is fsmove or round precision is single. Result is zero.
+| Check for rp or rm and set lsb accordingly.
+|
+cu_dmrs:
+ bfextu FPCR_MODE(%a6){#2:#2},%d1 |get rmode
+ tstw LOCAL_EX(%a0) |check sign
+ blts cu_dmsn
+ cmpib #3,%d1 |check for rp
+ bne cu_spd |load single pos zero
+ bra cu_spdr |load single pos zero w/lsb
+cu_dmsn:
+ cmpib #2,%d1 |check for rm
+ bne cu_snd |load single neg zero
+ bra cu_sndr |load single neg zero w/lsb
+|
+| The precision is extended, so the result in etemp is correct.
+| Simply set unfl (not inex2 or aunfl) and write the result to
+| the correct fp register.
+cu_wrexd:
+ orl #unfl_mask,USER_FPSR(%a6)
+ tstw LOCAL_EX(%a0)
+ beq wr_etemp
+ orl #neg_mask,USER_FPSR(%a6)
+ bra wr_etemp
+|
+| These routines write +/- zero in double format. The routines
+| cu_dpdr and cu_dndr set the double lsb.
+|
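+| ($3c01 is the minimum normal double exponent in biased extended
+| form (see dp_bnds), and $800 is the lsb position of a 53-bit
+| double mantissa, as in cu_nincd above.)
+|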
+cu_dpd:
+ movel #0x3c010000,LOCAL_EX(%a0) |force pos double zero
+ clrl LOCAL_HI(%a0)
+ clrl LOCAL_LO(%a0)
+ orl #z_mask,USER_FPSR(%a6)
+ orl #unfinx_mask,USER_FPSR(%a6)
+ bra wr_etemp
+cu_dpdr:
+ movel #0x3c010000,LOCAL_EX(%a0) |force pos double zero
+ clrl LOCAL_HI(%a0)
+ movel #0x800,LOCAL_LO(%a0) |with lsb set
+ orl #unfinx_mask,USER_FPSR(%a6)
+ bra wr_etemp
+cu_dnd:
+ movel #0xbc010000,LOCAL_EX(%a0) |force neg double zero
+ clrl LOCAL_HI(%a0)
+ clrl LOCAL_LO(%a0)
+ orl #z_mask,USER_FPSR(%a6)
+ orl #neg_mask,USER_FPSR(%a6)
+ orl #unfinx_mask,USER_FPSR(%a6)
+ bra wr_etemp
+cu_dndr:
+ movel #0xbc010000,LOCAL_EX(%a0) |force neg double zero
+ clrl LOCAL_HI(%a0)
+ movel #0x800,LOCAL_LO(%a0) |with lsb set
+ orl #neg_mask,USER_FPSR(%a6)
+ orl #unfinx_mask,USER_FPSR(%a6)
+ bra wr_etemp
+|
+| These routines write +/- zero in single format. The routines
+| cu_spdr and cu_sndr set the single lsb.
+|
+cu_spd:
+ movel #0x3f810000,LOCAL_EX(%a0) |force pos single zero
+ clrl LOCAL_HI(%a0)
+ clrl LOCAL_LO(%a0)
+ orl #z_mask,USER_FPSR(%a6)
+ orl #unfinx_mask,USER_FPSR(%a6)
+ bra wr_etemp
+cu_spdr:
+ movel #0x3f810000,LOCAL_EX(%a0) |force pos single zero
+ movel #0x100,LOCAL_HI(%a0) |with lsb set
+ clrl LOCAL_LO(%a0)
+ orl #unfinx_mask,USER_FPSR(%a6)
+ bra wr_etemp
+cu_snd:
+ movel #0xbf810000,LOCAL_EX(%a0) |force neg single zero
+ clrl LOCAL_HI(%a0)
+ clrl LOCAL_LO(%a0)
+ orl #z_mask,USER_FPSR(%a6)
+ orl #neg_mask,USER_FPSR(%a6)
+ orl #unfinx_mask,USER_FPSR(%a6)
+ bra wr_etemp
+cu_sndr:
+ movel #0xbf810000,LOCAL_EX(%a0) |force neg single zero
+ movel #0x100,LOCAL_HI(%a0) |with lsb set
+ clrl LOCAL_LO(%a0)
+ orl #neg_mask,USER_FPSR(%a6)
+ orl #unfinx_mask,USER_FPSR(%a6)
+ bra wr_etemp
+
+|
+| This code checks for 16-bit overflow conditions on dyadic
+| operations which are not restorable into the floating-point
+| unit and must be completed in software. Basically, this
+| condition exists with a very large norm and a denorm. One
+| of the operands must be denormalized to enter this code.
+|
+| Flags used:
+| DY_MO_FLG contains 0 for monadic op, $ff for dyadic
+| DNRM_FLG contains $00 for neither op denormalized
+| $0f for the destination op denormalized
+| $f0 for the source op denormalized
+| $ff for both ops denormalized
+|
+| The wrap-around condition occurs for add, sub, div, and cmp
+| when
+|
+| abs(dest_exp - src_exp) >= $8000
+|
+| and for mul when
+|
+| (dest_exp + src_exp) < $0
+|
+| We must process the operation here if this case is true.
+|
+| The rts following the frcfpn routine is the exit from res_func
+| for this condition. The restore flag (RES_FLG) is left clear.
+| No frestore is done unless an exception is to be reported.
+|
+| For fadd:
+| if(sign_of(dest) != sign_of(src))
+| replace exponent of src with $3fff (keep sign)
+| use fpu to perform dest+new_src (user's rmode and X)
+| clr sticky
+| else
+| set sticky
+| call round with user's precision and mode
+| move result to fpn and wbtemp
+|
+| For fsub:
+| if(sign_of(dest) == sign_of(src))
+| replace exponent of src with $3fff (keep sign)
+| use fpu to perform dest+new_src (user's rmode and X)
+| clr sticky
+| else
+| set sticky
+| call round with user's precision and mode
+| move result to fpn and wbtemp
+|
+| For fdiv/fsgldiv:
+| if(both operands are denorm)
+| restore_to_fpu;
+| if(dest is norm)
+| force_ovf;
+| else(dest is denorm)
+| force_unf:
+|
+| For fcmp:
+| if(dest is norm)
+| N = sign_of(dest);
+| else(dest is denorm)
+| N = sign_of(src);
+|
+| For fmul:
+| if(both operands are denorm)
+| force_unf;
+| if((dest_exp + src_exp) < 0)
+| force_unf:
+| else
+| restore_to_fpu;
+|
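+| For example, an extended denorm whose mantissa has only its lsb set
+| normalizes to an exponent of -$3f (biased exponent 0 less a 63-bit
+| shift), while a norm may have a biased exponent as large as $7ffe;
+| the difference, $803d, is >= $8000 and would wrap in the 16-bit
+| exponent field if the pair were simply restored to the 040.
+|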
+| local equates:
+ .set addcode,0x22
+ .set subcode,0x28
+ .set mulcode,0x23
+ .set divcode,0x20
+ .set cmpcode,0x38
+ck_wrap:
+ tstb DY_MO_FLG(%a6) |check for fsqrt
+ beq fix_stk |if zero, it is fsqrt
+ movew CMDREG1B(%a6),%d0
+ andiw #0x3b,%d0 |strip to command bits
+ cmpiw #addcode,%d0
+ beq wrap_add
+ cmpiw #subcode,%d0
+ beq wrap_sub
+ cmpiw #mulcode,%d0
+ beq wrap_mul
+ cmpiw #cmpcode,%d0
+ beq wrap_cmp
+|
+| Inst is fdiv.
+|
+wrap_div:
+ cmpb #0xff,DNRM_FLG(%a6) |if both ops denorm,
+ beq fix_stk |restore to fpu
+|
+| One of the ops is denormalized. Test for wrap condition
+| and force the result.
+|
+ cmpb #0x0f,DNRM_FLG(%a6) |check for dest denorm
+ bnes div_srcd
+div_destd:
+ bsrl ckinf_ns
+ bne fix_stk
+ bfextu ETEMP_EX(%a6){#1:#15},%d0 |get src exp (always pos)
+ bfexts FPTEMP_EX(%a6){#1:#15},%d1 |get dest exp (always neg)
+ subl %d1,%d0 |subtract dest from src
+ cmpl #0x7fff,%d0
+ blt fix_stk |if less, not wrap case
+ clrb WBTEMP_SGN(%a6)
+ movew ETEMP_EX(%a6),%d0 |find the sign of the result
+ movew FPTEMP_EX(%a6),%d1
+ eorw %d1,%d0
+ andiw #0x8000,%d0
+ beq force_unf
+ st WBTEMP_SGN(%a6)
+ bra force_unf
+
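+|
+| ckinf_ns/ckinf_nd return d0 = 0 (Z set) when the checked operand
+| is tagged norm or denorm, so the wrap test can proceed, and
+| d0 = -1 (Z clear) for zero, inf, or nan, in which case the
+| caller's bne sends the frame back through fix_stk.
+|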
+ckinf_ns:
+ moveb STAG(%a6),%d0 |check source tag for inf or nan
+ bra ck_in_com
+ckinf_nd:
+ moveb DTAG(%a6),%d0 |check destination tag for inf or nan
+ck_in_com:
+ andib #0x60,%d0 |isolate tag bits
+ cmpb #0x40,%d0 |is it inf?
+ beq nan_or_inf |not wrap case
+ cmpb #0x60,%d0 |is it nan?
+ beq nan_or_inf |yes, not wrap case
+ cmpb #0x20,%d0 |is it a zero?
+ beq nan_or_inf |yes
+ clrl %d0
+ rts |it is a norm,
+| ;check wrap case
+nan_or_inf:
+ moveql #-1,%d0
+ rts
+
+
+
+div_srcd:
+ bsrl ckinf_nd
+ bne fix_stk
+ bfextu FPTEMP_EX(%a6){#1:#15},%d0 |get dest exp (always pos)
+ bfexts ETEMP_EX(%a6){#1:#15},%d1 |get src exp (always neg)
+ subl %d1,%d0 |subtract src from dest
+ cmpl #0x8000,%d0
+ blt fix_stk |if less, not wrap case
+ clrb WBTEMP_SGN(%a6)
+ movew ETEMP_EX(%a6),%d0 |find the sign of the result
+ movew FPTEMP_EX(%a6),%d1
+ eorw %d1,%d0
+ andiw #0x8000,%d0
+ beqs force_ovf
+ st WBTEMP_SGN(%a6)
+|
+| This code handles the case of the instruction resulting in
+| an overflow condition.
+|
+force_ovf:
+ bclrb #E1,E_BYTE(%a6)
+ orl #ovfl_inx_mask,USER_FPSR(%a6)
+ clrw NMNEXC(%a6)
+ leal WBTEMP(%a6),%a0 |point a0 to memory location
+ movew CMDREG1B(%a6),%d0
+ btstl #6,%d0 |test for forced precision
+ beqs frcovf_fpcr
+ btstl #2,%d0 |check for double
+ bnes frcovf_dbl
+ movel #0x1,%d0 |inst is forced single
+ bras frcovf_rnd
+frcovf_dbl:
+ movel #0x2,%d0 |inst is forced double
+ bras frcovf_rnd
+frcovf_fpcr:
+ bfextu FPCR_MODE(%a6){#0:#2},%d0 |inst not forced - use fpcr prec
+frcovf_rnd:
+
+| The 881/882 does not set inex2 for the following case, so the
+| line is commented out to be compatible with 881/882
+| tst.b %d0
+| beq.b frcovf_x
+| or.l #inex2_mask,USER_FPSR(%a6) ;if prec is s or d, set inex2
+
+|frcovf_x:
+ bsrl ovf_res |get correct result based on
+| ;round precision/mode. This
+| ;sets FPSR_CC correctly
+| ;returns in external format
+ bfclr WBTEMP_SGN(%a6){#0:#8}
+ beq frcfpn
+ bsetb #sign_bit,WBTEMP_EX(%a6)
+ bra frcfpn
+|
+| Inst is fadd.
+|
+wrap_add:
+ cmpb #0xff,DNRM_FLG(%a6) |if both ops denorm,
+ beq fix_stk |restore to fpu
+|
+| One of the ops is denormalized. Test for wrap condition
+| and complete the instruction.
+|
+ cmpb #0x0f,DNRM_FLG(%a6) |check for dest denorm
+ bnes add_srcd
+add_destd:
+ bsrl ckinf_ns
+ bne fix_stk
+ bfextu ETEMP_EX(%a6){#1:#15},%d0 |get src exp (always pos)
+ bfexts FPTEMP_EX(%a6){#1:#15},%d1 |get dest exp (always neg)
+ subl %d1,%d0 |subtract dest from src
+ cmpl #0x8000,%d0
+ blt fix_stk |if less, not wrap case
+ bra add_wrap
+add_srcd:
+ bsrl ckinf_nd
+ bne fix_stk
+ bfextu FPTEMP_EX(%a6){#1:#15},%d0 |get dest exp (always pos)
+ bfexts ETEMP_EX(%a6){#1:#15},%d1 |get src exp (always neg)
+ subl %d1,%d0 |subtract src from dest
+ cmpl #0x8000,%d0
+ blt fix_stk |if less, not wrap case
+|
+| Check the signs of the operands. If they are unlike, the fpu
+| can be used to add the norm and 1.0 with the sign of the
+| denorm and it will correctly generate the result in extended
+| precision. We can then call round with no sticky and the result
+| will be correct for the user's rounding mode and precision. If
+| the signs are the same, we call round with the sticky bit set
+| and the result will be correct for the user's rounding mode and
+| precision.
+|
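+| (In the wrap case the norm's biased exponent is at least
+| $8000 - $3f, so an operand with exponent $3fff still lies far more
+| than 64 bits below it, just as the true denorm does; it can affect
+| only the guard/round/sticky decision, and the fpu therefore
+| produces the same extended-precision intermediate, and the same
+| inexact status, as the true operands would.)
+|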
+add_wrap:
+ movew ETEMP_EX(%a6),%d0
+ movew FPTEMP_EX(%a6),%d1
+ eorw %d1,%d0
+ andiw #0x8000,%d0
+ beq add_same
+|
+| The signs are unlike.
+|
+ cmpb #0x0f,DNRM_FLG(%a6) |is dest the denorm?
+ bnes add_u_srcd
+ movew FPTEMP_EX(%a6),%d0
+ andiw #0x8000,%d0
+ orw #0x3fff,%d0 |force the exponent to +/- 1
+ movew %d0,FPTEMP_EX(%a6) |in the denorm
+ movel USER_FPCR(%a6),%d0
+ andil #0x30,%d0
+ fmovel %d0,%fpcr |set up users rmode and X
+ fmovex ETEMP(%a6),%fp0
+ faddx FPTEMP(%a6),%fp0
+ leal WBTEMP(%a6),%a0 |point a0 to wbtemp in frame
+ fmovel %fpsr,%d1
+ orl %d1,USER_FPSR(%a6) |capture cc's and inex from fadd
+ fmovex %fp0,WBTEMP(%a6) |write result to memory
+ lsrl #4,%d0 |put rmode in lower 2 bits
+ movel USER_FPCR(%a6),%d1
+ andil #0xc0,%d1
+ lsrl #6,%d1 |put precision in upper word
+ swap %d1
+ orl %d0,%d1 |set up for round call
+ clrl %d0 |force sticky to zero
+ bclrb #sign_bit,WBTEMP_EX(%a6)
+ sne WBTEMP_SGN(%a6)
+ bsrl round |round result to users rmode & prec
+ bfclr WBTEMP_SGN(%a6){#0:#8} |convert back to IEEE ext format
+ beq frcfpnr
+ bsetb #sign_bit,WBTEMP_EX(%a6)
+ bra frcfpnr
+add_u_srcd:
+ movew ETEMP_EX(%a6),%d0
+ andiw #0x8000,%d0
+ orw #0x3fff,%d0 |force the exponent to +/- 1
+ movew %d0,ETEMP_EX(%a6) |in the denorm
+ movel USER_FPCR(%a6),%d0
+ andil #0x30,%d0
+ fmovel %d0,%fpcr |set up users rmode and X
+ fmovex ETEMP(%a6),%fp0
+ faddx FPTEMP(%a6),%fp0
+ fmovel %fpsr,%d1
+ orl %d1,USER_FPSR(%a6) |capture cc's and inex from fadd
+ leal WBTEMP(%a6),%a0 |point a0 to wbtemp in frame
+ fmovex %fp0,WBTEMP(%a6) |write result to memory
+ lsrl #4,%d0 |put rmode in lower 2 bits
+ movel USER_FPCR(%a6),%d1
+ andil #0xc0,%d1
+ lsrl #6,%d1 |put precision in upper word
+ swap %d1
+ orl %d0,%d1 |set up for round call
+ clrl %d0 |force sticky to zero
+ bclrb #sign_bit,WBTEMP_EX(%a6)
+ sne WBTEMP_SGN(%a6) |use internal format for round
+ bsrl round |round result to users rmode & prec
+ bfclr WBTEMP_SGN(%a6){#0:#8} |convert back to IEEE ext format
+ beq frcfpnr
+ bsetb #sign_bit,WBTEMP_EX(%a6)
+ bra frcfpnr
+|
+| Signs are alike:
+|
+add_same:
+ cmpb #0x0f,DNRM_FLG(%a6) |is dest the denorm?
+ bnes add_s_srcd
+add_s_destd:
+ leal ETEMP(%a6),%a0
+ movel USER_FPCR(%a6),%d0
+ andil #0x30,%d0
+ lsrl #4,%d0 |put rmode in lower 2 bits
+ movel USER_FPCR(%a6),%d1
+ andil #0xc0,%d1
+ lsrl #6,%d1 |put precision in upper word
+ swap %d1
+ orl %d0,%d1 |set up for round call
+ movel #0x20000000,%d0 |set sticky for round
+ bclrb #sign_bit,ETEMP_EX(%a6)
+ sne ETEMP_SGN(%a6)
+ bsrl round |round result to users rmode & prec
+ bfclr ETEMP_SGN(%a6){#0:#8} |convert back to IEEE ext format
+ beqs add_s_dclr
+ bsetb #sign_bit,ETEMP_EX(%a6)
+add_s_dclr:
+ leal WBTEMP(%a6),%a0
+ movel ETEMP(%a6),(%a0) |write result to wbtemp
+ movel ETEMP_HI(%a6),4(%a0)
+ movel ETEMP_LO(%a6),8(%a0)
+ tstw ETEMP_EX(%a6)
+ bgt add_ckovf
+ orl #neg_mask,USER_FPSR(%a6)
+ bra add_ckovf
+add_s_srcd:
+ leal FPTEMP(%a6),%a0
+ movel USER_FPCR(%a6),%d0
+ andil #0x30,%d0
+ lsrl #4,%d0 |put rmode in lower 2 bits
+ movel USER_FPCR(%a6),%d1
+ andil #0xc0,%d1
+ lsrl #6,%d1 |put precision in upper word
+ swap %d1
+ orl %d0,%d1 |set up for round call
+ movel #0x20000000,%d0 |set sticky for round
+ bclrb #sign_bit,FPTEMP_EX(%a6)
+ sne FPTEMP_SGN(%a6)
+ bsrl round |round result to users rmode & prec
+ bfclr FPTEMP_SGN(%a6){#0:#8} |convert back to IEEE ext format
+ beqs add_s_sclr
+ bsetb #sign_bit,FPTEMP_EX(%a6)
+add_s_sclr:
+ leal WBTEMP(%a6),%a0
+ movel FPTEMP(%a6),(%a0) |write result to wbtemp
+ movel FPTEMP_HI(%a6),4(%a0)
+ movel FPTEMP_LO(%a6),8(%a0)
+ tstw FPTEMP_EX(%a6)
+ bgt add_ckovf
+ orl #neg_mask,USER_FPSR(%a6)
+add_ckovf:
+ movew WBTEMP_EX(%a6),%d0
+ andiw #0x7fff,%d0
+ cmpiw #0x7fff,%d0
+ bne frcfpnr
+|
+| The result has overflowed to $7fff exponent. Set I, ovfl,
+| and aovfl, and clr the mantissa (incorrectly set by the
+| round routine.)
+|
+ orl #inf_mask+ovfl_inx_mask,USER_FPSR(%a6)
+ clrl 4(%a0)
+ bra frcfpnr
+|
+| Inst is fsub.
+|
+wrap_sub:
+ cmpb #0xff,DNRM_FLG(%a6) |if both ops denorm,
+ beq fix_stk |restore to fpu
+|
+| One of the ops is denormalized. Test for wrap condition
+| and complete the instruction.
+|
+ cmpb #0x0f,DNRM_FLG(%a6) |check for dest denorm
+ bnes sub_srcd
+sub_destd:
+ bsrl ckinf_ns
+ bne fix_stk
+ bfextu ETEMP_EX(%a6){#1:#15},%d0 |get src exp (always pos)
+ bfexts FPTEMP_EX(%a6){#1:#15},%d1 |get dest exp (always neg)
+ subl %d1,%d0 |subtract dest from src
+ cmpl #0x8000,%d0
+ blt fix_stk |if less, not wrap case
+ bra sub_wrap
+sub_srcd:
+ bsrl ckinf_nd
+ bne fix_stk
+ bfextu FPTEMP_EX(%a6){#1:#15},%d0 |get dest exp (always pos)
+ bfexts ETEMP_EX(%a6){#1:#15},%d1 |get src exp (always neg)
+ subl %d1,%d0 |subtract src from dest
+ cmpl #0x8000,%d0
+ blt fix_stk |if less, not wrap case
+|
+| Check the signs of the operands. If they are alike, the fpu
+| can be used to subtract from the norm 1.0 with the sign of the
+| denorm and it will correctly generate the result in extended
+| precision. We can then call round with no sticky and the result
+| will be correct for the user's rounding mode and precision. If
+| the signs are unlike, we call round with the sticky bit set
+| and the result will be correct for the user's rounding mode and
+| precision.
+|
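+| (The same reasoning as in the fadd case applies, with the roles of
+| like and unlike signs exchanged; see the note before add_wrap.)
+|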
+sub_wrap:
+ movew ETEMP_EX(%a6),%d0
+ movew FPTEMP_EX(%a6),%d1
+ eorw %d1,%d0
+ andiw #0x8000,%d0
+ bne sub_diff
+|
+| The signs are alike.
+|
+ cmpb #0x0f,DNRM_FLG(%a6) |is dest the denorm?
+ bnes sub_u_srcd
+ movew FPTEMP_EX(%a6),%d0
+ andiw #0x8000,%d0
+ orw #0x3fff,%d0 |force the exponent to +/- 1
+ movew %d0,FPTEMP_EX(%a6) |in the denorm
+ movel USER_FPCR(%a6),%d0
+ andil #0x30,%d0
+ fmovel %d0,%fpcr |set up users rmode and X
+ fmovex FPTEMP(%a6),%fp0
+ fsubx ETEMP(%a6),%fp0
+ fmovel %fpsr,%d1
+ orl %d1,USER_FPSR(%a6) |capture cc's and inex from fsub
+ leal WBTEMP(%a6),%a0 |point a0 to wbtemp in frame
+ fmovex %fp0,WBTEMP(%a6) |write result to memory
+ lsrl #4,%d0 |put rmode in lower 2 bits
+ movel USER_FPCR(%a6),%d1
+ andil #0xc0,%d1
+ lsrl #6,%d1 |put precision in upper word
+ swap %d1
+ orl %d0,%d1 |set up for round call
+ clrl %d0 |force sticky to zero
+ bclrb #sign_bit,WBTEMP_EX(%a6)
+ sne WBTEMP_SGN(%a6)
+ bsrl round |round result to users rmode & prec
+ bfclr WBTEMP_SGN(%a6){#0:#8} |convert back to IEEE ext format
+ beq frcfpnr
+ bsetb #sign_bit,WBTEMP_EX(%a6)
+ bra frcfpnr
+sub_u_srcd:
+ movew ETEMP_EX(%a6),%d0
+ andiw #0x8000,%d0
+ orw #0x3fff,%d0 |force the exponent to +/- 1
+ movew %d0,ETEMP_EX(%a6) |in the denorm
+ movel USER_FPCR(%a6),%d0
+ andil #0x30,%d0
+ fmovel %d0,%fpcr |set up users rmode and X
+ fmovex FPTEMP(%a6),%fp0
+ fsubx ETEMP(%a6),%fp0
+ fmovel %fpsr,%d1
+ orl %d1,USER_FPSR(%a6) |capture cc's and inex from fsub
+ leal WBTEMP(%a6),%a0 |point a0 to wbtemp in frame
+ fmovex %fp0,WBTEMP(%a6) |write result to memory
+ lsrl #4,%d0 |put rmode in lower 2 bits
+ movel USER_FPCR(%a6),%d1
+ andil #0xc0,%d1
+ lsrl #6,%d1 |put precision in upper word
+ swap %d1
+ orl %d0,%d1 |set up for round call
+ clrl %d0 |force sticky to zero
+ bclrb #sign_bit,WBTEMP_EX(%a6)
+ sne WBTEMP_SGN(%a6)
+ bsrl round |round result to users rmode & prec
+ bfclr WBTEMP_SGN(%a6){#0:#8} |convert back to IEEE ext format
+ beq frcfpnr
+ bsetb #sign_bit,WBTEMP_EX(%a6)
+ bra frcfpnr
+|
+| Signs are unlike:
+|
+sub_diff:
+ cmpb #0x0f,DNRM_FLG(%a6) |is dest the denorm?
+ bnes sub_s_srcd
+sub_s_destd:
+ leal ETEMP(%a6),%a0
+ movel USER_FPCR(%a6),%d0
+ andil #0x30,%d0
+ lsrl #4,%d0 |put rmode in lower 2 bits
+ movel USER_FPCR(%a6),%d1
+ andil #0xc0,%d1
+ lsrl #6,%d1 |put precision in upper word
+ swap %d1
+ orl %d0,%d1 |set up for round call
+ movel #0x20000000,%d0 |set sticky for round
+|
+| Since the dest is the denorm, the sign is the opposite of the
+| norm sign.
+|
+ eoriw #0x8000,ETEMP_EX(%a6) |flip sign on result
+ tstw ETEMP_EX(%a6)
+ bgts sub_s_dwr
+ orl #neg_mask,USER_FPSR(%a6)
+sub_s_dwr:
+ bclrb #sign_bit,ETEMP_EX(%a6)
+ sne ETEMP_SGN(%a6)
+ bsrl round |round result to users rmode & prec
+ bfclr ETEMP_SGN(%a6){#0:#8} |convert back to IEEE ext format
+ beqs sub_s_dclr
+ bsetb #sign_bit,ETEMP_EX(%a6)
+sub_s_dclr:
+ leal WBTEMP(%a6),%a0
+ movel ETEMP(%a6),(%a0) |write result to wbtemp
+ movel ETEMP_HI(%a6),4(%a0)
+ movel ETEMP_LO(%a6),8(%a0)
+ bra sub_ckovf
+sub_s_srcd:
+ leal FPTEMP(%a6),%a0
+ movel USER_FPCR(%a6),%d0
+ andil #0x30,%d0
+ lsrl #4,%d0 |put rmode in lower 2 bits
+ movel USER_FPCR(%a6),%d1
+ andil #0xc0,%d1
+ lsrl #6,%d1 |put precision in upper word
+ swap %d1
+ orl %d0,%d1 |set up for round call
+ movel #0x20000000,%d0 |set sticky for round
+ bclrb #sign_bit,FPTEMP_EX(%a6)
+ sne FPTEMP_SGN(%a6)
+ bsrl round |round result to users rmode & prec
+ bfclr FPTEMP_SGN(%a6){#0:#8} |convert back to IEEE ext format
+ beqs sub_s_sclr
+ bsetb #sign_bit,FPTEMP_EX(%a6)
+sub_s_sclr:
+ leal WBTEMP(%a6),%a0
+ movel FPTEMP(%a6),(%a0) |write result to wbtemp
+ movel FPTEMP_HI(%a6),4(%a0)
+ movel FPTEMP_LO(%a6),8(%a0)
+ tstw FPTEMP_EX(%a6)
+ bgt sub_ckovf
+ orl #neg_mask,USER_FPSR(%a6)
+sub_ckovf:
+ movew WBTEMP_EX(%a6),%d0
+ andiw #0x7fff,%d0
+ cmpiw #0x7fff,%d0
+ bne frcfpnr
+|
+| The result has overflowed to $7fff exponent. Set I, ovfl,
+| and aovfl, and clr the mantissa (incorrectly set by the
+| round routine.)
+|
+ orl #inf_mask+ovfl_inx_mask,USER_FPSR(%a6)
+ clrl 4(%a0)
+ bra frcfpnr
+|
+| Inst is fcmp.
+|
+wrap_cmp:
+ cmpb #0xff,DNRM_FLG(%a6) |if both ops denorm,
+ beq fix_stk |restore to fpu
+|
+| One of the ops is denormalized. Test for wrap condition
+| and complete the instruction.
+|
+ cmpb #0x0f,DNRM_FLG(%a6) |check for dest denorm
+ bnes cmp_srcd
+cmp_destd:
+ bsrl ckinf_ns
+ bne fix_stk
+ bfextu ETEMP_EX(%a6){#1:#15},%d0 |get src exp (always pos)
+ bfexts FPTEMP_EX(%a6){#1:#15},%d1 |get dest exp (always neg)
+ subl %d1,%d0 |subtract dest from src
+ cmpl #0x8000,%d0
+ blt fix_stk |if less, not wrap case
+ tstw ETEMP_EX(%a6) |set N to ~sign_of(src)
+ bge cmp_setn
+ rts
+cmp_srcd:
+ bsrl ckinf_nd
+ bne fix_stk
+ bfextu FPTEMP_EX(%a6){#1:#15},%d0 |get dest exp (always pos)
+ bfexts ETEMP_EX(%a6){#1:#15},%d1 |get src exp (always neg)
+ subl %d1,%d0 |subtract src from dest
+ cmpl #0x8000,%d0
+ blt fix_stk |if less, not wrap case
+ tstw FPTEMP_EX(%a6) |set N to sign_of(dest)
+ blt cmp_setn
+ rts
+cmp_setn:
+ orl #neg_mask,USER_FPSR(%a6)
+ rts
+
+|
+| Inst is fmul.
+|
+wrap_mul:
+ cmpb #0xff,DNRM_FLG(%a6) |if both ops denorm,
+ beq force_unf |force an underflow (really!)
+|
+| One of the ops is denormalized. Test for wrap condition
+| and complete the instruction.
+|
+ cmpb #0x0f,DNRM_FLG(%a6) |check for dest denorm