| author | Craig Topper <craig.topper@gmail.com> | 2012-06-01 06:07:48 +0000 |
|---|---|---|
| committer | Craig Topper <craig.topper@gmail.com> | 2012-06-01 06:07:48 +0000 |
| commit | 3a8172ad8d991d71620fc0075a4cc6afc168756f | |
| tree | 974b6bb80eacf19b4d0046edf5b2157d50933066 /lib/Target/X86/X86InstrFMA.td | |
| parent | 78fc72d0f1be84ce53d66596ef4c4dc93cd9b0b0 | |
Remove fadd(fmul) patterns for FMA3. This needs to be implemented by paying attention to FP_CONTRACT and matching @llvm.fma, which is not available yet. This will allow us to enable intrinsic use at least, though.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@157804 91177308-0d34-0410-b5e6-96231b3b80d8
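For context on the plan the message describes: once @llvm.fma is lowered to the target-independent fma selection-DAG node, FMA3 selection can match that node instead of separate fadd/fmul nodes, which are only legal to fuse under FP_CONTRACT. Below is a minimal TableGen sketch of such a pattern, reusing the VFMADDPDr231rY instruction defined in X86InstrFMA.td; it illustrates the stated direction and is not code from this commit.

```tablegen
// Sketch only (not part of this commit): match the fused fma node, which
// computes $src2 * $src3 + $src1 with a single rounding. Unlike the removed
// fadd(fmul) patterns, this is safe to select unconditionally, because the
// node is only formed for @llvm.fma or FP_CONTRACT-permitted expressions.
// FMA231: $src1 = $src2 * $src3 + $src1
def : Pat<(v4f64 (fma VR256:$src2, VR256:$src3, VR256:$src1)),
          (VFMADDPDr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
```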
Diffstat (limited to 'lib/Target/X86/X86InstrFMA.td')
-rw-r--r-- | lib/Target/X86/X86InstrFMA.td | 212 |
1 file changed, 0 insertions, 212 deletions
```diff
diff --git a/lib/Target/X86/X86InstrFMA.td b/lib/Target/X86/X86InstrFMA.td
index 1b7f0949df..3dd642f2cf 100644
--- a/lib/Target/X86/X86InstrFMA.td
+++ b/lib/Target/X86/X86InstrFMA.td
@@ -113,162 +113,6 @@ let ExeDomain = SSEPackedDouble in {
                                memopv4f64, int_x86_fma4_vfnmsub_pd,
                                int_x86_fma4_vfnmsub_pd_256>, VEX_W;
 }
-let Predicates = [HasFMA3], AddedComplexity = 20 in {
-//------------
-// FP double precision ADD - 256
-//------------
-
-// FMA231: src1 = src2*src3 + src1
-def : Pat<(v4f64 (fadd (fmul VR256:$src2, (memopv4f64 addr:$src3)), VR256:$src1)),
-          (VFMADDPDr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
-
-// FMA231: src1 = src2*src3 + src1
-def : Pat<(v4f64 (fadd (fmul VR256:$src2, VR256:$src3), VR256:$src1)),
-          (VFMADDPDr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
-
-
-//------------
-// FP double precision ADD - 128
-//------------
-
-
-// FMA231: src1 = src2*src3 + src1
-def : Pat<(v2f64 (fadd (fmul VR128:$src2, (memopv2f64 addr:$src3)), VR128:$src1)),
-          (VFMADDPDr231m VR128:$src1, VR128:$src2, addr:$src3)>;
-
-// FMA231: src1 = src2*src3 + src1
-def : Pat<(v2f64 (fadd (fmul VR128:$src2, VR128:$src3), VR128:$src1)),
-          (VFMADDPDr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
-
-//------------
-// FP double precision SUB - 256
-//------------
-// FMA231: src1 = src2*src3 - src1
-def : Pat<(v4f64 (fsub (fmul VR256:$src2, (memopv4f64 addr:$src3)), VR256:$src1)),
-          (VFMSUBPDr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
-
-// FMA231: src1 = src2*src3 - src1
-def : Pat<(v4f64 (fsub (fmul VR256:$src2, VR256:$src3), VR256:$src1)),
-          (VFMSUBPDr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
-
-
-//------------
-// FP double precision SUB - 128
-//------------
-
-// FMA231: src1 = src2*src3 - src1
-def : Pat<(v2f64 (fsub (fmul VR128:$src2, (memopv2f64 addr:$src3)), VR128:$src1)),
-          (VFMSUBPDr231m VR128:$src1, VR128:$src2, addr:$src3)>;
-
-// FMA231: src1 = src2*src3 - src1
-def : Pat<(v2f64 (fsub (fmul VR128:$src2, VR128:$src3), VR128:$src1)),
-          (VFMSUBPDr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
-
-//------------
-// FP double precision FNMADD - 256
-//------------
-// FMA231: src1 = - src2*src3 + src1
-def : Pat<(v4f64 (fsub VR256:$src1, (fmul VR256:$src2, (memopv4f64 addr:$src3)))),
-          (VFNMADDPDr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
-
-// FMA231: src1 = - src2*src3 + src1
-def : Pat<(v4f64 (fsub VR256:$src1, (fmul VR256:$src2, VR256:$src3))),
-          (VFNMADDPDr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
-
-//------------
-// FP double precision FNMADD - 128
-//------------
-
-// FMA231: src1 = - src2*src3 + src1
-def : Pat<(v2f64 (fsub VR128:$src1, (fmul VR128:$src2, (memopv2f64 addr:$src3)))),
-          (VFNMADDPDr231m VR128:$src1, VR128:$src2, addr:$src3)>;
-
-// FMA231: src1 = - src2*src3 + src1
-def : Pat<(v2f64 (fsub VR128:$src1, (fmul VR128:$src2, VR128:$src3))),
-          (VFNMADDPDr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
-
-//------------
-// FP single precision ADD - 256
-//------------
-
-// FMA231: src1 = src2*src3 + src1
-def : Pat<(v8f32 (fadd (fmul VR256:$src2, VR256:$src3), VR256:$src1)),
-          (VFMADDPSr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
-
-// FMA213 : src1 = src2*src1 + src3
-def : Pat<(v8f32 (fadd (fmul VR256:$src1, VR256:$src2), (memopv8f32 addr:$src3))),
-          (VFMADDPSr213mY VR256:$src1, VR256:$src2, addr:$src3)>;
-
-// FMA231: src1 = src2*src3 + src1
-def : Pat<(v8f32 (fadd (fmul (memopv8f32 addr:$src3), VR256:$src2), VR256:$src1)),
-          (VFMADDPSr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
-
-// FMA213: src1 = src2*src1 + src3
-def : Pat<(v8f32 (fadd (fmul VR256:$src2, VR256:$src1), VR256:$src3)),
-          (VFMADDPSr213rY VR256:$src1, VR256:$src2, VR256:$src3)>;
-
-//------------
-// FP single precision ADD - 128
-//------------
-
-// FMA231 : src1 = src2*src3 + src1
-def : Pat<(v4f32 (fadd (fmul VR128:$src2, (memopv4f32 addr:$src3)), VR128:$src1)),
-          (VFMADDPSr231m VR128:$src1, VR128:$src2, addr:$src3)>;
-
-// FMA231 : src1 = src2*src3 + src1
-def : Pat<(v4f32 (fadd (fmul VR128:$src2, VR128:$src3), VR128:$src1)),
-          (VFMADDPSr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
-
-//------------
-// FP single precision SUB - 256
-//------------
-// FMA231: src1 = src2*src3 - src1
-def : Pat<(v8f32 (fsub (fmul VR256:$src2, (memopv8f32 addr:$src3)), VR256:$src1)),
-          (VFMSUBPSr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
-
-// FMA231: src1 = src2*src3 - src1
-def : Pat<(v8f32 (fsub (fmul VR256:$src2, VR256:$src3), VR256:$src1)),
-          (VFMSUBPSr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
-
-//------------
-// FP single precision SUB - 128
-//------------
-// FMA231 : src1 = src2*src3 - src1
-def : Pat<(v4f32 (fsub (fmul VR128:$src2, (memopv4f32 addr:$src3)), VR128:$src1)),
-          (VFMSUBPSr231m VR128:$src1, VR128:$src2, addr:$src3)>;
-
-// FMA231 : src1 = src2*src3 - src1
-def : Pat<(v4f32 (fsub (fmul VR128:$src2, VR128:$src3), VR128:$src1)),
-          (VFMSUBPSr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
-
-//------------
-// FP single precision FNMADD - 256
-//------------
-// FMA231: src1 = - src2*src3 + src1
-def : Pat<(v8f32 (fsub VR256:$src1, (fmul VR256:$src2, (memopv8f32 addr:$src3)))),
-          (VFNMADDPSr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
-
-// FMA231: src1 = - src2*src3 + src1
-def : Pat<(v8f32 (fsub VR256:$src1, (fmul VR256:$src2, VR256:$src3))),
-          (VFNMADDPSr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
-
-//------------
-// FP single precision FNMADD - 128
-//------------
-
-// FMA231 : src1 = src2*src3 - src1
-def : Pat<(v4f32 (fsub VR128:$src1, (fmul VR128:$src2, (memopv4f32 addr:$src3)))),
-          (VFNMADDPSr231m VR128:$src1, VR128:$src2, addr:$src3)>;
-
-// FMA231 : src1 = src2*src3 - src1
-def : Pat<(v4f32 (fsub VR128:$src1, (fmul VR128:$src2, VR128:$src3))),
-          (VFNMADDPSr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
-
-} // HasFMA3
-
-//------------------------------
-// SCALAR
-//------------------------------
 
 let Constraints = "$src1 = $dst" in {
 multiclass fma3s_rm<bits<8> opc, string OpcodeStr, X86MemOperand x86memop,
@@ -328,62 +172,6 @@ defm VFNMSUBSD : fma3s_forms<0x9F, 0xAF, 0xBF, "vfnmsub", "sd",
                              f64mem, FR64, int_x86_fma4_vfnmsub_sd>, VEX_W, VEX_LIG;
 
-let Predicates = [HasFMA3], AddedComplexity = 20 in {
-
-//------------
-// FP scalar ADD
-//------------
-
-
-// FMADD231 : src1 = src2*src3 + src1
-def : Pat<(f32 (fadd (fmul FR32:$src2, FR32:$src3), FR32:$src1)),
-          (VFMADDSSr231r FR32:$src1, FR32:$src2, FR32:$src3)>;
-
-def : Pat<(f32 (fadd (fmul FR32:$src2, (loadf32 addr:$src3)), FR32:$src1)),
-          (VFMADDSSr231m FR32:$src1, FR32:$src2, addr:$src3)>;
-
-def : Pat<(f64 (fadd (fmul FR64:$src2, FR64:$src3), FR64:$src1)),
-          (VFMADDSDr231r FR64:$src1, FR64:$src2, FR64:$src3)>;
-
-def : Pat<(f64 (fadd (fmul FR64:$src2, (loadf64 addr:$src3)), FR64:$src1)),
-          (VFMADDSDr231m FR64:$src1, FR64:$src2, addr:$src3)>;
-
-
-
-//------------
-// FP scalar SUB src2*src3 - src1
-//------------
-
-def : Pat<(f32 (fsub (fmul FR32:$src2, FR32:$src3), FR32:$src1)),
-          (VFMSUBSSr231r FR32:$src1, FR32:$src2, FR32:$src3)>;
-
-def : Pat<(f32 (fsub (fmul FR32:$src2, (loadf32 addr:$src3)), FR32:$src1)),
-          (VFMSUBSSr231m FR32:$src1, FR32:$src2, addr:$src3)>;
-
-def : Pat<(f64 (fsub (fmul FR64:$src2, FR64:$src3), FR64:$src1)),
-          (VFMSUBSDr231r FR64:$src1, FR64:$src2, FR64:$src3)>;
-
-def : Pat<(f64 (fsub (fmul FR64:$src2, (loadf64 addr:$src3)), FR64:$src1)),
-          (VFMSUBSDr231m FR64:$src1, FR64:$src2, addr:$src3)>;
-
-//------------
-// FP scalar NADD src1 - src2*src3
-//------------
-
-def : Pat<(f32 (fsub FR32:$src1, (fmul FR32:$src2, FR32:$src3))),
-          (VFNMADDSSr231r FR32:$src1, FR32:$src2, FR32:$src3)>;
-
-def : Pat<(f32 (fsub FR32:$src1, (fmul FR32:$src2, (loadf32 addr:$src3)))),
-          (VFNMADDSSr231m FR32:$src1, FR32:$src2, addr:$src3)>;
-
-def : Pat<(f64 (fsub FR64:$src1, (fmul FR64:$src2, FR64:$src3))),
-          (VFNMADDSDr231r FR64:$src1, FR64:$src2, FR64:$src3)>;
-
-def : Pat<(f64 (fsub FR64:$src1, (fmul FR64:$src2, (loadf64 addr:$src3)))),
-          (VFNMADDSDr231m FR64:$src1, FR64:$src2, addr:$src3)>;
-
-} // HasFMA3
-
 //===----------------------------------------------------------------------===//
 // FMA4 - AMD 4 operand Fused Multiply-Add instructions
 //===----------------------------------------------------------------------===//
```
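The scalar multiclasses this change leaves intact still map the FMA intrinsics (e.g. int_x86_fma4_vfnmsub_sd above) directly to FMA3 instructions, which is the intrinsic use the commit message keeps enabled. The sketch below is a hypothetical example of the kind of pattern those multiclasses expand to, assembled from names visible in this diff; the register class, operand order, and choice of the 231 form are illustrative assumptions, not the multiclass's actual output.

```tablegen
// Hypothetical sketch: an explicit intrinsic call is matched as-is, so it
// still selects an FMA3 instruction after the fadd(fmul) patterns above
// are removed. vfmadd_ss(a, b, c) computes a*b + c, while the 231 form
// computes op1 = op2*op3 + op1, hence c ($src3) is passed first (assumed
// operand mapping, for illustration only).
def : Pat<(int_x86_fma4_vfmadd_ss FR32:$src1, FR32:$src2, FR32:$src3),
          (VFMADDSSr231r FR32:$src3, FR32:$src1, FR32:$src2)>;
```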