author | Arnold Schwaighofer <aschwaighofer@apple.com> | 2013-02-19 15:27:05 +0000 |
---|---|---|
committer | Arnold Schwaighofer <aschwaighofer@apple.com> | 2013-02-19 15:27:05 +0000 |
commit | 2e750c12e91ab09949ef1617ab3af14e1b6cd239 (patch) | |
tree | 57fb0f606bf5b772b6ad67016d2b51ad91dc417d | |
parent | e5839d0fc9999a3d53659354a3cceb838cb87711 (diff) |
ARM NEON: Merge an f32 bitcast of a v2i32 extractelt
A vectorized sitofp on doubles will get scalarized to a sequence of an
extract_element of <2 x i32>, a bitcast to f32, and a sitofp.
Due to the extract_element and the bitcast, we will unnecessarily generate
moves between scalar and vector registers.
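For example, IR like the following (a minimal sketch mirroring the vsitofp_double test added below) hits this path once the <2 x double> result is scalarized:

```llvm
; Each lane is converted individually: extract_element of the <2 x i32>,
; a bitcast of the extracted i32 to f32, then the scalar conversion.
define void @vsitofp_double(<2 x i32>* %loadaddr,
                            <2 x double>* %storeaddr) {
  %v0 = load <2 x i32>* %loadaddr
  %r = sitofp <2 x i32> %v0 to <2 x double>
  store <2 x double> %r, <2 x double>* %storeaddr
  ret void
}
```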
The patch fixes this by using a COPY_TO_REGCLASS and an EXTRACT_SUBREG to
extract the element from the vector instead.
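With the pattern below, the extracted lane is read directly from an S subregister of the source D register, so no vmov between core and vector registers is needed. A rough sketch of the expected code (register numbers are illustrative; the new test only checks the mnemonic sequence vldr / vcvt.f64.s32 / vcvt.f64.s32 / vst):

```asm
vldr          d0, [r0]          @ load the <2 x i32>; s0/s1 are its lanes
vcvt.f64.s32  d16, s0           @ convert lane 0 straight from the s-subregister
vcvt.f64.s32  d17, s1           @ convert lane 1
vst1.64       {d16, d17}, [r1]  @ store the <2 x double>
```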
radar://13191881
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@175520 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r-- | lib/Target/ARM/ARMInstrNEON.td | 6
-rw-r--r-- | test/CodeGen/ARM/neon_fpconv.ll | 25
2 files changed, 31 insertions, 0 deletions
diff --git a/lib/Target/ARM/ARMInstrNEON.td b/lib/Target/ARM/ARMInstrNEON.td
index 901ff64a86..9f68c22077 100644
--- a/lib/Target/ARM/ARMInstrNEON.td
+++ b/lib/Target/ARM/ARMInstrNEON.td
@@ -5745,6 +5745,12 @@
 def : Pat<(v2f64 (bitconvert (v8i16 QPR:$src))), (v2f64 QPR:$src)>;
 def : Pat<(v2f64 (bitconvert (v16i8 QPR:$src))), (v2f64 QPR:$src)>;
 def : Pat<(v2f64 (bitconvert (v4f32 QPR:$src))), (v2f64 QPR:$src)>;
+// Fold extracting an element out of a v2i32 into a vfp register.
+def : Pat<(f32 (bitconvert (i32 (extractelt (v2i32 DPR:$src), imm:$lane)))),
+          (f32 (EXTRACT_SUBREG
+                  (v2f32 (COPY_TO_REGCLASS (v2i32 DPR:$src), DPR)),
+                  (SSubReg_f32_reg imm:$lane)))>;
+
 // Vector lengthening move with load, matching extending loads.
 
 // extload, zextload and sextload for a standard lengthening load. Example:
diff --git a/test/CodeGen/ARM/neon_fpconv.ll b/test/CodeGen/ARM/neon_fpconv.ll
index 1948ad8471..149f4c7770 100644
--- a/test/CodeGen/ARM/neon_fpconv.ll
+++ b/test/CodeGen/ARM/neon_fpconv.ll
@@ -15,3 +15,28 @@ define <2 x double> @vextend(<2 x float> %a) {
   ret <2 x double> %ve
 }
 
+; We used to generate vmovs between scalar and vfp/neon registers.
+; CHECK: vsitofp_double
+define void @vsitofp_double(<2 x i32>* %loadaddr,
+                            <2 x double>* %storeaddr) {
+  %v0 = load <2 x i32>* %loadaddr
+; CHECK: vldr
+; CHECK-NEXT: vcvt.f64.s32
+; CHECK-NEXT: vcvt.f64.s32
+; CHECK-NEXT: vst
+  %r = sitofp <2 x i32> %v0 to <2 x double>
+  store <2 x double> %r, <2 x double>* %storeaddr
+  ret void
+}
+; CHECK: vuitofp_double
+define void @vuitofp_double(<2 x i32>* %loadaddr,
+                            <2 x double>* %storeaddr) {
+  %v0 = load <2 x i32>* %loadaddr
+; CHECK: vldr
+; CHECK-NEXT: vcvt.f64.u32
+; CHECK-NEXT: vcvt.f64.u32
+; CHECK-NEXT: vst
+  %r = uitofp <2 x i32> %v0 to <2 x double>
+  store <2 x double> %r, <2 x double>* %storeaddr
+  ret void
+}