author    Evan Cheng <evan.cheng@apple.com>    2012-12-12 00:42:09 +0000
committer Evan Cheng <evan.cheng@apple.com>    2012-12-12 00:42:09 +0000
commit    61f4dfe3693bf68b20748d82ac4dd9bf2f356699 (patch)
tree      47cd7aa5c257fa42db0fa94e4aa0c504592364c9 /lib
parent    d0a0d221da55f5e2d97909991d77e7ab91e75426 (diff)
Avoid using lossy load / stores for memcpy / memset expansion, e.g. f64
load / store on non-SSE2 x86 targets.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@169944 91177308-0d34-0410-b5e6-96231b3b80d8
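
The "lossy" case in the subject line is the x87 path: on pre-SSE2 x86 the only
64-bit scalar FP load / store is fldl / fstpl, and round-tripping arbitrary
bytes through the x87 stack can change them (loading a signaling NaN quiets
it, for example). Below is a small self-contained C++ sketch of that round
trip; the helper name and the bit pattern are illustrative only, not part of
the patch:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Hypothetical model of an fldl/fstpl-based 8-byte copy: the source bytes are
// round-tripped through a double on the way to the destination.  On a real
// pre-SSE2 x86 the x87 FPU performs this round trip, and loading a signaling
// NaN (e.g. 0x7ff0000000000001) quiets it to 0x7ff8000000000001, so the copied
// bytes are no longer the original bytes.  (As plain C++ on a modern target
// the compiler will likely use SSE here, so this only models the hazard.)
static void copy8ViaF64(void *Dst, const void *Src) {
  double Tmp;
  std::memcpy(&Tmp, Src, 8);   // stands in for fldl
  std::memcpy(Dst, &Tmp, 8);   // stands in for fstpl
}

int main() {
  uint64_t Payload = 0x7ff0000000000001ULL;  // happens to be a signaling NaN
  uint64_t Out = 0;
  copy8ViaF64(&Out, &Payload);
  std::printf("%#llx\n", (unsigned long long)Out);
  return 0;
}

Integer pieces (i64 on 64-bit targets, i32 / i16 / i8 otherwise) copy the
bytes verbatim, which is what the patch falls back to below.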
Diffstat (limited to 'lib')
-rw-r--r--   lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 36
-rw-r--r--   lib/Target/ARM/ARMISelLowering.cpp        |  4
-rw-r--r--   lib/Target/ARM/ARMISelLowering.h          |  7
-rw-r--r--   lib/Target/X86/X86ISelLowering.cpp        |  8
-rw-r--r--   lib/Target/X86/X86ISelLowering.h          |  7
5 files changed, 47 insertions, 15 deletions
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 4cb63ce9a6..36592e54f8 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -3474,27 +3474,33 @@ static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
     unsigned VTSize = VT.getSizeInBits() / 8;
     while (VTSize > Size) {
       // For now, only use non-vector load / store's for the left-over pieces.
-      EVT NewVT;
+      EVT NewVT = VT;
       unsigned NewVTSize;
+
+      bool Found = false;
       if (VT.isVector() || VT.isFloatingPoint()) {
         NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
-        while (!TLI.isOperationLegalOrCustom(ISD::STORE, NewVT)) {
-          if (NewVT == MVT::i64 &&
-              TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64)) {
-            // i64 is usually not legal on 32-bit targets, but f64 may be.
-            NewVT = MVT::f64;
-            break;
-          }
-          NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
+        if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) &&
+            TLI.isLegalMemOpType(NewVT.getSimpleVT()))
+          Found = true;
+        else if (NewVT == MVT::i64 &&
+                 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
+                 TLI.isLegalMemOpType(MVT::f64)) {
+          // i64 is usually not legal on 32-bit targets, but f64 may be.
+          NewVT = MVT::f64;
+          Found = true;
         }
-        NewVTSize = NewVT.getSizeInBits() / 8;
-      } else {
-        // This can result in a type that is not legal on the target, e.g.
-        // 1 or 2 bytes on PPC.
-        NewVT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
-        NewVTSize = VTSize >> 1;
       }
 
+      if (!Found) {
+        do {
+          NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
+          if (NewVT == MVT::i8)
+            break;
+        } while (!TLI.isLegalMemOpType(NewVT.getSimpleVT()));
+      }
+      NewVTSize = NewVT.getSizeInBits() / 8;
+
       // If the new VT cannot cover all of the remaining bits, then consider
       // issuing a (or a pair of) unaligned and overlapping load / store.
       // FIXME: Only does this for 64-bit or more since we don't have proper
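
For readers who find the interleaved +/- lines hard to follow, here is a
condensed standalone model of the new selection logic. SimpleVT, Target and
pickLeftoverType are illustrative stand-ins for MVT and the TargetLowering
hooks, not the real API:

#include <cstdio>

// Simplified stand-ins for the MVT / TargetLowering pieces the hunk above
// uses.  Only the control flow is modelled; the real code walks
// MVT::SimpleValueType values and queries the target through TLI.
enum SimpleVT { I8 = 0, I16, I32, I64, F64, V2F64 };

static unsigned sizeInBytes(SimpleVT VT) {
  switch (VT) {
  case I8:    return 1;
  case I16:   return 2;
  case I32:   return 4;
  case I64:   return 8;
  case F64:   return 8;
  case V2F64: return 16;
  }
  return 0;
}

// Hypothetical target description, standing in for
// TLI.isOperationLegalOrCustom(ISD::STORE, VT) and TLI.isLegalMemOpType(VT).
struct Target {
  bool I64Legal;   // 64-bit integer stores are legal (e.g. x86-64)
  bool F64MemOp;   // f64 load / store copies bits verbatim (SSE2, VFP)
  bool storeLegal(SimpleVT VT) const {
    if (VT == I64) return I64Legal;
    if (VT == F64) return true;        // fldl/fstpl or movsd always exist
    return VT <= I32;                  // i8/i16/i32 stores always legal here
  }
  bool legalMemOpType(SimpleVT VT) const {
    if (VT == F64) return F64MemOp;    // the new hook rejects lossy f64
    return VT <= I64;                  // plain integer pieces are always safe
  }
};

// Mirrors the patched loop body: try i64 (or i32) first, accept f64 only when
// the target says it is a safe mem-op type, then shrink integer types until
// one is acceptable, stopping at i8.
static SimpleVT pickLeftoverType(const Target &TLI, SimpleVT VT) {
  SimpleVT NewVT = VT;
  bool Found = false;
  if (VT == F64 || VT == V2F64) {      // stands in for isVector()/isFloatingPoint()
    NewVT = sizeInBytes(VT) > 8 ? I64 : I32;
    if (TLI.storeLegal(NewVT) && TLI.legalMemOpType(NewVT))
      Found = true;
    else if (NewVT == I64 && TLI.storeLegal(F64) && TLI.legalMemOpType(F64)) {
      // i64 is usually not legal on 32-bit targets, but f64 may be.
      NewVT = F64;
      Found = true;
    }
  }
  if (!Found) {
    do {                               // shrink: i64 -> i32 -> i16 -> i8
      NewVT = SimpleVT(NewVT - 1);
      if (NewVT == I8)
        break;
    } while (!TLI.legalMemOpType(NewVT));
  }
  return NewVT;
}

int main() {
  Target PreSSE2  = {false, false};    // 32-bit x86 without SSE2
  Target WithSSE2 = {false, true};     // 32-bit x86 with SSE2
  // A v2f64-typed copy with 8 leftover bytes to cover:
  std::printf("pre-SSE2 : %u-byte pieces\n",
              sizeInBytes(pickLeftoverType(PreSSE2, V2F64)));
  std::printf("with SSE2: %u-byte pieces\n",
              sizeInBytes(pickLeftoverType(WithSSE2, V2F64)));
  return 0;
}

With a v2f64 copy and 8 leftover bytes, the model (like the patched code)
picks f64 pieces only when the target reports f64 as a safe mem-op type, and
otherwise shrinks to i32 pieces; the old code took f64 on non-SSE2 x86 as soon
as the f64 store was merely legal.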
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 88282c7331..de7159e474 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -9481,6 +9481,10 @@ EVT ARMTargetLowering::getOptimalMemOpType(uint64_t Size,
   return MVT::Other;
 }
 
+bool ARMTargetLowering::isLegalMemOpType(MVT VT) const {
+  return VT.isInteger() || VT == MVT::f64 || VT == MVT::v2f64;
+}
+
 bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
   if (Val.getOpcode() != ISD::LOAD)
     return false;
diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h
index 5a44201ec4..3e78ae3b2d 100644
--- a/lib/Target/ARM/ARMISelLowering.h
+++ b/lib/Target/ARM/ARMISelLowering.h
@@ -296,6 +296,13 @@ namespace llvm {
                                     bool MemcpyStrSrc,
                                     MachineFunction &MF) const;
 
+    /// isLegalMemOpType - Returns true if it's legal to use a load / store of
+    /// the specified type to expand memcpy / memset inline. This is mostly
+    /// true for legal types, except for some special cases. For example, on
+    /// X86 targets without SSE2, f64 load / store are done with fldl / fstpl,
+    /// which also perform a type conversion.
+    virtual bool isLegalMemOpType(MVT VT) const;
+
     using TargetLowering::isZExtFree;
     virtual bool isZExtFree(SDValue Val, EVT VT2) const;
 
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 90bee41e35..800c2012df 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -1412,6 +1412,14 @@ X86TargetLowering::getOptimalMemOpType(uint64_t Size,
   return MVT::i32;
 }
 
+bool X86TargetLowering::isLegalMemOpType(MVT VT) const {
+  if (VT == MVT::f32)
+    return X86ScalarSSEf32;
+  else if (VT == MVT::f64)
+    return X86ScalarSSEf64;
+  return VT.isInteger();
+}
+
 bool
 X86TargetLowering::allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const {
   if (Fast)
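
For context, this path only runs when a small fixed-size memcpy / memset is
expanded inline rather than lowered to a libcall. A C++ call site of the kind
that reaches it is sketched below; the struct and its size are arbitrary, and
whether a given target expands it inline depends on its per-call-site store
limits:

#include <cstring>

struct Blob { char Bytes[24]; };          // small enough to expand inline

void copyBlob(Blob *Dst, const Blob *Src) {
  // Clang turns this into an llvm.memcpy intrinsic; when SelectionDAG expands
  // it inline, FindOptimalMemOpLowering now asks isLegalMemOpType before
  // using f64 pieces, so a non-SSE2 i386 build gets i32 load / store pairs
  // instead of fldl / fstpl.
  std::memcpy(Dst, Src, sizeof(Blob));
}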
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index a515be23ef..9d22da1dd9 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -506,6 +506,13 @@ namespace llvm {
                         bool IsZeroVal, bool MemcpyStrSrc,
                         MachineFunction &MF) const;
 
+    /// isLegalMemOpType - Returns true if it's legal to use a load / store of
+    /// the specified type to expand memcpy / memset inline. This is mostly
+    /// true for legal types, except for some special cases. For example, on
+    /// X86 targets without SSE2, f64 load / store are done with fldl / fstpl,
+    /// which also perform a type conversion.
+    virtual bool isLegalMemOpType(MVT VT) const;
+
     /// allowsUnalignedMemoryAccesses - Returns true if the target allows
     /// unaligned memory accesses of the specified type. Returns whether it
     /// is "fast" by reference in the second argument.
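
Both header hunks declare the hook as a virtual member (the TargetLowering
base declaration lives under include/, outside the 'lib' filter of this page),
so other targets can opt types out of memcpy / memset expansion the same way.
A hypothetical out-of-tree override, with MyTargetLowering used purely as an
illustrative name:

// Allow only plain integer pieces; mirrors the ARM / X86 overrides above.
bool MyTargetLowering::isLegalMemOpType(MVT VT) const {
  return VT.isInteger();
}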