-rw-r--r--  include/llvm/Target/TargetLowering.h               17
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAG.cpp          10
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp    4
-rw-r--r--  lib/CodeGen/SelectionDAG/TargetLowering.cpp        24
-rw-r--r--  lib/Target/ARM/ARMISelLowering.cpp                 51
-rw-r--r--  lib/Target/ARM/ARMISelLowering.h                    7
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp                 56
-rw-r--r--  lib/Target/X86/X86ISelLowering.h                    7
8 files changed, 65 insertions(+), 111 deletions(-)
diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index 2cdc05096b..1cef274c34 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -935,16 +935,6 @@ public:
const SelectionDAG &DAG,
unsigned Depth = 0) const;
- /// computeMaskedBitsForAnyExtend - Since each target implement ANY_EXTEND
- /// and ExtLoad nodes specifically, let the target determine which of the bits
- /// specified in Mask are known to be either zero or one and return them in
- /// the KnownZero/KnownOne bitsets.
- virtual void computeMaskedBitsForAnyExtend(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth = 0) const;
-
/// ComputeNumSignBitsForTargetNode - This method can be implemented by
/// targets that want to expose additional information about sign bits to the
/// DAG Combiner.
@@ -1723,6 +1713,13 @@ public:
return false;
}
+ /// isZExtFree - Return true if zero-extending the specific node Val to type
+ /// VT2 is free (either because it's implicitly zero-extended such as ARM
+ /// ldrb / ldrh or because it's folded such as X86 zero-extending loads).
+ virtual bool isZExtFree(SDValue Val, EVT VT2) const {
+ return isZExtFree(Val.getValueType(), VT2);
+ }
+
/// isFNegFree - Return true if an fneg operation is free to the point where
/// it is never worthwhile to replace it with a bitwise operation.
virtual bool isFNegFree(EVT) const {
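
The point of the new SDValue overload is that a target can inspect the defining node, not just the types: the EVT/EVT query cannot tell whether Val happens to be a narrow load that already zero-extends in hardware. A minimal sketch of that layering, written as a hypothetical free helper rather than an in-tree override:

    #include "llvm/CodeGen/SelectionDAG.h"
    #include "llvm/Target/TargetLowering.h"
    using namespace llvm;

    // Sketch only (helper and heuristic are hypothetical, not from the patch):
    // the shape a typical target override takes -- types first, then the node.
    static bool isZExtFreeLike(const TargetLowering &TLI, SDValue Val, EVT VT2) {
      if (TLI.isZExtFree(Val.getValueType(), VT2)) // purely type-based answer
        return true;
      // Node-specific knowledge: e.g. a narrow load this target would lower
      // to a zero-extending load instruction anyway.
      return Val.getOpcode() == ISD::LOAD &&
             Val.getValueType() == MVT::i8 && VT2 == MVT::i32;
    }
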
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 631449ca79..7dd57d5420 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -1930,8 +1930,6 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
} else if (const MDNode *Ranges = LD->getRanges()) {
computeMaskedBitsLoad(*Ranges, KnownZero);
- } else if (ISD::isEXTLoad(Op.getNode())) {
- TLI.computeMaskedBitsForAnyExtend(Op, KnownZero, KnownOne, *this, Depth);
}
return;
}
@@ -1974,7 +1972,13 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
return;
}
case ISD::ANY_EXTEND: {
- TLI.computeMaskedBitsForAnyExtend(Op, KnownZero, KnownOne, *this, Depth);
+ EVT InVT = Op.getOperand(0).getValueType();
+ unsigned InBits = InVT.getScalarType().getSizeInBits();
+ KnownZero = KnownZero.trunc(InBits);
+ KnownOne = KnownOne.trunc(InBits);
+ ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
+ KnownZero = KnownZero.zext(BitWidth);
+ KnownOne = KnownOne.zext(BitWidth);
return;
}
case ISD::TRUNCATE: {
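
With the target hook gone, ANY_EXTEND gets one conservative generic rule: whatever is known about the narrow operand carries over, and the newly created high bits land in neither KnownZero nor KnownOne, i.e. they stay unknown. A self-contained worked instance of that rule, with the 8-bit known-bits values chosen arbitrarily:

    #include "llvm/ADT/APInt.h"
    #include <cassert>
    using llvm::APInt;

    int main() {
      APInt KnownZero(8, 0xF0); // pretend bits 4-7 of the i8 operand are known zero
      APInt KnownOne(8, 0x01);  // and bit 0 is known one
      // The i8 -> i32 ANY_EXTEND step as ComputeMaskedBits now performs it:
      KnownZero = KnownZero.zext(32);
      KnownOne  = KnownOne.zext(32);
      // Bits 8-31 appear in neither set: ANY_EXTEND promises nothing about
      // the high bits, so they must be reported as unknown.
      assert(!KnownZero[31] && !KnownOne[31]);
      return 0;
    }
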
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 8f1701450b..97d975ecb7 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -769,9 +769,11 @@ void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
EVT ValueVT = ValueVTs[Value];
unsigned NumParts = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
EVT RegisterVT = RegVTs[Value];
+ ISD::NodeType ExtendKind =
+ TLI.isZExtFree(Val, RegisterVT)? ISD::ZERO_EXTEND: ISD::ANY_EXTEND;
getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value),
- &Parts[Part], NumParts, RegisterVT, V);
+ &Parts[Part], NumParts, RegisterVT, V, ExtendKind);
Part += NumParts;
}
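
This call site is where the new hook pays off: when a value is copied into its virtual registers for use in other blocks, the builder now asks the target whether the widening zext is free and, if so, requests ZERO_EXTEND instead of ANY_EXTEND, so later ComputeMaskedBits queries can prove the high bits are zero. The decision restated as a standalone helper (a paraphrase for clarity; in-tree it stays inlined as above):

    #include "llvm/CodeGen/SelectionDAG.h"
    #include "llvm/Target/TargetLowering.h"
    using namespace llvm;

    // Prefer ZERO_EXTEND whenever the target says the zext costs nothing;
    // otherwise keep the cheaper-by-default ANY_EXTEND.
    static ISD::NodeType chooseExtend(const TargetLowering &TLI,
                                      SDValue Val, EVT RegisterVT) {
      return TLI.isZExtFree(Val, RegisterVT) ? ISD::ZERO_EXTEND
                                             : ISD::ANY_EXTEND;
    }
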
diff --git a/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index b410988dbd..931c569d42 100644
--- a/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -1856,30 +1856,6 @@ void TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0);
}
-void TargetLowering::computeMaskedBitsForAnyExtend(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth) const {
- unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
- if (Op.getOpcode() == ISD::ANY_EXTEND) {
- EVT InVT = Op.getOperand(0).getValueType();
- unsigned InBits = InVT.getScalarType().getSizeInBits();
- KnownZero = KnownZero.trunc(InBits);
- KnownOne = KnownOne.trunc(InBits);
- DAG.ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
- KnownZero = KnownZero.zext(BitWidth);
- KnownOne = KnownOne.zext(BitWidth);
- return;
- } else if (ISD::isEXTLoad(Op.getNode())) {
- KnownZero = KnownOne = APInt(BitWidth, 0);
- return;
- }
-
- assert(0 && "Expecting an ANY_EXTEND or extload!");
-}
-
-
/// ComputeNumSignBitsForTargetNode - This method can be implemented by
/// targets that want to expose additional information about sign bits to the
/// DAG Combiner.
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index c0a785338d..32235b9d0c 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -9462,6 +9462,27 @@ EVT ARMTargetLowering::getOptimalMemOpType(uint64_t Size,
return MVT::Other;
}
+bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
+ if (Val.getOpcode() != ISD::LOAD)
+ return false;
+
+ EVT VT1 = Val.getValueType();
+ if (!VT1.isSimple() || !VT1.isInteger() ||
+ !VT2.isSimple() || !VT2.isInteger())
+ return false;
+
+ switch (VT1.getSimpleVT().SimpleTy) {
+ default: break;
+ case MVT::i1:
+ case MVT::i8:
+ case MVT::i16:
+ // 8-bit and 16-bit loads implicitly zero-extend to 32-bits.
+ return true;
+ }
+
+ return false;
+}
+
static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {
if (V < 0)
return false;
@@ -9878,36 +9899,6 @@ void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
}
}
-void ARMTargetLowering::computeMaskedBitsForAnyExtend(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth) const {
- unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
- if (Op.getOpcode() == ISD::ANY_EXTEND) {
- // Implemented as a zero_extend.
- EVT InVT = Op.getOperand(0).getValueType();
- unsigned InBits = InVT.getScalarType().getSizeInBits();
- KnownZero = KnownZero.trunc(InBits);
- KnownOne = KnownOne.trunc(InBits);
- DAG.ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
- KnownZero = KnownZero.zext(BitWidth);
- KnownOne = KnownOne.zext(BitWidth);
- APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
- KnownZero |= NewBits;
- return;
- } else if (ISD::isEXTLoad(Op.getNode())) {
- // Implemented as zextloads.
- LoadSDNode *LD = cast<LoadSDNode>(Op);
- EVT VT = LD->getMemoryVT();
- unsigned MemBits = VT.getScalarType().getSizeInBits();
- KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
- return;
- }
-
- assert(0 && "Expecting an ANY_EXTEND or extload!");
-}
-
//===----------------------------------------------------------------------===//
// ARM Inline Assembly Support
//===----------------------------------------------------------------------===//
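
For context on the ARM override above: ldrb and ldrh zero-fill bits 8-31 (respectively 16-31) of the destination register, so a following zext to i32 needs no extra instruction. A compilable illustration; the assembly in the comment is the expected lowering, not something taken from the patch:

    // Hypothetical example, not part of the patch.
    unsigned char byte_source; // some i8 value in memory
    unsigned load_and_widen() {
      // Expected ARM lowering: a single ldrb whose result already has
      // bits 8-31 cleared -- the zero-extension to 32 bits is free.
      return byte_source;
    }
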
diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h
index bde2ad4924..8f7b593cbf 100644
--- a/lib/Target/ARM/ARMISelLowering.h
+++ b/lib/Target/ARM/ARMISelLowering.h
@@ -294,6 +294,8 @@ namespace llvm {
bool MemcpyStrSrc,
MachineFunction &MF) const;
+ virtual bool isZExtFree(SDValue Val, EVT VT2) const;
+
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty)const;
@@ -333,11 +335,6 @@ namespace llvm {
const SelectionDAG &DAG,
unsigned Depth) const;
- virtual void computeMaskedBitsForAnyExtend(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth) const;
virtual bool ExpandInlineAsm(CallInst *CI) const;
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 4ed8d1bb3c..e59f619370 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -12142,6 +12142,30 @@ bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit();
}
+bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
+ EVT VT1 = Val.getValueType();
+ if (isZExtFree(VT1, VT2))
+ return true;
+
+ if (Val.getOpcode() != ISD::LOAD)
+ return false;
+
+ if (!VT1.isSimple() || !VT1.isInteger() ||
+ !VT2.isSimple() || !VT2.isInteger())
+ return false;
+
+ switch (VT1.getSimpleVT().SimpleTy) {
+ default: break;
+ case MVT::i8:
+ case MVT::i16:
+ case MVT::i32:
+ // X86 has 8, 16, and 32-bit zero-extending loads.
+ return true;
+ }
+
+ return false;
+}
+
bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
// i16 instructions are longer (0x66 prefix) and potentially slower.
return !(VT1 == MVT::i32 && VT2 == MVT::i16);
@@ -14093,38 +14117,6 @@ void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
}
}
-void X86TargetLowering::computeMaskedBitsForAnyExtend(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth) const {
- unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
- if (Op.getOpcode() == ISD::ANY_EXTEND) {
- // Implemented as a zero_extend except for i16 -> i32
- EVT InVT = Op.getOperand(0).getValueType();
- unsigned InBits = InVT.getScalarType().getSizeInBits();
- KnownZero = KnownZero.trunc(InBits);
- KnownOne = KnownOne.trunc(InBits);
- DAG.ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
- KnownZero = KnownZero.zext(BitWidth);
- KnownOne = KnownOne.zext(BitWidth);
- if (BitWidth != 32 || InBits != 16) {
- APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
- KnownZero |= NewBits;
- }
- return;
- } else if (ISD::isEXTLoad(Op.getNode())) {
- // Implemented as zextloads or implicitly zero-extended (i32 -> i64)
- LoadSDNode *LD = cast<LoadSDNode>(Op);
- EVT VT = LD->getMemoryVT();
- unsigned MemBits = VT.getScalarType().getSizeInBits();
- KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
- return;
- }
-
- assert(0 && "Expecting an ANY_EXTEND or extload!");
-}
-
unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
unsigned Depth) const {
// SETCC_CARRY sets the dest to ~0 for true or 0 for false.
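
The X86 override mirrors the ARM one but also admits i32 sources: 8- and 16-bit loads fold into the movzx family, and in 64-bit mode any write to a 32-bit register implicitly zeroes bits 32-63. A compilable illustration along the same lines as the ARM one (expected assembly noted in the comment, names hypothetical):

    // Hypothetical example, not part of the patch.
    unsigned short word_source; // some i16 value in memory
    unsigned long long load_and_widen64() {
      // Expected x86-64 lowering: movzwl word_source(%rip), %eax. The
      // 32-bit register write clears bits 32-63 of %rax, so the full
      // zext to 64 bits costs nothing beyond the load itself.
      return word_source;
    }
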
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index 099f1d884e..1042fe13ec 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -558,12 +558,6 @@ namespace llvm {
const SelectionDAG &DAG,
unsigned Depth = 0) const;
- virtual void computeMaskedBitsForAnyExtend(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth) const;
-
// ComputeNumSignBitsForTargetNode - Determine the number of bits in the
// operation that are sign bits.
virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
@@ -634,6 +628,7 @@ namespace llvm {
/// result out to 64 bits.
virtual bool isZExtFree(Type *Ty1, Type *Ty2) const;
virtual bool isZExtFree(EVT VT1, EVT VT2) const;
+ virtual bool isZExtFree(SDValue Val, EVT VT2) const;
/// isFMAFasterThanMulAndAdd - Return true if an FMA operation is faster than
/// a pair of mul and add instructions. fmuladd intrinsics will be expanded to