Diffstat (limited to 'lib')
-rw-r--r--  lib/CodeGen/SelectionDAG/DAGCombiner.cpp    | 24
-rw-r--r--  lib/CodeGen/SelectionDAG/LegalizeDAG.cpp    |  6
-rw-r--r--  lib/CodeGen/SelectionDAG/TargetLowering.cpp |  2
-rw-r--r--  lib/Target/ARM/ARMISelLowering.cpp          |  4
-rw-r--r--  lib/Target/Alpha/AlphaISelLowering.cpp      | 14
-rw-r--r--  lib/Target/CellSPU/SPUISelLowering.cpp      | 18
-rw-r--r--  lib/Target/IA64/IA64ISelLowering.cpp        | 12
-rw-r--r--  lib/Target/Mips/MipsISelLowering.cpp        |  6
-rw-r--r--  lib/Target/PIC16/PIC16ISelLowering.cpp      |  6
-rw-r--r--  lib/Target/PowerPC/PPCISelLowering.cpp      |  4
-rw-r--r--  lib/Target/Sparc/SparcISelLowering.cpp      |  4
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp          |  2
12 files changed, 51 insertions, 51 deletions
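
For context, the hooks renamed in this patch are TargetLowering's per-type legalization controls for extending loads: a target registers an action with setLoadExtAction (formerly setLoadXAction), and clients such as DAGCombiner and LegalizeDAG query it via isLoadExtLegal / getLoadExtAction (formerly isLoadXLegal / getLoadXAction). Below is a minimal sketch of the pattern under the new names; MyTargetLowering and canFormZextLoad are hypothetical names for illustration and are not part of this patch.

// Sketch only: a hypothetical backend using the renamed hooks.
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"

namespace llvm {

class MyTargetLowering : public TargetLowering {
public:
  explicit MyTargetLowering(TargetMachine &TM) : TargetLowering(TM) {
    // No native i1 extending loads: promote them to a wider type.
    setLoadExtAction(ISD::EXTLOAD,  MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
    // No f32 extending load: expand into a load plus fp_extend.
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
  }
};

// Clients check isLoadExtLegal before forming an extending load,
// as DAGCombiner does throughout this patch.
static bool canFormZextLoad(const TargetLowering &TLI, MVT MemVT) {
  return TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT);
}

} // end namespace llvm
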
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index c774c6878d..f546ed4467 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -1724,7 +1724,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth,
BitWidth - EVT.getSizeInBits())) &&
((!AfterLegalize && !LN0->isVolatile()) ||
- TLI.isLoadXLegal(ISD::ZEXTLOAD, EVT))) {
+ TLI.isLoadExtLegal(ISD::ZEXTLOAD, EVT))) {
SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, LN0->getChain(),
LN0->getBasePtr(), LN0->getSrcValue(),
LN0->getSrcValueOffset(), EVT,
@@ -1746,7 +1746,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth,
BitWidth - EVT.getSizeInBits())) &&
((!AfterLegalize && !LN0->isVolatile()) ||
- TLI.isLoadXLegal(ISD::ZEXTLOAD, EVT))) {
+ TLI.isLoadExtLegal(ISD::ZEXTLOAD, EVT))) {
SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, LN0->getChain(),
LN0->getBasePtr(), LN0->getSrcValue(),
LN0->getSrcValueOffset(), EVT,
@@ -1775,7 +1775,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
// Do not generate loads of non-round integer types since these can
// be expensive (and would be wrong if the type is not byte sized).
if (EVT != MVT::Other && LoadedVT.bitsGT(EVT) && EVT.isRound() &&
- (!AfterLegalize || TLI.isLoadXLegal(ISD::ZEXTLOAD, EVT))) {
+ (!AfterLegalize || TLI.isLoadExtLegal(ISD::ZEXTLOAD, EVT))) {
MVT PtrType = N0.getOperand(1).getValueType();
// For big endian targets, we need to add an offset to the pointer to
// load the correct bytes. For little endian systems, we merely need to
@@ -2858,7 +2858,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
// fold (sext (load x)) -> (sext (truncate (sextload x)))
if (ISD::isNON_EXTLoad(N0.getNode()) &&
((!AfterLegalize && !cast<LoadSDNode>(N0)->isVolatile()) ||
- TLI.isLoadXLegal(ISD::SEXTLOAD, N0.getValueType()))) {
+ TLI.isLoadExtLegal(ISD::SEXTLOAD, N0.getValueType()))) {
bool DoXform = true;
SmallVector<SDNode*, 4> SetCCs;
if (!N0.hasOneUse())
@@ -2900,7 +2900,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
MVT EVT = LN0->getMemoryVT();
if ((!AfterLegalize && !LN0->isVolatile()) ||
- TLI.isLoadXLegal(ISD::SEXTLOAD, EVT)) {
+ TLI.isLoadExtLegal(ISD::SEXTLOAD, EVT)) {
SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, LN0->getChain(),
LN0->getBasePtr(), LN0->getSrcValue(),
LN0->getSrcValueOffset(), EVT,
@@ -2984,7 +2984,7 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
// fold (zext (load x)) -> (zext (truncate (zextload x)))
if (ISD::isNON_EXTLoad(N0.getNode()) &&
((!AfterLegalize && !cast<LoadSDNode>(N0)->isVolatile()) ||
- TLI.isLoadXLegal(ISD::ZEXTLOAD, N0.getValueType()))) {
+ TLI.isLoadExtLegal(ISD::ZEXTLOAD, N0.getValueType()))) {
bool DoXform = true;
SmallVector<SDNode*, 4> SetCCs;
if (!N0.hasOneUse())
@@ -3026,7 +3026,7 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
MVT EVT = LN0->getMemoryVT();
if ((!AfterLegalize && !LN0->isVolatile()) ||
- TLI.isLoadXLegal(ISD::ZEXTLOAD, EVT)) {
+ TLI.isLoadExtLegal(ISD::ZEXTLOAD, EVT)) {
SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, LN0->getChain(),
LN0->getBasePtr(), LN0->getSrcValue(),
LN0->getSrcValueOffset(), EVT,
@@ -3106,7 +3106,7 @@ SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
// fold (aext (load x)) -> (aext (truncate (extload x)))
if (ISD::isNON_EXTLoad(N0.getNode()) && N0.hasOneUse() &&
((!AfterLegalize && !cast<LoadSDNode>(N0)->isVolatile()) ||
- TLI.isLoadXLegal(ISD::EXTLOAD, N0.getValueType()))) {
+ TLI.isLoadExtLegal(ISD::EXTLOAD, N0.getValueType()))) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, VT, LN0->getChain(),
LN0->getBasePtr(), LN0->getSrcValue(),
@@ -3212,7 +3212,7 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
if (Opc == ISD::SIGN_EXTEND_INREG) {
ExtType = ISD::SEXTLOAD;
EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
- if (AfterLegalize && !TLI.isLoadXLegal(ISD::SEXTLOAD, EVT))
+ if (AfterLegalize && !TLI.isLoadExtLegal(ISD::SEXTLOAD, EVT))
return SDValue();
}
@@ -3345,7 +3345,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
ISD::isUNINDEXEDLoad(N0.getNode()) &&
EVT == cast<LoadSDNode>(N0)->getMemoryVT() &&
((!AfterLegalize && !cast<LoadSDNode>(N0)->isVolatile()) ||
- TLI.isLoadXLegal(ISD::SEXTLOAD, EVT))) {
+ TLI.isLoadExtLegal(ISD::SEXTLOAD, EVT))) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, LN0->getChain(),
LN0->getBasePtr(), LN0->getSrcValue(),
@@ -3361,7 +3361,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
N0.hasOneUse() &&
EVT == cast<LoadSDNode>(N0)->getMemoryVT() &&
((!AfterLegalize && !cast<LoadSDNode>(N0)->isVolatile()) ||
- TLI.isLoadXLegal(ISD::SEXTLOAD, EVT))) {
+ TLI.isLoadExtLegal(ISD::SEXTLOAD, EVT))) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, LN0->getChain(),
LN0->getBasePtr(), LN0->getSrcValue(),
@@ -4043,7 +4043,7 @@ SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) {
// fold (fpext (load x)) -> (fpext (fptrunc (extload x)))
if (ISD::isNON_EXTLoad(N0.getNode()) && N0.hasOneUse() &&
((!AfterLegalize && !cast<LoadSDNode>(N0)->isVolatile()) ||
- TLI.isLoadXLegal(ISD::EXTLOAD, N0.getValueType()))) {
+ TLI.isLoadExtLegal(ISD::EXTLOAD, N0.getValueType()))) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, VT, LN0->getChain(),
LN0->getBasePtr(), LN0->getSrcValue(),
diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 6e7aa1964a..9fa0628029 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -456,7 +456,7 @@ static SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP,
if (CFP->isValueValidForType(SVT, CFP->getValueAPF()) &&
// Only do this if the target has a native EXTLOAD instruction from
// smaller type.
- TLI.isLoadXLegal(ISD::EXTLOAD, SVT) &&
+ TLI.isLoadExtLegal(ISD::EXTLOAD, SVT) &&
TLI.ShouldShrinkFPConstant(OrigVT)) {
const Type *SType = SVT.getTypeForMVT();
LLVMC = cast<ConstantFP>(ConstantExpr::getFPTrunc(LLVMC, SType));
@@ -1981,7 +1981,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// nice to have an effective generic way of getting these benefits...
// Until such a way is found, don't insist on promoting i1 here.
(SrcVT != MVT::i1 ||
- TLI.getLoadXAction(ExtType, MVT::i1) == TargetLowering::Promote)) {
+ TLI.getLoadExtAction(ExtType, MVT::i1) == TargetLowering::Promote)) {
// Promote to a byte-sized load if not loading an integral number of
// bytes. For example, promote EXTLOAD:i20 -> EXTLOAD:i24.
unsigned NewWidth = SrcVT.getStoreSizeInBits();
@@ -2086,7 +2086,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
Tmp1 = LegalizeOp(Result);
Tmp2 = LegalizeOp(Ch);
} else {
- switch (TLI.getLoadXAction(ExtType, SrcVT)) {
+ switch (TLI.getLoadExtAction(ExtType, SrcVT)) {
default: assert(0 && "This action is not supported yet!");
case TargetLowering::Custom:
isCustom = true;
diff --git a/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 05edb6348d..d60ae2eacf 100644
--- a/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -402,7 +402,7 @@ TargetLowering::TargetLowering(TargetMachine &tm)
"Fixed size array in TargetLowering is not large enough!");
// All operations default to being supported.
memset(OpActions, 0, sizeof(OpActions));
- memset(LoadXActions, 0, sizeof(LoadXActions));
+ memset(LoadExtActions, 0, sizeof(LoadExtActions));
memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
memset(ConvertActions, 0, sizeof(ConvertActions));
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index e70bb0bd96..14c4526d1a 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -131,10 +131,10 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
computeRegisterProperties();
// ARM does not have f32 extending load.
- setLoadXAction(ISD::EXTLOAD, MVT::f32, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
// ARM does not have i1 sign extending load.
- setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
// ARM supports all 4 flavors of integer indexed load / store.
for (unsigned im = (unsigned)ISD::PRE_INC;
diff --git a/lib/Target/Alpha/AlphaISelLowering.cpp b/lib/Target/Alpha/AlphaISelLowering.cpp
index 8ec3fb5d9b..33445f0fa4 100644
--- a/lib/Target/Alpha/AlphaISelLowering.cpp
+++ b/lib/Target/Alpha/AlphaISelLowering.cpp
@@ -52,15 +52,15 @@ AlphaTargetLowering::AlphaTargetLowering(TargetMachine &TM) : TargetLowering(TM)
// We want to custom lower some of our intrinsics.
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
- setLoadXAction(ISD::EXTLOAD, MVT::i1, Promote);
- setLoadXAction(ISD::EXTLOAD, MVT::f32, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
+ setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
- setLoadXAction(ISD::ZEXTLOAD, MVT::i1, Promote);
- setLoadXAction(ISD::ZEXTLOAD, MVT::i32, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, Expand);
- setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
- setLoadXAction(ISD::SEXTLOAD, MVT::i8, Expand);
- setLoadXAction(ISD::SEXTLOAD, MVT::i16, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i16, Expand);
// setOperationAction(ISD::BRIND, MVT::Other, Expand);
setOperationAction(ISD::BR_JT, MVT::Other, Expand);
diff --git a/lib/Target/CellSPU/SPUISelLowering.cpp b/lib/Target/CellSPU/SPUISelLowering.cpp
index 33cb9e6d46..086a25f1d8 100644
--- a/lib/Target/CellSPU/SPUISelLowering.cpp
+++ b/lib/Target/CellSPU/SPUISelLowering.cpp
@@ -131,27 +131,27 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
addRegisterClass(MVT::i128, SPU::GPRCRegisterClass);
// SPU has no sign or zero extended loads for i1, i8, i16:
- setLoadXAction(ISD::EXTLOAD, MVT::i1, Promote);
- setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
- setLoadXAction(ISD::ZEXTLOAD, MVT::i1, Promote);
+ setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
setTruncStoreAction(MVT::i8, MVT::i1, Custom);
setTruncStoreAction(MVT::i16, MVT::i1, Custom);
setTruncStoreAction(MVT::i32, MVT::i1, Custom);
setTruncStoreAction(MVT::i64, MVT::i1, Custom);
setTruncStoreAction(MVT::i128, MVT::i1, Custom);
- setLoadXAction(ISD::EXTLOAD, MVT::i8, Custom);
- setLoadXAction(ISD::SEXTLOAD, MVT::i8, Custom);
- setLoadXAction(ISD::ZEXTLOAD, MVT::i8, Custom);
+ setLoadExtAction(ISD::EXTLOAD, MVT::i8, Custom);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Custom);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::i8, Custom);
setTruncStoreAction(MVT::i8 , MVT::i8, Custom);
setTruncStoreAction(MVT::i16 , MVT::i8, Custom);
setTruncStoreAction(MVT::i32 , MVT::i8, Custom);
setTruncStoreAction(MVT::i64 , MVT::i8, Custom);
setTruncStoreAction(MVT::i128, MVT::i8, Custom);
- setLoadXAction(ISD::EXTLOAD, MVT::i16, Custom);
- setLoadXAction(ISD::SEXTLOAD, MVT::i16, Custom);
- setLoadXAction(ISD::ZEXTLOAD, MVT::i16, Custom);
+ setLoadExtAction(ISD::EXTLOAD, MVT::i16, Custom);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i16, Custom);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::i16, Custom);
// SPU constant load actions are custom lowered:
setOperationAction(ISD::Constant, MVT::i64, Custom);
diff --git a/lib/Target/IA64/IA64ISelLowering.cpp b/lib/Target/IA64/IA64ISelLowering.cpp
index 3959dc6f74..11582719c8 100644
--- a/lib/Target/IA64/IA64ISelLowering.cpp
+++ b/lib/Target/IA64/IA64ISelLowering.cpp
@@ -35,14 +35,14 @@ IA64TargetLowering::IA64TargetLowering(TargetMachine &TM)
// register class for predicate registers
addRegisterClass(MVT::i1, IA64::PRRegisterClass);
- setLoadXAction(ISD::EXTLOAD , MVT::i1 , Promote);
+ setLoadExtAction(ISD::EXTLOAD , MVT::i1 , Promote);
- setLoadXAction(ISD::ZEXTLOAD , MVT::i1 , Promote);
+ setLoadExtAction(ISD::ZEXTLOAD , MVT::i1 , Promote);
- setLoadXAction(ISD::SEXTLOAD , MVT::i1 , Promote);
- setLoadXAction(ISD::SEXTLOAD , MVT::i8 , Expand);
- setLoadXAction(ISD::SEXTLOAD , MVT::i16 , Expand);
- setLoadXAction(ISD::SEXTLOAD , MVT::i32 , Expand);
+ setLoadExtAction(ISD::SEXTLOAD , MVT::i1 , Promote);
+ setLoadExtAction(ISD::SEXTLOAD , MVT::i8 , Expand);
+ setLoadExtAction(ISD::SEXTLOAD , MVT::i16 , Expand);
+ setLoadExtAction(ISD::SEXTLOAD , MVT::i32 , Expand);
setOperationAction(ISD::BRIND , MVT::Other, Expand);
setOperationAction(ISD::BR_JT , MVT::Other, Expand);
diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp
index dd6b432dca..6dc237aad6 100644
--- a/lib/Target/Mips/MipsISelLowering.cpp
+++ b/lib/Target/Mips/MipsISelLowering.cpp
@@ -82,9 +82,9 @@ MipsTargetLowering(MipsTargetMachine &TM): TargetLowering(TM)
addLegalFPImmediate(APFloat(+0.0f));
// Load extended operations for i1 types must be promoted
- setLoadXAction(ISD::EXTLOAD, MVT::i1, Promote);
- setLoadXAction(ISD::ZEXTLOAD, MVT::i1, Promote);
- setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
+ setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
// Used by legalize types to correctly generate the setcc result.
// Without this, every float setcc comes with an AND/OR with the result,
diff --git a/lib/Target/PIC16/PIC16ISelLowering.cpp b/lib/Target/PIC16/PIC16ISelLowering.cpp
index cdcc2527a2..ace3170dfa 100644
--- a/lib/Target/PIC16/PIC16ISelLowering.cpp
+++ b/lib/Target/PIC16/PIC16ISelLowering.cpp
@@ -60,9 +60,9 @@ PIC16TargetLowering(PIC16TargetMachine &TM): TargetLowering(TM)
addRegisterClass(MVT::i16, PIC16::PTRRegsRegisterClass);
// Load extended operations for i1 types must be promoted.
- setLoadXAction(ISD::EXTLOAD, MVT::i1, Promote);
- setLoadXAction(ISD::ZEXTLOAD, MVT::i1, Promote);
- setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
+ setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
setOperationAction(ISD::ADD, MVT::i1, Promote);
setOperationAction(ISD::ADD, MVT::i8, Legal);
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index 0cb77c014e..553ca107ec 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -53,8 +53,8 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
addRegisterClass(MVT::f64, PPC::F8RCRegisterClass);
// PowerPC has an i16 but no i8 (or i1) SEXTLOAD
- setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
- setLoadXAction(ISD::SEXTLOAD, MVT::i8, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp
index dc35424804..e81688e4e8 100644
--- a/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/lib/Target/Sparc/SparcISelLowering.cpp
@@ -515,9 +515,9 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
addRegisterClass(MVT::f64, SP::DFPRegsRegisterClass);
// Turn FP extload into load/fextend
- setLoadXAction(ISD::EXTLOAD, MVT::f32, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
// Sparc doesn't have i1 sign extending load
- setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
// Turn FP truncstore into trunc + store.
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 6d3d0b0554..19b94dfe09 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -84,7 +84,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
if (Subtarget->is64Bit())
addRegisterClass(MVT::i64, X86::GR64RegisterClass);
- setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
// We don't accept any truncstore of integer registers.
setTruncStoreAction(MVT::i64, MVT::i32, Expand);