author    Wesley Peck <peckw@wesleypeck.com>  2010-11-23 03:31:01 +0000
committer Wesley Peck <peckw@wesleypeck.com>  2010-11-23 03:31:01 +0000
commit    bf17cfa3f904e488e898ac2e3af706fd1a892f08 (patch)
tree      f48d5532ddc3b52d2b73c4d8e1c4d78d8f59707f /lib/Target
parent    5400570097a25f05694d451afed7f949281b789e (diff)
Renaming ISD::BIT_CONVERT to ISD::BITCAST to better reflect the LLVM IR concept.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@119990 91177308-0d34-0410-b5e6-96231b3b80d8
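The rename is mechanical: every SelectionDAG call site that built a BIT_CONVERT node now spells the opcode BITCAST, with operands and semantics unchanged, so the DAG-level name matches the IR-level bitcast instruction. A minimal before/after sketch of the call-site pattern, assuming a lowering helper with a SelectionDAG and an SDValue in scope (the function LowerBitcastExample, the MVT::i32 result type, and the include set are illustrative only, not part of this commit):

#include "llvm/CodeGen/SelectionDAG.h"      // SelectionDAG, SDValue, DAG.getNode
#include "llvm/CodeGen/SelectionDAGNodes.h" // ISD opcodes (ISD::BITCAST, formerly ISD::BIT_CONVERT)

// Hypothetical helper shown only to illustrate the opcode rename.
static llvm::SDValue LowerBitcastExample(llvm::SDValue Op, llvm::SelectionDAG &DAG) {
  llvm::DebugLoc dl = Op.getDebugLoc();
  // Before this commit the node was built as:
  //   return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
  // After this commit the same node is built as:
  return DAG.getNode(llvm::ISD::BITCAST, dl, llvm::MVT::i32, Op);
}

Targets that custom-lower the opcode update their setOperationAction and LowerOperation entries the same way, as the ARM hunks below show.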
Diffstat (limited to 'lib/Target')
-rw-r--r--  lib/Target/ARM/ARMFastISel.cpp              |   2
-rw-r--r--  lib/Target/ARM/ARMISelLowering.cpp          | 102
-rw-r--r--  lib/Target/Alpha/AlphaISelLowering.cpp      |  14
-rw-r--r--  lib/Target/CellSPU/SPUISelDAGToDAG.cpp      |  84
-rw-r--r--  lib/Target/CellSPU/SPUISelLowering.cpp      | 184
-rw-r--r--  lib/Target/MBlaze/MBlazeISelLowering.cpp    |   8
-rw-r--r--  lib/Target/Mips/MipsISelLowering.cpp        | 282
-rw-r--r--  lib/Target/PowerPC/PPCISelLowering.cpp      | 179
-rw-r--r--  lib/Target/PowerPC/README.txt               |   4
-rw-r--r--  lib/Target/Sparc/SparcISelLowering.cpp      |  38
-rw-r--r--  lib/Target/SystemZ/SystemZISelLowering.cpp  |   4
-rw-r--r--  lib/Target/X86/X86FastISel.cpp              | 152
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp          | 154
-rw-r--r--  lib/Target/X86/X86ISelLowering.h            |   2
14 files changed, 604 insertions, 605 deletions
diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp
index 18b534846b..a0e6bc812e 100644
--- a/lib/Target/ARM/ARMFastISel.cpp
+++ b/lib/Target/ARM/ARMFastISel.cpp
@@ -1519,7 +1519,7 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
break;
}
case CCValAssign::BCvt: {
- unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BIT_CONVERT, Arg,
+ unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
/*TODO: Kill=*/false);
assert(BC != 0 && "Failed to emit a bitcast!");
Arg = BC;
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 1c7a1935af..37d7fccbd7 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -238,7 +238,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setLibcallName(RTLIB::SRA_I128, 0);
if (Subtarget->isAAPCS_ABI()) {
- // Double-precision floating-point arithmetic helper functions
+ // Double-precision floating-point arithmetic helper functions
// RTABI chapter 4.1.2, Table 2
setLibcallName(RTLIB::ADD_F64, "__aeabi_dadd");
setLibcallName(RTLIB::DIV_F64, "__aeabi_ddiv");
@@ -338,7 +338,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setLibcallName(RTLIB::FPROUND_F64_F32, "__aeabi_d2f");
setLibcallName(RTLIB::FPEXT_F32_F64, "__aeabi_f2d");
setLibcallCallingConv(RTLIB::FPROUND_F64_F32, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::FPEXT_F32_F64, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::FPEXT_F32_F64, CallingConv::ARM_AAPCS);
// Integer to floating-point conversions.
// RTABI chapter 4.1.2, Table 8
@@ -387,7 +387,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setLibcallCallingConv(RTLIB::SDIV_I32, CallingConv::ARM_AAPCS);
setLibcallCallingConv(RTLIB::UDIV_I8, CallingConv::ARM_AAPCS);
setLibcallCallingConv(RTLIB::UDIV_I16, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::UDIV_I32, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::UDIV_I32, CallingConv::ARM_AAPCS);
}
if (Subtarget->isThumb1Only())
@@ -609,7 +609,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
// Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
// iff target supports vfp2.
- setOperationAction(ISD::BIT_CONVERT, MVT::i64, Custom);
+ setOperationAction(ISD::BITCAST, MVT::i64, Custom);
setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
}
@@ -1061,7 +1061,7 @@ ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
default: llvm_unreachable("Unknown loc info!");
case CCValAssign::Full: break;
case CCValAssign::BCvt:
- Val = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), Val);
+ Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
break;
}
@@ -1209,7 +1209,7 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
break;
case CCValAssign::BCvt:
- Arg = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), Arg);
+ Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
break;
}
@@ -1666,7 +1666,7 @@ ARMTargetLowering::LowerReturn(SDValue Chain,
default: llvm_unreachable("Unknown loc info!");
case CCValAssign::Full: break;
case CCValAssign::BCvt:
- Arg = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), Arg);
+ Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
break;
}
@@ -2223,7 +2223,7 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
default: llvm_unreachable("Unknown loc info!");
case CCValAssign::Full: break;
case CCValAssign::BCvt:
- ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), ArgValue);
+ ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
break;
case CCValAssign::SExt:
ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
@@ -2689,7 +2689,7 @@ static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
break;
}
Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0));
- return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
+ return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
}
static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
@@ -2708,7 +2708,7 @@ static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
break;
}
- Op = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Op.getOperand(0));
+ Op = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0));
return DAG.getNode(Opc, dl, VT, Op);
}
@@ -2765,12 +2765,12 @@ SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
return FrameAddr;
}
-/// ExpandBIT_CONVERT - If the target supports VFP, this function is called to
+/// ExpandBITCAST - If the target supports VFP, this function is called to
/// expand a bit convert where either the source or destination type is i64 to
/// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64
/// operand type is illegal (e.g., v2f32 for a target that doesn't support
/// vectors), since the legalizer won't know what to do with that.
-static SDValue ExpandBIT_CONVERT(SDNode *N, SelectionDAG &DAG) {
+static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
DebugLoc dl = N->getDebugLoc();
SDValue Op = N->getOperand(0);
@@ -2780,7 +2780,7 @@ static SDValue ExpandBIT_CONVERT(SDNode *N, SelectionDAG &DAG) {
EVT SrcVT = Op.getValueType();
EVT DstVT = N->getValueType(0);
assert((SrcVT == MVT::i64 || DstVT == MVT::i64) &&
- "ExpandBIT_CONVERT called for non-i64 type");
+ "ExpandBITCAST called for non-i64 type");
// Turn i64->f64 into VMOVDRR.
if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) {
@@ -2788,7 +2788,7 @@ static SDValue ExpandBIT_CONVERT(SDNode *N, SelectionDAG &DAG) {
DAG.getConstant(0, MVT::i32));
SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
DAG.getConstant(1, MVT::i32));
- return DAG.getNode(ISD::BIT_CONVERT, dl, DstVT,
+ return DAG.getNode(ISD::BITCAST, dl, DstVT,
DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi));
}
@@ -2815,7 +2815,7 @@ static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32);
EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal);
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vmov);
+ return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
}
/// LowerShiftRightParts - Lower SRA_PARTS, which returns two
@@ -3068,13 +3068,13 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
AndOp = Op1;
// Ignore bitconvert.
- if (AndOp.getNode() && AndOp.getOpcode() == ISD::BIT_CONVERT)
+ if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST)
AndOp = AndOp.getOperand(0);
if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
Opc = ARMISD::VTST;
- Op0 = DAG.getNode(ISD::BIT_CONVERT, dl, VT, AndOp.getOperand(0));
- Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, VT, AndOp.getOperand(1));
+ Op0 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(0));
+ Op1 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(1));
Invert = !Invert;
}
}
@@ -3095,7 +3095,7 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
Opc = ARMISD::VCLTZ;
SingleOp = Op1;
}
-
+
SDValue Result;
if (SingleOp.getNode()) {
switch (Opc) {
@@ -3499,7 +3499,7 @@ static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
VMOVModImm);
if (Val.getNode()) {
SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val);
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vmov);
+ return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
}
// Try an immediate VMVN.
@@ -3507,11 +3507,11 @@ static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
((1LL << SplatBitSize) - 1));
Val = isNEONModifiedImm(NegatedImm,
SplatUndef.getZExtValue(), SplatBitSize,
- DAG, VmovVT, VT.is128BitVector(),
+ DAG, VmovVT, VT.is128BitVector(),
VMVNModImm);
if (Val.getNode()) {
SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val);
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vmov);
+ return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
}
}
}
@@ -3553,13 +3553,13 @@ static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
if (VT.getVectorElementType().isFloatingPoint()) {
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0; i < NumElts; ++i)
- Ops.push_back(DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32,
+ Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32,
Op.getOperand(i)));
EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, &Ops[0], NumElts);
Val = LowerBUILD_VECTOR(Val, DAG, ST);
if (Val.getNode())
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Val);
+ return DAG.getNode(ISD::BITCAST, dl, VT, Val);
}
SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl);
if (Val.getNode())
@@ -3582,9 +3582,9 @@ static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0; i < NumElts; ++i)
- Ops.push_back(DAG.getNode(ISD::BIT_CONVERT, dl, EltVT, Op.getOperand(i)));
+ Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i)));
SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts);
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Val);
+ return DAG.getNode(ISD::BITCAST, dl, VT, Val);
}
return SDValue();
@@ -3805,8 +3805,8 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
// registers are defined to use, and since i64 is not legal.
EVT EltVT = EVT::getFloatingPointVT(EltSize);
EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
- V1 = DAG.getNode(ISD::BIT_CONVERT, dl, VecVT, V1);
- V2 = DAG.getNode(ISD::BIT_CONVERT, dl, VecVT, V2);
+ V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1);
+ V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2);
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0; i < NumElts; ++i) {
if (ShuffleMask[i] < 0)
@@ -3818,7 +3818,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
MVT::i32)));
}
SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts);
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Val);
+ return DAG.getNode(ISD::BITCAST, dl, VT, Val);
}
return SDValue();
@@ -3851,13 +3851,13 @@ static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
SDValue Op1 = Op.getOperand(1);
if (Op0.getOpcode() != ISD::UNDEF)
Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
- DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Op0),
+ DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0),
DAG.getIntPtrConstant(0));
if (Op1.getOpcode() != ISD::UNDEF)
Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
- DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Op1),
+ DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1),
DAG.getIntPtrConstant(1));
- return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Val);
+ return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val);
}
/// SkipExtension - For a node that is either a SIGN_EXTEND, ZERO_EXTEND, or
@@ -3933,7 +3933,7 @@ SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::EH_SJLJ_DISPATCHSETUP: return LowerEH_SJLJ_DISPATCHSETUP(Op, DAG);
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG,
Subtarget);
- case ISD::BIT_CONVERT: return ExpandBIT_CONVERT(Op.getNode(), DAG);
+ case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG);
case ISD::SHL:
case ISD::SRL:
case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget);
@@ -3962,8 +3962,8 @@ void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
default:
llvm_unreachable("Don't know how to custom expand this!");
break;
- case ISD::BIT_CONVERT:
- Res = ExpandBIT_CONVERT(N, DAG);
+ case ISD::BITCAST:
+ Res = ExpandBITCAST(N, DAG);
break;
case ISD::SRL:
case ISD::SRA:
@@ -4497,7 +4497,7 @@ static SDValue PerformANDCombine(SDNode *N,
DebugLoc dl = N->getDebugLoc();
EVT VT = N->getValueType(0);
SelectionDAG &DAG = DCI.DAG;
-
+
APInt SplatBits, SplatUndef;
unsigned SplatBitSize;
bool HasAnyUndefs;
@@ -4507,17 +4507,17 @@ static SDValue PerformANDCombine(SDNode *N,
EVT VbicVT;
SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(),
SplatUndef.getZExtValue(), SplatBitSize,
- DAG, VbicVT, VT.is128BitVector(),
+ DAG, VbicVT, VT.is128BitVector(),
OtherModImm);
if (Val.getNode()) {
SDValue Input =
- DAG.getNode(ISD::BIT_CONVERT, dl, VbicVT, N->getOperand(0));
+ DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0));
SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val);
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vbic);
+ return DAG.getNode(ISD::BITCAST, dl, VT, Vbic);
}
}
}
-
+
return SDValue();
}
@@ -4530,7 +4530,7 @@ static SDValue PerformORCombine(SDNode *N,
DebugLoc dl = N->getDebugLoc();
EVT VT = N->getValueType(0);
SelectionDAG &DAG = DCI.DAG;
-
+
APInt SplatBits, SplatUndef;
unsigned SplatBitSize;
bool HasAnyUndefs;
@@ -4544,9 +4544,9 @@ static SDValue PerformORCombine(SDNode *N,
OtherModImm);
if (Val.getNode()) {
SDValue Input =
- DAG.getNode(ISD::BIT_CONVERT, dl, VorrVT, N->getOperand(0));
+ DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0));
SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val);
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vorr);
+ return DAG.getNode(ISD::BITCAST, dl, VT, Vorr);
}
}
}
@@ -4640,7 +4640,7 @@ static SDValue PerformORCombine(SDNode *N,
DCI.CombineTo(N, Res, false);
}
}
-
+
return SDValue();
}
@@ -4661,14 +4661,14 @@ static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) {
// N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X)
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
- if (Op0.getOpcode() == ISD::BIT_CONVERT)
+ if (Op0.getOpcode() == ISD::BITCAST)
Op0 = Op0.getOperand(0);
- if (Op1.getOpcode() == ISD::BIT_CONVERT)
+ if (Op1.getOpcode() == ISD::BITCAST)
Op1 = Op1.getOperand(0);
if (Op0.getOpcode() == ARMISD::VMOVRRD &&
Op0.getNode() == Op1.getNode() &&
Op0.getResNo() == 0 && Op1.getResNo() == 1)
- return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(),
+ return DAG.getNode(ISD::BITCAST, N->getDebugLoc(),
N->getValueType(0), Op0.getOperand(0));
return SDValue();
}
@@ -4748,7 +4748,7 @@ static SDValue PerformVDUPLANECombine(SDNode *N, SelectionDAG &DAG) {
EVT VT = N->getValueType(0);
// Ignore bit_converts.
- while (Op.getOpcode() == ISD::BIT_CONVERT)
+ while (Op.getOpcode() == ISD::BITCAST)
Op = Op.getOperand(0);
if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM)
return SDValue();
@@ -4763,7 +4763,7 @@ static SDValue PerformVDUPLANECombine(SDNode *N, SelectionDAG &DAG) {
if (EltSize > VT.getVectorElementType().getSizeInBits())
return SDValue();
- return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), VT, Op);
+ return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op);
}
/// getVShiftImm - Check if this is a valid build_vector for the immediate
@@ -4771,7 +4771,7 @@ static SDValue PerformVDUPLANECombine(SDNode *N, SelectionDAG &DAG) {
/// build_vector must have the same constant integer value.
static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
// Ignore bit_converts.
- while (Op.getOpcode() == ISD::BIT_CONVERT)
+ while (Op.getOpcode() == ISD::BITCAST)
Op = Op.getOperand(0);
BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
APInt SplatBits, SplatUndef;
@@ -5935,7 +5935,7 @@ bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
return false;
}
-/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
+/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
/// MemIntrinsicNodes. The associated MachineMemOperands record the alignment
/// specified in the intrinsic calls.
bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
diff --git a/lib/Target/Alpha/AlphaISelLowering.cpp b/lib/Target/Alpha/AlphaISelLowering.cpp
index 9ae06eab75..e266eda743 100644
--- a/lib/Target/Alpha/AlphaISelLowering.cpp
+++ b/lib/Target/Alpha/AlphaISelLowering.cpp
@@ -125,7 +125,7 @@ AlphaTargetLowering::AlphaTargetLowering(TargetMachine &TM)
setOperationAction(ISD::SETCC, MVT::f32, Promote);
- setOperationAction(ISD::BIT_CONVERT, MVT::f32, Promote);
+ setOperationAction(ISD::BITCAST, MVT::f32, Promote);
setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
@@ -616,7 +616,7 @@ SDValue AlphaTargetLowering::LowerOperation(SDValue Op,
"Unhandled SINT_TO_FP type in custom expander!");
SDValue LD;
bool isDouble = Op.getValueType() == MVT::f64;
- LD = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Op.getOperand(0));
+ LD = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op.getOperand(0));
SDValue FP = DAG.getNode(isDouble?AlphaISD::CVTQT_:AlphaISD::CVTQS_, dl,
isDouble?MVT::f64:MVT::f32, LD);
return FP;
@@ -630,7 +630,7 @@ SDValue AlphaTargetLowering::LowerOperation(SDValue Op,
src = DAG.getNode(AlphaISD::CVTTQ_, dl, MVT::f64, src);
- return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, src);
+ return DAG.getNode(ISD::BITCAST, dl, MVT::i64, src);
}
case ISD::ConstantPool: {
ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
@@ -648,11 +648,11 @@ SDValue AlphaTargetLowering::LowerOperation(SDValue Op,
case ISD::GlobalAddress: {
GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
const GlobalValue *GV = GSDN->getGlobal();
- SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i64,
+ SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i64,
GSDN->getOffset());
// FIXME there isn't really any debug info here
- // if (!GV->hasWeakLinkage() && !GV->isDeclaration()
+ // if (!GV->hasWeakLinkage() && !GV->isDeclaration()
// && !GV->hasLinkOnceLinkage()) {
if (GV->hasLocalLinkage()) {
SDValue Hi = DAG.getNode(AlphaISD::GPRelHi, dl, MVT::i64, GA,
@@ -727,7 +727,7 @@ SDValue AlphaTargetLowering::LowerOperation(SDValue Op,
SDValue Val = DAG.getLoad(getPointerTy(), dl, Chain, SrcP,
MachinePointerInfo(SrcS),
false, false, 0);
- SDValue Result = DAG.getStore(Val.getValue(1), dl, Val, DestP,
+ SDValue Result = DAG.getStore(Val.getValue(1), dl, Val, DestP,
MachinePointerInfo(DestS),
false, false, 0);
SDValue NP = DAG.getNode(ISD::ADD, dl, MVT::i64, SrcP,
@@ -779,7 +779,7 @@ void AlphaTargetLowering::ReplaceNodeResults(SDNode *N,
SDValue Chain, DataPtr;
LowerVAARG(N, Chain, DataPtr, DAG);
- SDValue Res = DAG.getLoad(N->getValueType(0), dl, Chain, DataPtr,
+ SDValue Res = DAG.getLoad(N->getValueType(0), dl, Chain, DataPtr,
MachinePointerInfo(),
false, false, 0);
Results.push_back(Res);
diff --git a/lib/Target/CellSPU/SPUISelDAGToDAG.cpp b/lib/Target/CellSPU/SPUISelDAGToDAG.cpp
index 38a13d1874..fe6523662f 100644
--- a/lib/Target/CellSPU/SPUISelDAGToDAG.cpp
+++ b/lib/Target/CellSPU/SPUISelDAGToDAG.cpp
@@ -213,7 +213,7 @@ namespace {
unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
SDValue CGPoolOffset =
SPU::LowerConstantPool(CPIdx, *CurDAG, TM);
-
+
HandleSDNode Dummy(CurDAG->getLoad(vecVT, dl,
CurDAG->getEntryNode(), CGPoolOffset,
MachinePointerInfo::getConstantPool(),
@@ -308,9 +308,9 @@ namespace {
assert(II && "No InstrInfo?");
return new SPUHazardRecognizer(*II);
}
-
+
private:
- SDValue getRC( MVT );
+ SDValue getRC( MVT );
// Include the pieces autogenerated from the target description.
#include "SPUGenDAGISel.inc"
@@ -512,8 +512,8 @@ SPUDAGToDAGISel::DFormAddressPredicate(SDNode *Op, SDValue N, SDValue &Base,
Base = CurDAG->getTargetConstant(0, N.getValueType());
Index = N;
return true;
- } else if (Opc == ISD::Register
- ||Opc == ISD::CopyFromReg
+ } else if (Opc == ISD::Register
+ ||Opc == ISD::CopyFromReg
||Opc == ISD::UNDEF
||Opc == ISD::Constant) {
unsigned OpOpc = Op->getOpcode();
@@ -574,7 +574,7 @@ SPUDAGToDAGISel::SelectXFormAddr(SDNode *Op, SDValue N, SDValue &Base,
}
/*!
- Utility function to use with COPY_TO_REGCLASS instructions. Returns a SDValue
+ Utility function to use with COPY_TO_REGCLASS instructions. Returns a SDValue
to be used as the last parameter of a
CurDAG->getMachineNode(COPY_TO_REGCLASS,..., ) function call
\arg VT the value type for which we want a register class
@@ -582,19 +582,19 @@ CurDAG->getMachineNode(COPY_TO_REGCLASS,..., ) function call
SDValue SPUDAGToDAGISel::getRC( MVT VT ) {
switch( VT.SimpleTy ) {
case MVT::i8:
- return CurDAG->getTargetConstant(SPU::R8CRegClass.getID(), MVT::i32);
- break;
+ return CurDAG->getTargetConstant(SPU::R8CRegClass.getID(), MVT::i32);
+ break;
case MVT::i16:
- return CurDAG->getTargetConstant(SPU::R16CRegClass.getID(), MVT::i32);
- break;
+ return CurDAG->getTargetConstant(SPU::R16CRegClass.getID(), MVT::i32);
+ break;
case MVT::i32:
- return CurDAG->getTargetConstant(SPU::R32CRegClass.getID(), MVT::i32);
- break;
+ return CurDAG->getTargetConstant(SPU::R32CRegClass.getID(), MVT::i32);
+ break;
case MVT::f32:
- return CurDAG->getTargetConstant(SPU::R32FPRegClass.getID(), MVT::i32);
- break;
+ return CurDAG->getTargetConstant(SPU::R32FPRegClass.getID(), MVT::i32);
+ break;
case MVT::i64:
- return CurDAG->getTargetConstant(SPU::R64CRegClass.getID(), MVT::i32);
+ return CurDAG->getTargetConstant(SPU::R64CRegClass.getID(), MVT::i32);
break;
case MVT::v16i8:
case MVT::v8i16:
@@ -602,7 +602,7 @@ SDValue SPUDAGToDAGISel::getRC( MVT VT ) {
case MVT::v4f32:
case MVT::v2i64:
case MVT::v2f64:
- return CurDAG->getTargetConstant(SPU::VECREGRegClass.getID(), MVT::i32);
+ return CurDAG->getTargetConstant(SPU::VECREGRegClass.getID(), MVT::i32);
break;
default:
assert( false && "add a new case here" );
@@ -654,7 +654,7 @@ SPUDAGToDAGISel::Select(SDNode *N) {
EVT Op0VT = Op0.getValueType();
EVT Op0VecVT = EVT::getVectorVT(*CurDAG->getContext(),
Op0VT, (128 / Op0VT.getSizeInBits()));
- EVT OpVecVT = EVT::getVectorVT(*CurDAG->getContext(),
+ EVT OpVecVT = EVT::getVectorVT(*CurDAG->getContext(),
OpVT, (128 / OpVT.getSizeInBits()));
SDValue shufMask;
@@ -688,19 +688,19 @@ SPUDAGToDAGISel::Select(SDNode *N) {
}
SDNode *shufMaskLoad = emitBuildVector(shufMask.getNode());
-
+
HandleSDNode PromoteScalar(CurDAG->getNode(SPUISD::PREFSLOT2VEC, dl,
Op0VecVT, Op0));
-
+
SDValue PromScalar;
if (SDNode *N = SelectCode(PromoteScalar.getValue().getNode()))
PromScalar = SDValue(N, 0);
else
PromScalar = PromoteScalar.getValue();
-
+
SDValue zextShuffle =
CurDAG->getNode(SPUISD::SHUFB, dl, OpVecVT,
- PromScalar, PromScalar,
+ PromScalar, PromScalar,
SDValue(shufMaskLoad, 0));
HandleSDNode Dummy2(zextShuffle);
@@ -710,7 +710,7 @@ SPUDAGToDAGISel::Select(SDNode *N) {
zextShuffle = Dummy2.getValue();
HandleSDNode Dummy(CurDAG->getNode(SPUISD::VEC2PREFSLOT, dl, OpVT,
zextShuffle));
-
+
CurDAG->ReplaceAllUsesWith(N, Dummy.getValue().getNode());
SelectCode(Dummy.getValue().getNode());
return Dummy.getValue().getNode();
@@ -721,7 +721,7 @@ SPUDAGToDAGISel::Select(SDNode *N) {
HandleSDNode Dummy(CurDAG->getNode(SPUISD::ADD64_MARKER, dl, OpVT,
N->getOperand(0), N->getOperand(1),
SDValue(CGLoad, 0)));
-
+
CurDAG->ReplaceAllUsesWith(N, Dummy.getValue().getNode());
if (SDNode *N = SelectCode(Dummy.getValue().getNode()))
return N;
@@ -733,7 +733,7 @@ SPUDAGToDAGISel::Select(SDNode *N) {
HandleSDNode Dummy(CurDAG->getNode(SPUISD::SUB64_MARKER, dl, OpVT,
N->getOperand(0), N->getOperand(1),
SDValue(CGLoad, 0)));
-
+
CurDAG->ReplaceAllUsesWith(N, Dummy.getValue().getNode());
if (SDNode *N = SelectCode(Dummy.getValue().getNode()))
return N;
@@ -847,12 +847,12 @@ SPUDAGToDAGISel::Select(SDNode *N) {
SDValue Arg = N->getOperand(0);
SDValue Chain = N->getOperand(1);
SDNode *Result;
-
+
Result = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl, VT,
MVT::Other, Arg,
getRC( VT.getSimpleVT()), Chain);
return Result;
-
+
} else if (Opc == SPUISD::IndirectAddr) {
// Look at the operands: SelectCode() will catch the cases that aren't
// specifically handled here.
@@ -878,10 +878,10 @@ SPUDAGToDAGISel::Select(SDNode *N) {
NewOpc = SPU::AIr32;
Ops[1] = Op1;
} else {
- Ops[1] = SDValue(CurDAG->getMachineNode(SPU::ILr32, dl,
- N->getValueType(0),
+ Ops[1] = SDValue(CurDAG->getMachineNode(SPU::ILr32, dl,
+ N->getValueType(0),
Op1),
- 0);
+ 0);
}
}
Ops[0] = Op0;
@@ -913,7 +913,7 @@ SPUDAGToDAGISel::Select(SDNode *N) {
SDNode *
SPUDAGToDAGISel::SelectSHLi64(SDNode *N, EVT OpVT) {
SDValue Op0 = N->getOperand(0);
- EVT VecVT = EVT::getVectorVT(*CurDAG->getContext(),
+ EVT VecVT = EVT::getVectorVT(*CurDAG->getContext(),
OpVT, (128 / OpVT.getSizeInBits()));
SDValue ShiftAmt = N->getOperand(1);
EVT ShiftAmtVT = ShiftAmt.getValueType();
@@ -966,7 +966,7 @@ SPUDAGToDAGISel::SelectSHLi64(SDNode *N, EVT OpVT) {
SDValue(Shift, 0), SDValue(Bits, 0));
}
- return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
+ return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
OpVT, SDValue(Shift, 0), getRC(MVT::i64));
}
@@ -1035,7 +1035,7 @@ SPUDAGToDAGISel::SelectSRLi64(SDNode *N, EVT OpVT) {
SDValue(Shift, 0), SDValue(Bits, 0));
}
- return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
+ return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
OpVT, SDValue(Shift, 0), getRC(MVT::i64));
}
@@ -1050,14 +1050,14 @@ SPUDAGToDAGISel::SelectSRLi64(SDNode *N, EVT OpVT) {
SDNode *
SPUDAGToDAGISel::SelectSRAi64(SDNode *N, EVT OpVT) {
// Promote Op0 to vector
- EVT VecVT = EVT::getVectorVT(*CurDAG->getContext(),
+ EVT VecVT = EVT::getVectorVT(*CurDAG->getContext(),
OpVT, (128 / OpVT.getSizeInBits()));
SDValue ShiftAmt = N->getOperand(1);
EVT ShiftAmtVT = ShiftAmt.getValueType();
DebugLoc dl = N->getDebugLoc();
SDNode *VecOp0 =
- CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
+ CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
VecVT, N->getOperand(0), getRC(MVT::v2i64));
SDValue SignRotAmt = CurDAG->getTargetConstant(31, ShiftAmtVT);
@@ -1065,7 +1065,7 @@ SPUDAGToDAGISel::SelectSRAi64(SDNode *N, EVT OpVT) {
CurDAG->getMachineNode(SPU::ROTMAIv2i64_i32, dl, MVT::v2i64,
SDValue(VecOp0, 0), SignRotAmt);
SDNode *UpperHalfSign =
- CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
+ CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
MVT::i32, SDValue(SignRot, 0), getRC(MVT::i32));
SDNode *UpperHalfSignMask =
@@ -1113,7 +1113,7 @@ SPUDAGToDAGISel::SelectSRAi64(SDNode *N, EVT OpVT) {
SDValue(Shift, 0), SDValue(NegShift, 0));
}
- return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
+ return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
OpVT, SDValue(Shift, 0), getRC(MVT::i64));
}
@@ -1135,7 +1135,7 @@ SDNode *SPUDAGToDAGISel::SelectI64Constant(uint64_t Value64, EVT OpVT,
// Here's where it gets interesting, because we have to parse out the
// subtree handed back in i64vec:
- if (i64vec.getOpcode() == ISD::BIT_CONVERT) {
+ if (i64vec.getOpcode() == ISD::BITCAST) {
// The degenerate case where the upper and lower bits in the splat are
// identical:
SDValue Op0 = i64vec.getOperand(0);
@@ -1149,7 +1149,7 @@ SDNode *SPUDAGToDAGISel::SelectI64Constant(uint64_t Value64, EVT OpVT,
SDValue rhs = i64vec.getOperand(1);
SDValue shufmask = i64vec.getOperand(2);
- if (lhs.getOpcode() == ISD::BIT_CONVERT) {
+ if (lhs.getOpcode() == ISD::BITCAST) {
ReplaceUses(lhs, lhs.getOperand(0));
lhs = lhs.getOperand(0);
}
@@ -1158,7 +1158,7 @@ SDNode *SPUDAGToDAGISel::SelectI64Constant(uint64_t Value64, EVT OpVT,
? lhs.getNode()
: emitBuildVector(lhs.getNode()));
- if (rhs.getOpcode() == ISD::BIT_CONVERT) {
+ if (rhs.getOpcode() == ISD::BITCAST) {
ReplaceUses(rhs, rhs.getOperand(0));
rhs = rhs.getOperand(0);
}
@@ -1167,7 +1167,7 @@ SDNode *SPUDAGToDAGISel::SelectI64Constant(uint64_t Value64, EVT OpVT,
? rhs.getNode()
: emitBuildVector(rhs.getNode()));
- if (shufmask.getOpcode() == ISD::BIT_CONVERT) {
+ if (shufmask.getOpcode() == ISD::BITCAST) {
ReplaceUses(shufmask, shufmask.getOperand(0));
shufmask = shufmask.getOperand(0);
}
@@ -1183,8 +1183,8 @@ SDNode *SPUDAGToDAGISel::SelectI64Constant(uint64_t Value64, EVT OpVT,
HandleSDNode Dummy(shufNode);
SDNode *SN = SelectCode(Dummy.getValue().getNode());
if (SN == 0) SN = Dummy.getValue().getNode();
-
- return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
+
+ return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
OpVT, SDValue(SN, 0), getRC(MVT::i64));
} else if (i64vec.getOpcode() == ISD::BUILD_VECTOR) {
return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl, OpVT,
diff --git a/lib/Target/CellSPU/SPUISelLowering.cpp b/lib/Target/CellSPU/SPUISelLowering.cpp
index 5a1e23a374..014fbbedb7 100644
--- a/lib/Target/CellSPU/SPUISelLowering.cpp
+++ b/lib/Target/CellSPU/SPUISelLowering.cpp
@@ -45,9 +45,9 @@ namespace {
// Byte offset of the preferred slot (counted from the MSB)
int prefslotOffset(EVT VT) {
int retval=0;
- if (VT==MVT::i1) retval=3;
- if (VT==MVT::i8) retval=3;
- if (VT==MVT::i16) retval=2;
+ if (VT==MVT::i1) retval=3;
+ if (VT==MVT::i8) retval=3;
+ if (VT==MVT::i16) retval=2;
return retval;
}