Diffstat (limited to 'lib/Target/CellSPU/SPUISelLowering.cpp')
-rw-r--r--  lib/Target/CellSPU/SPUISelLowering.cpp  184
1 file changed, 92 insertions, 92 deletions
diff --git a/lib/Target/CellSPU/SPUISelLowering.cpp b/lib/Target/CellSPU/SPUISelLowering.cpp
index 5a1e23a374..014fbbedb7 100644
--- a/lib/Target/CellSPU/SPUISelLowering.cpp
+++ b/lib/Target/CellSPU/SPUISelLowering.cpp
@@ -45,9 +45,9 @@ namespace {
// Byte offset of the preferred slot (counted from the MSB)
int prefslotOffset(EVT VT) {
int retval=0;
- if (VT==MVT::i1) retval=3;
- if (VT==MVT::i8) retval=3;
- if (VT==MVT::i16) retval=2;
+ if (VT==MVT::i1) retval=3;
+ if (VT==MVT::i8) retval=3;
+ if (VT==MVT::i16) retval=2;
return retval;
}
@@ -348,10 +348,10 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
- setOperationAction(ISD::BIT_CONVERT, MVT::i32, Legal);
- setOperationAction(ISD::BIT_CONVERT, MVT::f32, Legal);
- setOperationAction(ISD::BIT_CONVERT, MVT::i64, Legal);
- setOperationAction(ISD::BIT_CONVERT, MVT::f64, Legal);
+ setOperationAction(ISD::BITCAST, MVT::i32, Legal);
+ setOperationAction(ISD::BITCAST, MVT::f32, Legal);
+ setOperationAction(ISD::BITCAST, MVT::i64, Legal);
+ setOperationAction(ISD::BITCAST, MVT::f64, Legal);
// We cannot sextinreg(i1). Expand to shifts.
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
@@ -550,13 +550,13 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
(128 / InVT.getSizeInBits()));
// two sanity checks
- assert( LN->getAddressingMode() == ISD::UNINDEXED
+ assert( LN->getAddressingMode() == ISD::UNINDEXED
&& "we should get only UNINDEXED adresses");
// clean aligned loads can be selected as-is
if (InVT.getSizeInBits() == 128 && alignment == 16)
return SDValue();
- // Get pointerinfos to the memory chunk(s) that contain the data to load
+ // Get pointerinfos to the memory chunk(s) that contain the data to load
uint64_t mpi_offset = LN->getPointerInfo().Offset;
mpi_offset -= mpi_offset%16;
MachinePointerInfo lowMemPtr(LN->getPointerInfo().V, mpi_offset);
@@ -649,7 +649,7 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
SDValue low = DAG.getLoad(MVT::i128, dl, the_chain, basePtr,
lowMemPtr,
LN->isVolatile(), LN->isNonTemporal(), 16);
-
+
// When the size is not greater than alignment we get all data with just
// one load
if (alignment >= InVT.getSizeInBits()/8) {
@@ -662,30 +662,30 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
// Convert the loaded v16i8 vector to the appropriate vector type
// specified by the operand:
- EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
+ EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
InVT, (128 / InVT.getSizeInBits()));
result = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, InVT,
- DAG.getNode(ISD::BIT_CONVERT, dl, vecVT, result));
+ DAG.getNode(ISD::BITCAST, dl, vecVT, result));
}
// When alignment is less than the size, we might need (known only at
// run-time) two loads
- // TODO: if the memory address is composed only from constants, we have
+ // TODO: if the memory address is composed only from constants, we have
// extra kowledge, and might avoid the second load
else {
// storage position offset from lower 16 byte aligned memory chunk
- SDValue offset = DAG.getNode(ISD::AND, dl, MVT::i32,
+ SDValue offset = DAG.getNode(ISD::AND, dl, MVT::i32,
basePtr, DAG.getConstant( 0xf, MVT::i32 ) );
// 16 - offset
- SDValue offset_compl = DAG.getNode(ISD::SUB, dl, MVT::i32,
+ SDValue offset_compl = DAG.getNode(ISD::SUB, dl, MVT::i32,
DAG.getConstant( 16, MVT::i32),
offset );
- // get a registerfull of ones. (this implementation is a workaround: LLVM
+ // get a registerfull of ones. (this implementation is a workaround: LLVM
// cannot handle 128 bit signed int constants)
SDValue ones = DAG.getConstant(-1, MVT::v4i32 );
- ones = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, ones);
+ ones = DAG.getNode(ISD::BITCAST, dl, MVT::i128, ones);
SDValue high = DAG.getLoad(MVT::i128, dl, the_chain,
- DAG.getNode(ISD::ADD, dl, PtrVT,
+ DAG.getNode(ISD::ADD, dl, PtrVT,
basePtr,
DAG.getConstant(16, PtrVT)),
highMemPtr,
@@ -695,20 +695,20 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
high.getValue(1));
// Shift the (possible) high part right to compensate the misalignemnt.
- // if there is no highpart (i.e. value is i64 and offset is 4), this
+ // if there is no highpart (i.e. value is i64 and offset is 4), this
// will zero out the high value.
- high = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, high,
+ high = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, high,
DAG.getNode(ISD::SUB, dl, MVT::i32,
DAG.getConstant( 16, MVT::i32),
offset
));
-
+
// Shift the low similarily
// TODO: add SPUISD::SHL_BYTES
low = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, low, offset );
// Merge the two parts
- result = DAG.getNode(ISD::BIT_CONVERT, dl, vecVT,
+ result = DAG.getNode(ISD::BITCAST, dl, vecVT,
DAG.getNode(ISD::OR, dl, MVT::i128, low, high));
if (!InVT.isVector()) {
@@ -759,7 +759,7 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
SDValue result;
EVT vecVT = StVT.isVector()? StVT: EVT::getVectorVT(*DAG.getContext(), StVT,
(128 / StVT.getSizeInBits()));
- // Get pointerinfos to the memory chunk(s) that contain the data to load
+ // Get pointerinfos to the memory chunk(s) that contain the data to load
uint64_t mpi_offset = SN->getPointerInfo().Offset;
mpi_offset -= mpi_offset%16;
MachinePointerInfo lowMemPtr(SN->getPointerInfo().V, mpi_offset);
@@ -767,7 +767,7 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
// two sanity checks
- assert( SN->getAddressingMode() == ISD::UNINDEXED
+ assert( SN->getAddressingMode() == ISD::UNINDEXED
&& "we should get only UNINDEXED adresses");
// clean aligned loads can be selected as-is
if (StVT.getSizeInBits() == 128 && alignment == 16)
@@ -876,12 +876,12 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
SDValue insertEltOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, vecVT,
insertEltOffs);
- SDValue vectorizeOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, vecVT,
+ SDValue vectorizeOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, vecVT,
theValue);
result = DAG.getNode(SPUISD::SHUFB, dl, vecVT,
vectorizeOp, low,
- DAG.getNode(ISD::BIT_CONVERT, dl,
+ DAG.getNode(ISD::BITCAST, dl,
MVT::v4i32, insertEltOp));
result = DAG.getStore(the_chain, dl, result, basePtr,
@@ -892,59 +892,59 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
}
// do the store when it might cross the 16 byte memory access boundary.
else {
- // TODO issue a warning if SN->isVolatile()== true? This is likely not
+ // TODO issue a warning if SN->isVolatile()== true? This is likely not
// what the user wanted.
-
+
// address offset from nearest lower 16byte alinged address
- SDValue offset = DAG.getNode(ISD::AND, dl, MVT::i32,
- SN->getBasePtr(),
+ SDValue offset = DAG.getNode(ISD::AND, dl, MVT::i32,
+ SN->getBasePtr(),
DAG.getConstant(0xf, MVT::i32));
// 16 - offset
- SDValue offset_compl = DAG.getNode(ISD::SUB, dl, MVT::i32,
+ SDValue offset_compl = DAG.getNode(ISD::SUB, dl, MVT::i32,
DAG.getConstant( 16, MVT::i32),
offset);
- SDValue hi_shift = DAG.getNode(ISD::SUB, dl, MVT::i32,
+ SDValue hi_shift = DAG.getNode(ISD::SUB, dl, MVT::i32,
DAG.getConstant( VT.getSizeInBits()/8,
MVT::i32),
offset_compl);
// 16 - sizeof(Value)
- SDValue surplus = DAG.getNode(ISD::SUB, dl, MVT::i32,
+ SDValue surplus = DAG.getNode(ISD::SUB, dl, MVT::i32,
DAG.getConstant( 16, MVT::i32),
DAG.getConstant( VT.getSizeInBits()/8,
MVT::i32));
- // get a registerfull of ones
+ // get a registerfull of ones
SDValue ones = DAG.getConstant(-1, MVT::v4i32);
- ones = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, ones);
+ ones = DAG.getNode(ISD::BITCAST, dl, MVT::i128, ones);
// Create the 128 bit masks that have ones where the data to store is
// located.
- SDValue lowmask, himask;
- // if the value to store don't fill up the an entire 128 bits, zero
+ SDValue lowmask, himask;
+ // if the value to store don't fill up the an entire 128 bits, zero
// out the last bits of the mask so that only the value we want to store
- // is masked.
+ // is masked.
// this is e.g. in the case of store i32, align 2
if (!VT.isVector()){
Value = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, Value);
lowmask = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, ones, surplus);
- lowmask = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, lowmask,
+ lowmask = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, lowmask,
surplus);
- Value = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, Value);
+ Value = DAG.getNode(ISD::BITCAST, dl, MVT::i128, Value);
Value = DAG.getNode(ISD::AND, dl, MVT::i128, Value, lowmask);
-
+
}
else {
lowmask = ones;
- Value = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, Value);
+ Value = DAG.getNode(ISD::BITCAST, dl, MVT::i128, Value);
}
- // this will zero, if there are no data that goes to the high quad
- himask = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, lowmask,
+ // this will zero, if there are no data that goes to the high quad
+ himask = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, lowmask,
offset_compl);
- lowmask = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, lowmask,
+ lowmask = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, lowmask,
offset);
-
+
// Load in the old data and zero out the parts that will be overwritten with
// the new data to store.
- SDValue hi = DAG.getLoad(MVT::i128, dl, the_chain,
+ SDValue hi = DAG.getLoad(MVT::i128, dl, the_chain,
DAG.getNode(ISD::ADD, dl, PtrVT, basePtr,
DAG.getConstant( 16, PtrVT)),
highMemPtr,
@@ -952,40 +952,40 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
the_chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, low.getValue(1),
hi.getValue(1));
- low = DAG.getNode(ISD::AND, dl, MVT::i128,
- DAG.getNode( ISD::BIT_CONVERT, dl, MVT::i128, low),
+ low = DAG.getNode(ISD::AND, dl, MVT::i128,
+ DAG.getNode( ISD::BITCAST, dl, MVT::i128, low),
DAG.getNode( ISD::XOR, dl, MVT::i128, lowmask, ones));
- hi = DAG.getNode(ISD::AND, dl, MVT::i128,
- DAG.getNode( ISD::BIT_CONVERT, dl, MVT::i128, hi),
+ hi = DAG.getNode(ISD::AND, dl, MVT::i128,
+ DAG.getNode( ISD::BITCAST, dl, MVT::i128, hi),
DAG.getNode( ISD::XOR, dl, MVT::i128, himask, ones));
// Shift the Value to store into place. rlow contains the parts that go to
- // the lower memory chunk, rhi has the parts that go to the upper one.
+ // the lower memory chunk, rhi has the parts that go to the upper one.
SDValue rlow = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, Value, offset);
rlow = DAG.getNode(ISD::AND, dl, MVT::i128, rlow, lowmask);
- SDValue rhi = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, Value,
+ SDValue rhi = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, Value,
offset_compl);
// Merge the old data and the new data and store the results
- // Need to convert vectors here to integer as 'OR'ing floats assert
- rlow = DAG.getNode(ISD::OR, dl, MVT::i128,
- DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, low),
- DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, rlow));
- rhi = DAG.getNode(ISD::OR, dl, MVT::i128,
- DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, hi),
- DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, rhi));
+ // Need to convert vectors here to integer as 'OR'ing floats assert
+ rlow = DAG.getNode(ISD::OR, dl, MVT::i128,
+ DAG.getNode(ISD::BITCAST, dl, MVT::i128, low),
+ DAG.getNode(ISD::BITCAST, dl, MVT::i128, rlow));
+ rhi = DAG.getNode(ISD::OR, dl, MVT::i128,
+ DAG.getNode(ISD::BITCAST, dl, MVT::i128, hi),
+ DAG.getNode(ISD::BITCAST, dl, MVT::i128, rhi));
low = DAG.getStore(the_chain, dl, rlow, basePtr,
lowMemPtr,
SN->isVolatile(), SN->isNonTemporal(), 16);
- hi = DAG.getStore(the_chain, dl, rhi,
+ hi = DAG.getStore(the_chain, dl, rhi,
DAG.getNode(ISD::ADD, dl, PtrVT, basePtr,
DAG.getConstant( 16, PtrVT)),
highMemPtr,
SN->isVolatile(), SN->isNonTemporal(), 16);
result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, low.getValue(0),
hi.getValue(0));
- }
+ }
return result;
}
@@ -1095,7 +1095,7 @@ LowerConstantFP(SDValue Op, SelectionDAG &DAG) {
SDValue T = DAG.getConstant(dbits, MVT::i64);
SDValue Tvec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T);
return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
- DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Tvec));
+ DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Tvec));
}
return SDValue();
@@ -1194,8 +1194,8 @@ SPUTargetLowering::LowerFormalArguments(SDValue Chain,
// vararg handling:
if (isVarArg) {
- // FIXME: we should be able to query the argument registers from
- // tablegen generated code.
+ // FIXME: we should be able to query the argument registers from
+ // tablegen generated code.
static const unsigned ArgRegs[] = {
SPU::R3, SPU::R4, SPU::R5, SPU::R6, SPU::R7, SPU::R8, SPU::R9,
SPU::R10, SPU::R11, SPU::R12, SPU::R13, SPU::R14, SPU::R15, SPU::R16,
@@ -1270,10 +1270,10 @@ SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
- *DAG.getContext());
+ *DAG.getContext());
// FIXME: allow for other calling conventions
CCInfo.AnalyzeCallOperands(Outs, CCC_SPU);
-
+
const unsigned NumArgRegs = ArgLocs.size();
@@ -1438,7 +1438,7 @@ SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// If the call has results, copy the values out of the ret val registers.
for (unsigned i = 0; i != RVLocs.size(); ++i) {
CCValAssign VA = RVLocs[i];
-
+
SDValue Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
InFlag);
Chain = Val.getValue(1);
@@ -1671,7 +1671,7 @@ LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
&& "LowerBUILD_VECTOR: Unexpected floating point vector element.");
// NOTE: pretend the constant is an integer. LLVM won't load FP constants
SDValue T = DAG.getConstant(Value32, MVT::i32);
- return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32,
+ return DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, T,T,T,T));
break;
}
@@ -1681,7 +1681,7 @@ LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
&& "LowerBUILD_VECTOR: 64-bit float vector size > 8 bytes.");
// NOTE: pretend the constant is an integer. LLVM won't load FP constants
SDValue T = DAG.getConstant(f64val, MVT::i64);
- return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64,
+ return DAG.getNode(ISD::BITCAST, dl, MVT::v2f64,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T));
break;
}
@@ -1691,7 +1691,7 @@ LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
SmallVector<SDValue, 8> Ops;
Ops.assign(8, DAG.getConstant(Value16, MVT::i16));
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
+ return DAG.getNode(ISD::BITCAST, dl, VT,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i16, &Ops[0], Ops.size()));
}
case MVT::v8i16: {
@@ -1725,7 +1725,7 @@ SPU::LowerV2I64Splat(EVT OpVT, SelectionDAG& DAG, uint64_t SplatVal,
if (upper == lower) {
// Magic constant that can be matched by IL, ILA, et. al.
SDValue Val = DAG.getTargetConstant(upper, MVT::i32);
- return DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
+ return DAG.getNode(ISD::BITCAST, dl, OpVT,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
Val, Val, Val, Val));
} else {
@@ -1754,7 +1754,7 @@ SPU::LowerV2I64Splat(EVT OpVT, SelectionDAG& DAG, uint64_t SplatVal,
// Create lower vector if not a special pattern
if (!lower_special) {
SDValue LO32C = DAG.getConstant(lower, MVT::i32);
- LO32 = DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
+ LO32 = DAG.getNode(ISD::BITCAST, dl, OpVT,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
LO32C, LO32C, LO32C, LO32C));
}
@@ -1762,7 +1762,7 @@ SPU::LowerV2I64Splat(EVT OpVT, SelectionDAG& DAG, uint64_t SplatVal,
// Create upper vector if not a special pattern
if (!upper_special) {
SDValue HI32C = DAG.getConstant(upper, MVT::i32);
- HI32 = DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
+ HI32 = DAG.getNode(ISD::BITCAST, dl, OpVT,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
HI32C, HI32C, HI32C, HI32C));
}
@@ -1846,7 +1846,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
if (EltVT == MVT::i8) {
V2EltIdx0 = 16;
- maskVT = MVT::v16i8;
+ maskVT = MVT::v16i8;
} else if (EltVT == MVT::i16) {
V2EltIdx0 = 8;
maskVT = MVT::v8i16;
@@ -1862,7 +1862,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
for (unsigned i = 0; i != MaxElts; ++i) {
if (SVN->getMaskElt(i) < 0)
continue;
-
+
unsigned SrcElt = SVN->getMaskElt(i);
if (monotonic) {
@@ -1909,7 +1909,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
DAG.getRegister(SPU::R1, PtrVT),
DAG.getConstant(V2EltOffset, MVT::i32));
- SDValue ShufMaskOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl,
+ SDValue ShufMaskOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl,
maskVT, Pointer);
// Use shuffle mask in SHUFB synthetic instruction:
@@ -2173,7 +2173,7 @@ static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
DAG.getRegister(SPU::R1, PtrVT),
DAG.getConstant(Offset, PtrVT));
// widen the mask when dealing with half vectors
- EVT maskVT = EVT::getVectorVT(*(DAG.getContext()), VT.getVectorElementType(),
+ EVT maskVT = EVT::getVectorVT(*(DAG.getContext()), VT.getVectorElementType(),
128/ VT.getVectorElementType().getSizeInBits());
SDValue ShufMask = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, maskVT, Pointer);
@@ -2181,7 +2181,7 @@ static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
DAG.getNode(SPUISD::SHUFB, dl, VT,
DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, ValOp),
VecOp,
- DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4i32, ShufMask));
+ DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, ShufMask));
return result;
}
@@ -2301,12 +2301,12 @@ LowerByteImmed(SDValue Op, SelectionDAG &DAG) {
ConstVec = Op.getOperand(0);
Arg = Op.getOperand(1);
if (ConstVec.getNode()->getOpcode() != ISD::BUILD_VECTOR) {
- if (ConstVec.getNode()->getOpcode() == ISD::BIT_CONVERT) {
+ if (ConstVec.getNode()->getOpcode() == ISD::BITCAST) {
ConstVec = ConstVec.getOperand(0);
} else {
ConstVec = Op.getOperand(1);
Arg = Op.getOperand(0);
- if (ConstVec.getNode()->getOpcode() == ISD::BIT_CONVERT) {
+ if (ConstVec.getNode()->getOpcode() == ISD::BITCAST) {
ConstVec = ConstVec.getOperand(0);
}
}
@@ -2347,7 +2347,7 @@ LowerByteImmed(SDValue Op, SelectionDAG &DAG) {
*/
static SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) {
EVT VT = Op.getValueType();
- EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
+ EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
VT, (128 / VT.getSizeInBits()));
DebugLoc dl = Op.getDebugLoc();
@@ -2523,7 +2523,7 @@ static SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG,
// Take advantage of the fact that (truncate (sra arg, 32)) is efficiently
// selected to a NOP:
- SDValue i64lhs = DAG.getNode(ISD::BIT_CONVERT, dl, IntVT, lhs);
+ SDValue i64lhs = DAG.getNode(ISD::BITCAST, dl, IntVT, lhs);
SDValue lhsHi32 =
DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
DAG.getNode(ISD::SRL, dl, IntVT,
@@ -2557,7 +2557,7 @@ static SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG,
ISD::SETGT));
}
- SDValue i64rhs = DAG.getNode(ISD::BIT_CONVERT, dl, IntVT, rhs);
+ SDValue i64rhs = DAG.getNode(ISD::BITCAST, dl, IntVT, rhs);
SDValue rhsHi32 =
DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
DAG.getNode(ISD::SRL, dl, IntVT,
@@ -2671,7 +2671,7 @@ static SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG)
// Type to truncate to
EVT VT = Op.getValueType();
MVT simpleVT = VT.getSimpleVT();
- EVT VecVT = EVT::getVectorVT(*DAG.getContext(),
+ EVT VecVT = EVT::getVectorVT(*DAG.getContext(),
VT, (128 / VT.getSizeInBits()));
DebugLoc dl = Op.getDebugLoc();
@@ -2745,16 +2745,16 @@ static SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG)
DAG.getConstant(31, MVT::i32));
// reinterpret as a i128 (SHUFB requires it). This gets lowered away.
- SDValue extended = SDValue(DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
+ SDValue extended = SDValue(DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
dl, Op0VT, Op0,
DAG.getTargetConstant(
- SPU::GPRCRegClass.getID(),
+ SPU::GPRCRegClass.getID(),
MVT::i32)), 0);
// Shuffle bytes - Copy the sign bits into the upper 64 bits
// and the input value into the lower 64 bits.
SDValue extShuffle = DAG.getNode(SPUISD::SHUFB, dl, mvt,
extended, sraVal, shufMask);
- return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, extShuffle);
+ return DAG.getNode(ISD::BITCAST, dl, MVT::i128, extShuffle);
}
//! Custom (target-specific) lowering entry point
@@ -3234,14 +3234,14 @@ bool SPUTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
return isInt<10>(Imm);
}
-bool
-SPUTargetLowering::isLegalAddressingMode(const AddrMode &AM,
+bool
+SPUTargetLowering::isLegalAddressingMode(const AddrMode &AM,
const Type * ) const{
- // A-form: 18bit absolute address.
+ // A-form: 18bit absolute address.
if (AM.BaseGV && !AM.HasBaseReg && AM.Scale == 0 && AM.BaseOffs == 0)
return true;
-
+
// D-form: reg + 14bit offset
if (AM.BaseGV ==0 && AM.HasBaseReg && AM.Scale == 0 && isInt<14>(AM.BaseOffs))
return true;