Diffstat (limited to 'lib')
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAG.cpp        5
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp    7
-rw-r--r--  lib/Target/ARM/ARMFastISel.cpp                  13
-rw-r--r--  lib/Target/ARM/ARMISelDAGToDAG.cpp               5
-rw-r--r--  lib/Target/ARM/ARMISelLowering.cpp               2
-rw-r--r--  lib/Target/CellSPU/SPUISelLowering.cpp           2
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp               2
7 files changed, 16 insertions, 20 deletions
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 3019e764e0..0840bd740b 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -3131,8 +3131,7 @@ static SDValue getMemsetStringVal(EVT VT, DebugLoc dl, SelectionDAG &DAG,
   if (Str.empty()) {
     if (VT.isInteger())
       return DAG.getConstant(0, VT);
-    else if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
-             VT.getSimpleVT().SimpleTy == MVT::f64)
+    else if (VT == MVT::f32 || VT == MVT::f64)
       return DAG.getConstantFP(0.0, VT);
     else if (VT.isVector()) {
       unsigned NumElts = VT.getVectorNumElements();
@@ -5428,7 +5427,7 @@ const EVT *SDNode::getValueTypeList(EVT VT) {
     sys::SmartScopedLock<true> Lock(*VTMutex);
     return &(*EVTs->insert(VT).first);
   } else {
-    assert(VT.getSimpleVT().SimpleTy < MVT::LAST_VALUETYPE &&
+    assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
            "Value type out of range!");
     return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
   }
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 26a672c034..304e1bc026 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -2232,7 +2232,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
     }
 
     case OPC_SwitchType: {
-      MVT::SimpleValueType CurNodeVT = N.getValueType().getSimpleVT().SimpleTy;
+      MVT CurNodeVT = N.getValueType().getSimpleVT();
       unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
       unsigned CaseSize;
       while (1) {
@@ -2242,10 +2242,9 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
           CaseSize = GetVBR(CaseSize, MatcherTable, MatcherIndex);
         if (CaseSize == 0) break;
 
-        MVT::SimpleValueType CaseVT =
-          (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
+        MVT CaseVT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
         if (CaseVT == MVT::iPTR)
-          CaseVT = TLI.getPointerTy().SimpleTy;
+          CaseVT = TLI.getPointerTy();
 
         // If the VT matches, then we will execute this case.
         if (CurNodeVT == CaseVT)
diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp
index eca3d51b0a..0982ca05a9 100644
--- a/lib/Target/ARM/ARMFastISel.cpp
+++ b/lib/Target/ARM/ARMFastISel.cpp
@@ -371,7 +371,7 @@ unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
 // TODO: Don't worry about 64-bit now, but when this is fixed remove the
 // checks from the various callers.
 unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
-  if (VT.getSimpleVT().SimpleTy == MVT::f64) return 0;
+  if (VT == MVT::f64) return 0;
 
   unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
@@ -381,7 +381,7 @@ unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
 }
 
 unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
-  if (VT.getSimpleVT().SimpleTy == MVT::i64) return 0;
+  if (VT == MVT::i64) return 0;
 
   unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
@@ -395,7 +395,7 @@ unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
 // the combined constant into an FP reg.
 unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
   const APFloat Val = CFP->getValueAPF();
-  bool is64bit = VT.getSimpleVT().SimpleTy == MVT::f64;
+  bool is64bit = VT == MVT::f64;
 
   // This checks to see if we can use VFP3 instructions to materialize
   // a constant, otherwise we have to go through the constant pool.
@@ -432,7 +432,7 @@ unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
 
 unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {
   // For now 32-bit only.
-  if (VT.getSimpleVT().SimpleTy != MVT::i32) return false;
+  if (VT != MVT::i32) return false;
 
   // MachineConstantPool wants an explicit alignment.
   unsigned Align = TD.getPrefTypeAlignment(C->getType());
@@ -459,7 +459,7 @@ unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {
 
 unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
   // For now 32-bit only.
-  if (VT.getSimpleVT().SimpleTy != MVT::i32) return 0;
+  if (VT != MVT::i32) return 0;
 
   Reloc::Model RelocM = TM.getRelocationModel();
 
@@ -1292,8 +1292,7 @@ bool ARMFastISel::SelectBinaryOp(const Instruction *I, unsigned ISDOpcode) {
   if (Op2 == 0) return false;
 
   unsigned Opc;
-  bool is64bit = VT.getSimpleVT().SimpleTy == MVT::f64 ||
-                 VT.getSimpleVT().SimpleTy == MVT::i64;
+  bool is64bit = VT == MVT::f64 || VT == MVT::i64;
   switch (ISDOpcode) {
   default: return false;
   case ISD::FADD:
diff --git a/lib/Target/ARM/ARMISelDAGToDAG.cpp b/lib/Target/ARM/ARMISelDAGToDAG.cpp
index 5b2839935a..ee3ffef0e5 100644
--- a/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -2240,12 +2240,11 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
     EVT VecVT = N->getValueType(0);
     EVT EltVT = VecVT.getVectorElementType();
     unsigned NumElts = VecVT.getVectorNumElements();
-    if (EltVT.getSimpleVT() == MVT::f64) {
+    if (EltVT == MVT::f64) {
       assert(NumElts == 2 && "unexpected type for BUILD_VECTOR");
       return PairDRegs(VecVT, N->getOperand(0), N->getOperand(1));
     }
-    assert(EltVT.getSimpleVT() == MVT::f32 &&
-           "unexpected type for BUILD_VECTOR");
+    assert(EltVT == MVT::f32 && "unexpected type for BUILD_VECTOR");
     if (NumElts == 2)
       return PairSRegs(VecVT, N->getOperand(0), N->getOperand(1));
     assert(NumElts == 4 && "unexpected type for BUILD_VECTOR");
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 6bd8503bb4..a9a8c88576 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -3832,7 +3832,7 @@ static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) {
   } else if ((N0->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N0)) &&
              (N1->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N1))) {
     NewOpc = ARMISD::VMULLu;
-  } else if (VT.getSimpleVT().SimpleTy == MVT::v2i64) {
+  } else if (VT == MVT::v2i64) {
     // Fall through to expand this. It is not legal.
     return SDValue();
   } else {
diff --git a/lib/Target/CellSPU/SPUISelLowering.cpp b/lib/Target/CellSPU/SPUISelLowering.cpp
index eb6134c617..b5f0e055a9 100644
--- a/lib/Target/CellSPU/SPUISelLowering.cpp
+++ b/lib/Target/CellSPU/SPUISelLowering.cpp
@@ -2578,7 +2578,7 @@ static SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG)
   SDValue Op0 = Op.getOperand(0);
   EVT Op0VT = Op0.getValueType();
 
-  if (Op0VT.getSimpleVT() == MVT::i128 && simpleVT == MVT::i64) {
+  if (Op0VT == MVT::i128 && simpleVT == MVT::i64) {
     // Create shuffle mask, least significant doubleword of quadword
     unsigned maskHigh = 0x08090a0b;
     unsigned maskLow = 0x0c0d0e0f;
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 69589d2860..bce5fd8626 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -4893,7 +4893,7 @@ static SDValue getVZextMovL(EVT VT, EVT OpVT,
       // movssrr and movsdrr do not clear top bits. Try to use movd, movq
      // instead.
       MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
-      if ((ExtVT.SimpleTy != MVT::i64 || Subtarget->is64Bit()) &&
+      if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) &&
          SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
          SrcOp.getOperand(0).getOpcode() == ISD::BIT_CONVERT &&
          SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) {
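
The change made across all seven files is the same mechanical simplification: comparisons spelled VT.getSimpleVT().SimpleTy == MVT::f64 are shortened to VT == MVT::f64, relying on an EVT being directly comparable against a simple value type. The standalone sketch below is a hypothetical mock of that pattern, not LLVM's real EVT/MVT definitions; it only illustrates how a wrapper type with an implicit conversion from the simple-type enum makes the two spellings equivalent.

// Standalone mock (illustrative only; names mirror LLVM's, but these
// simplified definitions are assumptions, not the real headers).
#include <cassert>

struct MVT {
  enum SimpleValueType { f32, f64, i32, i64, LAST_VALUETYPE };
  SimpleValueType SimpleTy;
  MVT(SimpleValueType S) : SimpleTy(S) {}            // implicit on purpose
  bool operator==(const MVT &RHS) const { return SimpleTy == RHS.SimpleTy; }
};

struct EVT {
  MVT V;
  EVT(MVT::SimpleValueType S) : V(S) {}              // implicit: enum -> EVT
  MVT getSimpleVT() const { return V; }
  bool operator==(const EVT &RHS) const { return V == RHS.V; }
};

int main() {
  EVT VT = MVT::f64;
  // The verbose spelling the patch removes...
  assert(VT.getSimpleVT().SimpleTy == MVT::f64);
  // ...and the shorter spelling it substitutes; MVT::f64 converts to EVT.
  assert(VT == MVT::f64);
  return 0;
}

In the real tree the comparison operators come from the EVT/MVT classes in include/llvm/CodeGen/ValueTypes.h; the sketch above only shows the shape of the pattern the patch leans on.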