author     Evan Cheng <evan.cheng@apple.com>  2011-12-07 07:15:52 +0000
committer  Evan Cheng <evan.cheng@apple.com>  2011-12-07 07:15:52 +0000
commit     5a96b3dad2f634c9081c8b2b6c2575441dc5a2bd (patch)
tree       88fa5be8f6cf6e9a222e79c8806f29c8841925d1 /lib/CodeGen
parent     5729c5848c74a2413cc1d32a5f3c746aff5d9ccc (diff)
Add a bundle-aware API for querying instruction properties and switch the code
generator to it. For non-bundle instructions, these behave exactly the same
as the MC layer API.
For properties like mayLoad / mayStore, the query looks into the bundle and
returns true if any of the bundled instructions has the property.
For properties like isPredicable, it returns true only if *all* of the bundled
instructions have the property.
For properties like canFoldAsLoad and isCompare, it conservatively returns
false for bundles.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@146026 91177308-0d34-0410-b5e6-96231b3b80d8
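For illustration, the three policies above reduce to a single walk over the
bundle parameterized by an aggregation mode. The following standalone C++
sketch is not code from this commit — the flag values, the Agg enum, and
bundleHasProperty are hypothetical stand-ins for the real MCID flags and
MachineInstr::hasProperty — but it shows the OR / AND / conservative-false
behaviors the commit message describes:

#include <cstdint>
#include <vector>

// Hypothetical stand-ins for the MCInstrDesc property flag bits.
enum : uint32_t {
  MayLoad    = 1u << 0,  // OR semantics across a bundle
  Predicable = 1u << 1,  // AND semantics across a bundle
  FoldAsLoad = 1u << 2,  // conservatively false for bundles
};

enum class Agg { Or, And, False };

// Aggregate one property over the per-instruction flag words of a bundle,
// following the policy split described in the commit message.
bool bundleHasProperty(const std::vector<uint32_t> &BundledFlags,
                       uint32_t FlagMask, Agg Mode) {
  if (Mode == Agg::False)
    return false;                    // e.g. canFoldAsLoad, isCompare
  for (uint32_t F : BundledFlags) {
    bool Has = (F & FlagMask) != 0;
    if (Mode == Agg::Or && Has)
      return true;                   // one qualifying instruction suffices
    if (Mode == Agg::And && !Has)
      return false;                  // every instruction must qualify
  }
  return Mode == Agg::And;           // OR saw no hit; AND saw no miss
}

Under this scheme, a bundle-level mayLoad() corresponds to
bundleHasProperty(Flags, MayLoad, Agg::Or) and isPredicable() to Agg::And —
the same split the new MachineInstr::hasProperty() in the diff below
expresses with its IsOr flag and final "return !IsOr".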
Diffstat (limited to 'lib/CodeGen')
35 files changed, 152 insertions, 148 deletions
diff --git a/lib/CodeGen/AggressiveAntiDepBreaker.cpp b/lib/CodeGen/AggressiveAntiDepBreaker.cpp
index 25842a7876..6cf4571d73 100644
--- a/lib/CodeGen/AggressiveAntiDepBreaker.cpp
+++ b/lib/CodeGen/AggressiveAntiDepBreaker.cpp
@@ -148,7 +148,7 @@ void AggressiveAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
   assert(State == NULL);
   State = new AggressiveAntiDepState(TRI->getNumRegs(), BB);
 
-  bool IsReturnBlock = (!BB->empty() && BB->back().getDesc().isReturn());
+  bool IsReturnBlock = (!BB->empty() && BB->back().isReturn());
   std::vector<unsigned> &KillIndices = State->GetKillIndices();
   std::vector<unsigned> &DefIndices = State->GetDefIndices();
 
@@ -384,7 +384,7 @@ void AggressiveAntiDepBreaker::PrescanInstruction(MachineInstr *MI,
   // If MI's defs have a special allocation requirement, don't allow
   // any def registers to be changed. Also assume all registers
   // defined in a call must not be changed (ABI).
-  if (MI->getDesc().isCall() || MI->getDesc().hasExtraDefRegAllocReq() ||
+  if (MI->isCall() || MI->hasExtraDefRegAllocReq() ||
       TII->isPredicated(MI)) {
     DEBUG(if (State->GetGroup(Reg) != 0) dbgs() << "->g0(alloc-req)");
     State->UnionGroups(Reg, 0);
@@ -451,8 +451,8 @@ void AggressiveAntiDepBreaker::ScanInstruction(MachineInstr *MI,
   // instruction which may not be executed. The second R6 def may or may not
   // re-define R6 so it's not safe to change it since the last R6 use cannot be
   // changed.
-  bool Special = MI->getDesc().isCall() ||
-    MI->getDesc().hasExtraSrcRegAllocReq() ||
+  bool Special = MI->isCall() ||
+    MI->hasExtraSrcRegAllocReq() ||
     TII->isPredicated(MI);
 
   // Scan the register uses for this instruction and update
diff --git a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 23bf602bad..d13eae098f 100644
--- a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -2087,7 +2087,7 @@ isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const {
     MachineInstr &MI = *II;
 
     // If it is not a simple branch, we are in a table somewhere.
-    if (!MI.getDesc().isBranch() || MI.getDesc().isIndirectBranch())
+    if (!MI.isBranch() || MI.isIndirectBranch())
       return false;
 
     // If we are the operands of one of the branches, this is not
diff --git a/lib/CodeGen/AsmPrinter/DwarfException.cpp b/lib/CodeGen/AsmPrinter/DwarfException.cpp
index e0f2e85f62..bf7f7eec9f 100644
--- a/lib/CodeGen/AsmPrinter/DwarfException.cpp
+++ b/lib/CodeGen/AsmPrinter/DwarfException.cpp
@@ -184,7 +184,7 @@ ComputeActionsTable(const SmallVectorImpl<const LandingPadInfo*> &LandingPads,
 /// CallToNoUnwindFunction - Return `true' if this is a call to a function
 /// marked `nounwind'. Return `false' otherwise.
 bool DwarfException::CallToNoUnwindFunction(const MachineInstr *MI) {
-  assert(MI->getDesc().isCall() && "This should be a call instruction!");
+  assert(MI->isCall() && "This should be a call instruction!");
 
   bool MarkedNoUnwind = false;
   bool SawFunc = false;
@@ -243,7 +243,7 @@ ComputeCallSiteTable(SmallVectorImpl<CallSiteEntry> &CallSites,
     for (MachineBasicBlock::const_iterator MI = I->begin(), E = I->end();
          MI != E; ++MI) {
       if (!MI->isLabel()) {
-        if (MI->getDesc().isCall())
+        if (MI->isCall())
           SawPotentiallyThrowing |= !CallToNoUnwindFunction(MI);
         continue;
       }
diff --git a/lib/CodeGen/BranchFolding.cpp b/lib/CodeGen/BranchFolding.cpp
index 5dec368112..0d88e6c211 100644
--- a/lib/CodeGen/BranchFolding.cpp
+++ b/lib/CodeGen/BranchFolding.cpp
@@ -432,10 +432,9 @@ static unsigned EstimateRuntime(MachineBasicBlock::iterator I,
   for (; I != E; ++I) {
     if (I->isDebugValue())
       continue;
-    const MCInstrDesc &MCID = I->getDesc();
-    if (MCID.isCall())
+    if (I->isCall())
       Time += 10;
-    else if (MCID.mayLoad() || MCID.mayStore())
+    else if (I->mayLoad() || I->mayStore())
       Time += 2;
     else
       ++Time;
@@ -502,7 +501,7 @@ static unsigned CountTerminators(MachineBasicBlock *MBB,
       break;
     }
     --I;
-    if (!I->getDesc().isTerminator()) break;
+    if (!I->isTerminator()) break;
     ++NumTerms;
   }
   return NumTerms;
@@ -550,8 +549,8 @@ static bool ProfitableToMerge(MachineBasicBlock *MBB1,
   // heuristics.
   unsigned EffectiveTailLen = CommonTailLen;
   if (SuccBB && MBB1 != PredBB && MBB2 != PredBB &&
-      !MBB1->back().getDesc().isBarrier() &&
-      !MBB2->back().getDesc().isBarrier())
+      !MBB1->back().isBarrier() &&
+      !MBB2->back().isBarrier())
     ++EffectiveTailLen;
 
   // Check if the common tail is long enough to be worthwhile.
@@ -983,7 +982,7 @@ static bool IsBranchOnlyBlock(MachineBasicBlock *MBB) {
     if (!MBBI->isDebugValue())
       break;
   }
-  return (MBBI->getDesc().isBranch());
+  return (MBBI->isBranch());
 }
 
 /// IsBetterFallthrough - Return true if it would be clearly better to
@@ -1011,7 +1010,7 @@ static bool IsBetterFallthrough(MachineBasicBlock *MBB1,
   MachineBasicBlock::iterator MBB2I = --MBB2->end();
   while (MBB2I->isDebugValue())
     --MBB2I;
-  return MBB2I->getDesc().isCall() && !MBB1I->getDesc().isCall();
+  return MBB2I->isCall() && !MBB1I->isCall();
 }
 
 /// OptimizeBlock - Analyze and optimize control flow related to the specified
diff --git a/lib/CodeGen/CriticalAntiDepBreaker.cpp b/lib/CodeGen/CriticalAntiDepBreaker.cpp
index 84c4d59c0e..128143e70f 100644
--- a/lib/CodeGen/CriticalAntiDepBreaker.cpp
+++ b/lib/CodeGen/CriticalAntiDepBreaker.cpp
@@ -54,7 +54,7 @@ void CriticalAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
   // Clear "do not change" set.
   KeepRegs.clear();
 
-  bool IsReturnBlock = (!BB->empty() && BB->back().getDesc().isReturn());
+  bool IsReturnBlock = (!BB->empty() && BB->back().isReturn());
 
   // Determine the live-out physregs for this block.
   if (IsReturnBlock) {
@@ -193,8 +193,8 @@ void CriticalAntiDepBreaker::PrescanInstruction(MachineInstr *MI) {
   // instruction which may not be executed. The second R6 def may or may not
   // re-define R6 so it's not safe to change it since the last R6 use cannot be
   // changed.
-  bool Special = MI->getDesc().isCall() ||
-    MI->getDesc().hasExtraSrcRegAllocReq() ||
+  bool Special = MI->isCall() ||
+    MI->hasExtraSrcRegAllocReq() ||
     TII->isPredicated(MI);
 
   // Scan the register operands for this instruction and update
@@ -572,7 +572,7 @@ BreakAntiDependencies(const std::vector<SUnit>& SUnits,
     // If MI's defs have a special allocation requirement, don't allow
     // any def registers to be changed. Also assume all registers
     // defined in a call must not be changed (ABI).
-    if (MI->getDesc().isCall() || MI->getDesc().hasExtraDefRegAllocReq() ||
+    if (MI->isCall() || MI->hasExtraDefRegAllocReq() ||
         TII->isPredicated(MI))
       // If this instruction's defs have special allocation requirement, don't
       // break this anti-dependency.
diff --git a/lib/CodeGen/DeadMachineInstructionElim.cpp b/lib/CodeGen/DeadMachineInstructionElim.cpp
index 6de6c0cb81..ba135e19f4 100644
--- a/lib/CodeGen/DeadMachineInstructionElim.cpp
+++ b/lib/CodeGen/DeadMachineInstructionElim.cpp
@@ -102,7 +102,7 @@ bool DeadMachineInstructionElim::runOnMachineFunction(MachineFunction &MF) {
     LivePhysRegs = ReservedRegs;
 
     // Also add any explicit live-out physregs for this block.
-    if (!MBB->empty() && MBB->back().getDesc().isReturn())
+    if (!MBB->empty() && MBB->back().isReturn())
       for (MachineRegisterInfo::liveout_iterator LOI = MRI->liveout_begin(),
            LOE = MRI->liveout_end(); LOI != LOE; ++LOI) {
         unsigned Reg = *LOI;
diff --git a/lib/CodeGen/ExecutionDepsFix.cpp b/lib/CodeGen/ExecutionDepsFix.cpp
index 300f037121..4ec75cdfec 100644
--- a/lib/CodeGen/ExecutionDepsFix.cpp
+++ b/lib/CodeGen/ExecutionDepsFix.cpp
@@ -454,7 +454,7 @@ void ExeDepsFix::processDefs(MachineInstr *MI, bool Kill) {
   assert(!MI->isDebugValue() && "Won't process debug values");
   const MCInstrDesc &MCID = MI->getDesc();
   for (unsigned i = 0,
-         e = MCID.isVariadic() ? MI->getNumOperands() : MCID.getNumDefs();
+         e = MI->isVariadic() ? MI->getNumOperands() : MCID.getNumDefs();
        i != e; ++i) {
     MachineOperand &MO = MI->getOperand(i);
     if (!MO.isReg())
diff --git a/lib/CodeGen/ExpandISelPseudos.cpp b/lib/CodeGen/ExpandISelPseudos.cpp
index a67140ece4..b5f107d5cd 100644
--- a/lib/CodeGen/ExpandISelPseudos.cpp
+++ b/lib/CodeGen/ExpandISelPseudos.cpp
@@ -62,8 +62,7 @@ bool ExpandISelPseudos::runOnMachineFunction(MachineFunction &MF) {
       MachineInstr *MI = MBBI++;
 
       // If MI is a pseudo, expand it.
-      const MCInstrDesc &MCID = MI->getDesc();
-      if (MCID.usesCustomInsertionHook()) {
+      if (MI->usesCustomInsertionHook()) {
         Changed = true;
         MachineBasicBlock *NewMBB =
           TLI->EmitInstrWithCustomInserter(MI, MBB);
diff --git a/lib/CodeGen/ExpandPostRAPseudos.cpp b/lib/CodeGen/ExpandPostRAPseudos.cpp
index e2a14a8dfd..3d23db0b8f 100644
--- a/lib/CodeGen/ExpandPostRAPseudos.cpp
+++ b/lib/CodeGen/ExpandPostRAPseudos.cpp
@@ -207,7 +207,7 @@ bool ExpandPostRA::runOnMachineFunction(MachineFunction &MF) {
       ++mi;
 
       // Only expand pseudos.
-      if (!MI->getDesc().isPseudo())
+      if (!MI->isPseudo())
         continue;
 
       // Give targets a chance to expand even standard pseudos.
diff --git a/lib/CodeGen/GCStrategy.cpp b/lib/CodeGen/GCStrategy.cpp
index 9349797e22..e2c71322da 100644
--- a/lib/CodeGen/GCStrategy.cpp
+++ b/lib/CodeGen/GCStrategy.cpp
@@ -386,7 +386,7 @@ void MachineCodeAnalysis::FindSafePoints(MachineFunction &MF) {
        BBE = MF.end(); BBI != BBE; ++BBI)
     for (MachineBasicBlock::iterator MI = BBI->begin(),
          ME = BBI->end(); MI != ME; ++MI)
-      if (MI->getDesc().isCall())
+      if (MI->isCall())
         VisitCallPoint(MI);
 }
 
diff --git a/lib/CodeGen/IfConversion.cpp b/lib/CodeGen/IfConversion.cpp
index d888939d4c..bd31fdf53a 100644
--- a/lib/CodeGen/IfConversion.cpp
+++ b/lib/CodeGen/IfConversion.cpp
@@ -573,12 +573,12 @@ bool IfConverter::ValidDiamond(BBInfo &TrueBBI, BBInfo &FalseBBI,
   // blocks, move the end iterators up past any branch instructions.
   while (TIE != TIB) {
     --TIE;
-    if (!TIE->getDesc().isBranch())
+    if (!TIE->isBranch())
       break;
   }
   while (FIE != FIB) {
     --FIE;
-    if (!FIE->getDesc().isBranch())
+    if (!FIE->isBranch())
       break;
   }
 
@@ -651,12 +651,11 @@ void IfConverter::ScanInstructions(BBInfo &BBI) {
     if (I->isDebugValue())
      continue;
 
-    const MCInstrDesc &MCID = I->getDesc();
-    if (MCID.isNotDuplicable())
+    if (I->isNotDuplicable())
       BBI.CannotBeCopied = true;
 
     bool isPredicated = TII->isPredicated(I);
-    bool isCondBr = BBI.IsBrAnalyzable && MCID.isConditionalBranch();
+    bool isCondBr = BBI.IsBrAnalyzable && I->isConditionalBranch();
 
     if (!isCondBr) {
       if (!isPredicated) {
@@ -1395,9 +1394,8 @@ void IfConverter::CopyAndPredicateBlock(BBInfo &ToBBI, BBInfo &FromBBI,
 
   for (MachineBasicBlock::iterator I = FromBBI.BB->begin(),
        E = FromBBI.BB->end(); I != E; ++I) {
-    const MCInstrDesc &MCID = I->getDesc();
     // Do not copy the end of the block branches.
-    if (IgnoreBr && MCID.isBranch())
+    if (IgnoreBr && I->isBranch())
       break;
 
     MachineInstr *MI = MF.CloneMachineInstr(I);
diff --git a/lib/CodeGen/InlineSpiller.cpp b/lib/CodeGen/InlineSpiller.cpp
index 59907d9c4e..8f38887cb1 100644
--- a/lib/CodeGen/InlineSpiller.cpp
+++ b/lib/CodeGen/InlineSpiller.cpp
@@ -759,7 +759,7 @@ void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
     // Find all spills and copies of VNI.
     for (MachineRegisterInfo::use_nodbg_iterator UI = MRI.use_nodbg_begin(Reg);
          MachineInstr *MI = UI.skipInstruction();) {
-      if (!MI->isCopy() && !MI->getDesc().mayStore())
+      if (!MI->isCopy() && !MI->mayStore())
         continue;
       SlotIndex Idx = LIS.getInstructionIndex(MI);
       if (LI->getVNInfoAt(Idx) != VNI)
@@ -878,7 +878,7 @@ bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg,
 
   // Before rematerializing into a register for a single instruction, try to
   // fold a load into the instruction. That avoids allocating a new register.
-  if (RM.OrigMI->getDesc().canFoldAsLoad() &&
+  if (RM.OrigMI->canFoldAsLoad() &&
       foldMemoryOperand(MI, Ops, RM.OrigMI)) {
     Edit->markRematerialized(RM.ParentVNI);
     ++NumFoldedLoads;
diff --git a/lib/CodeGen/LiveDebugVariables.cpp b/lib/CodeGen/LiveDebugVariables.cpp
index eb54baa729..c35302a050 100644
--- a/lib/CodeGen/LiveDebugVariables.cpp
+++ b/lib/CodeGen/LiveDebugVariables.cpp
@@ -920,8 +920,8 @@ findInsertLocation(MachineBasicBlock *MBB, SlotIndex Idx,
   }
 
   // Don't insert anything after the first terminator, though.
-  return MI->getDesc().isTerminator() ? MBB->getFirstTerminator() :
-                                        llvm::next(MachineBasicBlock::iterator(MI));
+  return MI->isTerminator() ? MBB->getFirstTerminator() :
+                              llvm::next(MachineBasicBlock::iterator(MI));
 }
 
 DebugLoc UserValue::findDebugLoc() {
diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp
index edcfebe866..1e58173c18 100644
--- a/lib/CodeGen/LiveIntervalAnalysis.cpp
+++ b/lib/CodeGen/LiveIntervalAnalysis.cpp
@@ -794,7 +794,7 @@ LiveIntervals::getLastSplitPoint(const LiveInterval &li,
   MachineBasicBlock::iterator I = mbb->end(), B = mbb->begin();
   while (I != B) {
     --I;
-    if (I->getDesc().isCall())
+    if (I->isCall())
       return I;
   }
   // The block contains no calls that can throw, so use the first terminator.
diff --git a/lib/CodeGen/LiveRangeEdit.cpp b/lib/CodeGen/LiveRangeEdit.cpp
index 2f283b2443..428b252f97 100644
--- a/lib/CodeGen/LiveRangeEdit.cpp
+++ b/lib/CodeGen/LiveRangeEdit.cpp
@@ -129,7 +129,7 @@ bool LiveRangeEdit::canRematerializeAt(Remat &RM,
   }
 
   // If only cheap remats were requested, bail out early.
-  if (cheapAsAMove && !RM.OrigMI->getDesc().isAsCheapAsAMove())
+  if (cheapAsAMove && !RM.OrigMI->isAsCheapAsAMove())
     return false;
 
   // Verify that all used registers are available with the same values.
@@ -174,7 +174,7 @@ bool LiveRangeEdit::foldAsLoad(LiveInterval *LI,
     if (MO.isDef()) {
       if (DefMI && DefMI != MI)
         return false;
-      if (!MI->getDesc().canFoldAsLoad())
+      if (!MI->canFoldAsLoad())
         return false;
       DefMI = MI;
     } else if (!MO.isUndef()) {
diff --git a/lib/CodeGen/LiveVariables.cpp b/lib/CodeGen/LiveVariables.cpp
index 03f8221e1f..7477d91930 100644
--- a/lib/CodeGen/LiveVariables.cpp
+++ b/lib/CodeGen/LiveVariables.cpp
@@ -590,8 +590,8 @@ bool LiveVariables::runOnMachineFunction(MachineFunction &mf) {
     // them. The tail callee need not take the same registers as input
     // that it produces as output, and there are dependencies for its input
     // registers elsewhere.
-    if (!MBB->empty() && MBB->back().getDesc().isReturn()
-        && !MBB->back().getDesc().isCall()) {
+    if (!MBB->empty() && MBB->back().isReturn()
+        && !MBB->back().isCall()) {
       MachineInstr *Ret = &MBB->back();
 
       for (MachineRegisterInfo::liveout_iterator
diff --git a/lib/CodeGen/MachineBasicBlock.cpp b/lib/CodeGen/MachineBasicBlock.cpp
index 46ed082ad2..4dc8173bd7 100644
--- a/lib/CodeGen/MachineBasicBlock.cpp
+++ b/lib/CodeGen/MachineBasicBlock.cpp
@@ -538,8 +538,8 @@ bool MachineBasicBlock::canFallThrough() {
     // Barrier is predicated and thus no longer an actual control barrier. This
     // is over-conservative though, because if an instruction isn't actually
     // predicated we could still treat it like a barrier.
-    return empty() || !back().getDesc().isBarrier() ||
-           back().getDesc().isPredicable();
+    return empty() || !back().isBarrier() ||
+           back().isPredicable();
   }
 
   // If there is no branch, control always falls through.
@@ -737,7 +737,7 @@ void MachineBasicBlock::ReplaceUsesOfBlockWith(MachineBasicBlock *Old,
   MachineBasicBlock::insn_iterator I = insn_end();
   while (I != insn_begin()) {
     --I;
-    if (!I->getDesc().isTerminator()) break;
+    if (!I->isTerminator()) break;
 
     // Scan the operands of this machine instruction, replacing any uses of Old
     // with New.
diff --git a/lib/CodeGen/MachineCSE.cpp b/lib/CodeGen/MachineCSE.cpp
index 7eda8c129d..8c02cd7ddb 100644
--- a/lib/CodeGen/MachineCSE.cpp
+++ b/lib/CodeGen/MachineCSE.cpp
@@ -260,12 +260,11 @@ bool MachineCSE::isCSECandidate(MachineInstr *MI) {
     return false;
 
   // Ignore stuff that we obviously can't move.
-  const MCInstrDesc &MCID = MI->getDesc();
-  if (MCID.mayStore() || MCID.isCall() || MCID.isTerminator() ||
+  if (MI->mayStore() || MI->isCall() || MI->isTerminator() ||
       MI->hasUnmodeledSideEffects())
     return false;
 
-  if (MCID.mayLoad()) {
+  if (MI->mayLoad()) {
    // Okay, this instruction does a load. As a refinement, we allow the target
     // to decide whether the loaded value is actually a constant. If so, we can
     // actually use it as a load.
@@ -287,7 +286,7 @@ bool MachineCSE::isProfitableToCSE(unsigned CSReg, unsigned Reg,
   // Heuristics #1: Don't CSE "cheap" computation if the def is not local or in
   // an immediate predecessor. We don't want to increase register pressure and
   // end up causing other computation to be spilled.
-  if (MI->getDesc().isAsCheapAsAMove()) {
+  if (MI->isAsCheapAsAMove()) {
     MachineBasicBlock *CSBB = CSMI->getParent();
     MachineBasicBlock *BB = MI->getParent();
     if (CSBB != BB && !CSBB->isSuccessor(BB))
@@ -376,7 +375,7 @@ bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
 
     // Commute commutable instructions.
     bool Commuted = false;
-    if (!FoundCSE && MI->getDesc().isCommutable()) {
+    if (!FoundCSE && MI->isCommutable()) {
       MachineInstr *NewMI = TII->commuteInstruction(MI);
       if (NewMI) {
         Commuted = true;
diff --git a/lib/CodeGen/MachineInstr.cpp b/lib/CodeGen/MachineInstr.cpp
index ee36fc6ae1..0471cf2e6e 100644
--- a/lib/CodeGen/MachineInstr.cpp
+++ b/lib/CodeGen/MachineInstr.cpp
@@ -748,18 +748,25 @@ void MachineInstr::addMemOperand(MachineFunction &MF,
   MemRefsEnd = NewMemRefsEnd;
 }
 
-bool MachineInstr::hasProperty(unsigned short MCFlag) const {
-  if (getOpcode() != TargetOpcode::BUNDLE)
+bool
+MachineInstr::hasProperty(unsigned MCFlag, bool PeekInBundle, bool IsOr) const {
+  if (!PeekInBundle || getOpcode() != TargetOpcode::BUNDLE)
     return getDesc().getFlags() & (1 << MCFlag);
 
   const MachineBasicBlock *MBB = getParent();
   MachineBasicBlock::const_insn_iterator MII = *this; ++MII;
   while (MII != MBB->end() && MII->isInsideBundle()) {
-    if (MII->getDesc().getFlags() & (1 << MCFlag))
-      return true;
+    if (MII->getDesc().getFlags() & (1 << MCFlag)) {
+      if (IsOr)
+        return true;
+    } else {
+      if (!IsOr)
+        return false;
+    }
     ++MII;
   }
-  return false;
+
+  return !IsOr;
 }
 
 bool MachineInstr::isIdenticalTo(const MachineInstr *Other,
@@ -1017,6 +1024,9 @@ MachineInstr::findRegisterDefOperandIdx(unsigned Reg, bool isDead, bool Overlap,
 /// operand list that is used to represent the predicate. It returns -1 if
 /// none is found.
 int MachineInstr::findFirstPredOperandIdx() const {
+  assert(getOpcode() != TargetOpcode::BUNDLE &&
+         "MachineInstr::findFirstPredOperandIdx() can't handle bundles");
+
   // Don't call MCID.findFirstPredOperandIdx() because this variant
   // is sometimes called on an instruction that's not yet complete, and
   // so the number of operands is less than the MCID indicates. In
@@ -1166,6 +1176,9 @@ void MachineInstr::copyKillDeadInfo(const MachineInstr *MI) {
 
 /// copyPredicates - Copies predicate operand(s) from MI.
 void MachineInstr::copyPredicates(const MachineInstr *MI) {
+  assert(getOpcode() != TargetOpcode::BUNDLE &&
+         "MachineInstr::copyPredicates() can't handle bundles");
+
   const MCInstrDesc &MCID = MI->getDesc();
   if (!MCID.isPredicable())
     return;
@@ -1207,13 +1220,13 @@ bool MachineInstr::isSafeToMove(const TargetInstrInfo *TII,
                                 AliasAnalysis *AA,
                                 bool &SawStore) const {
   // Ignore stuff that we obviously can't move.
-  if (MCID->mayStore() || MCID->isCall()) {
+  if (mayStore() || isCall()) {
     SawStore = true;
     return false;
   }
 
   if (isLabel() || isDebugValue() ||
-      MCID->isTerminator() || hasUnmodeledSideEffects())
+      isTerminator() || hasUnmodeledSideEffects())
     return false;
 
   // See if this instruction does a load. If so, we have to guarantee that the
@@ -1221,7 +1234,7 @@ bool MachineInstr::isSafeToMove(const TargetInstrInfo *TII,
   // destination. The check for isInvariantLoad gives the targe the chance to
   // classify the load as always returning a constant, e.g. a constant pool
   // load.
-  if (MCID->mayLoad() && !isInvariantLoad(AA))
+  if (mayLoad() && !isInvariantLoad(AA))
     // Otherwise, this is a real load. If there is a store between the load and
     // end of block, or if the load is volatile, we can't move it.
     return !SawStore && !hasVolatileMemoryRef();
@@ -1261,9 +1274,9 @@ bool MachineInstr::isSafeToReMat(const TargetInstrInfo *TII,
 /// have no volatile memory references.
 bool MachineInstr::hasVolatileMemoryRef() const {
   // An instruction known never to access memory won't have a volatile access.
-  if (!MCID->mayStore() &&
-      !MCID->mayLoad() &&
-      !MCID->isCall() &&
+  if (!mayStore() &&
+      !mayLoad() &&
+      !isCall() &&
       !hasUnmodeledSideEffects())
     return false;
 
@@ -1287,7 +1300,7 @@ bool MachineInstr::hasVolatileMemoryRef() const {
 /// *all* loads the instruction does are invariant (if it does multiple loads).
 bool MachineInstr::isInvariantLoad(AliasAnalysis *AA) const {
   // If the instruction doesn't load at all, it isn't an invariant load.
-  if (!MCID->mayLoad())
+  if (!mayLoad())
     return false;
 
   // If the instruction has lost its memoperands, conservatively assume that
@@ -1340,7 +1353,7 @@ unsigned MachineInstr::isConstantValuePHI() const {
 }
 
 bool MachineInstr::hasUnmodeledSideEffects() const {
-  if (getDesc().hasUnmodeledSideEffects())
+  if (hasProperty(MCID::UnmodeledSideEffects))
     return true;
   if (isInlineAsm()) {
     unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
@@ -1468,7 +1481,7 @@ void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM) const {
     // call instructions much less noisy on targets where calls clobber lots
     // of registers. Don't rely on MO.isDead() because we may be called before
     // LiveVariables is run, or we may be looking at a non-allocatable reg.
-    if (MF && getDesc().isCall() &&
+    if (MF && isCall() &&
         MO.isReg() && MO.isImplicit() && MO.isDef()) {
       unsigned Reg = MO.getReg();
       if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
diff --git a/lib/CodeGen/MachineLICM.cpp b/lib/CodeGen/MachineLICM.cpp
index f88b730c6d..764429dbf1 100644
--- a/lib/CodeGen/MachineLICM.cpp
+++ b/lib/CodeGen/MachineLICM.cpp
@@ -765,7 +765,7 @@ void MachineLICM::UpdateRegPressure(const MachineInstr *MI) {
 /// isLoadFromGOTOrConstantPool - Return true if this machine instruction
 /// loads from global offset table or constant pool.
 static bool isLoadFromGOTOrConstantPool(MachineInstr &MI) {
-  assert (MI.getDesc().mayLoad() && "Expected MI that loads!");
+  assert (MI.mayLoad() && "Expected MI that loads!");
   for (MachineInstr::mmo_iterator I = MI.memoperands_begin(),
        E = MI.memoperands_end(); I != E; ++I) {
     if (const Value *V = (*I)->getValue()) {
@@ -792,7 +792,7 @@ bool MachineLICM::IsLICMCandidate(MachineInstr &I) {
   // from constant memory are not safe to speculate all the time, for example
   // indexed load from a jump table.
   // Stores and side effects are already checked by isSafeToMove.
-  if (I.getDesc().mayLoad() && !isLoadFromGOTOrConstantPool(I) &&
+  if (I.mayLoad() && !isLoadFromGOTOrConstantPool(I) &&
       !IsGuaranteedToExecute(I.getParent()))
     return false;
 
@@ -921,7 +921,7 @@ bool MachineLICM::HasHighOperandLatency(MachineInstr &MI,
 /// IsCheapInstruction - Return true if the instruction is marked "cheap" or
 /// the operand latency between its def and a use is one or less.
 bool MachineLICM::IsCheapInstruction(MachineInstr &MI) const {
-  if (MI.getDesc().isAsCheapAsAMove() || MI.isCopyLike())
+  if (MI.isAsCheapAsAMove() || MI.isCopyLike())
     return true;
   if (!InstrItins || InstrItins->isEmpty())
     return false;
@@ -1105,7 +1105,7 @@ bool MachineLICM::IsProfitableToHoist(MachineInstr &MI) {
 
 MachineInstr *MachineLICM::ExtractHoistableLoad(MachineInstr *MI) {
   // Don't unfold simple loads.
-  if (MI->getDesc().canFoldAsLoad())