author | Derek Schuff <dschuff@chromium.org> | 2013-01-30 11:34:40 -0800 |
---|---|---|
committer | Derek Schuff <dschuff@chromium.org> | 2013-01-30 11:34:40 -0800 |
commit | 1843e19bce9b11fc840858e136c6c52cf8b42e0b (patch) | |
tree | e8bfc928152e2d3b3dd120d141d13dc08a9b49e4 /lib/MC/MCAssembler.cpp | |
parent | aa0fa8a8df25807f784ec9ca9deeb40328636595 (diff) | |
parent | a662a9862501fc86904e90054f7c1519101d9126 (diff) | |
Merge commit 'a662a9862501fc86904e90054f7c1519101d9126'
Conflicts:
include/llvm/CodeGen/IntrinsicLowering.h
include/llvm/MC/MCAssembler.h
include/llvm/MC/MCObjectStreamer.h
lib/LLVMBuild.txt
lib/Linker/LinkArchives.cpp
lib/MC/MCAssembler.cpp
lib/MC/MCELFStreamer.cpp
lib/MC/MCParser/AsmParser.cpp
lib/MC/MCPureStreamer.cpp
lib/MC/WinCOFFStreamer.cpp
lib/Makefile
lib/Support/Unix/Memory.inc
lib/Support/Unix/Process.inc
lib/Support/Unix/Program.inc
lib/Target/ARM/ARM.h
lib/Target/ARM/ARMFastISel.cpp
lib/Target/ARM/ARMISelLowering.cpp
lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
lib/Target/Mips/MipsInstrFPU.td
lib/Target/X86/CMakeLists.txt
lib/Target/X86/X86ISelLowering.cpp
lib/Target/X86/X86TargetMachine.cpp
lib/Target/X86/X86TargetObjectFile.cpp
lib/Transforms/InstCombine/InstCombineCalls.cpp
test/CodeGen/X86/fast-isel-x86-64.ll
tools/llc/llc.cpp
tools/lto/LTOModule.cpp
utils/TableGen/EDEmitter.cpp
Diffstat (limited to 'lib/MC/MCAssembler.cpp')
-rw-r--r-- | lib/MC/MCAssembler.cpp | 658 |
1 file changed, 238 insertions, 420 deletions
diff --git a/lib/MC/MCAssembler.cpp b/lib/MC/MCAssembler.cpp
index 5f564afd6a..5fdc57ad30 100644
--- a/lib/MC/MCAssembler.cpp
+++ b/lib/MC/MCAssembler.cpp
@@ -9,32 +9,41 @@
 #define DEBUG_TYPE "assembler"
 #include "llvm/MC/MCAssembler.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/MC/MCAsmBackend.h"
 #include "llvm/MC/MCAsmLayout.h"
 #include "llvm/MC/MCCodeEmitter.h"
 #include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCDwarf.h"
 #include "llvm/MC/MCExpr.h"
 #include "llvm/MC/MCFixupKindInfo.h"
 #include "llvm/MC/MCObjectWriter.h"
 #include "llvm/MC/MCSection.h"
 #include "llvm/MC/MCSymbol.h"
 #include "llvm/MC/MCValue.h"
-#include "llvm/MC/MCDwarf.h"
-#include "llvm/MC/MCAsmBackend.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/StringExtras.h"
-#include "llvm/ADT/Twine.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/MathExtras.h" // @LOCALMOD
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/TargetRegistry.h"
 #include "llvm/Support/LEB128.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/raw_ostream.h"
 
 using namespace llvm;
 
 namespace {
 namespace stats {
 
-STATISTIC(EmittedFragments, "Number of emitted assembler fragments");
+STATISTIC(EmittedFragments, "Number of emitted assembler fragments - total");
+STATISTIC(EmittedRelaxableFragments,
+          "Number of emitted assembler fragments - relaxable");
+STATISTIC(EmittedDataFragments,
+          "Number of emitted assembler fragments - data");
+STATISTIC(EmittedAlignFragments,
+          "Number of emitted assembler fragments - align");
+STATISTIC(EmittedFillFragments,
+          "Number of emitted assembler fragments - fill");
+STATISTIC(EmittedOrgFragments,
+          "Number of emitted assembler fragments - org");
 STATISTIC(evaluateFixup, "Number of evaluated fixups");
 STATISTIC(FragmentLayouts, "Number of fragment layouts");
 STATISTIC(ObjectBytes, "Number of emitted object file bytes");
@@ -62,7 +71,7 @@ MCAsmLayout::MCAsmLayout(MCAssembler &Asm)
     SectionOrder.push_back(&*it);
 }
 
-bool MCAsmLayout::isFragmentUpToDate(const MCFragment *F) const {
+bool MCAsmLayout::isFragmentValid(const MCFragment *F) const {
   const MCSectionData &SD = *F->getParent();
   const MCFragment *LastValid = LastValidFragment.lookup(&SD);
   if (!LastValid)
@@ -71,29 +80,9 @@ bool MCAsmLayout::isFragmentUpToDate(const MCFragment *F) const {
   return F->getLayoutOrder() <= LastValid->getLayoutOrder();
 }
 
-void MCAsmLayout::Invalidate(MCFragment *F) {
-  // @LOCALMOD-BEGIN
-  if (F->getParent()->isBundlingEnabled()) {
-    // If this fragment is part of a bundle locked group,
-    // we need to invalidate all the way to the first fragment
-    // in the group.
-    while (F && !F->isBundleGroupStart())
-      F = F->getPrevNode();
-    assert(F);
-    // With padding enabled, we need to invalidate back one
-    // fragment further in in order to force the recalculuation
-    // of the padding and offset.
-    if (F->getPrevNode()) {
-      F = F->getPrevNode();
-    } else {
-      LastValidFragment[F->getParent()] = NULL;
-      return;
-    }
-  }
-  // @LOCALMOD-END
-
-  // If this fragment wasn't already up-to-date, we don't need to do anything.
-  if (!isFragmentUpToDate(F))
+void MCAsmLayout::invalidateFragmentsAfter(MCFragment *F) {
+  // If this fragment wasn't already valid, we don't need to do anything.
+  if (!isFragmentValid(F))
     return;
 
   // Otherwise, reset the last valid fragment to this fragment.
@@ -101,7 +90,7 @@ void MCAsmLayout::Invalidate(MCFragment *F) {
   LastValidFragment[&SD] = F;
 }
 
-void MCAsmLayout::EnsureValid(const MCFragment *F) const {
+void MCAsmLayout::ensureValid(const MCFragment *F) const {
   MCSectionData &SD = *F->getParent();
   MCFragment *Cur = LastValidFragment[&SD];
 
@@ -110,15 +99,16 @@ void MCAsmLayout::EnsureValid(const MCFragment *F) const {
   else
     Cur = Cur->getNextNode();
 
-  // Advance the layout position until the fragment is up-to-date.
-  while (!isFragmentUpToDate(F)) {
-    const_cast<MCAsmLayout*>(this)->LayoutFragment(Cur);
+  // Advance the layout position until the fragment is valid.
+  while (!isFragmentValid(F)) {
+    assert(Cur && "Layout bookkeeping error");
+    const_cast<MCAsmLayout*>(this)->layoutFragment(Cur);
     Cur = Cur->getNextNode();
   }
 }
 
 uint64_t MCAsmLayout::getFragmentOffset(const MCFragment *F) const {
-  EnsureValid(F);
+  ensureValid(F);
   assert(F->Offset != ~UINT64_C(0) && "Address not set!");
   return F->Offset;
 }
@@ -154,15 +144,6 @@ uint64_t MCAsmLayout::getSymbolOffset(const MCSymbolData *SD) const {
   assert(SD->getFragment() && "Invalid getOffset() on undefined symbol!");
   return getFragmentOffset(SD->getFragment()) + SD->getOffset();
 }
-
-// @LOCALMOD-BEGIN
-uint8_t MCAsmLayout::getFragmentPadding(const MCFragment *F) const {
-  EnsureValid(F);
-  assert(F->BundlePadding != (uint8_t)~UINT8_C(0) && "Padding not set!");
-  return F->BundlePadding;
-}
-// @LOCALMOD-END
-
 
 uint64_t MCAsmLayout::getSectionAddressSize(const MCSectionData *SD) const {
   // The size is the last fragment's end offset.
@@ -179,6 +160,46 @@ uint64_t MCAsmLayout::getSectionFileSize(const MCSectionData *SD) const {
   return getSectionAddressSize(SD);
 }
 
+uint64_t MCAsmLayout::computeBundlePadding(const MCFragment *F,
+                                           uint64_t FOffset, uint64_t FSize) {
+  uint64_t BundleSize = Assembler.getBundleAlignSize();
+  assert(BundleSize > 0 &&
+         "computeBundlePadding should only be called if bundling is enabled");
+  uint64_t BundleMask = BundleSize - 1;
+  uint64_t OffsetInBundle = FOffset & BundleMask;
+  uint64_t EndOfFragment = OffsetInBundle + FSize;
+
+  // There are two kinds of bundling restrictions:
+  //
+  // 1) For alignToBundleEnd(), add padding to ensure that the fragment will
+  //    *end* on a bundle boundary.
+  // 2) Otherwise, check if the fragment would cross a bundle boundary. If it
+  //    would, add padding until the end of the bundle so that the fragment
+  //    will start in a new one.
+  if (F->alignToBundleEnd()) {
+    // Three possibilities here:
+    //
+    // A) The fragment just happens to end at a bundle boundary, so we're good.
+    // B) The fragment ends before the current bundle boundary: pad it just
+    //    enough to reach the boundary.
+    // C) The fragment ends after the current bundle boundary: pad it until it
+    //    reaches the end of the next bundle boundary.
+    //
+    // Note: this code could be made shorter with some modulo trickery, but it's
+    // intentionally kept in its more explicit form for simplicity.
+    if (EndOfFragment == BundleSize)
+      return 0;
+    else if (EndOfFragment < BundleSize)
+      return BundleSize - EndOfFragment;
+    else { // EndOfFragment > BundleSize
+      return 2 * BundleSize - EndOfFragment;
+    }
+  } else if (EndOfFragment > BundleSize)
+    return BundleSize - OffsetInBundle;
+  else
+    return 0;
+}
+
 /* *** */
 
 MCFragment::MCFragment() : Kind(FragmentType(~0)) {
@@ -188,32 +209,15 @@ MCFragment::~MCFragment() {
 }
 
 MCFragment::MCFragment(FragmentType _Kind, MCSectionData *_Parent)
-  : Kind(_Kind),
-    // @LOCALMOD-BEGIN
-    BundleAlign(BundleAlignNone),
-    BundleGroupStart(false),
-    BundleGroupEnd(false),
-    BundlePadding(~UINT8_C(0)),
-    // @LOCALMOD-END
-    Parent(_Parent), Atom(0), Offset(~UINT64_C(0))
+  : Kind(_Kind), Parent(_Parent), Atom(0), Offset(~UINT64_C(0))
 {
   if (Parent)
     Parent->getFragmentList().push_back(this);
+}
 
-  // @LOCALMOD-BEGIN
-  if (Parent && Parent->isBundlingEnabled()) {
-    BundleAlign = Parent->getBundleAlignNext();
-    Parent->setBundleAlignNext(MCFragment::BundleAlignNone);
-    if (Parent->isBundleLocked()) {
-      BundleGroupStart = Parent->isBundleGroupFirstFrag();
-      BundleGroupEnd = false;
-      Parent->setBundleGroupFirstFrag(false);
-    } else {
-      BundleGroupStart = true;
-      BundleGroupEnd = true;
-    }
-  }
-  // @LOCALMOD-END
+/* *** */
+
+MCEncodedFragment::~MCEncodedFragment() {
 }
 
 /* *** */
 
@@ -224,90 +228,12 @@ MCSectionData::MCSectionData(const MCSection &_Section, MCAssembler *A)
   : Section(&_Section),
     Ordinal(~UINT32_C(0)),
     Alignment(1),
-    HasInstructions(false),
-// @LOCALMOD-BEGIN
-    BundlingEnabled(false),
-    BundleLocked(false),
-    BundleGroupFirstFrag(false),
-    BundleAlignNext(MCFragment::BundleAlignNone),
-    BundleOffsetKnown(false),
-    BundleOffset(0)
-// @LOCALMOD-END
+    BundleLockState(NotBundleLocked), BundleGroupBeforeFirstInst(false),
+    HasInstructions(false)
 {
   if (A)
     A->getSectionList().push_back(this);
-
-  // @LOCALMOD-BEGIN
-  BundleSize = A->getBackend().getBundleSize();
-  if (BundleSize && _Section.UseCodeAlign()) {
-    BundlingEnabled = true;
-    setAlignment(BundleSize);
-  }
-  // @LOCALMOD-END
-}
-
-// @LOCALMOD-BEGIN
-void MCSectionData::MarkBundleOffsetUnknown() {
-  BundleOffsetKnown = false;
-  BundleOffset = 0;
-}
-
-// Only create a new fragment if:
-// 1) we are emitting the first instruction of a bundle locked sequence.
-// 2) we are not currently emitting a bundle locked sequence and we cannot
-//    guarantee the instruction would not span a bundle boundary.
-// Otherwise, append to the current fragment to reduce the number of fragments.
-bool MCSectionData::ShouldCreateNewFragment(size_t Size) {
-  // The first instruction of a bundle locked region starts a new fragment.
-  if (isBundleLocked() && isBundleGroupFirstFrag())
-    return true;
-  // Unless we know the relative offset of the end of the current fragment,
-  // we need to create a new fragment.
-  if (!isBundleLocked() && !BundleOffsetKnown)
-    return true;
-  assert(BundleSize != 0 && "BundleSize needs to be non-zero");
-  assert(Size < BundleSize && "Instruction size must be less than BundleSize");
-  // If inserting the instruction would overlap a bundle boundary, start a
-  // new fragment.
-  // TODO(sehr): we could still explicitly insert a NOP and continue here.
-  if (BundleOffset + (unsigned) Size > BundleSize)
-    return true;
-  return false;
-}
-
-void MCSectionData::UpdateBundleOffset(size_t Size) {
-  // A bundle locked fragment could move if it spans a bundle boundary.
-  if (isBundleLocked()) {
-    BundleOffsetKnown = false;
-    return;
-  }
-  // If inserting the instruction would overlap a bundle boundary, starting a
-  // new fragment moves the known offset to the end of the instruction in the
-  // next bundle.
-  // TODO(sehr): we could insert a NOP and continue the fragment.
-  if (BundleOffset + (unsigned) Size > BundleSize)
-    BundleOffset = Size;
-  else
-    BundleOffset = BundleOffset + Size;
-}
-
-void MCSectionData::AlignBundleOffsetTo(size_t AlignBase) {
-  // If BundleOffset is already known, an alignment just moves bundleOffset.
-  if (BundleOffsetKnown) {
-    BundleOffset = RoundUpToAlignment(BundleOffset, AlignBase);
-    return;
-  }
-  // Otherwise, if AlignBase is at least as big as a bundle, then we know the
-  // offset relative to a bundle start.
-  if (AlignBase >= BundleSize) {
-    BundleOffsetKnown = true;
-    BundleOffset = 0;
-  } else {
-    BundleOffsetKnown = false;
-    BundleOffset = 0;
-  }
 }
-// @LOCALMOD-END
 
 /* *** */
 
@@ -330,12 +256,31 @@ MCAssembler::MCAssembler(MCContext &Context_, MCAsmBackend &Backend_,
                          MCCodeEmitter &Emitter_, MCObjectWriter &Writer_,
                          raw_ostream &OS_)
   : Context(Context_), Backend(Backend_), Emitter(Emitter_), Writer(Writer_),
-    OS(OS_), RelaxAll(false), NoExecStack(false), SubsectionsViaSymbols(false) {
+    OS(OS_), BundleAlignSize(0), RelaxAll(false), NoExecStack(false),
+    SubsectionsViaSymbols(false) {
 }
 
 MCAssembler::~MCAssembler() {
 }
 
+void MCAssembler::reset() {
+  Sections.clear();
+  Symbols.clear();
+  SectionMap.clear();
+  SymbolMap.clear();
+  IndirectSymbols.clear();
+  DataRegions.clear();
+  ThumbFuncs.clear();
+  RelaxAll = false;
+  NoExecStack = false;
+  SubsectionsViaSymbols = false;
+
+  // reset objects owned by us
+  getBackend().reset();
+  getEmitter().reset();
+  getWriter().reset();
+}
+
 bool MCAssembler::isSymbolLinkerVisible(const MCSymbol &Symbol) const {
   // Non-temporary labels should always be visible to the linker.
   if (!Symbol.isTemporary())
     return true;
@@ -442,18 +387,14 @@ uint64_t MCAssembler::computeFragmentSize(const MCAsmLayout &Layout,
                                           const MCFragment &F) const {
   switch (F.getKind()) {
   case MCFragment::FT_Data:
-    return cast<MCDataFragment>(F).getContents().size();
+  case MCFragment::FT_Relaxable:
+    return cast<MCEncodedFragment>(F).getContents().size();
   case MCFragment::FT_Fill:
     return cast<MCFillFragment>(F).getSize();
-  case MCFragment::FT_Inst:
-    return cast<MCInstFragment>(F).getInstSize();
 
   case MCFragment::FT_LEB:
     return cast<MCLEBFragment>(F).getContents().size();
-// @LOCALMOD-BEGIN
-  case MCFragment::FT_Tiny:
-    return cast<MCTinyFragment>(F).getContents().size();
-// @LOCALMOD-END
+
   case MCFragment::FT_Align: {
     const MCAlignFragment &AF = cast<MCAlignFragment>(F);
     unsigned Offset = Layout.getFragmentOffset(&AF);
@@ -493,153 +434,84 @@ uint64_t MCAssembler::computeFragmentSize(const MCAsmLayout &Layout,
   llvm_unreachable("invalid fragment kind");
 }
 
-// @LOCALMOD-BEGIN
-// Returns number of bytes of padding needed to align to bundle start.
-static uint64_t AddressToBundlePadding(uint64_t Address, uint64_t BundleMask) {
-  return (~Address + 1) & BundleMask;
-}
-
-static uint64_t ComputeBundleMask(uint64_t BundleSize) {
-  uint64_t BundleMask = BundleSize - 1;
-  assert(BundleSize != 0);
-  assert((BundleSize & BundleMask) == 0 &&
-         "Bundle size must be a power of 2!");
-  return BundleMask;
-}
-
-static unsigned ComputeGroupSize(MCFragment *F) {
-  if (!F->isBundleGroupStart()) {
-    return 0;
-  }
-
-  unsigned GroupSize = 0;
-  MCFragment *Cur = F;
-  while (Cur) {
-    switch (Cur->getKind()) {
-    default: llvm_unreachable("Unexpected fragment type in bundle!");
-    case MCFragment::FT_Align:
-    case MCFragment::FT_Org:
-    case MCFragment::FT_Fill:
-      if (Cur == F && Cur->isBundleGroupEnd()) {
-        return 0;
-      }
-      llvm_unreachable(".bundle_lock cannot contain .align, .org, or .fill");
-    case MCFragment::FT_Inst:
-      GroupSize += cast<MCInstFragment>(Cur)->getInstSize();
-      break;
-    case MCFragment::FT_Data:
-      GroupSize += cast<MCDataFragment>(Cur)->getContents().size();
-      break;
-    case MCFragment::FT_Tiny:
-      GroupSize += cast<MCTinyFragment>(Cur)->getContents().size();
-      break;
-    }
-    if (Cur->isBundleGroupEnd())
-      break;
-    Cur = Cur->getNextNode();
-  }
-  return GroupSize;
-}
-
-static uint8_t ComputeBundlePadding(const MCAssembler &Asm,
-                                    const MCAsmLayout &Layout,
-                                    MCFragment *F,
-                                    uint64_t FragmentOffset) {
-  if (!F->getParent()->isBundlingEnabled())
-    return 0;
-
-  uint64_t BundleSize = Asm.getBackend().getBundleSize();
-  uint64_t BundleMask = ComputeBundleMask(BundleSize);
-  unsigned GroupSize = ComputeGroupSize(F);
-
-  if (GroupSize > BundleSize) {
-    // EmitFill creates large groups consisting of repeated single bytes.
-    // These should be safe at any alignment, and in any case we cannot
-    // fix them up here.
-    return 0;
-  }
-
-  uint64_t Padding = 0;
-  uint64_t OffsetInBundle = FragmentOffset & BundleMask;
-
-  if (OffsetInBundle + GroupSize > BundleSize ||
-      F->getBundleAlign() == MCFragment::BundleAlignStart) {
-    // If this group would cross the bundle boundary, or this group must be
-    // aligned to the start of a bundle, then pad up to start of the next bundle
-    Padding += AddressToBundlePadding(OffsetInBundle, BundleMask);
-    OffsetInBundle = 0;
-  }
-  if (F->getBundleAlign() == MCFragment::BundleAlignEnd) {
-    // Push to the end of the bundle
-    Padding += AddressToBundlePadding(OffsetInBundle + GroupSize, BundleMask);
-  }
-  return Padding;
-}
-
-// Write out BundlePadding bytes in NOPs, being careful not to cross a bundle
-// boundary.
-static void WriteBundlePadding(const MCAssembler &Asm,
-                               const MCAsmLayout &Layout,
-                               uint64_t Offset, uint64_t TotalPadding,
-                               MCObjectWriter *OW) {
-  uint64_t BundleSize = Asm.getBackend().getBundleSize();
-  uint64_t BundleMask = ComputeBundleMask(BundleSize);
-  uint64_t PaddingLeft = TotalPadding;
-  uint64_t StartPos = Offset;
-
-  bool FirstWrite = true;
-  while (PaddingLeft > 0) {
-    uint64_t NopsToWrite =
-        FirstWrite ? AddressToBundlePadding(StartPos, BundleMask) :
-                     BundleSize;
-    if (NopsToWrite > PaddingLeft)
-      NopsToWrite = PaddingLeft;
-    if (!Asm.getBackend().writeNopData(NopsToWrite, OW))
-      report_fatal_error("unable to write nop sequence of " +
-                         Twine(NopsToWrite) + " bytes");
-    PaddingLeft -= NopsToWrite;
-    FirstWrite = false;
-  }
-}
-// @LOCALMOD-END
-
-void MCAsmLayout::LayoutFragment(MCFragment *F) {
+void MCAsmLayout::layoutFragment(MCFragment *F) {
   MCFragment *Prev = F->getPrevNode();
 
-  // We should never try to recompute something which is up-to-date.
-  assert(!isFragmentUpToDate(F) && "Attempt to recompute up-to-date fragment!");
-  // We should never try to compute the fragment layout if it's predecessor
-  // isn't up-to-date.
-  assert((!Prev || isFragmentUpToDate(Prev)) &&
-         "Attempt to compute fragment before it's predecessor!");
+  // We should never try to recompute something which is valid.
+  assert(!isFragmentValid(F) && "Attempt to recompute a valid fragment!");
+  // We should never try to compute the fragment layout if its predecessor
+  // isn't valid.
+  assert((!Prev || isFragmentValid(Prev)) &&
+         "Attempt to compute fragment before its predecessor!");
 
   ++stats::FragmentLayouts;
 
   // Compute fragment offset and size.
-  uint64_t Offset = 0;
   if (Prev)
-    Offset += Prev->Offset + getAssembler().computeFragmentSize(*this, *Prev);
-  // @LOCALMOD-BEGIN
-  F->BundlePadding = ComputeBundlePadding(getAssembler(), *this, F, Offset);
-  Offset += F->BundlePadding;
-  // @LOCALMOD-END
-  F->Offset = Offset;
+    F->Offset = Prev->Offset + getAssembler().computeFragmentSize(*this, *Prev);
+  else
+    F->Offset = 0;
   LastValidFragment[F->getParent()] = F;
+
+  // If bundling is enabled and this fragment has instructions in it, it has to
+  // obey the bundling restrictions. With padding, we'll have:
+  //
+  //
+  //        BundlePadding
+  //             |||
+  // -------------------------------------
+  //   Prev  |##########|       F        |
+  // -------------------------------------
+  //                    ^
+  //                    |
+  //                    F->Offset
+  //
+  // The fragment's offset will point to after the padding, and its computed
+  // size won't include the padding.
+  //
+  if (Assembler.isBundlingEnabled() && F->hasInstructions()) {
+    assert(isa<MCEncodedFragment>(F) &&
+           "Only MCEncodedFragment implementations have instructions");
+    uint64_t FSize = Assembler.computeFragmentSize(*this, *F);
+
+    if (FSize > Assembler.getBundleAlignSize())
+      report_fatal_error("Fragment can't be larger than a bundle size");
+
+    uint64_t RequiredBundlePadding = computeBundlePadding(F, F->Offset, FSize);
+    if (RequiredBundlePadding > UINT8_MAX)
+      report_fatal_error("Padding cannot exceed 255 bytes");
+    F->setBundlePadding(static_cast<uint8_t>(RequiredBundlePadding));
+    F->Offset += RequiredBundlePadding;
+  }
 }
 
+/// \brief Write the contents of a fragment to the given object writer. Expects
+/// a MCEncodedFragment.
+static void writeFragmentContents(const MCFragment &F, MCObjectWriter *OW) {
+  MCEncodedFragment &EF = cast<MCEncodedFragment>(F);
+  OW->WriteBytes(EF.getContents());
+}
 
-/// WriteFragmentData - Write the \p F data to the output file.
-static void WriteFragmentData(const MCAssembler &Asm, const MCAsmLayout &Layout,
-                              const MCFragment &F) {
+/// \brief Write the fragment \p F to the output file.
+static void writeFragment(const MCAssembler &Asm, const MCAsmLayout &Layout,
+                          const MCFragment &F) {
   MCObjectWriter *OW = &Asm.getWriter();
-  // @LOCALMOD-BEGIN
-  if (F.getParent()->isBundlingEnabled()) {
-    uint64_t BundlePadding = Layout.getFragmentPadding(&F);
-    uint64_t PaddingOffset = Layout.getFragmentOffset(&F) - BundlePadding;
-    WriteBundlePadding(Asm, Layout, PaddingOffset, BundlePadding, OW);
+
+  // Should NOP padding be written out before this fragment?
+  unsigned BundlePadding = F.getBundlePadding();
+  if (BundlePadding > 0) {
+    assert(Asm.isBundlingEnabled() &&
+           "Writing bundle padding with disabled bundling");
+    assert(F.hasInstructions() &&
+           "Writing bundle padding for a fragment without instructions");
+
+    if (!Asm.getBackend().writeNopData(BundlePadding, OW))
+      report_fatal_error("unable to write NOP sequence of " +
+                         Twine(BundlePadding) + " bytes");
   }
-  // @LOCALMOD-END
 
+  // This variable (and its dummy usage) is to participate in the assert at
+  // the end of the function.
   uint64_t Start = OW->getStream().tell();
   (void) Start;
 
@@ -649,6 +521,7 @@ static void WriteFragmentData(const MCAssembler &Asm, const MCAsmLayout &Layout,
   uint64_t FragmentSize = Asm.computeFragmentSize(Layout, F);
   switch (F.getKind()) {
   case MCFragment::FT_Align: {
+    ++stats::EmittedAlignFragments;
     MCAlignFragment &AF = cast<MCAlignFragment>(F);
     uint64_t Count = FragmentSize / AF.getValueSize();
 
@@ -668,16 +541,6 @@ static void WriteFragmentData(const MCAssembler &Asm, const MCAsmLayout &Layout,
     // bytes left to fill use the Value and ValueSize to fill the rest.
     // If we are aligning with nops, ask that target to emit the right data.
     if (AF.hasEmitNops()) {
-      // @LOCALMOD-BEGIN
-      if (Asm.getBackend().getBundleSize()) {
-        WriteBundlePadding(Asm, Layout,
-                           Layout.getFragmentOffset(&F),
-                           FragmentSize,
-                           OW);
-        break;
-      }
-      // @LOCALMOD-END
-
       if (!Asm.getBackend().writeNopData(Count, OW))
         report_fatal_error("unable to write nop sequence of " +
                            Twine(Count) + " bytes");
@@ -697,23 +560,18 @@ static void WriteFragmentData(const MCAssembler &Asm, const MCAsmLayout &Layout,
     break;
   }
 
-  case MCFragment::FT_Data: {
-    MCDataFragment &DF = cast<MCDataFragment>(F);
-    assert(FragmentSize == DF.getContents().size() && "Invalid size!");
-    OW->WriteBytes(DF.getContents().str());
+  case MCFragment::FT_Data:
+    ++stats::EmittedDataFragments;
+    writeFragmentContents(F, OW);
     break;
-  }
-  // @LOCALMOD-BEGIN
-  case MCFragment::FT_Tiny: {
-    MCTinyFragment &TF = cast<MCTinyFragment>(F);
-    assert(FragmentSize == TF.getContents().size() && "Invalid size!");
-    OW->WriteBytes(TF.getContents().str());
+
+  case MCFragment::FT_Relaxable:
+    ++stats::EmittedRelaxableFragments;
+    writeFragmentContents(F, OW);
     break;
-  }
-  // @LOCALMOD-END
 
   case MCFragment::FT_Fill: {
+    ++stats::EmittedFillFragments;
     MCFillFragment &FF = cast<MCFillFragment>(F);
     assert(FF.getValueSize() &&
            "Invalid virtual align in concrete fragment!");
@@ -730,12 +588,6 @@ static void WriteFragmentData(const MCAssembler &Asm, const MCAsmLayout &Layout,
     break;
   }
 
-  case MCFragment::FT_Inst: {
-    MCInstFragment &IF = cast<MCInstFragment>(F);
-    OW->WriteBytes(StringRef(IF.getCode().begin(), IF.getCode().size()));
-    break;
-  }
-
   case MCFragment::FT_LEB: {
     MCLEBFragment &LF = cast<MCLEBFragment>(F);
     OW->WriteBytes(LF.getContents().str());
@@ -743,6 +595,7 @@ static void WriteFragmentData(const MCAssembler &Asm, const MCAsmLayout &Layout,
   }
 
   case MCFragment::FT_Org: {
+    ++stats::EmittedOrgFragments;
     MCOrgFragment &OF = cast<MCOrgFragment>(F);
 
     for (uint64_t i = 0, e = FragmentSize; i != e; ++i)
@@ -763,7 +616,8 @@ static void WriteFragmentData(const MCAssembler &Asm, const MCAsmLayout &Layout,
   }
   }
 
-  assert(OW->getStream().tell() - Start == FragmentSize);
+  assert(OW->getStream().tell() - Start == FragmentSize &&
+         "The stream should advance by fragment size");
 }
 
 void MCAssembler::writeSectionData(const MCSectionData *SD,
@@ -809,9 +663,9 @@ void MCAssembler::writeSectionData(const MCSectionData *SD,
   uint64_t Start = getWriter().getStream().tell();
   (void)Start;
 
-  for (MCSectionData::const_iterator it = SD->begin(),
-         ie = SD->end(); it != ie; ++it)
-    WriteFragmentData(*this, Layout, *it);
+  for (MCSectionData::const_iterator it = SD->begin(), ie = SD->end();
+       it != ie; ++it)
+    writeFragment(*this, Layout, *it);
 
   assert(getWriter().getStream().tell() - Start ==
          Layout.getSectionAddressSize(SD));
@@ -858,9 +712,9 @@ void MCAssembler::Finish() {
     SD->setLayoutOrder(i);
 
     unsigned FragmentIndex = 0;
-    for (MCSectionData::iterator it2 = SD->begin(),
-           ie2 = SD->end(); it2 != ie2; ++it2)
-      it2->setLayoutOrder(FragmentIndex++);
+    for (MCSectionData::iterator iFrag = SD->begin(), iFragEnd = SD->end();
+         iFrag != iFragEnd; ++iFrag)
+      iFrag->setLayoutOrder(FragmentIndex++);
   }
 
   // Layout until everything fits.
@@ -888,24 +742,14 @@ void MCAssembler::Finish() {
   for (MCAssembler::iterator it = begin(), ie = end(); it != ie; ++it) {
     for (MCSectionData::iterator it2 = it->begin(),
            ie2 = it->end(); it2 != ie2; ++it2) {
-      MCDataFragment *DF = dyn_cast<MCDataFragment>(it2);
-      if (DF) {
-        for (MCDataFragment::fixup_iterator it3 = DF->fixup_begin(),
-               ie3 = DF->fixup_end(); it3 != ie3; ++it3) {
-          MCFixup &Fixup = *it3;
-          uint64_t FixedValue = handleFixup(Layout, *DF, Fixup);
-          getBackend().applyFixup(Fixup, DF->getContents().data(),
-                                  DF->getContents().size(), FixedValue);
-        }
-      }
-      MCInstFragment *IF = dyn_cast<MCInstFragment>(it2);
-      if (IF) {
-        for (MCInstFragment::fixup_iterator it3 = IF->fixup_begin(),
-               ie3 = IF->fixup_end(); it3 != ie3; ++it3) {
+      MCEncodedFragment *F = dyn_cast<MCEncodedFragment>(it2);
+      if (F) {
+        for (MCEncodedFragment::fixup_iterator it3 = F->fixup_begin(),
+               ie3 = F->fixup_end(); it3 != ie3; ++it3) {
           MCFixup &Fixup = *it3;
-          uint64_t FixedValue = handleFixup(Layout, *IF, Fixup);
-          getBackend().applyFixup(Fixup, IF->getCode().data(),
-                                  IF->getCode().size(), FixedValue);
+          uint64_t FixedValue = handleFixup(Layout, *F, Fixup);
+          getBackend().applyFixup(Fixup, F->getContents().data(),
+                                  F->getContents().size(), FixedValue);
         }
       }
     }
@@ -918,11 +762,8 @@ void MCAssembler::Finish() {
 }
 
 bool MCAssembler::fixupNeedsRelaxation(const MCFixup &Fixup,
-                                       const MCInstFragment *DF,
+                                       const MCRelaxableFragment *DF,
                                        const MCAsmLayout &Layout) const {
-  if (getRelaxAll())
-    return true;
-
   // If we cannot resolve the fixup value, it requires relaxation.
   MCValue Target;
   uint64_t Value;
@@ -932,25 +773,25 @@ bool MCAssembler::fixupNeedsRelaxation(const MCFixup &Fixup,
   return getBackend().fixupNeedsRelaxation(Fixup, Value, DF, Layout);
 }
 
-bool MCAssembler::fragmentNeedsRelaxation(const MCInstFragment *IF,
+bool MCAssembler::fragmentNeedsRelaxation(const MCRelaxableFragment *F,
                                           const MCAsmLayout &Layout) const {
   // If this inst doesn't ever need relaxation, ignore it. This occurs when we
   // are intentionally pushing out inst fragments, or because we relaxed a
   // previous instruction to one that doesn't need relaxation.
-  if (!getBackend().mayNeedRelaxation(IF->getInst()))
+  if (!getBackend().mayNeedRelaxation(F->getInst()))
     return false;
 
-  for (MCInstFragment::const_fixup_iterator it = IF->fixup_begin(),
-       ie = IF->fixup_end(); it != ie; ++it)
-    if (fixupNeedsRelaxation(*it, IF, Layout))
+  for (MCRelaxableFragment::const_fixup_iterator it = F->fixup_begin(),
+       ie = F->fixup_end(); it != ie; ++it)
+    if (fixupNeedsRelaxation(*it, F, Layout))
       return true;
 
   return false;
 }
 
 bool MCAssembler::relaxInstruction(MCAsmLayout &Layout,
-                                   MCInstFragment &IF) {
-  if (!fragmentNeedsRelaxation(&IF, Layout))
+                                   MCRelaxableFragment &F) {
+  if (!fragmentNeedsRelaxation(&F, Layout))
     return false;
 
   ++stats::RelaxedInstructions;
@@ -961,7 +802,7 @@ bool MCAssembler::relaxInstruction(MCAsmLayout &Layout,
 
   // Relax the fragment.
   MCInst Relaxed;
-  getBackend().relaxInstruction(IF.getInst(), Relaxed);
+  getBackend().relaxInstruction(F.getInst(), Relaxed);
 
   // Encode the new instruction.
   //
@@ -973,13 +814,10 @@ bool MCAssembler::relaxInstruction(MCAsmLayout &Layout,
   getEmitter().EncodeInstruction(Relaxed, VecOS, Fixups);
   VecOS.flush();
 
-  // Update the instruction fragment.
-  IF.setInst(Relaxed);
-  IF.getCode() = Code;
-  IF.getFixups().clear();
-  // FIXME: Eliminate copy.
-  for (unsigned i = 0, e = Fixups.size(); i != e; ++i)
-    IF.getFixups().push_back(Fixups[i]);
+  // Update the fragment.
+  F.setInst(Relaxed);
+  F.getContents() = Code;
+  F.getFixups() = Fixups;
 
   return true;
 }
@@ -1033,39 +871,43 @@ bool MCAssembler::relaxDwarfCallFrameFragment(MCAsmLayout &Layout,
   return OldSize != Data.size();
 }
 
-bool MCAssembler::layoutSectionOnce(MCAsmLayout &Layout,
-                                    MCSectionData &SD) {
-  MCFragment *FirstInvalidFragment = NULL;
-  // Scan for fragments that need relaxation.
-  for (MCSectionData::iterator it2 = SD.begin(),
-         ie2 = SD.end(); it2 != ie2; ++it2) {
-    // Check if this is an fragment that needs relaxation.
-    bool relaxedFrag = false;
-    switch(it2->getKind()) {
+bool MCAssembler::layoutSectionOnce(MCAsmLayout &Layout, MCSectionData &SD) {
+  // Holds the first fragment which needed relaxing during this layout. It will
+  // remain NULL if none were relaxed.
+  // When a fragment is relaxed, all the fragments following it should get
+  // invalidated because their offset is going to change.
+  MCFragment *FirstRelaxedFragment = NULL;
+
+  // Attempt to relax all the fragments in the section.
+  for (MCSectionData::iterator I = SD.begin(), IE = SD.end(); I != IE; ++I) {
    // Check if this is a fragment that needs relaxation.
+    bool RelaxedFrag = false;
+    switch(I->getKind()) {
    default:
-          break;
-    case MCFragment::FT_Inst:
-      relaxedFrag = relaxInstruction(Layout, *cast<MCInstFragment>(it2));
+      break;
+    case MCFragment::FT_Relaxable:
+      assert(!getRelaxAll() &&
+             "Did not expect a MCRelaxableFragment in RelaxAll mode");
+      RelaxedFrag = relaxInstruction(Layout, *cast<MCRelaxableFragment>(I));
       break;
     case MCFragment::FT_Dwarf:
-      relaxedFrag = relaxDwarfLineAddr(Layout,
-                                       *cast<MCDwarfLineAddrFragment>(it2));
+      RelaxedFrag = relaxDwarfLineAddr(Layout,
+                                       *cast<MCDwarfLineAddrFragment>(I));
      break;
    case MCFragment::FT_DwarfFrame:
-      relaxedFrag =
+      RelaxedFrag =
        relaxDwarfCallFrameFragment(Layout,
-                                   *cast<MCDwarfCallFrameFragment>(it2));
+                                   *cast<MCDwarfCallFrameFragment>(I));
      break;
     case MCFragment::FT_LEB:
-      relaxedFrag = relaxLEB(Layout, *cast<MCLEBFragment>(it2));
+      RelaxedFrag = relaxLEB(Layout, *cast<MCLEBFragment>(I));
       break;
     }
-    // Update the layout, and remember that we relaxed.
-    if (relaxedFrag && !FirstInvalidFragment)
-      FirstInvalidFragment = it2;
+    if (RelaxedFrag && !FirstRelaxedFragment)
+      FirstRelaxedFragment = I;
   }
-  if (FirstInvalidFragment) {
-    Layout.Invalidate(FirstInvalidFragment);
+  if (FirstRelaxedFragment) {
+    Layout.invalidateFragmentsAfter(FirstRelaxedFragment);
     return true;
   }
   return false;
@@ -1077,7 +919,7 @@ bool MCAssembler::layoutOnce(MCAsmLayout &Layout) {
   bool WasRelaxed = false;
   for (iterator it = begin(), ie = end(); it != ie; ++it) {
     MCSectionData &SD = *it;
-    while(layoutSectionOnce(Layout, SD))
+    while (layoutSectionOnce(Layout, SD))
       WasRelaxed = true;
   }
 
@@ -1113,29 +955,17 @@ void MCFragment::dump() {
   case MCFragment::FT_Align: OS << "MCAlignFragment"; break;
   case MCFragment::FT_Data: OS << "MCDataFragment"; break;
   case MCFragment::FT_Fill: OS << "MCFillFragment"; break;
-  case MCFragment::FT_Inst: OS << "MCInstFragment"; break;
+  case MCFragment::FT_Relaxable: OS << "MCRelaxableFragment"; break;
   case MCFragment::FT_Org: OS << "MCOrgFragment"; break;
   case MCFragment::FT_Dwarf: OS << "MCDwarfFragment"; break;
   case MCFragment::FT_DwarfFrame: OS << "MCDwarfCallFrameFragment"; break;
   case MCFragment::FT_LEB: OS << "MCLEBFragment"; break;
-  // @LOCALMOD-BEGIN
-  case MCFragment::FT_Tiny: OS << "MCTinyFragment"; break;
-  // @LOCALMOD-END
   }
 
   OS << "<MCFragment " << (void*) this << " LayoutOrder:" << LayoutOrder
-     << " Offset:" << Offset;
-  // @LOCALMOD-BEGIN
-  if (BundleGroupStart)
-    OS << " BundleGroupStart";
-  if (BundleGroupEnd)
-    OS << " BundleGroupEnd";
-  if (BundleAlign == BundleAlignStart)
-    OS << " BundleAlign: Start";
-  else if (BundleAlign == BundleAlignEnd)
-    OS << " BundleAlign: End";
-  OS << ">";
-  // @LOCALMOD-END
+     << " Offset:" << Offset
+     << " HasInstructions:" << hasInstructions()
+     << " BundlePadding:" << getBundlePadding() << ">";
 
   switch (getKind()) {
   case MCFragment::FT_Align: {
@@ -1159,7 +989,7 @@ void MCFragment::dump() {
     }
     OS << "] (" << Contents.size() << " bytes)";
 
-    if (!DF->getFixups().empty()) {
+    if (DF->fixup_begin() != DF->fixup_end()) {
      OS << ",\n ";
      OS << " Fixups:[";
      for (MCDataFragment::const_fixup_iterator it = DF->fixup_begin(),
@@ -1177,27 +1007,13 @@ void MCFragment::dump() {
        << " Size:" << FF->getSize();
     break;
   }
-  case MCFragment::FT_Inst: {
-    const MCInstFragment *IF = cast<MCInstFragment>(this);
+  case MCFragment::FT_Relaxable: {
+    const MCRelaxableFragment *F = cast<MCRelaxableFragment>(this);
     OS << "\n ";
     OS << " Inst:";
-    IF->getInst().dump_pretty(OS);
-    break;
-  }
-  // @LOCALMOD-BEGIN
-  case MCFragment::FT_Tiny: {
-    const MCTinyFragment *TF
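
As a reading aid (not part of the commit), here is a minimal standalone C++ sketch of the bundle-padding rule that the new MCAsmLayout::computeBundlePadding introduces above. All names (bundlePaddingSketch and its parameters) are hypothetical; only the two-case logic mirrors the diff: either pad so the fragment ends on a bundle boundary, or pad only when it would cross one.

```cpp
#include <cassert>
#include <cstdint>

// Illustrative sketch of the padding rule from the new computeBundlePadding.
// Names are hypothetical; BundleSize must be a power of two and the fragment
// must fit within a single bundle, as the real code also requires.
uint64_t bundlePaddingSketch(uint64_t FOffset, uint64_t FSize,
                             uint64_t BundleSize, bool AlignToBundleEnd) {
  assert(BundleSize && (BundleSize & (BundleSize - 1)) == 0);
  assert(FSize <= BundleSize);
  uint64_t OffsetInBundle = FOffset & (BundleSize - 1);
  uint64_t EndOfFragment = OffsetInBundle + FSize;

  if (AlignToBundleEnd) {
    // Pad so the fragment *ends* exactly on a bundle boundary.
    if (EndOfFragment == BundleSize)
      return 0;                            // already ends on a boundary
    if (EndOfFragment < BundleSize)
      return BundleSize - EndOfFragment;   // pad up to this boundary
    return 2 * BundleSize - EndOfFragment; // pad up to the next boundary
  }
  // Otherwise, only pad when the fragment would cross a boundary, pushing it
  // to the start of the next bundle.
  if (EndOfFragment > BundleSize)
    return BundleSize - OffsetInBundle;
  return 0;
}

int main() {
  // 32-byte bundles: a 10-byte fragment at offset 28 would cross a boundary,
  // so it gets 4 bytes of padding and starts at offset 32.
  assert(bundlePaddingSketch(28, 10, 32, false) == 4);
  // Align-to-end: a 10-byte fragment at offset 0 is padded by 22 bytes so it
  // ends exactly at offset 32.
  assert(bundlePaddingSketch(0, 10, 32, true) == 22);
  return 0;
}
```

With these numbers, the padded fragment either starts at or ends on a bundle boundary, which is the invariant the new layoutFragment enforces before advancing F->Offset past the computed BundlePadding.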