Diffstat (limited to 'lib')
-rw-r--r--  lib/Target/X86/CMakeLists.txt            |   1
-rw-r--r--  lib/Target/X86/X86.h                     |   5
-rw-r--r--  lib/Target/X86/X86.td                    |   9
-rw-r--r--  lib/Target/X86/X86PadShortFunction.cpp   | 184
-rw-r--r--  lib/Target/X86/X86Subtarget.cpp          |   1
-rw-r--r--  lib/Target/X86/X86Subtarget.h            |   5
-rw-r--r--  lib/Target/X86/X86TargetMachine.cpp      |   4
7 files changed, 206 insertions, 3 deletions
diff --git a/lib/Target/X86/CMakeLists.txt b/lib/Target/X86/CMakeLists.txt
index 19912cc6bc..140c80dee4 100644
--- a/lib/Target/X86/CMakeLists.txt
+++ b/lib/Target/X86/CMakeLists.txt
@@ -25,6 +25,7 @@ set(sources
X86JITInfo.cpp
X86MCInstLower.cpp
X86MachineFunctionInfo.cpp
+ X86PadShortFunction.cpp
X86RegisterInfo.cpp
X86SelectionDAGInfo.cpp
X86Subtarget.cpp
diff --git a/lib/Target/X86/X86.h b/lib/Target/X86/X86.h
index 1e7b98d94f..88dbb6d67a 100644
--- a/lib/Target/X86/X86.h
+++ b/lib/Target/X86/X86.h
@@ -63,6 +63,11 @@ FunctionPass *createX86JITCodeEmitterPass(X86TargetMachine &TM,
///
FunctionPass *createEmitX86CodeToMemory();
+/// createX86PadShortFunctions - Return a pass that pads short functions
+/// with NOOPs. This will prevent a stall when returning from the function
+/// on the Atom.
+FunctionPass *createX86PadShortFunctions();
+
} // End llvm namespace
#endif
diff --git a/lib/Target/X86/X86.td b/lib/Target/X86/X86.td
index b9d8cf7645..3ab2899365 100644
--- a/lib/Target/X86/X86.td
+++ b/lib/Target/X86/X86.td
@@ -123,8 +123,11 @@ def FeatureRTM : SubtargetFeature<"rtm", "HasRTM", "true",
def FeatureLeaForSP : SubtargetFeature<"lea-sp", "UseLeaForSP", "true",
"Use LEA for adjusting the stack pointer">;
def FeatureSlowDivide : SubtargetFeature<"idiv-to-divb",
- "HasSlowDivide", "true",
- "Use small divide for positive values less than 256">;
+ "HasSlowDivide", "true",
+ "Use small divide for positive values less than 256">;
+def FeaturePadShortFunctions : SubtargetFeature<"pad-short-functions",
+ "PadShortFunctions", "true",
+ "Pad short functions">;
//===----------------------------------------------------------------------===//
// X86 processors supported.
@@ -167,7 +170,7 @@ def : Proc<"penryn", [FeatureSSE41, FeatureCMPXCHG16B,
FeatureSlowBTMem]>;
def : AtomProc<"atom", [ProcIntelAtom, FeatureSSSE3, FeatureCMPXCHG16B,
FeatureMOVBE, FeatureSlowBTMem, FeatureLeaForSP,
- FeatureSlowDivide]>;
+ FeatureSlowDivide, FeaturePadShortFunctions]>;
// "Arrandale" along with corei3 and corei5
def : Proc<"corei7", [FeatureSSE42, FeatureCMPXCHG16B,
FeatureSlowBTMem, FeatureFastUAMem,
diff --git a/lib/Target/X86/X86PadShortFunction.cpp b/lib/Target/X86/X86PadShortFunction.cpp
new file mode 100644
index 0000000000..05f8a62a75
--- /dev/null
+++ b/lib/Target/X86/X86PadShortFunction.cpp
@@ -0,0 +1,184 @@
+//===-------- X86PadShortFunction.cpp - pad short functions -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the pass which will pad short functions to prevent
+// a stall if a function returns before the return address is ready. This
+// is needed for some Intel Atom processors.
+//
+//===----------------------------------------------------------------------===//
+
+#include <map>
+#include <algorithm>
+
+#define DEBUG_TYPE "x86-pad-short-functions"
+#include "X86.h"
+#include "X86InstrInfo.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetInstrInfo.h"
+using namespace llvm;
+
+STATISTIC(NumBBsPadded, "Number of basic blocks padded");
+
+namespace {
+ struct PadShortFunc : public MachineFunctionPass {
+ static char ID;
+ PadShortFunc() : MachineFunctionPass(ID)
+ , Threshold(4)
+ {}
+
+ virtual bool runOnMachineFunction(MachineFunction &MF);
+
+ virtual const char *getPassName() const
+ {
+ return "X86 Atom pad short functions";
+ }
+
+ private:
+ bool addPadding(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator &MBBI,
+ unsigned int NOOPsToAdd);
+
+ void findReturn(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ unsigned int Cycles);
+
+ bool cyclesUntilReturn(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ unsigned int &Cycles,
+ MachineBasicBlock::iterator *Location = 0);
+
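+    // Minimum number of cycles a path from the entry to a return should take;
+    // shorter paths are padded with NOOPs until they reach this length.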
+ const unsigned int Threshold;
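+    // Maps the number of a basic block containing a return to the largest
+    // cycle count found on any path from the entry block to that return.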
+ std::map<int, unsigned int> ReturnBBs;
+ };
+
+ char PadShortFunc::ID = 0;
+}
+
+FunctionPass *llvm::createX86PadShortFunctions() {
+ return new PadShortFunc();
+}
+
+/// runOnMachineFunction - Loop over all of the basic blocks, inserting
+/// NOOP instructions before early exits.
+bool PadShortFunc::runOnMachineFunction(MachineFunction &MF) {
+ // Process all basic blocks.
+ ReturnBBs.clear();
+
+ // Search through basic blocks and mark the ones that have early returns
+ findReturn(MF, *MF.begin(), 0);
+
+ int BBNum;
+ MachineBasicBlock::iterator ReturnLoc;
+ MachineBasicBlock *MBB;
+
+ unsigned int Cycles = 0;
+  unsigned int BBCycles = 0;
+
+ // Pad the identified basic blocks with NOOPs
+ for (std::map<int, unsigned int>::iterator I = ReturnBBs.begin();
+ I != ReturnBBs.end(); ++I) {
+ BBNum = I->first;
+ Cycles = I->second;
+
+ if (Cycles < Threshold) {
+ MBB = MF.getBlockNumbered(BBNum);
+ if (!cyclesUntilReturn(MF, *MBB, BBCycles, &ReturnLoc))
+ continue;
+
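+      // Pad so that the path to this return takes at least Threshold cycles.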
+ addPadding(MF, *MBB, ReturnLoc, Threshold - Cycles);
+ NumBBsPadded++;
+ }
+ }
+
+ return false;
+}
+
+/// findReturn - Starting at MBB, follow control flow and add all
+/// basic blocks that contain a return to ReturnBBs.
+void PadShortFunc::findReturn(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ unsigned int Cycles)
+{
+ // If this BB has a return, note how many cycles it takes to get there.
+ bool hasReturn = cyclesUntilReturn(MF, MBB, Cycles);
+ if (Cycles >= Threshold)
+ return;
+
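+    // If this block is reachable along more than one path, keep the larger
+    // cycle count.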
+ if (hasReturn) {
+ int BBNum = MBB.getNumber();
+ ReturnBBs[BBNum] = std::max(ReturnBBs[BBNum], Cycles);
+
+ return;
+ }
+
+ // Follow branches in BB and look for returns
+ for (MachineBasicBlock::succ_iterator I = MBB.succ_begin();
+ I != MBB.succ_end(); ++I) {
+ findReturn(MF, **I, Cycles);
+ }
+}
+
+/// cyclesUntilReturn - If the MBB has a return instruction, set Location to
+/// the instruction and return true; return false otherwise.
+/// Cycles will be incremented by the number of cycles taken to reach the
+/// return or the end of the BB, whichever occurs first.
+bool PadShortFunc::cyclesUntilReturn(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ unsigned int &Cycles,
+ MachineBasicBlock::iterator *Location)
+{
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+ const TargetMachine &Target = MF.getTarget();
+
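+  // Walk the block, accumulating instruction latencies from the itinerary
+  // until a return instruction or the end of the block is reached.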
+ for (MachineBasicBlock::iterator MBBI = MBB.begin(); MBBI != MBB.end();
+ ++MBBI) {
+ MachineInstr *MI = MBBI;
+    // Look for a return instruction. Calls to other functions do not count
+    // because the called function will be padded itself, if necessary.
+ if (MI->isReturn() && !MI->isCall()) {
+ if (Location)
+ *Location = MBBI;
+ return true;
+ }
+
+ Cycles += TII.getInstrLatency(Target.getInstrItineraryData(), MI);
+ }
+
+ return false;
+}
+
+/// addPadding - Add the given number of NOOP instructions to the function
+/// right before the return instruction at MBBI.
+bool PadShortFunc::addPadding(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator &MBBI,
+ unsigned int NOOPsToAdd)
+{
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+
+ DebugLoc DL = MBBI->getDebugLoc();
+
+ while (NOOPsToAdd-- > 0) {
+    // Atom has two instruction execution ports, so emit two NOOPs; they will
+    // be executed in parallel during a single cycle.
+ BuildMI(MBB, MBBI, DL, TII.get(X86::NOOP));
+ BuildMI(MBB, MBBI, DL, TII.get(X86::NOOP));
+ }
+
+ return true;
+}
+
diff --git a/lib/Target/X86/X86Subtarget.cpp b/lib/Target/X86/X86Subtarget.cpp
index d493b78752..53c28f4fce 100644
--- a/lib/Target/X86/X86Subtarget.cpp
+++ b/lib/Target/X86/X86Subtarget.cpp
@@ -350,6 +350,7 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU,
, UseLeaForSP(false)
, HasSlowDivide(false)
, PostRAScheduler(false)
+ , PadShortFunctions(false)
, stackAlignment(4)
// FIXME: this is a known good value for Yonah. How about others?
, MaxInlineSizeThreshold(128)
diff --git a/lib/Target/X86/X86Subtarget.h b/lib/Target/X86/X86Subtarget.h
index 44f38a1a91..080f4cfeca 100644
--- a/lib/Target/X86/X86Subtarget.h
+++ b/lib/Target/X86/X86Subtarget.h
@@ -146,6 +146,10 @@ protected:
/// PostRAScheduler - True if using post-register-allocation scheduler.
bool PostRAScheduler;
+  /// PadShortFunctions - True if short functions should be padded with NOOPs
+  /// to prevent a stall when returning too early.
+ bool PadShortFunctions;
+
/// stackAlignment - The minimum alignment known to hold of the stack frame on
/// entry to the function and which must be maintained by every function.
unsigned stackAlignment;
@@ -231,6 +235,7 @@ public:
bool hasCmpxchg16b() const { return HasCmpxchg16b; }
bool useLeaForSP() const { return UseLeaForSP; }
bool hasSlowDivide() const { return HasSlowDivide; }
+ bool padShortFunctions() const { return PadShortFunctions; }
bool isAtom() const { return X86ProcFamily == IntelAtom; }
diff --git a/lib/Target/X86/X86TargetMachine.cpp b/lib/Target/X86/X86TargetMachine.cpp
index ea99796f35..8393f7e91e 100644
--- a/lib/Target/X86/X86TargetMachine.cpp
+++ b/lib/Target/X86/X86TargetMachine.cpp
@@ -190,6 +190,10 @@ bool X86PassConfig::addPreEmitPass() {
addPass(createX86IssueVZeroUpperPass());
ShouldPrint = true;
}
+  if (getX86Subtarget().padShortFunctions()) {
+ addPass(createX86PadShortFunctions());
+ ShouldPrint = true;
+ }
return ShouldPrint;
}