//===-------- X86PadShortFunction.cpp - pad short functions -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the pass which will pad short functions to prevent
// a stall if a function returns before the return address is ready. This
// is needed for some Intel Atom processors.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "x86-pad-short-functions"
#include "X86.h"
#include "X86InstrInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include <algorithm>
#include <map>

using namespace llvm;

STATISTIC(NumBBsPadded, "Number of basic blocks padded");

namespace {
  struct PadShortFunc : public MachineFunctionPass {
    static char ID;
    PadShortFunc() : MachineFunctionPass(ID), Threshold(4) {}

    virtual bool runOnMachineFunction(MachineFunction &MF);

    virtual const char *getPassName() const {
      return "X86 Atom pad short functions";
    }

  private:
    bool addPadding(MachineFunction &MF,
                    MachineBasicBlock &MBB,
                    MachineBasicBlock::iterator &MBBI,
                    unsigned int NOOPsToAdd);

    void findReturn(MachineFunction &MF,
                    MachineBasicBlock &MBB,
                    unsigned int Cycles);

    bool cyclesUntilReturn(MachineFunction &MF,
                           MachineBasicBlock &MBB,
                           unsigned int &Cycles,
                           MachineBasicBlock::iterator *Location = 0);

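    // Returns reachable in fewer than Threshold cycles from the function
    // entry are padded with NOOPs until they take Threshold cycles.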
    const unsigned int Threshold;
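
    // Map from basic block number to the cycle count of the longest path
    // found from the function entry to the return in that block.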
    std::map<int, unsigned int> ReturnBBs;
  };

  char PadShortFunc::ID = 0;
}

FunctionPass *llvm::createX86PadShortFunctions() {
  return new PadShortFunc();
}

/// runOnMachineFunction - Loop over all of the basic blocks, inserting
/// NOOP instructions before early exits.
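/// Returns true if any padding was added.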
bool PadShortFunc::runOnMachineFunction(MachineFunction &MF) {
  // Reset state left over from the previous function.
  ReturnBBs.clear();

  // Search through basic blocks and mark the ones that have early returns.
  findReturn(MF, *MF.begin(), 0);

  bool MadeChange = false;

  // Pad the identified basic blocks with NOOPs.
  for (std::map<int, unsigned int>::iterator I = ReturnBBs.begin();
       I != ReturnBBs.end(); ++I) {
    int BBNum = I->first;
    unsigned int Cycles = I->second;

    if (Cycles < Threshold) {
      MachineBasicBlock *MBB = MF.getBlockNumbered(BBNum);

      // Re-scan the block, this time only to locate the return instruction;
      // the cycle count it reports is discarded since ReturnBBs already
      // holds it.
      MachineBasicBlock::iterator ReturnLoc;
      unsigned int BBCycles = 0;
      if (!cyclesUntilReturn(MF, *MBB, BBCycles, &ReturnLoc))
        continue;

      MadeChange |= addPadding(MF, *MBB, ReturnLoc, Threshold - Cycles);
      NumBBsPadded++;
    }
  }

  return MadeChange;
}

/// findReturn - Starting at MBB, follow control flow and add all
/// basic blocks that contain a return to ReturnBBs.
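/// The search along a path stops once the accumulated cycle count reaches
/// Threshold, since such a return no longer needs padding.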
void PadShortFunc::findReturn(MachineFunction &MF,
                              MachineBasicBlock &MBB,
                              unsigned int Cycles) {
  // If this BB has a return, note how many cycles it takes to get there.
  bool hasReturn = cyclesUntilReturn(MF, MBB, Cycles);
  if (Cycles >= Threshold)
    return;

  if (hasReturn) {
    int BBNum = MBB.getNumber();
    // Keep the longest path to this return seen so far.
    ReturnBBs[BBNum] = std::max(ReturnBBs[BBNum], Cycles);
    return;
  }

  // Follow branches in BB and look for returns.
  for (MachineBasicBlock::succ_iterator I = MBB.succ_begin();
       I != MBB.succ_end(); ++I) {
    findReturn(MF, **I, Cycles);
  }
}

/// cyclesUntilReturn - If the MBB has a return instruction, set Location
/// to the instruction and return true. Return false otherwise.
/// Cycles will be incremented by the number of cycles taken to reach the
/// return or the end of the BB, whichever occurs first.
bool PadShortFunc::cyclesUntilReturn(MachineFunction &MF,
                                     MachineBasicBlock &MBB,
                                     unsigned int &Cycles,
                                     MachineBasicBlock::iterator *Location) {
  const TargetMachine &Target = MF.getTarget();
  const TargetInstrInfo &TII = *Target.getInstrInfo();

  for (MachineBasicBlock::iterator MBBI = MBB.begin();
       MBBI != MBB.end(); ++MBBI) {
    MachineInstr *MI = MBBI;

    // Mark basic blocks with a return instruction. Calls to other functions
    // do not count because the called function will be padded, if necessary.
    if (MI->isReturn() && !MI->isCall()) {
      if (Location)
        *Location = MBBI;
      return true;
    }

    Cycles += TII.getInstrLatency(Target.getInstrItineraryData(), MI);
  }

  return false;
}

/// addPadding - Add the given number of NOOP instructions to the function
/// right before the return at MBBI.
bool PadShortFunc::addPadding(MachineFunction &MF,
                              MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator &MBBI,
                              unsigned int NOOPsToAdd) {
  const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
  DebugLoc DL = MBBI->getDebugLoc();

  while (NOOPsToAdd-- > 0) {
    // Since Atom has two instruction execution ports, the code emits two
    // NOOPs, which will be executed in parallel during one cycle.
    BuildMI(MBB, MBBI, DL, TII.get(X86::NOOP));
    BuildMI(MBB, MBBI, DL, TII.get(X86::NOOP));
  }

  return true;
}