author    Austin Benson <arbenson@google.com>  2012-07-31 16:19:17 -0700
committer Derek Schuff <dschuff@chromium.org>  2012-07-31 16:19:17 -0700
commit    a048f2f2bf5138e8f307fb849641f9d0a7b4db63 (patch)
tree      79d726aec1a092648663f70588ca2c65f10d1e52
parent    b72d2a7aa1868c1cd6e8593e991645833b4212a5 (diff)
Add sfi-zero-based-sandbox option for zero-based sandbox model support
on 64-bit systems.

Review URL: http://codereview.chromium.org/10692083/
 lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp | 19
 lib/Target/X86/MCTargetDesc/X86MCNaCl.cpp        | 90
 lib/Target/X86/X86NaClRewritePass.cpp            | 49
 lib/Target/X86/X86RegisterInfo.cpp               | 14
 4 files changed, 128 insertions(+), 44 deletions(-)
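
The flag is threaded through four translation units with LLVM's cl::opt
machinery: a single definition in X86MCNaCl.cpp, plus matching extern
references in the other three files. A minimal sketch of that pattern (the
real declarations appear verbatim in the hunks below):

    #include "llvm/Support/CommandLine.h"
    using namespace llvm;

    // In X86MCNaCl.cpp -- the one definition; -sfi-zero-based-sandbox
    // defaults to off, so the existing r15-based sandboxing is unchanged.
    cl::opt<bool> FlagUseZeroBasedSandbox(
        "sfi-zero-based-sandbox",
        cl::desc("Use a zero-based sandbox model for the NaCl SFI."),
        cl::init(false));

    // In X86MCCodeEmitter.cpp, X86NaClRewritePass.cpp and
    // X86RegisterInfo.cpp -- a matching extern declaration:
    //   extern cl::opt<bool> FlagUseZeroBasedSandbox;
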
diff --git a/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp b/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
index ec5b92e317..168b42f131 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
@@ -22,10 +22,15 @@
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
+// @LOCALMOD
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+// @LOCALMOD
+extern cl::opt<bool> FlagUseZeroBasedSandbox;
+
namespace {
class X86MCCodeEmitter : public MCCodeEmitter {
X86MCCodeEmitter(const X86MCCodeEmitter &); // DO NOT IMPLEMENT
@@ -843,6 +848,8 @@ void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
int MemOperand, const MCInst &MI,
const MCInstrDesc &Desc,
raw_ostream &OS) const {
+ // @LOCALMOD
+ const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
// Emit the lock opcode prefix as needed.
if (TSFlags & X86II::LOCK)
@@ -871,6 +878,18 @@ void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
need_address_override = false;
}
+ // @LOCALMOD-begin
+  // With the zero-based sandbox model, CALLs and JMPs must land in
+  // the bottom 4 GB. For reads and writes, the zero-based sandbox
+  // model uses 32-bit memory operands, which force an address-size
+  // prefix byte through the logic above. Since CALL and JMP carry no
+  // memory operand, that logic cannot apply, so we handle them here.
+ if (UseZeroBasedSandbox &&
+ (MI.getOpcode() == X86::CALL64r ||
+ MI.getOpcode() == X86::JMP64r))
+ need_address_override = true;
+ // @LOCALMOD-end
+
if (need_address_override)
EmitByte(0x67, CurByte, OS);
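
Background for the hunk above: on x86-64, the 0x67 address-size override
prefix shrinks the effective-address computation to 32 bits, which is what
confines a target to the low 4 GB. A standalone sketch of the decision,
with the LLVM types swapped for plain ones (Opc, emitAddressOverride and
NeedFromMemOperand are illustrative names, not LLVM API):

    #include <cstdint>
    #include <vector>

    enum Opc { CALL64r, JMP64r, OtherOp };

    // Loads and stores already get the prefix from their 32-bit memory
    // operands; indirect 64-bit calls and jumps carry no memory operand,
    // so the zero-based sandbox must special-case them.
    void emitAddressOverride(Opc Opcode, bool NeedFromMemOperand,
                             bool UseZeroBasedSandbox,
                             std::vector<uint8_t> &Bytes) {
      bool Need = NeedFromMemOperand;
      if (UseZeroBasedSandbox && (Opcode == CALL64r || Opcode == JMP64r))
        Need = true;
      if (Need)
        Bytes.push_back(0x67); // address-size override prefix
    }
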
diff --git a/lib/Target/X86/MCTargetDesc/X86MCNaCl.cpp b/lib/Target/X86/MCTargetDesc/X86MCNaCl.cpp
index 6b42feee68..1c55698d00 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCNaCl.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86MCNaCl.cpp
@@ -30,13 +30,19 @@ using namespace llvm;
// help with linking this code with non-sandboxed libs (at least for x86-32).
cl::opt<int> FlagSfiX86JmpMask("sfi-x86-jmp-mask", cl::init(-32));
+cl::opt<bool> FlagUseZeroBasedSandbox("sfi-zero-based-sandbox",
+ cl::desc("Use a zero-based sandbox model"
+ " for the NaCl SFI."),
+ cl::init(false));
+
static unsigned PrefixSaved = 0;
static bool PrefixPass = false;
-// See the note below where this function is defined.
-namespace llvm {
+// See the notes below where these functions are defined.
+namespace {
unsigned getX86SubSuperRegister_(unsigned Reg, EVT VT, bool High=false);
-}
+unsigned DemoteRegTo32_(unsigned RegIn);
+} // namespace
static void EmitDirectCall(const MCOperand &Op, bool Is64Bit,
MCStreamer &Out) {
@@ -52,6 +58,7 @@ static void EmitDirectCall(const MCOperand &Op, bool Is64Bit,
static void EmitIndirectBranch(const MCOperand &Op, bool Is64Bit, bool IsCall,
MCStreamer &Out) {
+ const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
const int JmpMask = FlagSfiX86JmpMask;
const unsigned Reg32 = Op.getReg();
const unsigned Reg64 = getX86SubSuperRegister_(Reg32, MVT::i64);
@@ -68,7 +75,7 @@ static void EmitIndirectBranch(const MCOperand &Op, bool Is64Bit, bool IsCall,
ANDInst.addOperand(MCOperand::CreateImm(JmpMask));
Out.EmitInstruction(ANDInst);
- if (Is64Bit) {
+ if (Is64Bit && !UseZeroBasedSandbox) {
MCInst InstADD;
InstADD.setOpcode(X86::ADD64rr);
InstADD.addOperand(MCOperand::CreateReg(Reg64));
@@ -118,7 +125,9 @@ static void EmitTrap(bool Is64Bit, MCStreamer &Out) {
// Rewrite to:
// X86-32: mov $0, 0
// X86-64: mov $0, (%r15)
- unsigned BaseReg = Is64Bit ? X86::R15 : 0;
+ const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
+ unsigned BaseReg = Is64Bit && !UseZeroBasedSandbox ? X86::R15 : 0;
+
MCInst Tmp;
Tmp.setOpcode(X86::MOV32mi);
Tmp.addOperand(MCOperand::CreateReg(BaseReg)); // BaseReg
@@ -134,15 +143,19 @@ static void EmitTrap(bool Is64Bit, MCStreamer &Out) {
// Fix a register after being truncated to 32-bits.
static void EmitRegFix(unsigned Reg64, MCStreamer &Out) {
// lea (%rsp, %r15, 1), %rsp
- MCInst Tmp;
- Tmp.setOpcode(X86::LEA64r);
- Tmp.addOperand(MCOperand::CreateReg(Reg64)); // DestReg
- Tmp.addOperand(MCOperand::CreateReg(Reg64)); // BaseReg
- Tmp.addOperand(MCOperand::CreateImm(1)); // Scale
- Tmp.addOperand(MCOperand::CreateReg(X86::R15)); // IndexReg
- Tmp.addOperand(MCOperand::CreateImm(0)); // Offset
- Tmp.addOperand(MCOperand::CreateReg(0)); // SegmentReg
- Out.EmitInstruction(Tmp);
+  // We do not need to add the R15 base for the zero-based sandbox model.
+ const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
+ if (!UseZeroBasedSandbox) {
+ MCInst Tmp;
+ Tmp.setOpcode(X86::LEA64r);
+ Tmp.addOperand(MCOperand::CreateReg(Reg64)); // DestReg
+ Tmp.addOperand(MCOperand::CreateReg(Reg64)); // BaseReg
+ Tmp.addOperand(MCOperand::CreateImm(1)); // Scale
+ Tmp.addOperand(MCOperand::CreateReg(X86::R15)); // IndexReg
+ Tmp.addOperand(MCOperand::CreateImm(0)); // Offset
+ Tmp.addOperand(MCOperand::CreateReg(0)); // SegmentReg
+ Out.EmitInstruction(Tmp);
+ }
}
static void EmitSPArith(unsigned Opc, const MCOperand &ImmOp,
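
The effect of EmitRegFix above can be summarized arithmetically: truncating
a register to 32 bits clips it into a 4 GB range, and the classic model then
relocates that range by adding the r15 base. A hedged arithmetic model of
that (not emitter code; fixAfterTruncate is an illustrative name):

    #include <cstdint>

    // After a register is truncated to its low 32 bits, the classic
    // sandbox re-adds the base held in %r15 to form a valid in-sandbox
    // pointer; with a zero base, the truncation alone already suffices.
    uint64_t fixAfterTruncate(uint32_t Low32, uint64_t SandboxBase,
                              bool UseZeroBasedSandbox) {
      if (UseZeroBasedSandbox)
        return Low32;              // base is 0: no fix-up emitted
      return SandboxBase + Low32;  // lea (%reg,%r15,1), %reg
    }
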
@@ -254,6 +267,7 @@ static void EmitLoad(bool Is64Bit,
unsigned Offset,
unsigned SegmentReg,
MCStreamer &Out) {
+ const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
// Load DestReg from address BaseReg + Scale * IndexReg + Offset
MCInst Load;
Load.setOpcode(Is64Bit ? X86::MOV64rm : X86::MOV32rm);
@@ -276,6 +290,7 @@ static void EmitStore(bool Is64Bit,
unsigned SegmentReg,
unsigned SrcReg,
MCStreamer &Out) {
+ const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
// Store SrcReg to address BaseReg + Scale * IndexReg + Offset
MCInst Store;
Store.setOpcode(Is64Bit ? X86::MOV64mr : X86::MOV32mr);
@@ -301,6 +316,7 @@ static void EmitAndRegReg(bool Is64Bit, unsigned DestReg,
static bool SandboxMemoryRef(MCInst *Inst,
unsigned *IndexReg,
MCStreamer &Out) {
+ const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
for (unsigned i = 0, last = Inst->getNumOperands(); i < last; i++) {
if (!Inst->getOperand(i).isReg() ||
Inst->getOperand(i).getReg() != X86::PSEUDO_NACL_SEG) {
@@ -312,6 +328,10 @@ static bool SandboxMemoryRef(MCInst *Inst,
// So if we found a match for a segment register value, we know that
// the index register is exactly two operands prior.
*IndexReg = Inst->getOperand(i - 2).getReg();
+
+ if (UseZeroBasedSandbox)
+ Inst->getOperand(i - 2).setReg(DemoteRegTo32_(*IndexReg));
+
// Remove the PSEUDO_NACL_SEG annotation.
Inst->getOperand(i).setReg(0);
return true;
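
The `i - 2` arithmetic in SandboxMemoryRef relies on the fixed five-operand
encoding of an x86 memory reference in an MCInst: base, scale, index,
displacement, segment. A small sketch of that layout (the function name is
illustrative):

    #include <cassert>

    // An x86 memory reference occupies 5 consecutive MCInst operands:
    //   SegIdx-4: base register
    //   SegIdx-3: scale immediate
    //   SegIdx-2: index register  <- demoted under the zero-based model
    //   SegIdx-1: displacement
    //   SegIdx:   segment register (PSEUDO_NACL_SEG marks sandboxed refs)
    unsigned indexRegOperandIdx(unsigned SegIdx) {
      assert(SegIdx >= 4 && "memory reference needs five operands");
      return SegIdx - 2;
    }
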
@@ -347,6 +367,7 @@ static void EmitTLSAddr32(const MCInst &Inst, MCStreamer &Out) {
static void EmitREST(const MCInst &Inst, unsigned Reg32, bool IsMem, MCStreamer &Out) {
+ const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
unsigned Reg64 = getX86SubSuperRegister_(Reg32, MVT::i64);
Out.EmitBundleLock();
if (!IsMem) {
@@ -354,7 +375,8 @@ static void EmitREST(const MCInst &Inst, unsigned Reg32, bool IsMem, MCStreamer
} else {
unsigned IndexReg;
MCInst SandboxedInst = Inst;
- if (SandboxMemoryRef(&SandboxedInst, &IndexReg, Out)) {
+ if (SandboxMemoryRef(&SandboxedInst, &IndexReg, Out) &&
+ !UseZeroBasedSandbox) {
EmitRegTruncate(IndexReg, Out);
}
EmitLoad(false,
@@ -377,11 +399,13 @@ static void EmitREST(const MCInst &Inst, unsigned Reg32, bool IsMem, MCStreamer
// The JMP_BUF is a structure that has the maximum size over all supported
// architectures. The callee-saves registers plus [er]ip and [er]sp are stored
// into the JMP_BUF.
+// TODO(arbenson): Is this code dead? If so, clean it up.
static void EmitSetjmp(bool Is64Bit, MCStreamer &Out) {
+ const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
unsigned JmpBuf = Is64Bit ? X86::RDI : X86::ECX;
unsigned RetAddr = Is64Bit ? X86::RDX : X86::EDX;
if (Is64Bit) {
- unsigned BasePtr = X86::R15;
+ unsigned BasePtr = UseZeroBasedSandbox ? 0 : X86::R15;
unsigned Segment = X86::PSEUDO_NACL_SEG;
// Save the registers.
EmitStore(true, BasePtr, 1, JmpBuf, 0, Segment, X86::RBX, Out);
@@ -410,14 +434,16 @@ static void EmitSetjmp(bool Is64Bit, MCStreamer &Out) {
// value is in %eax.
// The JMP_BUF is a structure that has the maximum size over all supported
// architectures. The saved registers are restored from the JMP_BUF.
+// TODO(arbenson): Is this code dead? If so, clean it up.
static void EmitLongjmp(bool Is64Bit, MCStreamer &Out) {
+ const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
unsigned JmpBuf = Is64Bit ? X86::RDI : X86::ECX;
// If the return value was 0, make it 1.
EmitAndRegReg(false, X86::EAX, X86::EAX, Out);
EmitMoveRegImm32(false, X86::EBX, 1, Out);
EmitCmove(false, X86::EAX, X86::EBX, Out);
if (Is64Bit) {
- unsigned BasePtr = X86::R15;
+ unsigned BasePtr = UseZeroBasedSandbox ? 0 : X86::R15;
unsigned Segment = X86::PSEUDO_NACL_SEG;
// Restore the registers.
EmitLoad(true, X86::RBX, BasePtr, 1, JmpBuf, 0, Segment, Out);
@@ -470,6 +496,7 @@ namespace llvm {
// these instead of combined instructions. At this time, having only
// one explicit prefix is supported.
bool CustomExpandInstNaClX86(const MCInst &Inst, MCStreamer &Out) {
+ const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
// If we are emitting to .s, just emit all pseudo-instructions directly.
if (Out.hasRawTextSupport()) {
return false;
@@ -601,12 +628,19 @@ bool CustomExpandInstNaClX86(const MCInst &Inst, MCStreamer &Out) {
unsigned PrefixLocal = PrefixSaved;
PrefixSaved = 0;
- Out.EmitBundleLock();
- EmitRegTruncate(IndexReg, Out);
+ if (PrefixLocal || !UseZeroBasedSandbox)
+ Out.EmitBundleLock();
+
+ if (!UseZeroBasedSandbox) {
+ EmitRegTruncate(IndexReg, Out);
+ }
+
if (PrefixLocal)
EmitPrefix(PrefixLocal, Out);
Out.EmitInstruction(SandboxedInst);
- Out.EmitBundleUnlock();
+
+ if (PrefixLocal || !UseZeroBasedSandbox)
+ Out.EmitBundleUnlock();
return true;
}
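
The bundling logic above collapses to a small decision table: the classic
model must bundle the index-register truncation together with the memory
access, while the zero-based model needs a bundle only to keep an explicit
prefix glued to its instruction. A sketch (needsBundle is an illustrative
name):

    // classic model           -> bundle { truncate index; [prefix]; op }
    // zero-based, with prefix -> bundle { prefix; op }
    // zero-based, no prefix   -> op alone, no bundle required
    bool needsBundle(bool UseZeroBasedSandbox, bool HasPrefix) {
      return HasPrefix || !UseZeroBasedSandbox;
    }
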
@@ -633,7 +667,7 @@ bool CustomExpandInstNaClX86(const MCInst &Inst, MCStreamer &Out) {
// eventually be moved to MCTargetDesc, and then this copy can be
// removed.
-namespace llvm {
+namespace {
unsigned getX86SubSuperRegister_(unsigned Reg, EVT VT, bool High) {
switch (VT.getSimpleVT().SimpleTy) {
default: return Reg;
@@ -799,5 +833,19 @@ unsigned getX86SubSuperRegister_(unsigned Reg, EVT VT, bool High) {
return Reg;
}
+
+// This is a copy of DemoteRegTo32 from X86NaClRewritePass.cpp.
+// We cannot use the original because it uses part of libLLVMX86CodeGen,
+// which cannot be a dependency of this module (libLLVMX86Desc).
+// Note that this function calls getX86SubSuperRegister_, which is
+// also a copied function for the same reason.
+
+unsigned DemoteRegTo32_(unsigned RegIn) {
+ if (RegIn == 0)
+ return 0;
+ unsigned RegOut = getX86SubSuperRegister_(RegIn, MVT::i32, false);
+ assert(RegOut != 0);
+ return RegOut;
}
} // namespace
// @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
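
Hypothetical uses of the copied helper, assuming the X86 register enums are
in scope (these pairs follow from getX86SubSuperRegister_'s i32 tables):

    #include <cassert>

    void demoteExamples() {
      assert(DemoteRegTo32_(X86::RBX) == X86::EBX);  // classic GPR
      assert(DemoteRegTo32_(X86::R15) == X86::R15D); // extended GPR
      assert(DemoteRegTo32_(0) == 0);                // "no register" passes through
    }
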
diff --git a/lib/Target/X86/X86NaClRewritePass.cpp b/lib/Target/X86/X86NaClRewritePass.cpp
index 9b0922d2d0..7db3aee1dd 100644
--- a/lib/Target/X86/X86NaClRewritePass.cpp
+++ b/lib/Target/X86/X86NaClRewritePass.cpp
@@ -32,6 +32,8 @@
using namespace llvm;
+extern cl::opt<bool> FlagUseZeroBasedSandbox;
+
namespace {
class X86NaClRewritePass : public MachineFunctionPass {
public:
@@ -131,8 +133,10 @@ static bool IsDirectBranch(const MachineInstr &MI) {
}
static bool IsRegAbsolute(unsigned Reg) {
+ const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
return (Reg == X86::RSP || Reg == X86::RBP ||
- Reg == X86::R15 || Reg == X86::RIP);
+ (Reg == X86::R15 && !UseZeroBasedSandbox) ||
+ Reg == X86::RIP);
}
static bool FindMemoryOperand(const MachineInstr &MI, unsigned* index) {
@@ -204,6 +208,7 @@ X86NaClRewritePass::TraceLog(const char *func,
bool X86NaClRewritePass::ApplyStackSFI(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI) {
+ const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
TraceLog("ApplyStackSFI", MBB, MBBI);
assert(Is64Bit);
MachineInstr &MI = *MBBI;
@@ -233,7 +238,7 @@ bool X86NaClRewritePass::ApplyStackSFI(MachineBasicBlock &MBB,
if (NewOpc) {
BuildMI(MBB, MBBI, DL, TII->get(NewOpc))
.addImm(MI.getOperand(2).getImm())
- .addReg(X86::R15);
+ .addReg(UseZeroBasedSandbox ? 0 : X86::R15);
MI.eraseFromParent();
return true;
}
@@ -272,7 +277,7 @@ bool X86NaClRewritePass::ApplyStackSFI(MachineBasicBlock &MBB,
const MachineOperand &Offset = MI.getOperand(4);
BuildMI(MBB, MBBI, DL, TII->get(X86::NACL_SPADJi32))
.addImm(Offset.getImm())
- .addReg(X86::R15);
+ .addReg(UseZeroBasedSandbox ? 0 : X86::R15);
MI.eraseFromParent();
return true;
}
@@ -280,7 +285,7 @@ bool X86NaClRewritePass::ApplyStackSFI(MachineBasicBlock &MBB,
if (Opc == X86::MOV32rr || Opc == X86::MOV64rr) {
BuildMI(MBB, MBBI, DL, TII->get(X86::NACL_RESTSPr))
.addReg(DemoteRegTo32(MI.getOperand(1).getReg()))
- .addReg(X86::R15);
+ .addReg(UseZeroBasedSandbox ? 0 : X86::R15);
MI.eraseFromParent();
return true;
}
@@ -292,7 +297,7 @@ bool X86NaClRewritePass::ApplyStackSFI(MachineBasicBlock &MBB,
.addOperand(MI.getOperand(3)) // Index
.addOperand(MI.getOperand(4)) // Offset
.addOperand(MI.getOperand(5)) // Segment
- .addReg(X86::R15);
+ .addReg(UseZeroBasedSandbox ? 0 : X86::R15);
MI.eraseFromParent();
return true;
}
@@ -303,6 +308,7 @@ bool X86NaClRewritePass::ApplyStackSFI(MachineBasicBlock &MBB,
bool X86NaClRewritePass::ApplyFrameSFI(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI) {
+ const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
TraceLog("ApplyFrameSFI", MBB, MBBI);
assert(Is64Bit);
MachineInstr &MI = *MBBI;
@@ -323,10 +329,10 @@ bool X86NaClRewritePass::ApplyFrameSFI(MachineBasicBlock &MBB,
return false;
// Rewrite: mov %rbp, %rX
- // To: naclrestbp %eX, %r15
+ // To: naclrestbp %eX, %rZP
BuildMI(MBB, MBBI, DL, TII->get(X86::NACL_RESTBPr))
.addReg(DemoteRegTo32(SrcReg))
- .addReg(X86::R15);
+ .addReg(UseZeroBasedSandbox ? 0 : X86::R15); // rZP
MI.eraseFromParent();
return true;
}
@@ -335,29 +341,33 @@ bool X86NaClRewritePass::ApplyFrameSFI(MachineBasicBlock &MBB,
if (Opc == X86::MOV64rm) {
assert(MI.getOperand(0).getReg() == X86::RBP);
+  // The zero-based sandbox model uses address clipping instead.
+ if (UseZeroBasedSandbox)
+ return false;
+
// Rewrite: mov %rbp, (...)
- // To: naclrestbp (...), %r15
+ // To: naclrestbp (...), %rZP
BuildMI(MBB, MBBI, DL, TII->get(X86::NACL_RESTBPm))
.addOperand(MI.getOperand(1)) // Base
.addOperand(MI.getOperand(2)) // Scale
.addOperand(MI.getOperand(3)) // Index
.addOperand(MI.getOperand(4)) // Offset
.addOperand(MI.getOperand(5)) // Segment
- .addReg(X86::R15); // rZP
+ .addReg(UseZeroBasedSandbox ? 0 : X86::R15); // rZP
MI.eraseFromParent();
return true;
}
// Popping onto RBP
// Rewrite to:
- // naclrestbp (%rsp), %r15
- // naclasp $8, %r15
+ // naclrestbp (%rsp), %rZP
+ // naclasp $8, %rZP
//
// TODO(pdox): Consider rewriting to this instead:
// .bundle_lock
// pop %rbp
// mov %ebp,%ebp
- // add %r15, %rbp
+ // add %rZP, %rbp
// .bundle_unlock
if (Opc == X86::POP64r) {
assert(MI.getOperand(0).getReg() == X86::RBP);
@@ -368,11 +378,11 @@ bool X86NaClRewritePass::ApplyFrameSFI(MachineBasicBlock &MBB,
.addReg(0) // Index
.addImm(0) // Offset
.addReg(0) // Segment
- .addReg(X86::R15); // rZP
+ .addReg(UseZeroBasedSandbox ? 0 : X86::R15); // rZP
BuildMI(MBB, MBBI, DL, TII->get(X86::NACL_ASPi8))
.addImm(8)
- .addReg(X86::R15);
+ .addReg(UseZeroBasedSandbox ? 0 : X86::R15);
MI.eraseFromParent();
return true;
@@ -384,6 +394,7 @@ bool X86NaClRewritePass::ApplyFrameSFI(MachineBasicBlock &MBB,
bool X86NaClRewritePass::ApplyControlSFI(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI) {
+ const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
TraceLog("ApplyControlSFI", MBB, MBBI);
MachineInstr &MI = *MBBI;
@@ -414,7 +425,7 @@ bool X86NaClRewritePass::ApplyControlSFI(MachineBasicBlock &MBB,
BuildMI(MBB, MBBI, DL, TII->get(NewOpc))
.addOperand(MI.getOperand(0));
if (Is64Bit) {
- NewMI.addReg(X86::R15);
+ NewMI.addReg(UseZeroBasedSandbox ? 0 : X86::R15);
}
MI.eraseFromParent();
return true;
@@ -431,7 +442,7 @@ bool X86NaClRewritePass::ApplyControlSFI(MachineBasicBlock &MBB,
BuildMI(MBB, MBBI, DL, TII->get(X86::POP64r), X86::RCX);
BuildMI(MBB, MBBI, DL, TII->get(X86::NACL_JMP64r))
.addReg(X86::ECX)
- .addReg(X86::R15);
+ .addReg(UseZeroBasedSandbox ? 0 : X86::R15);
} else {
BuildMI(MBB, MBBI, DL, TII->get(X86::POP32r), X86::ECX);
BuildMI(MBB, MBBI, DL, TII->get(X86::NACL_JMP32r))
@@ -460,7 +471,7 @@ bool X86NaClRewritePass::ApplyControlSFI(MachineBasicBlock &MBB,
// To maintain compatibility with nacl-as, for now we don't emit nacltrap.
// MI.setDesc(TII->get(Is64Bit ? X86::NACL_TRAP64 : X86::NACL_TRAP32));
BuildMI(MBB, MBBI, DL, TII->get(X86::MOV32mi))
- .addReg(Is64Bit ? X86::R15 : 0) // Base
+ .addReg(Is64Bit && !UseZeroBasedSandbox ? X86::R15 : 0) // Base
.addImm(1) // Scale
.addReg(0) // Index
.addImm(0) // Offset
@@ -488,6 +499,7 @@ bool X86NaClRewritePass::ApplyMemorySFI(MachineBasicBlock &MBB,
TraceLog("ApplyMemorySFI", MBB, MBBI);
assert(Is64Bit);
MachineInstr &MI = *MBBI;
+ const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
if (!IsLoad(MI) && !IsStore(MI))
return false;
@@ -525,7 +537,7 @@ bool X86NaClRewritePass::ApplyMemorySFI(MachineBasicBlock &MBB,
AddrReg = 0;
} else {
assert(!BaseReg.getReg() && "Unexpected relative register pair");
- BaseReg.setReg(X86::R15);
+ BaseReg.setReg(UseZeroBasedSandbox ? 0 : X86::R15);
AddrReg = IndexReg.getReg();
}
@@ -540,7 +552,6 @@ bool X86NaClRewritePass::ApplyMemorySFI(MachineBasicBlock &MBB,
bool X86NaClRewritePass::ApplyRewrites(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI) {
-
MachineInstr &MI = *MBBI;
DebugLoc DL = MI.getDebugLoc();
unsigned Opc = MI.getOpcode();
diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp
index 8519592370..855f629c34 100644
--- a/lib/Target/X86/X86RegisterInfo.cpp
+++ b/lib/Target/X86/X86RegisterInfo.cpp
@@ -54,6 +54,9 @@ cl::opt<bool>
EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
cl::desc("Enable use of a base pointer for complex stack frames"));
+// @LOCALMOD
+extern cl::opt<bool> FlagUseZeroBasedSandbox;
+
X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
const TargetInstrInfo &tii)
: X86GenRegisterInfo(tm.getSubtarget<X86Subtarget>().is64Bit()
@@ -355,11 +358,14 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
// @LOCALMOD-START
const X86Subtarget& Subtarget = MF.getTarget().getSubtarget<X86Subtarget>();
+ const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
if (Subtarget.isTargetNaCl64()) {
- Reserved.set(X86::R15);
- Reserved.set(X86::R15D);
- Reserved.set(X86::R15W);
- Reserved.set(X86::R15B);
+ if (!UseZeroBasedSandbox) {
+ Reserved.set(X86::R15);
+ Reserved.set(X86::R15D);
+ Reserved.set(X86::R15W);
+ Reserved.set(X86::R15B);
+ }
Reserved.set(X86::RBP);
Reserved.set(X86::EBP);
Reserved.set(X86::BP);
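
The change above is what frees a register: under the classic model r15
permanently holds the sandbox base, and every narrower view of it must be
reserved too, since the allocator tracks each alias by name; with a zero
base, r15 becomes an ordinary allocatable GPR. A distilled sketch, assuming
the X86 register enums are in scope (reserveSandboxBase is an illustrative
name):

    #include "llvm/ADT/BitVector.h"
    using namespace llvm;

    void reserveSandboxBase(BitVector &Reserved, bool UseZeroBasedSandbox) {
      if (UseZeroBasedSandbox)
        return;                 // base is constant 0; r15 stays allocatable
      Reserved.set(X86::R15);   // 64-bit view
      Reserved.set(X86::R15D);  // 32-bit view
      Reserved.set(X86::R15W);  // 16-bit view
      Reserved.set(X86::R15B);  // 8-bit view
    }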