author    Jim Stichnoth <stichnot@chromium.org>  2013-07-24 09:40:15 -0700
committer Jim Stichnoth <stichnot@chromium.org>  2013-07-24 09:40:15 -0700
commit    4499aac2b3679e7e0f69649b99f9b96c2c03dc4f (patch)
tree      9a435a1d29760f3c6634356c960365094474d2eb /lib/Target/X86
parent    c7c01162adebb1df35707a8833ec6e0b1e5eaf6f (diff)
Hide the x86-64 sandbox base address.
Prevent sandbox addresses from being written to the stack. This covers
the following cases:

1. Function calls manually push a masked return address and jump to the
   target, rather than using the call instruction.
2. When the function prolog chooses to use a frame pointer (rbp), it
   saves a masked version of the old rbp.
3. Indirect branches (jumps, calls, and returns) uniformly use r11 to
   construct the 64-bit target address.
4. Register r11 is marked as reserved (similar to r15) so that the
   register allocator won't inadvertently spill a code address to the
   stack.

These transformations can be disabled for performance testing with the
flag "-sfi-hide-sandbox-base=false".

BUG= https://code.google.com/p/nativeclient/issues/detail?id=1235
R=eliben@chromium.org, mseaborn@chromium.org

Review URL: https://codereview.chromium.org/19505003
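To make the masking in cases 1 and 3 concrete, here is a minimal C++ model
of the address arithmetic performed by the rewritten indirect-branch
sequence (and $-32,%r11d; add %r15,%r11). The sandbox base and branch
target values are hypothetical, chosen only for illustration.

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t SandboxBase = 0x0000400000000000ull; // hypothetical %r15
      const uint32_t Target      = 0x00012345;            // untrusted target

      // and $-32, %r11d: clear the low 5 bits so the target lands on a
      // 32-byte instruction bundle boundary (kNaClX86InstructionBundleSize).
      uint32_t Masked = Target & (uint32_t)-32;

      // add %r15, %r11: rebase into the sandbox. Only r11 ever holds the
      // full 64-bit address, and r11 is never written to memory.
      uint64_t Final = SandboxBase + Masked;

      printf("masked: %#x  final: %#llx\n", Masked, (unsigned long long)Final);
      return 0;
    }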
Diffstat (limited to 'lib/Target/X86')
-rw-r--r--  lib/Target/X86/MCTargetDesc/X86MCNaCl.cpp  | 179
-rw-r--r--  lib/Target/X86/X86FrameLowering.cpp        |  47
-rw-r--r--  lib/Target/X86/X86NaClDecls.h              |  28
-rw-r--r--  lib/Target/X86/X86NaClRewritePass.cpp      |  56
-rw-r--r--  lib/Target/X86/X86RegisterInfo.cpp         |  19
5 files changed, 272 insertions, 57 deletions
diff --git a/lib/Target/X86/MCTargetDesc/X86MCNaCl.cpp b/lib/Target/X86/MCTargetDesc/X86MCNaCl.cpp
index 9acaf68c82..63af3957fb 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCNaCl.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86MCNaCl.cpp
@@ -13,6 +13,8 @@
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86MCNaCl.h"
+#include "X86NaClDecls.h"
+#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
@@ -28,12 +30,22 @@ using namespace llvm;
// This option makes it possible to override the x86 jmp mask immediate.
// Setting it to -1 will effectively turn masking into a nop which will
// help with linking this code with non-sandboxed libs (at least for x86-32).
-cl::opt<int> FlagSfiX86JmpMask("sfi-x86-jmp-mask", cl::init(-32));
+cl::opt<int> FlagSfiX86JmpMask("sfi-x86-jmp-mask",
+ cl::init(-kNaClX86InstructionBundleSize));
cl::opt<bool> FlagUseZeroBasedSandbox("sfi-zero-based-sandbox",
cl::desc("Use a zero-based sandbox model"
" for the NaCl SFI."),
cl::init(false));
+// This flag can be set to false to test the performance impact of
+// hiding the sandbox base.
+cl::opt<bool> FlagHideSandboxBase("sfi-hide-sandbox-base",
+ cl::desc("Prevent 64-bit NaCl sandbox"
+ " pointers from being written to"
+ " the stack. [default=true]"),
+ cl::init(true));
+
+const int kNaClX86InstructionBundleSize = 32;
static unsigned PrefixSaved = 0;
static bool PrefixPass = false;
@@ -44,25 +56,134 @@ unsigned getX86SubSuperRegister_(unsigned Reg, EVT VT, bool High=false);
unsigned DemoteRegTo32_(unsigned RegIn);
} // namespace
+static MCSymbol *CreateTempLabel(MCContext &Context, const char *Prefix) {
+ SmallString<128> NameSV;
+ raw_svector_ostream(NameSV)
+ << Context.getAsmInfo().getPrivateGlobalPrefix() // get internal label
+ << Prefix << Context.getUniqueSymbolID();
+ return Context.GetOrCreateSymbol(NameSV);
+}
+
static void EmitDirectCall(const MCOperand &Op, bool Is64Bit,
MCStreamer &Out) {
- Out.EmitBundleLock(true);
+ const bool HideSandboxBase = (FlagHideSandboxBase &&
+ Is64Bit && !FlagUseZeroBasedSandbox);
+ if (HideSandboxBase) {
+ // For NaCl64, the sequence
+ // call target
+ // return_addr:
+ // is changed to
+ // push return_addr
+ // jmp target
+ // .align 32
+ // return_addr:
+ // This avoids exposing the sandbox base address via the return
+ // address on the stack.
+
+ MCContext &Context = Out.getContext();
+
+ // Generate a label for the return address.
+ MCSymbol *RetTarget = CreateTempLabel(Context, "DirectCallRetAddr");
+ const MCExpr *RetTargetExpr = MCSymbolRefExpr::Create(RetTarget, Context);
+
+ // push return_addr
+ MCInst PUSHInst;
+ PUSHInst.setOpcode(X86::PUSH64i32);
+ PUSHInst.addOperand(MCOperand::CreateExpr(RetTargetExpr));
+ Out.EmitInstruction(PUSHInst);
+
+ // jmp target
+ MCInst JMPInst;
+ JMPInst.setOpcode(X86::JMP_4);
+ JMPInst.addOperand(Op);
+ Out.EmitInstruction(JMPInst);
- MCInst CALLInst;
- CALLInst.setOpcode(Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32);
- CALLInst.addOperand(Op);
- Out.EmitInstruction(CALLInst);
- Out.EmitBundleUnlock();
+ Out.EmitCodeAlignment(kNaClX86InstructionBundleSize);
+ Out.EmitLabel(RetTarget);
+ } else {
+ Out.EmitBundleLock(true);
+
+ MCInst CALLInst;
+ CALLInst.setOpcode(Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32);
+ CALLInst.addOperand(Op);
+ Out.EmitInstruction(CALLInst);
+ Out.EmitBundleUnlock();
+ }
}
static void EmitIndirectBranch(const MCOperand &Op, bool Is64Bit, bool IsCall,
MCStreamer &Out) {
- const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
+ const bool HideSandboxBase = (FlagHideSandboxBase &&
+ Is64Bit && !FlagUseZeroBasedSandbox);
const int JmpMask = FlagSfiX86JmpMask;
- const unsigned Reg32 = Op.getReg();
+ unsigned Reg32 = Op.getReg();
+
+ // For NaCl64, the sequence
+ // jmp *%rXX
+ // is changed to
+ // mov %rXX,%r11d
+ // and $0xffffffe0,%r11d
+ // add %r15,%r11
+ // jmpq *%r11
+ //
+ // And the sequence
+ // call *%rXX
+ // return_addr:
+ // is changed to
+ // mov %rXX,%r11d
+ // push return_addr
+ // and $0xffffffe0,%r11d
+ // add %r15,%r11
+ // jmpq *%r11
+ // .align 32
+ // return_addr:
+ //
+ // This avoids exposing the sandbox base address via the return
+ // address on the stack.
+
+ // For NaCl64, force an assignment of the branch target into r11,
+ // and subsequently use r11 as the ultimate branch target, so that
+ // only r11 (which will never be written to memory) exposes the
+ // sandbox base address. But avoid a redundant assignment if the
+ // original branch target is already r11 or r11d.
+ const unsigned SafeReg32 = X86::R11D;
+ const unsigned SafeReg64 = X86::R11;
+ if (HideSandboxBase) {
+ // In some cases, EmitIndirectBranch() is called with a 32-bit
+ // register Op (e.g. r11d), and in other cases a 64-bit register
+ // (e.g. r11), so we need to test both variants to avoid a
+ // redundant assignment. TODO(stichnot): Make callers consistent
+ // on 32 vs 64 bit register.
+ if ((Reg32 != SafeReg32) && (Reg32 != SafeReg64)) {
+ MCInst MOVInst;
+ MOVInst.setOpcode(X86::MOV32rr);
+ MOVInst.addOperand(MCOperand::CreateReg(SafeReg32));
+ MOVInst.addOperand(MCOperand::CreateReg(Reg32));
+ Out.EmitInstruction(MOVInst);
+ Reg32 = SafeReg32;
+ }
+ }
const unsigned Reg64 = getX86SubSuperRegister_(Reg32, MVT::i64);
- Out.EmitBundleLock(IsCall);
+ // Explicitly push the (32-bit) return address for a NaCl64 call
+ // instruction.
+ MCSymbol *RetTarget = NULL;
+ if (IsCall && HideSandboxBase) {
+ MCContext &Context = Out.getContext();
+
+ // Generate a label for the return address.
+ RetTarget = CreateTempLabel(Context, "IndirectCallRetAddr");
+ const MCExpr *RetTargetExpr = MCSymbolRefExpr::Create(RetTarget, Context);
+
+ // push return_addr
+ MCInst PUSHInst;
+ PUSHInst.setOpcode(X86::PUSH64i32);
+ PUSHInst.addOperand(MCOperand::CreateExpr(RetTargetExpr));
+ Out.EmitInstruction(PUSHInst);
+ }
+
+ const bool WillEmitCallInst = IsCall && !HideSandboxBase;
+ Out.EmitBundleLock(WillEmitCallInst);
MCInst ANDInst;
ANDInst.setOpcode(X86::AND32ri8);
@@ -71,7 +192,7 @@ static void EmitIndirectBranch(const MCOperand &Op, bool Is64Bit, bool IsCall,
ANDInst.addOperand(MCOperand::CreateImm(JmpMask));
Out.EmitInstruction(ANDInst);
- if (Is64Bit && !UseZeroBasedSandbox) {
+ if (Is64Bit && !FlagUseZeroBasedSandbox) {
MCInst InstADD;
InstADD.setOpcode(X86::ADD64rr);
InstADD.addOperand(MCOperand::CreateReg(Reg64));
@@ -80,24 +201,40 @@ static void EmitIndirectBranch(const MCOperand &Op, bool Is64Bit, bool IsCall,
Out.EmitInstruction(InstADD);
}
- if (IsCall) {
+ if (WillEmitCallInst) {
+ // callq *%rXX
MCInst CALLInst;
CALLInst.setOpcode(Is64Bit ? X86::CALL64r : X86::CALL32r);
CALLInst.addOperand(MCOperand::CreateReg(Is64Bit ? Reg64 : Reg32));
Out.EmitInstruction(CALLInst);
} else {
+ // jmpq *%rXX -or- jmpq *%r11
MCInst JMPInst;
JMPInst.setOpcode(Is64Bit ? X86::JMP64r : X86::JMP32r);
JMPInst.addOperand(MCOperand::CreateReg(Is64Bit ? Reg64 : Reg32));
Out.EmitInstruction(JMPInst);
}
Out.EmitBundleUnlock();
+ if (RetTarget) {
+ Out.EmitCodeAlignment(kNaClX86InstructionBundleSize);
+ Out.EmitLabel(RetTarget);
+ }
}
static void EmitRet(const MCOperand *AmtOp, bool Is64Bit, MCStreamer &Out) {
+ // For NaCl64 returns, follow the convention of using r11 to hold
+ // the target of an indirect jump to avoid potentially leaking the
+ // sandbox base address.
+ const bool HideSandboxBase = (FlagHideSandboxBase &&
+ Is64Bit && !FlagUseZeroBasedSandbox);
+ // For NaCl64 sandbox hiding, use r11 to hold the branch target.
+ // Otherwise, use rcx/ecx for fewer instruction bytes (no REX
+ // prefix).
+ const unsigned RegTarget = HideSandboxBase ? X86::R11 :
+ (Is64Bit ? X86::RCX : X86::ECX);
MCInst POPInst;
POPInst.setOpcode(Is64Bit ? X86::POP64r : X86::POP32r);
- POPInst.addOperand(MCOperand::CreateReg(Is64Bit ? X86::RCX : X86::ECX));
+ POPInst.addOperand(MCOperand::CreateReg(RegTarget));
Out.EmitInstruction(POPInst);
if (AmtOp) {
@@ -113,7 +250,7 @@ static void EmitRet(const MCOperand *AmtOp, bool Is64Bit, MCStreamer &Out) {
MCInst JMPInst;
JMPInst.setOpcode(Is64Bit ? X86::NACL_JMP64r : X86::NACL_JMP32r);
- JMPInst.addOperand(MCOperand::CreateReg(X86::ECX));
+ JMPInst.addOperand(MCOperand::CreateReg(RegTarget));
Out.EmitInstruction(JMPInst);
}
@@ -121,8 +258,7 @@ static void EmitTrap(bool Is64Bit, MCStreamer &Out) {
// Rewrite to:
// X86-32: mov $0, 0
// X86-64: mov $0, (%r15)
- const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
- unsigned BaseReg = Is64Bit && !UseZeroBasedSandbox ? X86::R15 : 0;
+ unsigned BaseReg = Is64Bit && !FlagUseZeroBasedSandbox ? X86::R15 : 0;
MCInst Tmp;
Tmp.setOpcode(X86::MOV32mi);
@@ -140,8 +276,7 @@ static void EmitTrap(bool Is64Bit, MCStreamer &Out) {
static void EmitRegFix(unsigned Reg64, MCStreamer &Out) {
// lea (%rsp, %r15, 1), %rsp
// We do not need to add the R15 base for the zero-based sandbox model
- const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
- if (!UseZeroBasedSandbox) {
+ if (!FlagUseZeroBasedSandbox) {
MCInst Tmp;
Tmp.setOpcode(X86::LEA64r);
Tmp.addOperand(MCOperand::CreateReg(Reg64)); // DestReg
@@ -215,9 +350,8 @@ static void EmitRegTruncate(unsigned Reg64, MCStreamer &Out) {
static void HandleMemoryRefTruncation(MCInst *Inst, unsigned IndexOpPosition,
MCStreamer &Out) {
- const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
unsigned IndexReg = Inst->getOperand(IndexOpPosition).getReg();
- if (UseZeroBasedSandbox) {
+ if (FlagUseZeroBasedSandbox) {
// With the zero-based sandbox, we use a 32-bit register on the index
Inst->getOperand(IndexOpPosition).setReg(DemoteRegTo32_(IndexReg));
} else {
@@ -352,7 +486,6 @@ namespace llvm {
// these instead of combined instructions. At this time, having only
// one explicit prefix is supported.
bool CustomExpandInstNaClX86(const MCInst &Inst, MCStreamer &Out) {
- const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
// If we are emitting to .s, just emit all pseudo-instructions directly.
if (Out.hasRawTextSupport()) {
return false;
@@ -473,7 +606,7 @@ bool CustomExpandInstNaClX86(const MCInst &Inst, MCStreamer &Out) {
unsigned PrefixLocal = PrefixSaved;
PrefixSaved = 0;
- if (PrefixLocal || !UseZeroBasedSandbox)
+ if (PrefixLocal || !FlagUseZeroBasedSandbox)
Out.EmitBundleLock(false);
HandleMemoryRefTruncation(&SandboxedInst, IndexOpPosition, Out);
@@ -483,7 +616,7 @@ bool CustomExpandInstNaClX86(const MCInst &Inst, MCStreamer &Out) {
EmitPrefix(PrefixLocal, Out);
Out.EmitInstruction(SandboxedInst);
- if (PrefixLocal || !UseZeroBasedSandbox)
+ if (PrefixLocal || !FlagUseZeroBasedSandbox)
Out.EmitBundleUnlock();
return true;
}
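A note on the register choice in EmitRet above: when the sandbox base is
not being hidden, the pop target stays rcx because reaching r11 costs a
REX prefix byte per instruction. A small sketch comparing the two
encodings (byte values per the x86-64 ISA; the size difference, not the
exact bytes, is the point):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // pop %rcx encodes as 0x58+rd with rd=1: one byte, no REX prefix.
      const uint8_t PopRcx[] = {0x59};
      // pop %r11 needs REX.B to reach r11: two bytes.
      const uint8_t PopR11[] = {0x41, 0x5B};
      printf("pop %%rcx: %zu byte(s), pop %%r11: %zu byte(s)\n",
             sizeof(PopRcx), sizeof(PopR11));
      return 0;
    }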
diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp
index b024817891..89485cb06c 100644
--- a/lib/Target/X86/X86FrameLowering.cpp
+++ b/lib/Target/X86/X86FrameLowering.cpp
@@ -15,6 +15,7 @@
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
+#include "X86NaClDecls.h" // @LOCALMOD
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/SmallSet.h"
@@ -756,8 +757,52 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
MFI->setOffsetAdjustment(-NumBytes);
// Save EBP/RBP into the appropriate stack slot.
+ // @LOCALMOD-BEGIN
+ unsigned RegToPush = FramePtr;
+ const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
+ const bool HideSandboxBase = (FlagHideSandboxBase &&
+ Subtarget->isTargetNaCl64() &&
+ !FlagUseZeroBasedSandbox);
+ if (HideSandboxBase) {
+ // Hide the sandbox base address by masking off the upper 32
+ // bits of the pushed/saved RBP on the stack, using:
+ // mov %ebp, %r10d
+ // push %r10
+ // instead of:
+ // push %rbp
+ // Additionally, we can use rax instead of r10 when it is not a
+ // varargs function and therefore rax is available, saving one
+ // byte of REX prefix per instruction.
+ // Note that the epilog already adds R15 when restoring RBP.
+
+ // mov %ebp, %r10d
+ unsigned RegToPushLower;
+ if (Fn->isVarArg()) {
+ // Note: This use of r10 in the prolog can't be used with the
+ // gcc "nest" attribute, due to its use of r10. Example:
+ // target triple = "x86_64-pc-linux-gnu"
+ // define i64 @func(i64 nest %arg) {
+ // ret i64 %arg
+ // }
+ //
+ // $ clang -m64 llvm_nest_attr.ll -S -o -
+ // ...
+ // func:
+ // movq %r10, %rax
+ // ret
+ RegToPush = X86::R10;
+ RegToPushLower = X86::R10D;
+ } else {
+ RegToPush = X86::RAX;
+ RegToPushLower = X86::EAX;
+ }
+ BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32rr), RegToPushLower)
+ .addReg(FramePtr)
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
+ // @LOCALMOD-END
BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
- .addReg(FramePtr, RegState::Kill)
+ .addReg(RegToPush, RegState::Kill) // @LOCALMOD
.setMIFlag(MachineInstr::FrameSetup);
if (needsFrameMoves) {
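To see why the mov/push pair in this prologue change is sufficient, here
is a minimal C++ model of the save and restore. It assumes (as the NaCl64
sandbox arranges) that the base address in r15 has zero low 32 bits; the
concrete values are hypothetical.

    #include <cstdint>
    #include <cassert>

    int main() {
      const uint64_t SandboxBase = 0x0000400000000000ull; // %r15
      const uint64_t Rbp = SandboxBase + 0x00ffe010;      // in-sandbox rbp

      // mov %ebp, %r10d zero-extends, so the slot written by "push %r10"
      // carries only the low 32 bits; the base never reaches the stack.
      uint64_t SavedSlot = (uint32_t)Rbp;

      // The epilog re-adds r15 when restoring rbp (per the note in the
      // patch), recovering the original frame pointer exactly.
      uint64_t Restored = SandboxBase + (uint32_t)SavedSlot;
      assert(Restored == Rbp);
      return 0;
    }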
diff --git a/lib/Target/X86/X86NaClDecls.h b/lib/Target/X86/X86NaClDecls.h
new file mode 100644
index 0000000000..4050187c68
--- /dev/null
+++ b/lib/Target/X86/X86NaClDecls.h
@@ -0,0 +1,28 @@
+//===-- X86NaClDecls.h - Common X86 NaCl declarations -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides various NaCl-related declarations for the X86-32
+// and X86-64 architectures.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef X86NACLDECLS_H
+#define X86NACLDECLS_H
+
+#include "llvm/Support/CommandLine.h"
+
+using namespace llvm;
+
+extern const int kNaClX86InstructionBundleSize;
+
+extern cl::opt<bool> FlagRestrictR15;
+extern cl::opt<bool> FlagUseZeroBasedSandbox;
+extern cl::opt<bool> FlagHideSandboxBase;
+
+#endif // X86NACLDECLS_H
diff --git a/lib/Target/X86/X86NaClRewritePass.cpp b/lib/Target/X86/X86NaClRewritePass.cpp
index 846c72f452..8a131029e2 100644
--- a/lib/Target/X86/X86NaClRewritePass.cpp
+++ b/lib/Target/X86/X86NaClRewritePass.cpp
@@ -19,6 +19,7 @@
#include "X86.h"
#include "X86InstrInfo.h"
+#include "X86NaClDecls.h"
#include "X86Subtarget.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
@@ -32,7 +33,6 @@
using namespace llvm;
-extern cl::opt<bool> FlagUseZeroBasedSandbox;
cl::opt<bool> FlagRestrictR15("sfi-restrict-r15",
cl::desc("Restrict use of %r15. This flag can"
" be turned off for the zero-based"
@@ -142,9 +142,8 @@ static bool IsDirectBranch(const MachineInstr &MI) {
}
static bool IsRegAbsolute(unsigned Reg) {
- const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
const bool RestrictR15 = FlagRestrictR15;
- assert(UseZeroBasedSandbox || RestrictR15);
+ assert(FlagUseZeroBasedSandbox || RestrictR15);
return (Reg == X86::RSP || Reg == X86::RBP ||
(Reg == X86::R15 && RestrictR15));
}
@@ -219,7 +218,6 @@ X86NaClRewritePass::TraceLog(const char *func,
bool X86NaClRewritePass::ApplyStackSFI(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI) {
- const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
TraceLog("ApplyStackSFI", MBB, MBBI);
assert(Is64Bit);
MachineInstr &MI = *MBBI;
@@ -249,7 +247,7 @@ bool X86NaClRewritePass::ApplyStackSFI(MachineBasicBlock &MBB,
if (NewOpc) {
BuildMI(MBB, MBBI, DL, TII->get(NewOpc))
.addImm(MI.getOperand(2).getImm())
- .addReg(UseZeroBasedSandbox ? 0 : X86::R15);
+ .addReg(FlagUseZeroBasedSandbox ? 0 : X86::R15);
MI.eraseFromParent();
return true;
}
@@ -288,7 +286,7 @@ bool X86NaClRewritePass::ApplyStackSFI(MachineBasicBlock &MBB,
const MachineOperand &Offset = MI.getOperand(4);
BuildMI(MBB, MBBI, DL, TII->get(X86::NACL_SPADJi32))
.addImm(Offset.getImm())
- .addReg(UseZeroBasedSandbox ? 0 : X86::R15);
+ .addReg(FlagUseZeroBasedSandbox ? 0 : X86::R15);
MI.eraseFromParent();
return true;
}
@@ -296,7 +294,7 @@ bool X86NaClRewritePass::ApplyStackSFI(MachineBasicBlock &MBB,
if (Opc == X86::MOV32rr || Opc == X86::MOV64rr) {
BuildMI(MBB, MBBI, DL, TII->get(X86::NACL_RESTSPr))
.addReg(DemoteRegTo32(MI.getOperand(1).getReg()))
- .addReg(UseZeroBasedSandbox ? 0 : X86::R15);
+ .addReg(FlagUseZeroBasedSandbox ? 0 : X86::R15);
MI.eraseFromParent();
return true;
}
@@ -308,7 +306,7 @@ bool X86NaClRewritePass::ApplyStackSFI(MachineBasicBlock &MBB,
.addOperand(MI.getOperand(3)) // Index
.addOperand(MI.getOperand(4)) // Offset
.addOperand(MI.getOperand(5)) // Segment
- .addReg(UseZeroBasedSandbox ? 0 : X86::R15);
+ .addReg(FlagUseZeroBasedSandbox ? 0 : X86::R15);
MI.eraseFromParent();
return true;
}
@@ -319,7 +317,6 @@ bool X86NaClRewritePass::ApplyStackSFI(MachineBasicBlock &MBB,
bool X86NaClRewritePass::ApplyFrameSFI(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI) {
- const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
TraceLog("ApplyFrameSFI", MBB, MBBI);
assert(Is64Bit);
MachineInstr &MI = *MBBI;
@@ -343,7 +340,7 @@ bool X86NaClRewritePass::ApplyFrameSFI(MachineBasicBlock &MBB,
// To: naclrestbp %eX, %rZP
BuildMI(MBB, MBBI, DL, TII->get(X86::NACL_RESTBPr))
.addReg(DemoteRegTo32(SrcReg))
- .addReg(UseZeroBasedSandbox ? 0 : X86::R15); // rZP
+ .addReg(FlagUseZeroBasedSandbox ? 0 : X86::R15); // rZP
MI.eraseFromParent();
return true;
}
@@ -353,7 +350,7 @@ bool X86NaClRewritePass::ApplyFrameSFI(MachineBasicBlock &MBB,
assert(MI.getOperand(0).getReg() == X86::RBP);
// Zero-based sandbox model uses address clipping
- if (UseZeroBasedSandbox)
+ if (FlagUseZeroBasedSandbox)
return false;
// Rewrite: mov %rbp, (...)
@@ -364,7 +361,7 @@ bool X86NaClRewritePass::ApplyFrameSFI(MachineBasicBlock &MBB,
.addOperand(MI.getOperand(3)) // Index
.addOperand(MI.getOperand(4)) // Offset
.addOperand(MI.getOperand(5)) // Segment
- .addReg(UseZeroBasedSandbox ? 0 : X86::R15); // rZP
+ .addReg(FlagUseZeroBasedSandbox ? 0 : X86::R15); // rZP
MI.eraseFromParent();
return true;
}
@@ -389,11 +386,11 @@ bool X86NaClRewritePass::ApplyFrameSFI(MachineBasicBlock &MBB,
.addReg(0) // Index
.addImm(0) // Offset
.addReg(0) // Segment
- .addReg(UseZeroBasedSandbox ? 0 : X86::R15); // rZP
+ .addReg(FlagUseZeroBasedSandbox ? 0 : X86::R15); // rZP
BuildMI(MBB, MBBI, DL, TII->get(X86::NACL_ASPi8))
.addImm(8)
- .addReg(UseZeroBasedSandbox ? 0 : X86::R15);
+ .addReg(FlagUseZeroBasedSandbox ? 0 : X86::R15);
MI.eraseFromParent();
return true;
@@ -405,7 +402,8 @@ bool X86NaClRewritePass::ApplyFrameSFI(MachineBasicBlock &MBB,
bool X86NaClRewritePass::ApplyControlSFI(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI) {
- const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
+ const bool HideSandboxBase = (FlagHideSandboxBase &&
+ Is64Bit && !FlagUseZeroBasedSandbox);
TraceLog("ApplyControlSFI", MBB, MBBI);
MachineInstr &MI = *MBBI;
@@ -436,7 +434,7 @@ bool X86NaClRewritePass::ApplyControlSFI(MachineBasicBlock &MBB,
BuildMI(MBB, MBBI, DL, TII->get(NewOpc))
.addOperand(MI.getOperand(0));
if (Is64Bit) {
- NewMI.addReg(UseZeroBasedSandbox ? 0 : X86::R15);
+ NewMI.addReg(FlagUseZeroBasedSandbox ? 0 : X86::R15);
}
MI.eraseFromParent();
return true;
@@ -451,25 +449,32 @@ bool X86NaClRewritePass::ApplyControlSFI(MachineBasicBlock &MBB,
Opc == X86::RETI) {
// To maintain compatibility with nacl-as, for now we don't emit naclret.
// MI.setDesc(TII->get(Is64Bit ? X86::NACL_RET64 : X86::NACL_RET32));
+ //
+ // For NaCl64 returns, follow the convention of using r11 to hold
+ // the target of an indirect jump to avoid potentially leaking the
+ // sandbox base address.
+ unsigned RegTarget;
if (Is64Bit) {
- BuildMI(MBB, MBBI, DL, TII->get(X86::POP64r), X86::RCX);
+ RegTarget = (HideSandboxBase ? X86::R11 : X86::RCX);
+ BuildMI(MBB, MBBI, DL, TII->get(X86::POP64r), RegTarget);
if (Opc == X86::RETI) {
BuildMI(MBB, MBBI, DL, TII->get(X86::NACL_ASPi32))
.addOperand(MI.getOperand(0))
- .addReg(UseZeroBasedSandbox ? 0 : X86::R15);
+ .addReg(FlagUseZeroBasedSandbox ? 0 : X86::R15);
}
BuildMI(MBB, MBBI, DL, TII->get(X86::NACL_JMP64r))
- .addReg(X86::ECX)
- .addReg(UseZeroBasedSandbox ? 0 : X86::R15);
+ .addReg(RegTarget)
+ .addReg(FlagUseZeroBasedSandbox ? 0 : X86::R15);
} else {
- BuildMI(MBB, MBBI, DL, TII->get(X86::POP32r), X86::ECX);
+ RegTarget = X86::ECX;
+ BuildMI(MBB, MBBI, DL, TII->get(X86::POP32r), RegTarget);
if (Opc == X86::RETI) {
BuildMI(MBB, MBBI, DL, TII->get(X86::ADD32ri), X86::ESP)
.addReg(X86::ESP)
.addOperand(MI.getOperand(0));
}
BuildMI(MBB, MBBI, DL, TII->get(X86::NACL_JMP32r))
- .addReg(X86::ECX);
+ .addReg(RegTarget);
}
MI.eraseFromParent();
return true;
@@ -480,7 +485,7 @@ bool X86NaClRewritePass::ApplyControlSFI(MachineBasicBlock &MBB,
// To maintain compatibility with nacl-as, for now we don't emit nacltrap.
// MI.setDesc(TII->get(Is64Bit ? X86::NACL_TRAP64 : X86::NACL_TRAP32));
BuildMI(MBB, MBBI, DL, TII->get(X86::MOV32mi))
- .addReg(Is64Bit && !UseZeroBasedSandbox ? X86::R15 : 0) // Base
+ .addReg(Is64Bit && !FlagUseZeroBasedSandbox ? X86::R15 : 0) // Base
.addImm(1) // Scale
.addReg(0) // Index
.addImm(0) // Offset
@@ -502,7 +507,6 @@ bool X86NaClRewritePass::ApplyMemorySFI(MachineBasicBlock &MBB,
TraceLog("ApplyMemorySFI", MBB, MBBI);
assert(Is64Bit);
MachineInstr &MI = *MBBI;
- const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
if (!IsLoad(MI) && !IsStore(MI))
return false;
@@ -545,9 +549,9 @@ bool X86NaClRewritePass::ApplyMemorySFI(MachineBasicBlock &MBB,
} else {
if (!BaseReg.getReg()) {
// No base, fill in relative.
- BaseReg.setReg(UseZeroBasedSandbox ? 0 : X86::R15);
+ BaseReg.setReg(FlagUseZeroBasedSandbox ? 0 : X86::R15);
AddrReg = IndexReg.getReg();
- } else if (!UseZeroBasedSandbox) {
+ } else if (!FlagUseZeroBasedSandbox) {
// Switch base and index registers if index register is undefined.
// That is do conversions like "mov d(%r,0,0) -> mov d(%r15, %r, 1)".
assert (!IndexReg.getReg()
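The condition FlagHideSandboxBase && Is64Bit && !FlagUseZeroBasedSandbox
recurs across EmitDirectCall, EmitIndirectBranch, EmitRet, emitPrologue,
and ApplyControlSFI above. A hypothetical helper, not part of this patch,
that would express the predicate once:

    #include "X86NaClDecls.h"

    // Hypothetical refactoring sketch: decides whether the
    // sandbox-base-hiding rewrites apply at a given site.
    static bool ShouldHideSandboxBase(bool Is64Bit) {
      return FlagHideSandboxBase && Is64Bit && !FlagUseZeroBasedSandbox;
    }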
diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp
index bab08b69df..67bac130b5 100644
--- a/lib/Target/X86/X86RegisterInfo.cpp
+++ b/lib/Target/X86/X86RegisterInfo.cpp
@@ -17,6 +17,7 @@
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
+#include "X86NaClDecls.h" // @LOCALMOD
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/BitVector.h"
@@ -54,11 +55,6 @@ static cl::opt<bool>
EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
cl::desc("Enable use of a base pointer for complex stack frames"));
-// @LOCALMOD-BEGIN
-extern cl::opt<bool> FlagUseZeroBasedSandbox;
-extern cl::opt<bool> FlagRestrictR15;
-// @LOCALMOD-END
-
X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
const TargetInstrInfo &tii)
: X86GenRegisterInfo((tm.getSubtarget<X86Subtarget>().is64Bit()
@@ -394,9 +390,8 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
// @LOCALMOD-START
const X86Subtarget& Subtarget = MF.getTarget().getSubtarget<X86Subtarget>();
- const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
const bool RestrictR15 = FlagRestrictR15;
- assert(UseZeroBasedSandbox || RestrictR15);
+ assert(FlagUseZeroBasedSandbox || RestrictR15);
if (Subtarget.isTargetNaCl64()) {
if (RestrictR15) {
Reserved.set(X86::R15);
@@ -408,6 +403,16 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
Reserved.set(X86::EBP);
Reserved.set(X86::BP);
Reserved.set(X86::BPL);
+ const bool RestrictR11 = FlagHideSandboxBase && !FlagUseZeroBasedSandbox;
+ if (RestrictR11) {
+ // Restrict r11 so that it can be used for indirect jump
+ // sequences that don't leak the sandbox base address onto the
+ // stack.
+ Reserved.set(X86::R11);
+ Reserved.set(X86::R11D);
+ Reserved.set(X86::R11W);
+ Reserved.set(X86::R11B);
+ }
}
// @LOCALMOD-END