aboutsummaryrefslogtreecommitdiff
path: root/lib/Target/Mips
diff options
context:
space:
mode:
Diffstat (limited to 'lib/Target/Mips')
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsMCNaCl.cpp261
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h19
-rw-r--r--lib/Target/Mips/MipsNaClHeaders.cpp128
-rw-r--r--lib/Target/Mips/MipsNaClRewritePass.cpp333
-rw-r--r--lib/Target/Mips/MipsNaClRewritePass.h21
5 files changed, 762 insertions, 0 deletions
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.cpp
new file mode 100644
index 0000000000..d39a60d41c
--- /dev/null
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.cpp
@@ -0,0 +1,261 @@
+//=== MipsMCNaCl.cpp - Expansion of NaCl pseudo-instructions --*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "mips-mc-nacl"
+
+#include "MCTargetDesc/MipsBaseInfo.h"
+#include "MCTargetDesc/MipsMCTargetDesc.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+
+using namespace llvm;
+
+/// Two helper functions for emitting the actual guard instructions
+
+static void EmitMask(MCStreamer &Out,
+                     unsigned Addr, unsigned Mask) {
+  // Emit "and \Addr, \Addr, \Mask": clears the disallowed address bits
+  // in place, leaving a sandbox-safe address in \Addr.
+  MCInst Sandboxed;
+  Sandboxed.setOpcode(Mips::AND);
+  Sandboxed.addOperand(MCOperand::CreateReg(Addr));  // destination
+  Sandboxed.addOperand(MCOperand::CreateReg(Addr));  // source
+  Sandboxed.addOperand(MCOperand::CreateReg(Mask));  // mask register
+  Out.EmitInstruction(Sandboxed);
+}
+
+// This is ONLY used for sandboxing stack changes.
+// The reason why SFI_NOP_IF_AT_BUNDLE_END gets handled here is that
+// it must ensure that the two instructions are in the same bundle.
+// It just so happens that the SFI_NOP_IF_AT_BUNDLE_END is always
+// emitted in conjunction with a SFI_DATA_MASK
+//
+// Expects Saved[] = { SFI_NOP_IF_AT_BUNDLE_END, <SP-modifying inst>,
+// SFI_DATA_MASK } with I == 3; emits the middle instruction followed by
+// the masking AND inside one locked bundle, so the sandbox check cannot
+// be separated from the stack update by a bundle boundary.
+static void EmitDataMask(int I, MCInst Saved[], MCStreamer &Out) {
+  assert(I == 3 &&
+         (Mips::SFI_NOP_IF_AT_BUNDLE_END == Saved[0].getOpcode()) &&
+         (Mips::SFI_DATA_MASK == Saved[2].getOpcode()) &&
+         "Unexpected SFI Pseudo while lowering");
+
+  // SFI_DATA_MASK operand layout: (dst, src, maskreg); dst == src here.
+  unsigned Addr = Saved[2].getOperand(0).getReg();
+  unsigned Mask = Saved[2].getOperand(2).getReg();
+  // Stack sandboxing only ever masks the stack pointer.
+  assert((Mips::SP == Addr) && "Unexpected register at stack guard");
+
+  Out.EmitBundleLock();
+  Out.EmitInstruction(Saved[1]);
+  EmitMask(Out, Addr, Mask);
+  Out.EmitBundleUnlock();
+}
+
+// Lower SFI_GUARD_CALL: bundle the direct call (Saved[1]) with its
+// delay-slot instruction (Saved[2]) and force the pair to end at a
+// bundle boundary (EmitBundleAlignEnd), presumably so the return
+// address lands on a bundle start -- TODO confirm against the NaCl ABI.
+static void EmitDirectGuardCall(int I, MCInst Saved[],
+                                MCStreamer &Out) {
+  // sfi_call_preamble --->
+  //   sfi_nops_to_force_slot2
+  assert(I == 3 && (Mips::SFI_GUARD_CALL == Saved[0].getOpcode()) &&
+         "Unexpected SFI Pseudo while lowering SFI_GUARD_CALL");
+  Out.EmitBundleAlignEnd();
+  Out.EmitBundleLock();
+  Out.EmitInstruction(Saved[1]);
+  Out.EmitInstruction(Saved[2]);
+  Out.EmitBundleUnlock();
+}
+
+// Lower SFI_GUARD_INDIRECT_CALL: mask the target register, then emit
+// the jalr (Saved[1]) and its delay-slot instruction (Saved[2]), all in
+// one end-aligned bundle so the masked address cannot be modified
+// between the check and the call.
+static void EmitIndirectGuardCall(int I, MCInst Saved[],
+                                  MCStreamer &Out) {
+  // sfi_indirect_call_preamble link --->
+  //   sfi_nops_to_force_slot1
+  //   sfi_code_mask \link \link \maskreg
+  assert(I == 3 && (Mips::SFI_GUARD_INDIRECT_CALL == Saved[0].getOpcode()) &&
+         "Unexpected SFI Pseudo while lowering SFI_GUARD_INDIRECT_CALL");
+
+  // Pseudo operand layout: (target, target, maskreg).
+  unsigned Addr = Saved[0].getOperand(0).getReg();
+  unsigned Mask = Saved[0].getOperand(2).getReg();
+
+  Out.EmitBundleAlignEnd();
+  Out.EmitBundleLock();
+  EmitMask(Out, Addr, Mask);
+  Out.EmitInstruction(Saved[1]);
+  Out.EmitInstruction(Saved[2]);
+  Out.EmitBundleUnlock();
+}
+
+// Lower SFI_GUARD_INDIRECT_JMP: mask the jump-target register and emit
+// the jr (Saved[1]) in the same bundle, so the check and the branch are
+// inseparable.
+static void EmitIndirectGuardJmp(int I, MCInst Saved[], MCStreamer &Out) {
+  // sfi_indirect_jump_preamble link --->
+  //   sfi_nop_if_at_bundle_end
+  //   sfi_code_mask \link \link \maskreg
+  assert(I == 2 && (Mips::SFI_GUARD_INDIRECT_JMP == Saved[0].getOpcode()) &&
+         "Unexpected SFI Pseudo while lowering SFI_GUARD_INDIRECT_JMP");
+  // Pseudo operand layout: (target, target, maskreg).
+  unsigned Addr = Saved[0].getOperand(0).getReg();
+  unsigned Mask = Saved[0].getOperand(2).getReg();
+
+  Out.EmitBundleLock();
+  EmitMask(Out, Addr, Mask);
+  Out.EmitInstruction(Saved[1]);
+  Out.EmitBundleUnlock();
+}
+
+// Lower SFI_GUARD_RETURN: mask the return-address register and emit the
+// return instruction (Saved[1]) within the same bundle.
+static void EmitGuardReturn(int I, MCInst Saved[], MCStreamer &Out) {
+  // sfi_return_preamble reg --->
+  //   sfi_nop_if_at_bundle_end
+  //   sfi_code_mask \reg \reg \maskreg
+  assert(I == 2 && (Mips::SFI_GUARD_RETURN == Saved[0].getOpcode()) &&
+         "Unexpected SFI Pseudo while lowering SFI_GUARD_RETURN");
+  // Pseudo operand layout: (reg, reg, maskreg).
+  unsigned Reg = Saved[0].getOperand(0).getReg();
+  unsigned Mask = Saved[0].getOperand(2).getReg();
+
+  Out.EmitBundleLock();
+  EmitMask(Out, Reg, Mask);
+  Out.EmitInstruction(Saved[1]);
+  Out.EmitBundleUnlock();
+}
+
+// Lower SFI_GUARD_LOADSTORE: mask the base-address register and emit
+// the guarded memory instruction (Saved[1]) in the same bundle.
+static void EmitGuardLoadOrStore(int I, MCInst Saved[], MCStreamer &Out) {
+  // sfi_load_store_preamble reg --->
+  //   sfi_nop_if_at_bundle_end
+  //   sfi_data_mask \reg \reg \maskreg
+  assert(I == 2 && (Mips::SFI_GUARD_LOADSTORE == Saved[0].getOpcode()) &&
+         "Unexpected SFI Pseudo while lowering SFI_GUARD_LOADSTORE");
+  // Pseudo operand layout: (reg, reg, maskreg).
+  unsigned Reg = Saved[0].getOperand(0).getReg();
+  unsigned Mask = Saved[0].getOperand(2).getReg();
+
+  Out.EmitBundleLock();
+  EmitMask(Out, Reg, Mask);
+  Out.EmitInstruction(Saved[1]);
+  Out.EmitBundleUnlock();
+}
+
+namespace llvm {
+// CustomExpandInstNaClMips -
+// If Inst is a NaCl pseudo instruction, emits the substitute
+// expansion to the MCStreamer and returns true.
+// Otherwise, returns false.
+//
+// NOTE: Each time this function calls Out.EmitInstruction(), it will be
+// called again recursively to rewrite the new instruction being emitted.
+// Care must be taken to ensure that this does not result in an infinite
+// loop. Also, global state must be managed carefully so that it is
+// consistent during recursive calls.
+//
+// We need global state to keep track of the explicit prefix (PREFIX_*)
+// instructions. Unfortunately, the assembly parser prefers to generate
+// these instead of combined instructions. At this time, having only
+// one explicit prefix is supported.
+
+
+bool CustomExpandInstNaClMips(const MCInst &Inst, MCStreamer &Out) {
+  const int MaxSaved = 4;
+  // SFI_* pseudos arrive BEFORE the instruction(s) they guard, so state
+  // persists across calls: the pseudo and the guarded instruction(s)
+  // accumulate in Saved[] until the whole group can be emitted at once.
+  static MCInst Saved[MaxSaved];
+  static int SaveCount = 0;  // How many instructions the current group needs.
+  static int I = 0;          // How many have been buffered so far.
+  // This routine only does work while RecurseGuard == false; it is set
+  // while emitting the expansion so those instructions are not
+  // themselves re-expanded (see the NOTE above about recursion).
+  static bool RecurseGuard = false;
+
+  // If we are emitting to .s, just emit all pseudo-instructions directly.
+  if (Out.hasRawTextSupport()) {
+    return false;
+  }
+
+  // No recursive calls allowed.
+  if (RecurseGuard) return false;
+
+  unsigned Opc = Inst.getOpcode();
+
+  DEBUG(dbgs() << "CustomExpandInstNaClMips("; Inst.dump(); dbgs() << ")\n");
+
+  // Note: SFI_NOP_IF_AT_BUNDLE_END is only emitted directly as part of
+  // a stack guard in conjunction with a SFI_DATA_MASK.
+
+  // Logic:
+  // This is somewhat convoluted, but in the current model, the SFI
+  // guard pseudo instructions occur PRIOR to the actual instruction.
+  // So, the bundling/alignment operation has to refer to the FOLLOWING
+  // one or two instructions.
+  //
+  // When a SFI_* pseudo is detected, it is saved. Then, the saved SFI_*
+  // pseudo and the very next one or two instructions are used as arguments to
+  // the Emit*() functions in this file. This is the reason why we have a
+  // doubly nested switch here. First, to save the SFI_* pseudo, then to
+  // emit it and the next instruction(s).
+
+  if ((I == 0) && (SaveCount == 0)) {
+    // Base state, no saved instructions.
+    // If the current instruction is a SFI instruction, set the SaveCount
+    // and fall through.
+    switch (Opc) {
+    default:
+      SaveCount = 0; // Nothing to do.
+      return false;  // Handle this Inst elsewhere.
+    case Mips::SFI_NOP_IF_AT_BUNDLE_END:
+    case Mips::SFI_GUARD_CALL:
+    case Mips::SFI_GUARD_INDIRECT_CALL:
+      SaveCount = 3;
+      break;
+    case Mips::SFI_DATA_MASK:
+      SaveCount = 0; // Do nothing; emitted unmodified below.
+      break;
+    case Mips::SFI_GUARD_INDIRECT_JMP:
+    case Mips::SFI_GUARD_RETURN:
+    case Mips::SFI_GUARD_LOADSTORE:
+      SaveCount = 2;
+      break;
+    }
+  }
+
+  if (I < SaveCount) {
+    // Otherwise, save the current Inst and return.
+    Saved[I++] = Inst;
+    if (I < SaveCount)
+      return true;
+    // Else fall through to the emit stage below.
+  }
+
+  if (SaveCount > 0) {
+    assert(I == SaveCount && "Bookkeeping Error");
+    SaveCount = 0; // Reset for next iteration.
+    // The following calls may call Out.EmitInstruction(),
+    // which must not again call CustomExpandInstNaClMips,
+    // so set RecurseGuard for the duration.
+    RecurseGuard = true;
+
+    switch (Saved[0].getOpcode()) {
+    default: /* No action required */ break;
+    case Mips::SFI_NOP_IF_AT_BUNDLE_END:
+      EmitDataMask(I, Saved, Out);
+      break;
+    case Mips::SFI_DATA_MASK:
+      // A lone SFI_DATA_MASK is never buffered (SaveCount stays 0 above),
+      // so it can never start a saved group.
+      assert(0 && "Unexpected SFI_DATA_MASK as a Saved Inst");
+      break;
+    case Mips::SFI_GUARD_CALL:
+      EmitDirectGuardCall(I, Saved, Out);
+      break;
+    case Mips::SFI_GUARD_INDIRECT_CALL:
+      EmitIndirectGuardCall(I, Saved, Out);
+      break;
+    case Mips::SFI_GUARD_INDIRECT_JMP:
+      EmitIndirectGuardJmp(I, Saved, Out);
+      break;
+    case Mips::SFI_GUARD_RETURN:
+      EmitGuardReturn(I, Saved, Out);
+      break;
+    case Mips::SFI_GUARD_LOADSTORE:
+      EmitGuardLoadOrStore(I, Saved, Out);
+      break;
+    }
+    I = 0; // Reset I for the next group.
+    assert(RecurseGuard && "Illegal Depth");
+    RecurseGuard = false;
+    return true;
+  }
+  return false;
+}
+
+} // namespace llvm
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h b/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h
new file mode 100644
index 0000000000..c90502ec33
--- /dev/null
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h
@@ -0,0 +1,19 @@
+//===-- MipsMCNaCl.h - Prototype for CustomExpandInstNaClMips ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MIPSMCNACL_H
+#define MIPSMCNACL_H
+
+namespace llvm {
+  class MCInst;
+  class MCStreamer;
+  // If Inst is (part of) a NaCl SFI pseudo-instruction sequence, emits
+  // the expansion to Out and returns true; otherwise returns false and
+  // the caller should emit Inst normally.
+  bool CustomExpandInstNaClMips(const MCInst &Inst, MCStreamer &Out);
+}
+
+#endif
diff --git a/lib/Target/Mips/MipsNaClHeaders.cpp b/lib/Target/Mips/MipsNaClHeaders.cpp
new file mode 100644
index 0000000000..375c287d67
--- /dev/null
+++ b/lib/Target/Mips/MipsNaClHeaders.cpp
@@ -0,0 +1,128 @@
+//===-- MipsNaClHeaders.cpp - Print SFI headers to an Mips .s file --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the initial header string needed
+// for the Native Client target in Mips assembly.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/raw_ostream.h"
+#include "MipsNaClRewritePass.h"
+#include <string>
+
+using namespace llvm;
+
+// Writes the SFI macro definitions used by NaCl .s output for Mips:
+// bundle-padding helpers plus the sfi_*_preamble macros that the
+// rewrite pass's pseudos expand to in assembly mode. Which preambles do
+// real work depends on the FlagSfi* command-line options.
+void EmitMipsSFIHeaders(raw_ostream &O) {
+  O << " # ========================================\n";
+  // Record the sandboxing configuration in the output for debugging.
+  O << "# Branch: " << FlagSfiBranch << "\n";
+  O << "# Stack: " << FlagSfiStack << "\n";
+  O << "# Store: " << FlagSfiStore << "\n";
+  O << "# Load: " << FlagSfiLoad << "\n";
+
+  O << " # ========================================\n";
+  // NOTE: this macro does bundle alignment as follows:
+  // if the current bundle position is X, emit pX words of value "val"
+  // (pos will be one of 0, 4, 8, 12 within a 16-byte bundle;
+  // XmagicX is presumably a bundle-aligned anchor symbol defined
+  // elsewhere -- confirm against the assembly printer).
+  O <<
+    "\t.macro sfi_long_based_on_pos p0 p1 p2 p3 val\n"
+    "\t.set pos, (. - XmagicX) % 16\n"
+    "\t.fill (((\\p3<<12)|(\\p2<<8)|(\\p1<<4)|\\p0)>>pos) & 15, 4, \\val\n"
+    "\t.endm\n"
+    "\n\n";
+
+  // Pads with one nop word only when the next word is the last slot of
+  // a bundle.
+  O <<
+    "\t.macro sfi_nop_if_at_bundle_end\n"
+    "\tsfi_long_based_on_pos 0 0 0 1 0x00000000\n"
+    "\t.endm\n"
+    "\n\n";
+
+  // Pad so the next instruction lands in bundle slot 3.
+  O <<
+    "\t.macro sfi_nops_to_force_slot3\n"
+    "\tsfi_long_based_on_pos 3 2 1 0 0x00000000\n"
+    "\t.endm\n"
+    "\n\n";
+
+  // Pad so the next instruction lands in bundle slot 2.
+  O <<
+    "\t.macro sfi_nops_to_force_slot2\n"
+    "\tsfi_long_based_on_pos 2 1 0 3 0x00000000\n"
+    "\t.endm\n"
+    "\n\n";
+
+  // Pad so the next instruction lands in bundle slot 1.
+  O <<
+    "\t.macro sfi_nops_to_force_slot1\n"
+    "\tsfi_long_based_on_pos 1 0 3 2 0x00000000\n"
+    "\t.endm\n"
+    "\n\n";
+
+  O << " # ========================================\n";
+  // Data- and code-address masking are both a plain AND; they are kept
+  // as separate macros so the two kinds of guard remain distinguishable
+  // in the output.
+  O <<
+    "\t.macro sfi_data_mask reg1 reg2 maskreg\n"
+    "\tand \\reg1, \\reg2, \\maskreg\n"
+    "\t.endm\n"
+    "\n\n";
+
+  O <<
+    "\t.macro sfi_code_mask reg1 reg2 maskreg\n"
+    "\tand \\reg1, \\reg2, \\maskreg\n"
+    "\t.endm\n"
+    "\n\n";
+
+  O << " # ========================================\n";
+  if (FlagSfiBranch) {
+    O <<
+      "\t.macro sfi_call_preamble\n"
+      "\tsfi_nops_to_force_slot2\n"
+      "\t.endm\n"
+      "\n\n";
+
+    O <<
+      "\t.macro sfi_return_preamble reg1 reg2 maskreg\n"
+      "\tsfi_nop_if_at_bundle_end\n"
+      "\tsfi_code_mask \\reg1, \\reg2, \\maskreg\n"
+      "\t.endm\n"
+      "\n\n";
+
+    // This is used just before "jr"
+    O <<
+      "\t.macro sfi_indirect_jump_preamble reg1 reg2 maskreg\n"
+      "\tsfi_nop_if_at_bundle_end\n"
+      "\tsfi_code_mask \\reg1, \\reg2, \\maskreg\n"
+      "\t.endm\n"
+      "\n\n";
+
+    // This is used just before "jalr"
+    O <<
+      "\t.macro sfi_indirect_call_preamble reg1 reg2 maskreg\n"
+      "\tsfi_nops_to_force_slot1\n"
+      "\tsfi_code_mask \\reg1, \\reg2, \\maskreg\n"
+      "\t.endm\n"
+      "\n\n";
+
+  }
+
+  if (FlagSfiStore) {
+    O << " # ========================================\n";
+
+    O <<
+      "\t.macro sfi_load_store_preamble reg1 reg2 maskreg\n"
+      "\tsfi_nop_if_at_bundle_end\n"
+      "\tsfi_data_mask \\reg1, \\reg2 , \\maskreg\n"
+      "\t.endm\n"
+      "\n\n";
+  } else {
+    // Sandboxing disabled: the preamble must still exist (the compiler
+    // emits it unconditionally), so define it as a no-op.
+    O <<
+      "\t.macro sfi_load_store_preamble reg1 reg2 maskreg\n"
+      "\t.endm\n"
+      "\n\n";
+  }
+
+  O << " # ========================================\n";
+  O << "\t.text\n";
+}
diff --git a/lib/Target/Mips/MipsNaClRewritePass.cpp b/lib/Target/Mips/MipsNaClRewritePass.cpp
new file mode 100644
index 0000000000..cce770eebd
--- /dev/null
+++ b/lib/Target/Mips/MipsNaClRewritePass.cpp
@@ -0,0 +1,333 @@
+//===-- MipsNaClRewritePass.cpp - Native Client Rewrite Pass -----*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Native Client Rewrite Pass
+// This final pass inserts the sandboxing instructions needed to run inside
+// the Native Client sandbox. Native Client requires certain software fault
+// isolation (SFI) constructions to be put in place, to prevent escape from
+// the sandbox. Native Client refuses to execute binaries without the correct
+// SFI sequences.
+//
+// Potentially dangerous operations which are protected include:
+// * Stores
+// * Branches
+// * Changes to SP
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "mips-sfi"
+#include "Mips.h"
+#include "MipsInstrInfo.h"
+#include "MipsNaClRewritePass.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineJumpTableInfo.h"
+#include "llvm/Function.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/CommandLine.h"
+
+using namespace llvm;
+
+unsigned Mips::IndirectBranchMaskReg = Mips::T6;
+unsigned Mips::LoadStoreStackMaskReg = Mips::T7;
+
+namespace {
+  // Machine-function pass that inserts the SFI_* pseudo-instructions
+  // guarding loads, stores, branches and SP updates; the pseudos are
+  // later expanded by CustomExpandInstNaClMips / the sfi_* asm macros.
+  class MipsNaClRewritePass : public MachineFunctionPass {
+  public:
+    static char ID;
+    MipsNaClRewritePass() : MachineFunctionPass(ID) {}
+
+    // Cached target hooks, set up in runOnMachineFunction.
+    const MipsInstrInfo *TII;
+    const TargetRegisterInfo *TRI;
+    virtual bool runOnMachineFunction(MachineFunction &Fn);
+
+    virtual const char *getPassName() const {
+      return "Mips Native Client Rewrite Pass";
+    }
+
+  private:
+
+    // Each helper returns true if it modified the block.
+    bool SandboxLoadsInBlock(MachineBasicBlock &MBB);
+    bool SandboxStoresInBlock(MachineBasicBlock &MBB);
+    void SandboxLoadStore(MachineBasicBlock &MBB,
+                          MachineBasicBlock::iterator MBBI,
+                          MachineInstr &MI,
+                          int AddrIdx);
+
+    bool SandboxBranchesInBlock(MachineBasicBlock &MBB);
+    bool SandboxStackChangesInBlock(MachineBasicBlock &MBB);
+
+    void SandboxStackChange(MachineBasicBlock &MBB,
+                            MachineBasicBlock::iterator MBBI);
+    void AlignAllJumpTargets(MachineFunction &MF);
+  };
+  char MipsNaClRewritePass::ID = 0;
+}
+
+// True if MI is the Mips return instruction.
+static bool IsReturn(const MachineInstr &MI) {
+  return MI.getOpcode() == Mips::RET;
+}
+
+// True if MI is a register-indirect jump (jr).
+static bool IsIndirectJump(const MachineInstr &MI) {
+  return MI.getOpcode() == Mips::JR;
+}
+
+// True if MI is a register-indirect call (jalr).
+static bool IsIndirectCall(const MachineInstr &MI) {
+  return MI.getOpcode() == Mips::JALR;
+}
+
+// True if MI is a direct (immediate-target) call: plain jal or one of
+// the conditional branch-and-link forms. (The original had a stray
+// empty statement before the closing brace.)
+static bool IsDirectCall(const MachineInstr &MI) {
+  switch (MI.getOpcode()) {
+  case Mips::JAL:
+  case Mips::BGEZAL:
+  case Mips::BLTZAL:
+    return true;
+  default:
+    return false;
+  }
+}
+
+// True if MI is an SFI_DATA_MASK pseudo already inserted by this pass.
+static bool IsStackMask(const MachineInstr &MI) {
+  return MI.getOpcode() == Mips::SFI_DATA_MASK;
+}
+
+// True if MI changes SP in a way that requires a stack-sandboxing mask.
+static bool NeedSandboxStackChange(const MachineInstr &MI,
+                                   const TargetRegisterInfo *TRI) {
+  // Screen out calls first: modifiesRegister() reports SP as modified
+  // for calls, but those are sandboxed separately.
+  if (IsDirectCall(MI) || IsIndirectCall(MI))
+    return false;
+  // A mask we already inserted must not be guarded again.
+  if (IsStackMask(MI))
+    return false;
+  return MI.modifiesRegister(Mips::SP, TRI);
+}
+
+// Wrap the SP-modifying instruction at MBBI in a stack guard:
+//   SFI_NOP_IF_AT_BUNDLE_END   (inserted before it)
+//   <the instruction>
+//   SFI_DATA_MASK sp, sp, mask (inserted after it)
+// The two pseudos are later lowered into a single bundle (EmitDataMask).
+// The original version kept an unused iterator ("MBBINext") and
+// post-incremented twice; one pre-increment suffices, and MBBI is a
+// copy so the caller's iterator is unaffected.
+void MipsNaClRewritePass::SandboxStackChange(MachineBasicBlock &MBB,
+                                             MachineBasicBlock::iterator MBBI) {
+  MachineInstr &MI = *MBBI;
+
+  // Insert the bundle-end guard immediately before the instruction.
+  BuildMI(MBB, MBBI, MI.getDebugLoc(),
+          TII->get(Mips::SFI_NOP_IF_AT_BUNDLE_END));
+
+  // Advance past the instruction so the mask is inserted right after it.
+  ++MBBI;
+
+  BuildMI(MBB, MBBI, MI.getDebugLoc(),
+          TII->get(Mips::SFI_DATA_MASK), Mips::SP)
+    .addReg(Mips::SP)
+    .addReg(Mips::LoadStoreStackMaskReg);
+}
+
+// Guard every SP-modifying instruction in the block. Returns true if
+// anything was inserted. The SFI_DATA_MASK inserted after the current
+// instruction is visited on the next iteration but rejected by
+// NeedSandboxStackChange (IsStackMask), so no infinite loop arises.
+bool MipsNaClRewritePass::SandboxStackChangesInBlock(MachineBasicBlock &MBB) {
+  bool Changed = false;
+  for (MachineBasicBlock::iterator It = MBB.begin(), End = MBB.end();
+       It != End; ++It) {
+    if (NeedSandboxStackChange(*It, TRI)) {
+      SandboxStackChange(MBB, It);
+      Changed = true;
+    }
+  }
+  return Changed;
+}
+
+// Insert a guard pseudo before every control-transfer instruction in
+// the block: SFI_GUARD_RETURN / SFI_GUARD_INDIRECT_JMP /
+// SFI_GUARD_INDIRECT_CALL mask the target register; SFI_GUARD_CALL
+// (direct calls) takes no operands and only forces bundle alignment.
+// Returns true if any guard was inserted.
+bool MipsNaClRewritePass::SandboxBranchesInBlock(MachineBasicBlock &MBB) {
+  bool Modified = false;
+
+  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
+       MBBI != E; ++MBBI) {
+    MachineInstr &MI = *MBBI;
+
+    if (IsReturn(MI)) {
+      // Operand 0 holds the return-address register.
+      unsigned AddrReg = MI.getOperand(0).getReg();
+      BuildMI(MBB, MBBI, MI.getDebugLoc(),
+              TII->get(Mips::SFI_GUARD_RETURN), AddrReg)
+        .addReg(AddrReg)
+        .addReg(Mips::IndirectBranchMaskReg);
+      Modified = true;
+    } else if (IsIndirectJump(MI)) {
+      unsigned AddrReg = MI.getOperand(0).getReg();
+      BuildMI(MBB, MBBI, MI.getDebugLoc(),
+              TII->get(Mips::SFI_GUARD_INDIRECT_JMP), AddrReg)
+        .addReg(AddrReg)
+        .addReg(Mips::IndirectBranchMaskReg);
+      Modified = true;
+    } else if (IsDirectCall(MI)) {
+      // Direct calls need no masking, only bundle alignment.
+      BuildMI(MBB, MBBI, MI.getDebugLoc(),
+              TII->get(Mips::SFI_GUARD_CALL));
+      Modified = true;
+    } else if (IsIndirectCall(MI)) {
+      unsigned AddrReg = MI.getOperand(0).getReg();
+      BuildMI(MBB, MBBI, MI.getDebugLoc(),
+              TII->get(Mips::SFI_GUARD_INDIRECT_CALL), AddrReg)
+        .addReg(AddrReg)
+        .addReg(Mips::IndirectBranchMaskReg);
+      Modified = true;
+    }
+  }
+
+  return Modified;
+}
+
+// Sandboxes one load or store: inserts an SFI_GUARD_LOADSTORE pseudo
+// immediately before MI that masks its base-address register (operand
+// AddrIdx) with the load/store mask register.
+void MipsNaClRewritePass::SandboxLoadStore(MachineBasicBlock &MBB,
+                                           MachineBasicBlock::iterator MBBI,
+                                           MachineInstr &MI,
+                                           int AddrIdx) {
+  const unsigned Base = MI.getOperand(AddrIdx).getReg();
+
+  BuildMI(MBB, MBBI, MI.getDebugLoc(),
+          TII->get(Mips::SFI_GUARD_LOADSTORE), Base)
+      .addReg(Base)
+      .addReg(Mips::LoadStoreStackMaskReg);
+}
+
+// Returns true if MI is a load whose base address must be masked, and
+// sets *AddrIdx to the operand index of the base register. SP-relative
+// loads are exempt because SP is kept sandboxed at all times.
+// NOTE(review): only 32-bit-era opcodes are listed (no LD/LLD/LDL/LDR)
+// -- presumably this targets MIPS32 only; confirm.
+static bool IsDangerousLoad(const MachineInstr &MI, int *AddrIdx) {
+  unsigned Opcode = MI.getOpcode();
+  switch (Opcode) {
+  default: return false;
+
+  // Instructions with base address register in position 1
+  case Mips::LB:
+  case Mips::LBu:
+  case Mips::LH:
+  case Mips::LHu:
+  case Mips::LW:
+  case Mips::LWC1:
+  case Mips::LDC1:
+  case Mips::LL:
+  case Mips::LWL:
+  case Mips::LWR:
+    *AddrIdx = 1;
+    break;
+  }
+
+  if (MI.getOperand(*AddrIdx).getReg() == Mips::SP) {
+    // The contents of SP do not require masking.
+    return false;
+  }
+
+  return true;
+}
+
+// Returns true if MI is a store whose base address must be masked, and
+// sets *AddrIdx to the operand index of the base register. SC (store
+// conditional) carries its base in position 2 because operand 0 is the
+// success-flag output. SP-relative stores are exempt.
+static bool IsDangerousStore(const MachineInstr &MI, int *AddrIdx) {
+  unsigned Opcode = MI.getOpcode();
+  switch (Opcode) {
+  default: return false;
+
+  // Instructions with base address register in position 1
+  case Mips::SB:
+  case Mips::SH:
+  case Mips::SW:
+  case Mips::SWC1:
+  case Mips::SDC1:
+  case Mips::SWL:
+  case Mips::SWR:
+    *AddrIdx = 1;
+    break;
+
+  case Mips::SC:
+    *AddrIdx = 2;
+    break;
+  }
+
+  if (MI.getOperand(*AddrIdx).getReg() == Mips::SP) {
+    // The contents of SP do not require masking.
+    return false;
+  }
+
+  return true;
+}
+
+// Guard every dangerous load in the block; returns true if anything
+// was inserted.
+bool MipsNaClRewritePass::SandboxLoadsInBlock(MachineBasicBlock &MBB) {
+  bool Changed = false;
+  for (MachineBasicBlock::iterator It = MBB.begin(), End = MBB.end();
+       It != End; ++It) {
+    int AddrIdx;
+    if (IsDangerousLoad(*It, &AddrIdx)) {
+      SandboxLoadStore(MBB, It, *It, AddrIdx);
+      Changed = true;
+    }
+  }
+  return Changed;
+}
+
+// Guard every dangerous store in the block; returns true if anything
+// was inserted.
+bool MipsNaClRewritePass::SandboxStoresInBlock(MachineBasicBlock &MBB) {
+  bool Changed = false;
+  for (MachineBasicBlock::iterator It = MBB.begin(), End = MBB.end();
+       It != End; ++It) {
+    int AddrIdx;
+    if (IsDangerousStore(*It, &AddrIdx)) {
+      SandboxLoadStore(MBB, It, *It, AddrIdx);
+      Changed = true;
+    }
+  }
+  return Changed;
+}
+
+// Make sure all indirect-jump targets are bundle-aligned: jump-table
+// entries and blocks whose address is taken. (setAlignment's argument
+// appears to be log2, so 4 => 16-byte alignment, the NaCl bundle size
+// -- TODO confirm for this LLVM version.)
+void MipsNaClRewritePass::AlignAllJumpTargets(MachineFunction &MF) {
+  // JUMP TABLE TARGETS
+  MachineJumpTableInfo *JTInfo = MF.getJumpTableInfo();
+  if (JTInfo) {
+    const std::vector<MachineJumpTableEntry> &JT = JTInfo->getJumpTables();
+    for (unsigned i = 0, e = JT.size(); i != e; ++i) {
+      // Take the entry's block list by reference -- the original copied
+      // the whole vector of MBB pointers on every iteration.
+      const std::vector<MachineBasicBlock*> &MBBs = JT[i].MBBs;
+      for (unsigned j = 0, f = MBBs.size(); j != f; ++j) {
+        MBBs[j]->setAlignment(4);
+      }
+    }
+  }
+
+  // Blocks that are targets of computed gotos / have their address taken.
+  for (MachineFunction::iterator I = MF.begin(), E = MF.end();
+       I != E; ++I) {
+    MachineBasicBlock &MBB = *I;
+    if (MBB.hasAddressTaken())
+      MBB.setAlignment(4);
+  }
+}
+
+// Pass entry point: applies each enabled category of sandboxing to
+// every block, then aligns jump targets. Returns true if the function
+// was modified.
+bool MipsNaClRewritePass::runOnMachineFunction(MachineFunction &MF) {
+  // Cache the target hooks used by the per-block helpers.
+  TII = static_cast<const MipsInstrInfo*>(MF.getTarget().getInstrInfo());
+  TRI = MF.getTarget().getRegisterInfo();
+
+  bool Modified = false;
+  for (MachineFunction::iterator MFI = MF.begin(), E = MF.end();
+       MFI != E;
+       ++MFI) {
+    MachineBasicBlock &MBB = *MFI;
+
+    // Each category is gated on its own command-line flag.
+    if (FlagSfiLoad)
+      Modified |= SandboxLoadsInBlock(MBB);
+    if (FlagSfiStore)
+      Modified |= SandboxStoresInBlock(MBB);
+    if (FlagSfiBranch)
+      Modified |= SandboxBranchesInBlock(MBB);
+    if (FlagSfiStack)
+      Modified |= SandboxStackChangesInBlock(MBB);
+  }
+
+  if (FlagSfiBranch)
+    AlignAllJumpTargets(MF);
+
+  return Modified;
+}
+
+/// createMipsNaClRewritePass - returns an instance of the NaClRewritePass.
+/// The returned pass is newly allocated; the caller takes ownership.
+FunctionPass *llvm::createMipsNaClRewritePass() {
+  return new MipsNaClRewritePass();
+}
diff --git a/lib/Target/Mips/MipsNaClRewritePass.h b/lib/Target/Mips/MipsNaClRewritePass.h
new file mode 100644
index 0000000000..4e729ec985
--- /dev/null
+++ b/lib/Target/Mips/MipsNaClRewritePass.h
@@ -0,0 +1,21 @@
+//===-- MipsNaClRewritePass.h - NaCl Sandboxing Pass ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TARGET_MIPSNACLREWRITEPASS_H
+#define TARGET_MIPSNACLREWRITEPASS_H
+
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/Support/CommandLine.h"
+
+namespace llvm {
+  // Command-line options selecting which SFI sandboxing categories the
+  // rewrite pass applies (definitions live in the pass implementation).
+  extern cl::opt<bool> FlagSfiLoad;    // Mask dangerous load addresses.
+  extern cl::opt<bool> FlagSfiStore;   // Mask dangerous store addresses.
+  extern cl::opt<bool> FlagSfiStack;   // Guard modifications of SP.
+  extern cl::opt<bool> FlagSfiBranch;  // Guard branches, calls, returns.
+}
+
+#endif