about | summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
-rw-r--r--lib/Target/ARM/ARMExpandPseudoInsts.cpp4
-rw-r--r--lib/Target/ARM/ARMISelLowering.cpp5
-rw-r--r--lib/Target/ARM/ARMNaClHeaders.cpp16
-rw-r--r--lib/Target/ARM/ARMNaClRewritePass.cpp15
-rw-r--r--lib/Target/ARM/ARMSubtarget.cpp2
-rw-r--r--lib/Target/ARM/ARMSubtarget.h11
-rw-r--r--lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp8
-rw-r--r--lib/Target/X86/MCTargetDesc/X86MCNaCl.cpp180
-rw-r--r--lib/Target/X86/X86InstrNaCl.td80
-rw-r--r--lib/Target/X86/X86NaClRewriteFinalPass.cpp4
-rw-r--r--lib/Target/X86/X86NaClRewritePass.cpp48
11 files changed, 20 insertions, 353 deletions
diff --git a/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/lib/Target/ARM/ARMExpandPseudoInsts.cpp
index 7538cf9aa3..e75a006097 100644
--- a/lib/Target/ARM/ARMExpandPseudoInsts.cpp
+++ b/lib/Target/ARM/ARMExpandPseudoInsts.cpp
@@ -1391,7 +1391,7 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::BX))
.addReg(TargetReg);
MI.eraseFromParent();
- break;
+ return true;
}
case ARM::MOVGOTAddr : {
// Expand the pseudo-inst that requests for the GOT address
@@ -1423,7 +1423,7 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
HI16.addImm(Pred).addReg(PredReg);
TransferImpOps(MI, LO16, HI16);
MI.eraseFromParent();
- break;
+ return true;
}
// @LOCALMOD-END
}
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 8cab015bf4..7d39704028 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -5251,8 +5251,7 @@ static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) {
// The effect of this is to adjust the stack pointer by "offset"
// and then branch to "handler".
SDValue ARMTargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG)
- const {
- MachineFunction &MF = DAG.getMachineFunction();
+ const {
SDValue Chain = Op.getOperand(0);
SDValue Offset = Op.getOperand(1);
SDValue Handler = Op.getOperand(2);
@@ -5274,7 +5273,7 @@ SDValue ARMTargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG)
Chain,
DAG.getRegister(OffsetReg, MVT::i32),
DAG.getRegister(AddrReg, getPointerTy()));
- }
+}
// @LOCALMOD-END
diff --git a/lib/Target/ARM/ARMNaClHeaders.cpp b/lib/Target/ARM/ARMNaClHeaders.cpp
index d20fd51b1b..a0b89ab05f 100644
--- a/lib/Target/ARM/ARMNaClHeaders.cpp
+++ b/lib/Target/ARM/ARMNaClHeaders.cpp
@@ -171,22 +171,6 @@ void EmitSFIHeaders(raw_ostream &O) {
"\n\n";
}
- const char* kPreds[] = {
- "eq",
- "ne",
- "lt",
- "le",
- "ls",
- "ge",
- "gt",
- "hs",
- "hi",
- "lo",
- "mi",
- "pl",
- NULL,
- };
-
O << " @ ========================================\n";
O << "\t.text\n";
}
diff --git a/lib/Target/ARM/ARMNaClRewritePass.cpp b/lib/Target/ARM/ARMNaClRewritePass.cpp
index bf3d4596d6..09bd54cdbd 100644
--- a/lib/Target/ARM/ARMNaClRewritePass.cpp
+++ b/lib/Target/ARM/ARMNaClRewritePass.cpp
@@ -13,7 +13,7 @@
// isolation (SFI) constructions to be put in place, to prevent escape from
// the sandbox. Native Client refuses to execute binaries without the correct
// SFI sequences.
-//
+//
// Potentially dangerous operations which are protected include:
// * Stores
// * Branches
@@ -154,12 +154,6 @@ static void DumpBasicBlockVerbose(const MachineBasicBlock &MBB) {
dbgs() << "<<<<< DUMP BASIC BLOCK END\n\n";
}
-static void DumpBasicBlockVerboseCond(const MachineBasicBlock &MBB, bool b) {
- if (b) {
- DumpBasicBlockVerbose(MBB);
- }
-}
-
/**********************************************************************/
/* Exported functions */
@@ -356,11 +350,10 @@ void ARMNaClRewritePass::SandboxStackChange(MachineBasicBlock &MBB,
BuildMI(MBB, MBBI, MI.getDebugLoc(),
TII->get(ARM::SFI_NOP_IF_AT_BUNDLE_END));
- // Get to next instr (one + to get the original, and one more + to get past)
- MachineBasicBlock::iterator MBBINext = (MBBI++);
- MachineBasicBlock::iterator MBBINext2 = (MBBI++);
+ // Get to next instr.
+ MachineBasicBlock::iterator MBBINext = (++MBBI);
- BuildMI(MBB, MBBINext2, MI.getDebugLoc(),
+ BuildMI(MBB, MBBINext, MI.getDebugLoc(),
TII->get(ARM::SFI_DATA_MASK))
.addReg(ARM::SP, RegState::Define) // modify SP (as dst)
.addReg(ARM::SP, RegState::Kill) // start with SP (as src)
diff --git a/lib/Target/ARM/ARMSubtarget.cpp b/lib/Target/ARM/ARMSubtarget.cpp
index d2f0a28f78..fc67d418ea 100644
--- a/lib/Target/ARM/ARMSubtarget.cpp
+++ b/lib/Target/ARM/ARMSubtarget.cpp
@@ -39,7 +39,7 @@ static cl::opt<bool>
NoInlineJumpTables("no-inline-jumptables",
cl::desc("Do not place jump tables inline in the code"));
// @LOCALMOD-END
-
+
static cl::opt<bool>
UseFusedMulOps("arm-use-mulops",
cl::init(true), cl::Hidden);
diff --git a/lib/Target/ARM/ARMSubtarget.h b/lib/Target/ARM/ARMSubtarget.h
index 47002a90a3..e99d1d4a48 100644
--- a/lib/Target/ARM/ARMSubtarget.h
+++ b/lib/Target/ARM/ARMSubtarget.h
@@ -100,6 +100,11 @@ protected:
/// IsR9Reserved - True if R9 is a not available as general purpose register.
bool IsR9Reserved;
+ // @LOCALMOD-START
+ /// UseInlineJumpTables - True if jump tables should be in-line in the code.
+ bool UseInlineJumpTables;
+ // @LOCALMOD-END
+
/// UseMovt - True if MOVT / MOVW pairs are used for materialization of 32-bit
/// imms (including global addresses).
bool UseMovt;
@@ -177,12 +182,6 @@ protected:
/// Selected instruction itineraries (one entry per itinerary class.)
InstrItineraryData InstrItins;
- // @LOCALMOD-START
- /// UseInlineJumpTables - True if jump tables should be in-line in the code.
- bool UseInlineJumpTables;
- // @LOCALMOD-END
-
-
public:
enum {
isELF, isDarwin
diff --git a/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp b/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp
index ddb38687d0..7dcc3da6c3 100644
--- a/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp
+++ b/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp
@@ -272,14 +272,14 @@ void ARMInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
}
if (SFIInst) {
O << '\t' << SFIInst;
- if (SFIEmitDest != ~0) {
+ if (SFIEmitDest != (unsigned)~0) {
O << ' ';
printOperand(MI, SFIEmitDest, O);
}
- if (SFIEmitDest != ~0 && SFIEmitPred != ~0) {
+ if (SFIEmitDest != (unsigned)~0 && SFIEmitPred != (unsigned)~0) {
O << ',';
}
- if (SFIEmitPred != ~0) {
+ if (SFIEmitPred != (unsigned)~0) {
O << ' ';
printPredicateOperand(MI, SFIEmitPred, O);
}
@@ -287,7 +287,7 @@ void ARMInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
return;
}
// @LOCALMOD-END
-
+
if (Opcode == ARM::tLDMIA) {
bool Writeback = true;
unsigned BaseReg = MI->getOperand(0).getReg();
diff --git a/lib/Target/X86/MCTargetDesc/X86MCNaCl.cpp b/lib/Target/X86/MCTargetDesc/X86MCNaCl.cpp
index fde37ac60a..29d87ba2c6 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCNaCl.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86MCNaCl.cpp
@@ -212,34 +212,6 @@ static void EmitMoveRegReg(bool Is64Bit, unsigned ToReg,
Out.EmitInstruction(Move);
}
-static void EmitMoveRegImm32(bool Is64Bit, unsigned ToReg,
- unsigned Imm32, MCStreamer &Out) {
- MCInst MovInst;
- MovInst.setOpcode(X86::MOV32ri);
- MovInst.addOperand(MCOperand::CreateReg(X86::EBX));
- MovInst.addOperand(MCOperand::CreateImm(Imm32));
- Out.EmitInstruction(MovInst);
-}
-
-static void EmitCmove(bool Is64Bit, unsigned ToReg,
- unsigned FromReg, MCStreamer &Out) {
- MCInst CmovInst;
- CmovInst.setOpcode(Is64Bit ? X86::CMOVE64rr : X86::CMOVE32rr);
- CmovInst.addOperand(MCOperand::CreateReg(ToReg));
- CmovInst.addOperand(MCOperand::CreateReg(ToReg));
- CmovInst.addOperand(MCOperand::CreateReg(FromReg));
- Out.EmitInstruction(CmovInst);
-}
-
-static void EmitClearReg(bool Is64Bit, unsigned Reg, MCStreamer &Out) {
- MCInst Clear;
- Clear.setOpcode(X86::XOR32rr);
- Clear.addOperand(MCOperand::CreateReg(Reg));
- Clear.addOperand(MCOperand::CreateReg(Reg));
- Clear.addOperand(MCOperand::CreateReg(Reg));
- Out.EmitInstruction(Clear);
-}
-
static void EmitRegTruncate(unsigned Reg64, MCStreamer &Out) {
unsigned Reg32 = getX86SubSuperRegister_(Reg64, MVT::i32);
EmitMoveRegReg(false, Reg32, Reg32, Out);
@@ -272,20 +244,6 @@ static void ShortenMemoryRef(MCInst *Inst, unsigned IndexOpPosition) {
}
}
-static void EmitPushReg(bool Is64Bit, unsigned FromReg, MCStreamer &Out) {
- MCInst Push;
- Push.setOpcode(Is64Bit ? X86::PUSH64r : X86::PUSH32r);
- Push.addOperand(MCOperand::CreateReg(FromReg));
- Out.EmitInstruction(Push);
-}
-
-static void EmitPopReg(bool Is64Bit, unsigned ToReg, MCStreamer &Out) {
- MCInst Pop;
- Pop.setOpcode(Is64Bit ? X86::POP64r : X86::POP32r);
- Pop.addOperand(MCOperand::CreateReg(ToReg));
- Out.EmitInstruction(Pop);
-}
-
static void EmitLoad(bool Is64Bit,
unsigned DestReg,
unsigned BaseReg,
@@ -306,44 +264,8 @@ static void EmitLoad(bool Is64Bit,
Out.EmitInstruction(Load);
}
-// Utility function for storing done by setjmp.
-// Creates a store from Reg into the address PtrReg + Offset.
-static void EmitStore(bool Is64Bit,
- unsigned BaseReg,
- unsigned Scale,
- unsigned IndexReg,
- unsigned Offset,
- unsigned SegmentReg,
- unsigned SrcReg,
- MCStreamer &Out) {
- const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
- // Store SrcReg to address BaseReg + Scale * IndexReg + Offset
- MCInst Store;
- Store.setOpcode(Is64Bit ? X86::MOV64mr : X86::MOV32mr);
- Store.addOperand(MCOperand::CreateReg(BaseReg));
- Store.addOperand(MCOperand::CreateImm(Scale));
- Store.addOperand(MCOperand::CreateReg(IndexReg));
- Store.addOperand(MCOperand::CreateImm(Offset));
- Store.addOperand(MCOperand::CreateReg(SegmentReg));
- Store.addOperand(MCOperand::CreateReg(SrcReg));
- Out.EmitInstruction(Store);
-}
-
-static void EmitAndRegReg(bool Is64Bit, unsigned DestReg,
- unsigned SrcReg, MCStreamer &Out) {
- MCInst AndInst;
- AndInst.setOpcode(X86::AND32rr);
- AndInst.addOperand(MCOperand::CreateReg(DestReg));
- AndInst.addOperand(MCOperand::CreateReg(DestReg));
- AndInst.addOperand(MCOperand::CreateReg(SrcReg));
- Out.EmitInstruction(AndInst);
-}
-
-
-
static bool SandboxMemoryRef(MCInst *Inst,
unsigned *IndexOpPosition) {
- const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
for (unsigned i = 0, last = Inst->getNumOperands(); i < last; i++) {
if (!Inst->getOperand(i).isReg() ||
Inst->getOperand(i).getReg() != X86::PSEUDO_NACL_SEG) {
@@ -390,7 +312,8 @@ static void EmitTLSAddr32(const MCInst &Inst, MCStreamer &Out) {
}
-static void EmitREST(const MCInst &Inst, unsigned Reg32, bool IsMem, MCStreamer &Out) {
+static void EmitREST(const MCInst &Inst, unsigned Reg32,
+ bool IsMem, MCStreamer &Out) {
unsigned Reg64 = getX86SubSuperRegister_(Reg32, MVT::i64);
Out.EmitBundleLock();
if (!IsMem) {
@@ -416,91 +339,6 @@ static void EmitREST(const MCInst &Inst, unsigned Reg32, bool IsMem, MCStreamer
Out.EmitBundleUnlock();
}
-// Does the x86 platform specific work for setjmp.
-// It expects that a pointer to a JMP_BUF in %ecx/%rdi, and that the return
-// address is in %edx/%rdx.
-// The JMP_BUF is a structure that has the maximum size over all supported
-// architectures. The callee-saves registers plus [er]ip and [er]sp are stored
-// into the JMP_BUF.
-// TODO(arbenson): Is this code dead? If so, clean it up.
-static void EmitSetjmp(bool Is64Bit, MCStreamer &Out) {
- const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
- unsigned JmpBuf = Is64Bit ? X86::RDI : X86::ECX;
- unsigned RetAddr = Is64Bit ? X86::RDX : X86::EDX;
- if (Is64Bit) {
- unsigned BasePtr = UseZeroBasedSandbox ? 0 : X86::R15;
- unsigned Segment = X86::PSEUDO_NACL_SEG;
- // Save the registers.
- EmitStore(true, BasePtr, 1, JmpBuf, 0, Segment, X86::RBX, Out);
- EmitStore(true, BasePtr, 1, JmpBuf, 8, Segment, X86::RBP, Out);
- EmitStore(true, BasePtr, 1, JmpBuf, 16, Segment, X86::RSP, Out);
- EmitStore(true, BasePtr, 1, JmpBuf, 24, Segment, X86::R12, Out);
- EmitStore(true, BasePtr, 1, JmpBuf, 32, Segment, X86::R13, Out);
- EmitStore(true, BasePtr, 1, JmpBuf, 40, Segment, X86::R14, Out);
- EmitStore(true, BasePtr, 1, JmpBuf, 48, Segment, X86::RDX, Out);
- } else {
- // Save the registers.
- EmitStore(false, JmpBuf, 1, 0, 0, 0, X86::EBX, Out);
- EmitStore(false, JmpBuf, 1, 0, 4, 0, X86::EBP, Out);
- EmitStore(false, JmpBuf, 1, 0, 8, 0, X86::ESP, Out);
- EmitStore(false, JmpBuf, 1, 0, 12, 0, X86::ESI, Out);
- EmitStore(false, JmpBuf, 1, 0, 16, 0, X86::EDI, Out);
- EmitStore(false, JmpBuf, 1, 0, 20, 0, X86::EDX, Out);
- }
- // Return 0.
- EmitClearReg(false, X86::EAX, Out);
-}
-
-// Does the x86 platform specific work for longjmp other than normalizing the
-// return parameter (returns of zero are changed to return 1 in the caller).
-// It expects that a pointer to a JMP_BUF in %ecx/%rdi, and that the return
-// value is in %eax.
-// The JMP_BUF is a structure that has the maximum size over all supported
-// architectures. The saved registers are restored from the JMP_BUF.
-// TODO(arbenson): Is this code dead? If so, clean it up.
-static void EmitLongjmp(bool Is64Bit, MCStreamer &Out) {
- const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
- unsigned JmpBuf = Is64Bit ? X86::RDI : X86::ECX;
- // If the return value was 0, make it 1.
- EmitAndRegReg(false, X86::EAX, X86::EAX, Out);
- EmitMoveRegImm32(false, X86::EBX, 1, Out);
- EmitCmove(false, X86::EAX, X86::EBX, Out);
- if (Is64Bit) {
- unsigned BasePtr = UseZeroBasedSandbox ? 0 : X86::R15;
- unsigned Segment = X86::PSEUDO_NACL_SEG;
- // Restore the registers.
- EmitLoad(true, X86::RBX, BasePtr, 1, JmpBuf, 0, Segment, Out);
- EmitLoad(true, X86::RDX, BasePtr, 1, JmpBuf, 8, Segment, Out);
- // restbp
- Out.EmitBundleLock();
- EmitRegTruncate(X86::RBP, Out);
- EmitRegFix(X86::RBP, Out);
- Out.EmitBundleUnlock();
- EmitLoad(true, X86::RDX, BasePtr, 1, JmpBuf, 16, Segment, Out);
- // restsp
- Out.EmitBundleLock();
- EmitRegTruncate(X86::RSP, Out);
- EmitRegFix(X86::RSP, Out);
- Out.EmitBundleUnlock();
- EmitLoad(true, X86::R12, BasePtr, 1, JmpBuf, 24, Segment, Out);
- EmitLoad(true, X86::R13, BasePtr, 1, JmpBuf, 32, Segment, Out);
- EmitLoad(true, X86::R14, BasePtr, 1, JmpBuf, 40, Segment, Out);
- EmitLoad(true, X86::RDX, BasePtr, 1, JmpBuf, 48, Segment, Out);
- } else {
- // Restore the registers.
- EmitLoad(false, X86::EBX, JmpBuf, 1, 0, 0, 0, Out);
- EmitLoad(false, X86::EBP, JmpBuf, 1, 0, 4, 0, Out);
- EmitLoad(false, X86::ESP, JmpBuf, 1, 0, 8, 0, Out);
- EmitLoad(false, X86::ESI, JmpBuf, 1, 0, 12, 0, Out);
- EmitLoad(false, X86::EDI, JmpBuf, 1, 0, 16, 0, Out);
- EmitLoad(false, X86::ECX, JmpBuf, 1, 0, 20, 0, Out);
- }
- // Jmp to the saved return address.
- MCInst JMPInst;
- JMPInst.setOpcode(Is64Bit ? X86::NACL_JMP64r : X86::NACL_JMP32r);
- JMPInst.addOperand(MCOperand::CreateReg(X86::ECX));
- Out.EmitInstruction(JMPInst);
-}
namespace llvm {
// CustomExpandInstNaClX86 -
@@ -632,20 +470,6 @@ bool CustomExpandInstNaClX86(const MCInst &Inst, MCStreamer &Out) {
assert(PrefixSaved == 0);
EmitREST(Inst, X86::ESP, false, Out);
return true;
- // Intrinsics for eliminating platform specific .s code from the client
- // side link. These are recognized in X86InstrNaCl.td.
- case X86::NACL_SETJ32:
- EmitSetjmp(false, Out);
- return true;
- case X86::NACL_SETJ64:
- EmitSetjmp(true, Out);
- return true;
- case X86::NACL_LONGJ32:
- EmitLongjmp(false, Out);
- return true;
- case X86::NACL_LONGJ64:
- EmitLongjmp(true, Out);
- return true;
}
unsigned IndexOpPosition;
diff --git a/lib/Target/X86/X86InstrNaCl.td b/lib/Target/X86/X86InstrNaCl.td
index a729b88797..8a7eebecd7 100644
--- a/lib/Target/X86/X86InstrNaCl.td
+++ b/lib/Target/X86/X86InstrNaCl.td
@@ -355,83 +355,3 @@ def NACL_CG_VAARG_64 : I<0, Pseudo,
(X86vaarg64 addr:$ap, imm:$size, imm:$mode, imm:$align)),
(implicit EFLAGS)]>,
Requires<[IsNaCl, In64BitMode]>;
-
-//===----------------------------------------------------------------------===//
-// NativeClient intrinsics
-// These provide the ability to implement several low-level features without
-// having to link native ASM code on the client.
-// These need to be kept in sync with in lib/Target/ARM/ARMInstrInfo.td and
-// lib/Target/X86/X86InstrNaCl.cpp.
-// TODO(sehr): Separate this code to allow NaCl and non-NaCl versions.
-
-// Saves all the callee-saves registers, [er]sp, and [er]ip to the JMP_BUF
-// structure pointed to by 4(%esp) or rdi. The JMP_BUF structure is the
-// maximum size over all supported architectures. The MC expansions happen
-// in X86InstrNaCl.cpp.
-let Uses = [ECX, RDX], Defs = [EAX, EFLAGS] in {
- def NACL_SETJ32 : I<0, Pseudo, (outs), (ins),
- "movl %ebx, 0(%ecx); "
- "movl %ebp, 4(%ecx); "
- "movl %esp, 8(%ecx); "
- "movl %esi, 12(%ecx); "
- "movl %edi, 16(%ecx); "
- "movl %edx, 20(%ecx); "
- "xorl %eax, %eax; ",
- [(set EAX, (int_nacl_setjmp ECX, EDX))]>,
- Requires<[IsNaCl, In32BitMode]>;
-}
-let Uses = [EDI, RDX], Defs = [EAX, EFLAGS] in {
- def NACL_SETJ64 : I<0, Pseudo, (outs), (ins),
- "movq %rbx, %nacl:0(%r15, %rdi); "
- "movq %rbp, %nacl:8(%r15, %rdi); "
- "movq %rsp, %nacl:16(%r15, %rdi); "
- "movq %r12, %nacl:24(%r15, %rdi); "
- "movq %r13, %nacl:32(%r15, %rdi); "
- "movq %r14, %nacl:40(%r15, %rdi); "
- "movq %rdx, %nacl:48(%r15, %rdi); "
- "xorl %eax, %eax; ",
- [(set EAX, (int_nacl_setjmp EDI, EDX))]>,
- Requires<[IsNaCl, In64BitMode]>;
-}
-
-// Restores all the callee-saves registers, [er]sp, and [er]ip from the JMP_BUF
-// structure pointed to by 4(%esp) or %rdi. Returns the value in 8(%esp) or
-// %rsi at entry. This implements the tail of longjmp, with the normalization
-// of the return value (if the caller passes zero to longjmp, it should return
-// 1) done in the caller. The MC expansions happen in X86InstrNaCl.cpp.
-let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1,
- Uses = [EAX, ECX] in {
- def NACL_LONGJ32 : I<0, Pseudo, (outs), (ins),
- "movl $$1, %ebx; "
- "andl %eax, %eax; "
- "cmovzl %ebx, %eax; "
- "movl 0(%ecx), %ebx; "
- "movl 4(%ecx), %ebp; "
- "movl 8(%ecx), %esp; "
- "movl 12(%ecx), %esi; "
- "movl 16(%ecx), %edi; "
- "movl 20(%ecx), %ecx; "
- "nacljmp %ecx; ",
- [(int_nacl_longjmp ECX, EAX)]>,
- Requires<[IsNaCl, In32BitMode]>, TB;
-}
-
-let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1,
- Uses = [EAX, EDI, R15] in {
- def NACL_LONGJ64 : I<0, Pseudo, (outs), (ins),
- "movl $$1, %ebx; "
- "andl %eax, %eax; "
- "cmovzl %ebx, %eax; "
- "movq %nacl:0(%r15, %edi), %rbx; "
- "movq %nacl:8(%r15, %edi), %rdx; "
- "naclrestbp %edx, %r15; "
- "movq %nacl:16(%r15, %edi), %rdx; "
- "naclrestsp %edx, %r15; "
- "movq %nacl:24(%r15, %edi), %r12; "
- "movq %nacl:32(%r15, %edi), %r13; "
- "movq %nacl:40(%r15, %edi), %r14; "
- "movq %nacl:48(%r15, %edi), %rcx; "
- "nacljmp %ecx, %r15; ",
- [(int_nacl_longjmp EDI, EAX)]>,
- Requires<[IsNaCl, In64BitMode]>, TB;
-}
diff --git a/lib/Target/X86/X86NaClRewriteFinalPass.cpp b/lib/Target/X86/X86NaClRewriteFinalPass.cpp
index 93728ddb08..b6276dc583 100644
--- a/lib/Target/X86/X86NaClRewriteFinalPass.cpp
+++ b/lib/Target/X86/X86NaClRewriteFinalPass.cpp
@@ -181,10 +181,6 @@ bool X86NaClRewriteFinalPass::ApplyCommonRewrites(MachineBasicBlock &MBB,
case X86::NACL_RESTBPr:
case X86::NACL_RESTSPm:
case X86::NACL_RESTSPr:
- case X86::NACL_SETJ32:
- case X86::NACL_SETJ64:
- case X86::NACL_LONGJ32:
- case X86::NACL_LONGJ64:
dbgs() << "inst, opcode not handled: " << MI << Opcode;
assert(false && "NaCl Pseudo-inst not handled");
case X86::NACL_RET32:
diff --git a/lib/Target/X86/X86NaClRewritePass.cpp b/lib/Target/X86/X86NaClRewritePass.cpp
index 0ae96da96d..7310dcd77a 100644
--- a/lib/Target/X86/X86NaClRewritePass.cpp
+++ b/lib/Target/X86/X86NaClRewritePass.cpp
@@ -100,8 +100,6 @@ static bool IsPushPop(MachineInstr &MI) {
}
}
-static bool IsSandboxed(MachineInstr &MI);
-
static bool IsStore(MachineInstr &MI) {
return MI.getDesc().mayStore();
}
@@ -482,12 +480,6 @@ bool X86NaClRewritePass::ApplyControlSFI(MachineBasicBlock &MBB,
return true;
}
- if (Opc == X86::NACL_LONGJ32 ||
- Opc == X86::NACL_LONGJ64) {
- // The expansions for these intrinsics already handle control SFI.
- return false;
- }
-
DumpInstructionVerbose(MI);
llvm_unreachable("Unhandled Control SFI");
}
@@ -752,46 +744,6 @@ bool X86NaClRewritePass::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
return Modified;
}
-static bool IsSandboxed(MachineInstr &MI) {
- switch (MI.getOpcode()) {
- // 32-bit
- case X86::NACL_TRAP32:
- case X86::NACL_RET32:
- case X86::NACL_RETI32:
- case X86::NACL_JMP32r:
- case X86::NACL_CALL32d:
- case X86::NACL_CALL32r:
-
- // 64-bit
- case X86::NACL_TRAP64:
- case X86::NACL_RET64:
- case X86::NACL_JMP64r:
- case X86::NACL_JMP64z:
- case X86::NACL_CALL64r:
- case X86::NACL_CALL64d:
-
- case X86::NACL_ASPi8:
- case X86::NACL_ASPi32:
- case X86::NACL_SSPi8:
- case X86::NACL_SSPi32:
- case X86::NACL_SPADJi32:
- case X86::NACL_RESTSPr:
- case X86::NACL_RESTSPm:
- case X86::NACL_RESTSPrz:
- case X86::NACL_RESTBPr:
- case X86::NACL_RESTBPm:
- case X86::NACL_RESTBPrz:
- return true;
-
- case X86::MOV64rr:
- // copy from safe regs
- const MachineOperand &DestReg = MI.getOperand(0);
- const MachineOperand &SrcReg = MI.getOperand(1);
- return DestReg.getReg() == X86::RSP && SrcReg.getReg() == X86::RBP;
- }
- return false;
-}
-
static void DumpInstructionVerbose(const MachineInstr &MI) {
dbgs() << MI;
dbgs() << MI.getNumOperands() << " operands:" << "\n";