Diffstat (limited to 'lib/Target/X86')
40 files changed, 3767 insertions, 131 deletions
diff --git a/lib/Target/X86/CMakeLists.txt b/lib/Target/X86/CMakeLists.txt
index b886d46501..45fd42f205 100644
--- a/lib/Target/X86/CMakeLists.txt
+++ b/lib/Target/X86/CMakeLists.txt
@@ -27,6 +27,7 @@ set(sources
   X86JITInfo.cpp
   X86MCInstLower.cpp
   X86MachineFunctionInfo.cpp
+  X86NaClRewritePass.cpp
   X86RegisterInfo.cpp
   X86SelectionDAGInfo.cpp
   X86Subtarget.cpp
diff --git a/lib/Target/X86/MCTargetDesc/CMakeLists.txt b/lib/Target/X86/MCTargetDesc/CMakeLists.txt
index 1c240e52a3..8be0c5e6d7 100644
--- a/lib/Target/X86/MCTargetDesc/CMakeLists.txt
+++ b/lib/Target/X86/MCTargetDesc/CMakeLists.txt
@@ -3,6 +3,7 @@ add_llvm_library(LLVMX86Desc
   X86MCTargetDesc.cpp
   X86MCAsmInfo.cpp
   X86MCCodeEmitter.cpp
+  X86MCNaCl.cpp # LOCALMOD
   X86MachObjectWriter.cpp
   X86ELFObjectWriter.cpp
   X86WinCOFFObjectWriter.cpp
diff --git a/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
index 68464ed5cd..2696190d23 100644
--- a/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
@@ -9,6 +9,7 @@
 
 #include "MCTargetDesc/X86BaseInfo.h"
 #include "MCTargetDesc/X86FixupKinds.h"
+#include "MCTargetDesc/X86MCNaCl.h" // @LOCALMOD
 #include "llvm/MC/MCAsmBackend.h"
 #include "llvm/MC/MCAssembler.h"
 #include "llvm/MC/MCELFObjectWriter.h"
@@ -335,8 +336,10 @@ namespace {
 class ELFX86AsmBackend : public X86AsmBackend {
 public:
   uint8_t OSABI;
-  ELFX86AsmBackend(const Target &T, uint8_t _OSABI, StringRef CPU)
-    : X86AsmBackend(T, CPU), OSABI(_OSABI) {
+  Triple::OSType OSType; // @LOCALMOD: kept OSTYPE vs upstream. FIXME: remove.
+  ELFX86AsmBackend(const Target &T, uint8_t _OSABI, StringRef CPU,
+                   Triple::OSType _OSType)
+    : X86AsmBackend(T, CPU), OSABI(_OSABI), OSType(_OSType) {
     HasReliableSymbolDifference = true;
   }
 
@@ -344,12 +347,28 @@ public:
     const MCSectionELF &ES = static_cast<const MCSectionELF&>(Section);
     return ES.getFlags() & ELF::SHF_MERGE;
   }
+
+  // @LOCALMOD-BEGIN
+  // FIXME! NaCl should inherit from ELFX86AsmBackend!
+  unsigned getBundleSize() const {
+    return OSType == Triple::NativeClient ? 32 : 0;
+  }
+
+  bool CustomExpandInst(const MCInst &Inst, MCStreamer &Out) const {
+    if (OSType == Triple::NativeClient) {
+      return CustomExpandInstNaClX86(Inst, Out);
+    }
+    return false;
+  }
+  // @LOCALMOD-END
+
 };
 
 class ELFX86_32AsmBackend : public ELFX86AsmBackend {
 public:
-  ELFX86_32AsmBackend(const Target &T, uint8_t OSABI, StringRef CPU)
-    : ELFX86AsmBackend(T, OSABI, CPU) {}
+  ELFX86_32AsmBackend(const Target &T, uint8_t OSABI, StringRef CPU,
+                      Triple::OSType OSType) // @LOCALMOD: kept OSType
+    : ELFX86AsmBackend(T, OSABI, CPU, OSType) {}
 
   MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
     return createX86ELFObjectWriter(OS, /*Is64Bit*/ false, OSABI);
@@ -358,8 +377,9 @@ public:
 
 class ELFX86_64AsmBackend : public ELFX86AsmBackend {
 public:
-  ELFX86_64AsmBackend(const Target &T, uint8_t OSABI, StringRef CPU)
-    : ELFX86AsmBackend(T, OSABI, CPU) {}
+  ELFX86_64AsmBackend(const Target &T, uint8_t OSABI, StringRef CPU,
+                      Triple::OSType OSType) // @LOCALMOD: kept OSType
+    : ELFX86AsmBackend(T, OSABI, CPU, OSType) {}
 
   MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
     return createX86ELFObjectWriter(OS, /*Is64Bit*/ true, OSABI);
@@ -457,7 +477,7 @@ MCAsmBackend *llvm::createX86_32AsmBackend(const Target &T, StringRef TT, String
     return new WindowsX86AsmBackend(T, false, CPU);
 
   uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
-  return new ELFX86_32AsmBackend(T, OSABI, CPU);
+  return new ELFX86_32AsmBackend(T, OSABI, CPU, TheTriple.getOS());
 }
 
 MCAsmBackend *llvm::createX86_64AsmBackend(const Target &T, StringRef TT, StringRef CPU) {
@@ -470,5 +490,5 @@ MCAsmBackend *llvm::createX86_64AsmBackend(const Target &T, StringRef TT, String
     return new WindowsX86AsmBackend(T, true, CPU);
 
   uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
-  return new ELFX86_64AsmBackend(T, OSABI, CPU);
+  return new ELFX86_64AsmBackend(T, OSABI, CPU, TheTriple.getOS());
 }
diff --git a/lib/Target/X86/MCTargetDesc/X86BaseInfo.h b/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
index db597fbfca..b0e5be3162 100644
--- a/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
+++ b/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
@@ -291,6 +291,8 @@ namespace X86II {
     /// manual, this operand is described as pntr16:32 and pntr16:16
     RawFrmImm16 = 44,
 
+    CustomFrm = 62, // @LOCALMOD
+
     FormMask = 63,
 
     //===------------------------------------------------------------------===//
@@ -542,6 +544,7 @@ namespace X86II {
     case X86II::MRMSrcReg:
     case X86II::RawFrmImm8:
     case X86II::RawFrmImm16:
+    case X86II::CustomFrm: // @LOCALMOD
       return -1;
     case X86II::MRMDestMem:
       return 0;
diff --git a/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp b/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
index 16488eb7ae..7706b9308e 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
@@ -76,8 +76,18 @@ X86_64MCAsmInfoDarwin::X86_64MCAsmInfoDarwin(const Triple &Triple)
 
 void X86ELFMCAsmInfo::anchor() { }
 
 X86ELFMCAsmInfo::X86ELFMCAsmInfo(const Triple &T) {
-  if (T.getArch() == Triple::x86_64)
-    PointerSize = 8;
+
+  // @LOCALMOD-BEGIN
+  if (T.getArch() == Triple::x86_64) {
+    if (T.getOS() == Triple::NativeClient) {
+      PointerSize = 4;
+      StackSlotSize = 8;
+    } else {
+      PointerSize = 8;
+      StackSlotSize = 8;
+    }
+  }
+  // @LOCALMOD-END
 
   AssemblerDialect = AsmWriterFlavor;
diff --git a/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp b/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
index 6b0ec4fb78..06138bf335 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
@@ -845,7 +845,6 @@ void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
                                         int MemOperand, const MCInst &MI,
                                         const MCInstrDesc &Desc,
                                         raw_ostream &OS) const {
-
   // Emit the lock opcode prefix as needed.
   if (TSFlags & X86II::LOCK)
     EmitByte(0xF0, CurByte, OS);
@@ -1011,6 +1010,10 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
     llvm_unreachable("Unknown FormMask value in X86MCCodeEmitter!");
   case X86II::Pseudo:
     llvm_unreachable("Pseudo instruction shouldn't be emitted");
+  // @LOCALMOD-BEGIN
+  case X86II::CustomFrm:
+    assert(0 && "CustomFrm instruction shouldn't be emitted");
+  // @LOCALMOD-END
   case X86II::RawFrm:
     EmitByte(BaseOpcode, CurByte, OS);
     break;
diff --git a/lib/Target/X86/MCTargetDesc/X86MCNaCl.cpp b/lib/Target/X86/MCTargetDesc/X86MCNaCl.cpp
new file mode 100644
index 0000000000..fde37ac60a
--- /dev/null
+++ b/lib/Target/X86/MCTargetDesc/X86MCNaCl.cpp
@@ -0,0 +1,876 @@
+//=== X86MCNaCl.cpp - Expansion of NaCl pseudo-instructions    --*- C++ -*-=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "x86-sandboxing"
+
+#include "MCTargetDesc/X86MCTargetDesc.h"
+#include "MCTargetDesc/X86BaseInfo.h"
+#include "MCTargetDesc/X86MCNaCl.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+
+using namespace llvm;
+
+// This option makes it possible to overwrite the x86 jmp mask immediate.
+// Setting it to -1 will effectively turn masking into a nop which will
+// help with linking this code with non-sandboxed libs (at least for x86-32).
+cl::opt<int> FlagSfiX86JmpMask("sfi-x86-jmp-mask", cl::init(-32));
+
+cl::opt<bool> FlagUseZeroBasedSandbox("sfi-zero-based-sandbox",
+                                      cl::desc("Use a zero-based sandbox model"
+                                               " for the NaCl SFI."),
+                                      cl::init(false));
+
+static unsigned PrefixSaved = 0;
+static bool PrefixPass = false;
+
+// See the notes below where these functions are defined.
+namespace {
+unsigned getX86SubSuperRegister_(unsigned Reg, EVT VT, bool High=false);
+unsigned DemoteRegTo32_(unsigned RegIn);
+} // namespace
+
+static void EmitDirectCall(const MCOperand &Op, bool Is64Bit,
+                           MCStreamer &Out) {
+  Out.EmitBundleAlignEnd();
+  Out.EmitBundleLock();
+
+  MCInst CALLInst;
+  CALLInst.setOpcode(Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32);
+  CALLInst.addOperand(Op);
+  Out.EmitInstruction(CALLInst);
+  Out.EmitBundleUnlock();
+}
+
+static void EmitIndirectBranch(const MCOperand &Op, bool Is64Bit, bool IsCall,
+                               MCStreamer &Out) {
+  const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
+  const int JmpMask = FlagSfiX86JmpMask;
+  const unsigned Reg32 = Op.getReg();
+  const unsigned Reg64 = getX86SubSuperRegister_(Reg32, MVT::i64);
+
+  if (IsCall)
+    Out.EmitBundleAlignEnd();
+
+  Out.EmitBundleLock();
+
+  MCInst ANDInst;
+  ANDInst.setOpcode(X86::AND32ri8);
+  ANDInst.addOperand(MCOperand::CreateReg(Reg32));
+  ANDInst.addOperand(MCOperand::CreateReg(Reg32));
+  ANDInst.addOperand(MCOperand::CreateImm(JmpMask));
+  Out.EmitInstruction(ANDInst);
+
+  if (Is64Bit && !UseZeroBasedSandbox) {
+    MCInst InstADD;
+    InstADD.setOpcode(X86::ADD64rr);
+    InstADD.addOperand(MCOperand::CreateReg(Reg64));
+    InstADD.addOperand(MCOperand::CreateReg(Reg64));
+    InstADD.addOperand(MCOperand::CreateReg(X86::R15));
+    Out.EmitInstruction(InstADD);
+  }
+
+  if (IsCall) {
+    MCInst CALLInst;
+    CALLInst.setOpcode(Is64Bit ? X86::CALL64r : X86::CALL32r);
+    CALLInst.addOperand(MCOperand::CreateReg(Is64Bit ? Reg64 : Reg32));
+    Out.EmitInstruction(CALLInst);
+  } else {
+    MCInst JMPInst;
+    JMPInst.setOpcode(Is64Bit ? X86::JMP64r : X86::JMP32r);
+    JMPInst.addOperand(MCOperand::CreateReg(Is64Bit ? Reg64 : Reg32));
+    Out.EmitInstruction(JMPInst);
+  }
+  Out.EmitBundleUnlock();
+}
+
+static void EmitRet(const MCOperand *AmtOp, bool Is64Bit, MCStreamer &Out) {
+  MCInst POPInst;
+  POPInst.setOpcode(Is64Bit ? X86::POP64r : X86::POP32r);
+  POPInst.addOperand(MCOperand::CreateReg(Is64Bit ? X86::RCX : X86::ECX));
+  Out.EmitInstruction(POPInst);
+
+  if (AmtOp) {
+    assert(!Is64Bit);
+    MCInst ADDInst;
+    unsigned ADDReg = X86::ESP;
+    ADDInst.setOpcode(X86::ADD32ri);
+    ADDInst.addOperand(MCOperand::CreateReg(ADDReg));
+    ADDInst.addOperand(MCOperand::CreateReg(ADDReg));
+    ADDInst.addOperand(*AmtOp);
+    Out.EmitInstruction(ADDInst);
+  }
+
+  MCInst JMPInst;
+  JMPInst.setOpcode(Is64Bit ? X86::NACL_JMP64r : X86::NACL_JMP32r);
+  JMPInst.addOperand(MCOperand::CreateReg(X86::ECX));
+  Out.EmitInstruction(JMPInst);
+}
+
+static void EmitTrap(bool Is64Bit, MCStreamer &Out) {
+  // Rewrite to:
+  //   X86-32:  mov $0, 0
+  //   X86-64:  mov $0, (%r15)
+  const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
+  unsigned BaseReg = Is64Bit && !UseZeroBasedSandbox ? X86::R15 : 0;
+
+  MCInst Tmp;
+  Tmp.setOpcode(X86::MOV32mi);
+  Tmp.addOperand(MCOperand::CreateReg(BaseReg)); // BaseReg
+  Tmp.addOperand(MCOperand::CreateImm(1)); // Scale
+  Tmp.addOperand(MCOperand::CreateReg(0)); // IndexReg
+  Tmp.addOperand(MCOperand::CreateImm(0)); // Offset
+  Tmp.addOperand(MCOperand::CreateReg(0)); // SegmentReg
+  Tmp.addOperand(MCOperand::CreateImm(0)); // Value
+
+  Out.EmitInstruction(Tmp);
+}
+
+// Fix a register after being truncated to 32-bits.
+static void EmitRegFix(unsigned Reg64, MCStreamer &Out) {
+  // lea (%rsp, %r15, 1), %rsp
+  // We do not need to add the R15 base for the zero-based sandbox model
+  const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
+  if (!UseZeroBasedSandbox) {
+    MCInst Tmp;
+    Tmp.setOpcode(X86::LEA64r);
+    Tmp.addOperand(MCOperand::CreateReg(Reg64)); // DestReg
+    Tmp.addOperand(MCOperand::CreateReg(Reg64)); // BaseReg
+    Tmp.addOperand(MCOperand::CreateImm(1)); // Scale
+    Tmp.addOperand(MCOperand::CreateReg(X86::R15)); // IndexReg
+    Tmp.addOperand(MCOperand::CreateImm(0)); // Offset
+    Tmp.addOperand(MCOperand::CreateReg(0)); // SegmentReg
+    Out.EmitInstruction(Tmp);
+  }
+}
+
+static void EmitSPArith(unsigned Opc, const MCOperand &ImmOp,
+                        MCStreamer &Out) {
+  Out.EmitBundleLock();
+
+  MCInst Tmp;
+  Tmp.setOpcode(Opc);
+  Tmp.addOperand(MCOperand::CreateReg(X86::RSP));
+  Tmp.addOperand(MCOperand::CreateReg(X86::RSP));
+  Tmp.addOperand(ImmOp);
+  Out.EmitInstruction(Tmp);
+
+  EmitRegFix(X86::RSP, Out);
+  Out.EmitBundleUnlock();
+}
+
+static void EmitSPAdj(const MCOperand &ImmOp, MCStreamer &Out) {
+  Out.EmitBundleLock();
+
+  MCInst Tmp;
+  Tmp.setOpcode(X86::LEA64_32r);
+  Tmp.addOperand(MCOperand::CreateReg(X86::RSP)); // DestReg
+  Tmp.addOperand(MCOperand::CreateReg(X86::RBP)); // BaseReg
+  Tmp.addOperand(MCOperand::CreateImm(1)); // Scale
+  Tmp.addOperand(MCOperand::CreateReg(0)); // IndexReg
+  Tmp.addOperand(ImmOp); // Offset
+  Tmp.addOperand(MCOperand::CreateReg(0)); // SegmentReg
+  Out.EmitInstruction(Tmp);
+
+  EmitRegFix(X86::RSP, Out);
+  Out.EmitBundleUnlock();
+}
+
+static void EmitPrefix(unsigned Opc, MCStreamer &Out) {
+  assert(PrefixSaved == 0);
+  assert(PrefixPass == false);
+
+  MCInst PrefixInst;
+  PrefixInst.setOpcode(Opc);
+  PrefixPass = true;
+  Out.EmitInstruction(PrefixInst);
+
+  assert(PrefixSaved == 0);
+  assert(PrefixPass == false);
+}
+
+static void EmitMoveRegReg(bool Is64Bit, unsigned ToReg,
+                           unsigned FromReg, MCStreamer &Out) {
+  MCInst Move;
+  Move.setOpcode(Is64Bit ? X86::MOV64rr : X86::MOV32rr);
+  Move.addOperand(MCOperand::CreateReg(ToReg));
+  Move.addOperand(MCOperand::CreateReg(FromReg));
+  Out.EmitInstruction(Move);
+}
+
+static void EmitMoveRegImm32(bool Is64Bit, unsigned ToReg,
+                             unsigned Imm32, MCStreamer &Out) {
+  MCInst MovInst;
+  MovInst.setOpcode(X86::MOV32ri);
+  MovInst.addOperand(MCOperand::CreateReg(X86::EBX));
+  MovInst.addOperand(MCOperand::CreateImm(Imm32));
+  Out.EmitInstruction(MovInst);
+}
+
+static void EmitCmove(bool Is64Bit, unsigned ToReg,
+                      unsigned FromReg, MCStreamer &Out) {
+  MCInst CmovInst;
+  CmovInst.setOpcode(Is64Bit ? X86::CMOVE64rr : X86::CMOVE32rr);
+  CmovInst.addOperand(MCOperand::CreateReg(ToReg));
+  CmovInst.addOperand(MCOperand::CreateReg(ToReg));
+  CmovInst.addOperand(MCOperand::CreateReg(FromReg));
+  Out.EmitInstruction(CmovInst);
+}
+
+static void EmitClearReg(bool Is64Bit, unsigned Reg, MCStreamer &Out) {
+  MCInst Clear;
+  Clear.setOpcode(X86::XOR32rr);
+  Clear.addOperand(MCOperand::CreateReg(Reg));
+  Clear.addOperand(MCOperand::CreateReg(Reg));
+  Clear.addOperand(MCOperand::CreateReg(Reg));
+  Out.EmitInstruction(Clear);
+}
+
+static void EmitRegTruncate(unsigned Reg64, MCStreamer &Out) {
+  unsigned Reg32 = getX86SubSuperRegister_(Reg64, MVT::i32);
+  EmitMoveRegReg(false, Reg32, Reg32, Out);
+}
+
+static void HandleMemoryRefTruncation(MCInst *Inst, unsigned IndexOpPosition,
+                                      MCStreamer &Out) {
+  const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
+  unsigned IndexReg = Inst->getOperand(IndexOpPosition).getReg();
+  if (UseZeroBasedSandbox) {
+    // With the zero-based sandbox, we use a 32-bit register on the index
+    Inst->getOperand(IndexOpPosition).setReg(DemoteRegTo32_(IndexReg));
+  } else {
+    EmitRegTruncate(IndexReg, Out);
+  }
+}
+
+static void ShortenMemoryRef(MCInst *Inst, unsigned IndexOpPosition) {
+  unsigned ImmOpPosition = IndexOpPosition - 1;
+  unsigned BaseOpPosition = IndexOpPosition - 2;
+  unsigned IndexReg = Inst->getOperand(IndexOpPosition).getReg();
+  // For the SIB byte, if the scale is 1 and the base is 0, then
+  // an equivalent setup moves index to base, and index to 0.  The
+  // equivalent setup is optimized to remove the SIB byte in
+  // X86MCCodeEmitter.cpp.
+  if (Inst->getOperand(ImmOpPosition).getImm() == 1 &&
+      Inst->getOperand(BaseOpPosition).getReg() == 0) {
+    Inst->getOperand(BaseOpPosition).setReg(IndexReg);
+    Inst->getOperand(IndexOpPosition).setReg(0);
+  }
+}
+
+static void EmitPushReg(bool Is64Bit, unsigned FromReg, MCStreamer &Out) {
+  MCInst Push;
+  Push.setOpcode(Is64Bit ? X86::PUSH64r : X86::PUSH32r);
+  Push.addOperand(MCOperand::CreateReg(FromReg));
+  Out.EmitInstruction(Push);
+}
+
+static void EmitPopReg(bool Is64Bit, unsigned ToReg, MCStreamer &Out) {
+  MCInst Pop;
+  Pop.setOpcode(Is64Bit ? X86::POP64r : X86::POP32r);
+  Pop.addOperand(MCOperand::CreateReg(ToReg));
+  Out.EmitInstruction(Pop);
+}
+
+static void EmitLoad(bool Is64Bit,
+                     unsigned DestReg,
+                     unsigned BaseReg,
+                     unsigned Scale,
+                     unsigned IndexReg,
+                     unsigned Offset,
+                     unsigned SegmentReg,
+                     MCStreamer &Out) {
+  // Load DestReg from address BaseReg + Scale * IndexReg + Offset
+  MCInst Load;
+  Load.setOpcode(Is64Bit ? X86::MOV64rm : X86::MOV32rm);
+  Load.addOperand(MCOperand::CreateReg(DestReg));
+  Load.addOperand(MCOperand::CreateReg(BaseReg));
+  Load.addOperand(MCOperand::CreateImm(Scale));
+  Load.addOperand(MCOperand::CreateReg(IndexReg));
+  Load.addOperand(MCOperand::CreateImm(Offset));
+  Load.addOperand(MCOperand::CreateReg(SegmentReg));
+  Out.EmitInstruction(Load);
+}
+
+// Utility function for storing done by setjmp.
+// Creates a store from Reg into the address PtrReg + Offset.
+static void EmitStore(bool Is64Bit,
+                      unsigned BaseReg,
+                      unsigned Scale,
+                      unsigned IndexReg,
+                      unsigned Offset,
+                      unsigned SegmentReg,
+                      unsigned SrcReg,
+                      MCStreamer &Out) {
+  const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
+  // Store SrcReg to address BaseReg + Scale * IndexReg + Offset
+  MCInst Store;
+  Store.setOpcode(Is64Bit ? X86::MOV64mr : X86::MOV32mr);
+  Store.addOperand(MCOperand::CreateReg(BaseReg));
+  Store.addOperand(MCOperand::CreateImm(Scale));
+  Store.addOperand(MCOperand::CreateReg(IndexReg));
+  Store.addOperand(MCOperand::CreateImm(Offset));
+  Store.addOperand(MCOperand::CreateReg(SegmentReg));
+  Store.addOperand(MCOperand::CreateReg(SrcReg));
+  Out.EmitInstruction(Store);
+}
+
+static void EmitAndRegReg(bool Is64Bit, unsigned DestReg,
+                          unsigned SrcReg, MCStreamer &Out) {
+  MCInst AndInst;
+  AndInst.setOpcode(X86::AND32rr);
+  AndInst.addOperand(MCOperand::CreateReg(DestReg));
+  AndInst.addOperand(MCOperand::CreateReg(DestReg));
+  AndInst.addOperand(MCOperand::CreateReg(SrcReg));
+  Out.EmitInstruction(AndInst);
+}
+
+
+
+static bool SandboxMemoryRef(MCInst *Inst,
+                             unsigned *IndexOpPosition) {
+  const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
+  for (unsigned i = 0, last = Inst->getNumOperands(); i < last; i++) {
+    if (!Inst->getOperand(i).isReg() ||
+        Inst->getOperand(i).getReg() != X86::PSEUDO_NACL_SEG) {
+      continue;
+    }
+    // Return the index register that will need to be truncated.
+    // The order of operands on a memory reference is always:
+    // (BaseReg, ScaleImm, IndexReg, DisplacementImm, SegmentReg),
+    // So if we found a match for a segment register value, we know that
+    // the index register is exactly two operands prior.
+    *IndexOpPosition = i - 2;
+
+    // Remove the PSEUDO_NACL_SEG annotation.
+    Inst->getOperand(i).setReg(0);
+    return true;
+  }
+  return false;
+}
+
+static void EmitTLSAddr32(const MCInst &Inst, MCStreamer &Out) {
+  Out.EmitBundleAlignEnd();
+  Out.EmitBundleLock();
+
+  MCInst LeaInst;
+  LeaInst.setOpcode(X86::LEA32r);
+  LeaInst.addOperand(MCOperand::CreateReg(X86::EAX)); // DestReg
+  LeaInst.addOperand(Inst.getOperand(0)); // BaseReg
+  LeaInst.addOperand(Inst.getOperand(1)); // Scale
+  LeaInst.addOperand(Inst.getOperand(2)); // IndexReg
+  LeaInst.addOperand(Inst.getOperand(3)); // Offset
+  LeaInst.addOperand(Inst.getOperand(4)); // SegmentReg
+  Out.EmitInstruction(LeaInst);
+
+  MCInst CALLInst;
+  CALLInst.setOpcode(X86::CALLpcrel32);
+  MCContext &context = Out.getContext();
+  const MCSymbolRefExpr *expr =
+    MCSymbolRefExpr::Create(
+      context.GetOrCreateSymbol(StringRef("___tls_get_addr")),
+      MCSymbolRefExpr::VK_PLT, context);
+  CALLInst.addOperand(MCOperand::CreateExpr(expr));
+  Out.EmitInstruction(CALLInst);
+  Out.EmitBundleUnlock();
+}
+
+
+static void EmitREST(const MCInst &Inst, unsigned Reg32, bool IsMem, MCStreamer &Out) {
+  unsigned Reg64 = getX86SubSuperRegister_(Reg32, MVT::i64);
+  Out.EmitBundleLock();
+  if (!IsMem) {
+    EmitMoveRegReg(false, Reg32, Inst.getOperand(0).getReg(), Out);
+  } else {
+    unsigned IndexOpPosition;
+    MCInst SandboxedInst = Inst;
+    if (SandboxMemoryRef(&SandboxedInst, &IndexOpPosition)) {
+      HandleMemoryRefTruncation(&SandboxedInst, IndexOpPosition, Out);
+      ShortenMemoryRef(&SandboxedInst, IndexOpPosition);
+    }
+    EmitLoad(false,
+             Reg32,
+             SandboxedInst.getOperand(0).getReg(), // BaseReg
+             SandboxedInst.getOperand(1).getImm(), // Scale
+             SandboxedInst.getOperand(2).getReg(), // IndexReg
+             SandboxedInst.getOperand(3).getImm(), // Offset
+             SandboxedInst.getOperand(4).getReg(), // SegmentReg
+             Out);
+  }
+
+  EmitRegFix(Reg64, Out);
+  Out.EmitBundleUnlock();
+}
+
+// Does the x86 platform specific work for setjmp.
+// It expects that a pointer to a JMP_BUF in %ecx/%rdi, and that the return
+// address is in %edx/%rdx.
+// The JMP_BUF is a structure that has the maximum size over all supported
+// architectures.  The callee-saves registers plus [er]ip and [er]sp are stored
+// into the JMP_BUF.
+// TODO(arbenson): Is this code dead? If so, clean it up.
+static void EmitSetjmp(bool Is64Bit, MCStreamer &Out) {
+  const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
+  unsigned JmpBuf = Is64Bit ? X86::RDI : X86::ECX;
+  unsigned RetAddr = Is64Bit ? X86::RDX : X86::EDX;
+  if (Is64Bit) {
+    unsigned BasePtr = UseZeroBasedSandbox ? 0 : X86::R15;
+    unsigned Segment = X86::PSEUDO_NACL_SEG;
+    // Save the registers.
+    EmitStore(true, BasePtr, 1, JmpBuf,  0, Segment, X86::RBX, Out);
+    EmitStore(true, BasePtr, 1, JmpBuf,  8, Segment, X86::RBP, Out);
+    EmitStore(true, BasePtr, 1, JmpBuf, 16, Segment, X86::RSP, Out);
+    EmitStore(true, BasePtr, 1, JmpBuf, 24, Segment, X86::R12, Out);
+    EmitStore(true, BasePtr, 1, JmpBuf, 32, Segment, X86::R13, Out);
+    EmitStore(true, BasePtr, 1, JmpBuf, 40, Segment, X86::R14, Out);
+    EmitStore(true, BasePtr, 1, JmpBuf, 48, Segment, X86::RDX, Out);
+  } else {
+    // Save the registers.
+    EmitStore(false, JmpBuf, 1, 0,  0, 0, X86::EBX, Out);
+    EmitStore(false, JmpBuf, 1, 0,  4, 0, X86::EBP, Out);
+    EmitStore(false, JmpBuf, 1, 0,  8, 0, X86::ESP, Out);
+    EmitStore(false, JmpBuf, 1, 0, 12, 0, X86::ESI, Out);
+    EmitStore(false, JmpBuf, 1, 0, 16, 0, X86::EDI, Out);
+    EmitStore(false, JmpBuf, 1, 0, 20, 0, X86::EDX, Out);
+  }
+  // Return 0.
+  EmitClearReg(false, X86::EAX, Out);
+}
+
+// Does the x86 platform specific work for longjmp other than normalizing the
+// return parameter |