-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp    | 20
-rw-r--r--  lib/Target/X86/X86NaClRewritePass.cpp | 95
2 files changed, 19 insertions(+), 96 deletions(-)
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 126dd4bfe0..fe853cba05 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -8378,13 +8378,31 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC,
     break;
   }
 
+  // @LOCALMOD-BEGIN
+  // This function only peeks at the data dependencies of the DAG to find
+  // an arith op that also defines EFLAGS. However, function calls may
+  // clobber EFLAGS and the data dependencies do not show that.
+  // When that occurs, EFLAGS must be copied via PUSHF and POPF.
+  // The problem is that NaCl does not allow PUSHF and POPF.
+  // We could try to detect such clobbers for NaCl, but for now, we
+  // keep this code simple, and bail out for NaCl. A further
+  // PeepholeOptimizer pass can do a similar optimization
+  // (see optimizeCompareInstr in X86InstrInfo.cpp), so it's not *so*
+  // bad. This function also converts "add op, -1" to DEC, which can
+  // help fold load/stores:
+  //   (store m, (add (load m), -1)) -> (dec m)
+  // So we lose out on that.
+  // BUG=http://code.google.com/p/nativeclient/issues/detail?id=2711
+  bool ConservativeForNaCl = Subtarget->isTargetNaCl();
+
   // See if we can use the EFLAGS value from the operand instead of
   // doing a separate TEST. TEST always sets OF and CF to 0, so unless
   // we prove that the arithmetic won't overflow, we can't use OF or CF.
-  if (Op.getResNo() != 0 || NeedOF || NeedCF)
+  if (Op.getResNo() != 0 || NeedOF || NeedCF || ConservativeForNaCl)
     // Emit a CMP with 0, which is the TEST pattern.
     return DAG.getNode(X86ISD::CMP, dl, MVT::i32,
                        Op, DAG.getConstant(0, Op.getValueType()));
+  // @LOCALMOD-END
 
   unsigned Opcode = 0;
   unsigned NumOperands = 0;
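The situation the new comment describes can be reduced to a small example. The following sketch is hypothetical (MayClobberFlags and KeepFlagsAcrossCall are illustrative names, not from this patch): EmitTest would like to reuse the EFLAGS produced by the subtraction instead of emitting a TEST, but the intervening call clobbers EFLAGS, so the flags would have to be saved and restored around the call with PUSHF/POPF, which the NaCl validator forbids.

    // Hypothetical reduced case, not from this patch: the flags consumer
    // (the branch on Diff == 0) is separated from the flags producer
    // (the subtraction) by a call that clobbers EFLAGS.
    extern void MayClobberFlags();   // assumed external function

    int KeepFlagsAcrossCall(int X, int Y) {
      int Diff = X - Y;       // arith op that also defines EFLAGS
      MayClobberFlags();      // EFLAGS is not preserved across the call
      if (Diff == 0)          // EmitTest would try to reuse the sub's flags
        return 1;
      return 0;
    }

With the patch applied, EmitTest bails out for NaCl and always emits the CMP-with-0 (TEST) pattern, so no PUSHF/POPF pair is generated in the first place.
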
diff --git a/lib/Target/X86/X86NaClRewritePass.cpp b/lib/Target/X86/X86NaClRewritePass.cpp
index 36a3d4bf7c..0d1381c8b0 100644
--- a/lib/Target/X86/X86NaClRewritePass.cpp
+++ b/lib/Target/X86/X86NaClRewritePass.cpp
@@ -81,9 +81,6 @@ namespace {
     void PassLightWeightValidator(MachineBasicBlock &MBB);
     bool AlignJumpTableTargets(MachineFunction &MF);
-    bool RewritePushfPopf(MachineBasicBlock &MBB,
-                          MachineBasicBlock::iterator MBBI,
-                          MachineBasicBlock::iterator *Next);
   };
 
   char X86NaClRewritePass::ID = 0;
@@ -690,97 +687,6 @@ bool X86NaClRewritePass::ApplyRewrites(MachineBasicBlock &MBB,
   return false;
 }
 
-// Rewrite the sequence generated to implement CopyToReg for EFLAGS, when
-// LLVM tries to keep EFLAGS live across a call to avoid emitting a CMP.
-//   %r/m = <some flags-setting op>
-//   pushf
-//   pop %rY
-//   <call>
-//   push %rY
-//   popf
-//   <conditional branch>
-// becomes:
-//   %r/m = <some flags-setting op>
-//   %rY = %r/m
-//   <call>
-//   cmp %rY, 0
-//   <conditional branch>
-// A proper fix would involve fixing X86TargetLowering::EmitTest to check
-// that the path to the flags-setting op does not chain through a call,
-// and avoid the optimization in that case.
-// BUG: http://code.google.com/p/nativeclient/issues/detail?id=2711
-bool X86NaClRewritePass::RewritePushfPopf(MachineBasicBlock &MBB,
-                                          MachineBasicBlock::iterator MBBI,
-                                          MachineBasicBlock::iterator *Next) {
-  MachineInstr &MI = *MBBI;
-  DebugLoc DL = MI.getDebugLoc();
-  unsigned Opc = MI.getOpcode();
-  bool Is64Bit = false;
-
-  switch(Opc) {
-  case X86::PUSHF64:
-    Is64Bit = true;
-    // fall through
-  case X86::PUSHF32: {
-    MachineBasicBlock::iterator Prev = MBBI;
-    --Prev;
-    assert((*Next)->getOpcode() == (Is64Bit ? X86::POP64r : X86::POP32r)
-           && "Unknown pushf sequence");
-    // Take the destination of the flags-setting op (Prev) and move it to
-    // the destination of the pop (Next)
-    int MovOpc;
-    if (Prev->memoperands_empty()) {
-      MovOpc = Is64Bit ? X86::MOV64rr : X86::MOV32rr;
-      BuildMI(MBB, MBBI, DL, TII->get(MovOpc))
-          .addOperand((*Next)->getOperand(0))
-          .addOperand(Prev->getOperand(0));
-    } else {
-      MovOpc = Is64Bit ? X86::MOV64rm : X86::MOV32rm;
-      // Memory operands are an operand tuple of
-      // [base,scale,index,disp,segment]
-      BuildMI(MBB, MBBI, DL, TII->get(MovOpc))
-          .addOperand((*Next)->getOperand(0))
-          .addOperand(Prev->getOperand(0))
-          .addOperand(Prev->getOperand(1))
-          .addOperand(Prev->getOperand(2))
-          .addOperand(Prev->getOperand(3))
-          .addOperand(Prev->getOperand(4))
-          .addMemOperand(*Prev->memoperands_begin());
-    }
-
-    MI.eraseFromParent();
-    // Just use Prev as a placeholder to delete the pop
-    Prev = *Next;
-    ++(*Next);
-    Prev->eraseFromParent();
-    return true;
-  }
-  case X86::POPF64:
-    Is64Bit = true;
-    // fall through
-  case X86::POPF32: {
-    int PushOpc;
-    int CmpOpc;
-    PushOpc = Is64Bit ? X86::PUSH64r : X86::PUSH32r;
-    CmpOpc = Is64Bit ? X86::CMP64ri32 : X86::CMP32ri;
-
-    MachineBasicBlock::iterator Prev = MBBI;
-    --Prev;
-    // Create a compare of the destination of the push (Prev) to 0
-    assert(Prev->getOpcode() == PushOpc && "Unknown popf sequence");
-    BuildMI(MBB, MBBI, DL, TII->get(CmpOpc))
-        .addReg(Prev->getOperand(0).getReg())
-        .addImm(0);
-    Prev->eraseFromParent();
-    MI.eraseFromParent();
-    return true;
-  }
-  default:
-    return false;
-  }
-}
-
 bool X86NaClRewritePass::AlignJumpTableTargets(MachineFunction &MF) {
   bool Modified = true;
 
@@ -835,7 +741,6 @@ bool X86NaClRewritePass::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
       // When one of these methods makes a change,
      // it returns true, skipping the others.
      if (ApplyRewrites(MBB, MBBI) ||
-          RewritePushfPopf(MBB, MBBI, &NextMBBI) ||
          (Is64Bit && ApplyStackSFI(MBB, MBBI)) ||
          (Is64Bit && ApplyMemorySFI(MBB, MBBI)) ||
          (Is64Bit && ApplyFrameSFI(MBB, MBBI)) ||
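The deleted comment notes that a proper fix would make X86TargetLowering::EmitTest check whether the path from the flags-setting op to its user chains through a call. A rough sketch of such a check follows; it is an assumption-laden illustration (ChainCrossesCall is a made-up name, and a real version would need a visited set and a depth limit to avoid revisiting nodes on dense chain graphs):

    // Hypothetical sketch, not part of this patch: walk chain operands
    // backwards from a node and report whether a completed call sequence
    // (ISD::CALLSEQ_END) lies on any chain path.
    static bool ChainCrossesCall(SDNode *N) {
      for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
        if (I->getValueType() != MVT::Other)
          continue;                            // follow only chain edges
        SDNode *Pred = I->getNode();
        if (Pred->getOpcode() == ISD::CALLSEQ_END)
          return true;                         // a call lies on this chain
        if (ChainCrossesCall(Pred))
          return true;
      }
      return false;
    }

Until something along those lines is implemented, the Subtarget->isTargetNaCl() bail-out above keeps the sandboxed build correct, at the cost of the EFLAGS reuse and the DEC load/store folding described in the new comment.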