author | Dan Gohman <gohman@apple.com> | 2008-10-18 02:06:02 +0000 |
---|---|---|
committer | Dan Gohman <gohman@apple.com> | 2008-10-18 02:06:02 +0000 |
commit | 6520e20e4fb31f2e65e25c38b372b19d33a83df4 (patch) | |
tree | bdd9f9d2f77e38a304c7abc38e87b8f734f06da8 /lib | |
parent | 95915730de69342d3733f5c391779ae007eb3efa (diff) |
Teach DAGCombine to fold constant offsets into GlobalAddress nodes,
and add a TargetLowering hook for it to use to determine when this
is legal (i.e., not in PIC mode, etc.).
This allows instruction selection to emit folded constant offsets
in more cases, such as the included testcase, eliminating the need
for explicit arithmetic instructions.
This eliminates the need for the C++ code in X86ISelDAGToDAG.cpp
that attempted to achieve the same effect, but wasn't as effective.
Also, fix handling of offsets in GlobalAddressSDNodes in several
places, including changing GlobalAddressSDNode's offset from
int to int64_t.
The Mips, Alpha, Sparc, and CellSPU targets appear to be
unaware of GlobalAddress offsets currently, so set the hook to
false on those targets.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@57748 91177308-0d34-0410-b5e6-96231b3b80d8
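
To illustrate the idea at a high level, here is a small, self-contained C++ sketch of the fold this commit enables: when the target's offset-folding hook says folding is safe (e.g. in the static relocation model), an (add Sym+c1, c2) node collapses into Sym+(c1+c2) instead of surviving as an explicit add instruction. The `GlobalAddr`, `RelocModel`, `isOffsetFoldingLegal`, and `foldAddOffset` names below are illustrative stand-ins, not LLVM's actual SelectionDAG API.

```cpp
#include <cstdint>
#include <iostream>
#include <optional>
#include <string>

// Hypothetical stand-ins for a relocation model and a global-address node;
// the real GlobalAddressSDNode's offset is widened from int to int64_t.
enum class RelocModel { Static, PIC, DynamicNoPIC };

struct GlobalAddr {
  std::string Symbol;
  int64_t Offset = 0;
};

// Stand-in for the new TargetLowering::isOffsetFoldingLegal hook: folding is
// assumed safe in static mode, and targets that do not understand offsets
// simply answer false.
bool isOffsetFoldingLegal(RelocModel Model, bool TargetKnowsOffsets) {
  return TargetKnowsOffsets && Model == RelocModel::Static;
}

// fold (add Sym+c1, c2) -> Sym+(c1+c2) when the hook allows it; otherwise
// report "no fold" so an explicit add instruction would remain.
std::optional<GlobalAddr> foldAddOffset(const GlobalAddr &GA, int64_t C,
                                        RelocModel Model,
                                        bool TargetKnowsOffsets) {
  if (!isOffsetFoldingLegal(Model, TargetKnowsOffsets))
    return std::nullopt;
  return GlobalAddr{GA.Symbol, GA.Offset + C};
}

int main() {
  GlobalAddr GA{"myGlobal", 8};
  if (auto Folded = foldAddOffset(GA, 16, RelocModel::Static, true))
    std::cout << Folded->Symbol << '+' << Folded->Offset << '\n';  // myGlobal+24
  else
    std::cout << "explicit add retained\n";
}
```

In the actual patch the equivalent checks live in DAGCombiner::visitADD/visitSUB and the default hook in TargetLowering, with the Mips, Alpha, Sparc, and CellSPU targets overriding the hook to return false.
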
Diffstat (limited to 'lib')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 22 |
| -rw-r--r-- | lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 10 |
| -rw-r--r-- | lib/CodeGen/SelectionDAG/TargetLowering.cpp | 17 |
| -rw-r--r-- | lib/Target/Alpha/AlphaISelLowering.cpp | 6 |
| -rw-r--r-- | lib/Target/Alpha/AlphaISelLowering.h | 2 |
| -rw-r--r-- | lib/Target/CellSPU/SPUISelLowering.cpp | 6 |
| -rw-r--r-- | lib/Target/CellSPU/SPUISelLowering.h | 2 |
| -rw-r--r-- | lib/Target/Mips/MipsISelLowering.cpp | 6 |
| -rw-r--r-- | lib/Target/Mips/MipsISelLowering.h | 2 |
| -rw-r--r-- | lib/Target/Sparc/SparcISelLowering.cpp | 6 |
| -rw-r--r-- | lib/Target/Sparc/SparcISelLowering.h | 2 |
| -rw-r--r-- | lib/Target/X86/X86ISelDAGToDAG.cpp | 85 |
| -rw-r--r-- | lib/Target/X86/X86ISelLowering.cpp | 35 |
| -rw-r--r-- | lib/Target/X86/X86ISelLowering.h | 3 |
| -rw-r--r-- | lib/Target/X86/X86TargetMachine.cpp | 9 |
15 files changed, 141 insertions, 72 deletions
```diff
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 4b1945efe4..81058152eb 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -967,6 +967,13 @@ SDValue DAGCombiner::visitADD(SDNode *N) {
   // fold (add x, 0) -> x
   if (N1C && N1C->isNullValue())
     return N0;
+  // fold (add Sym, c) -> Sym+c
+  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N0))
+    if (!AfterLegalize && TLI.isOffsetFoldingLegal(GA) && N1C &&
+        GA->getOpcode() == ISD::GlobalAddress)
+      return DAG.getGlobalAddress(GA->getGlobal(), VT,
+                                  GA->getOffset() +
+                                  (uint64_t)N1C->getSExtValue());
   // fold ((c1-A)+c2) -> (c1+c2)-A
   if (N1C && N0.getOpcode() == ISD::SUB)
     if (ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getOperand(0)))
@@ -1133,6 +1140,21 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
   if (N1.getOpcode() == ISD::UNDEF)
     return N1;
 
+  // If the relocation model supports it, consider symbol offsets.
+  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N0))
+    if (!AfterLegalize && TLI.isOffsetFoldingLegal(GA)) {
+      // fold (sub Sym, c) -> Sym-c
+      if (N1C && GA->getOpcode() == ISD::GlobalAddress)
+        return DAG.getGlobalAddress(GA->getGlobal(), VT,
+                                    GA->getOffset() -
+                                    (uint64_t)N1C->getSExtValue());
+      // fold (sub Sym+c1, Sym+c2) -> c1-c2
+      if (GlobalAddressSDNode *GB = dyn_cast<GlobalAddressSDNode>(N1))
+        if (GA->getGlobal() == GB->getGlobal())
+          return DAG.getConstant((uint64_t)GA->getOffset() - GB->getOffset(),
+                                 VT);
+    }
+
   return SDValue();
 }
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 9cfb1a1a22..07819404e1 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -959,10 +959,15 @@ SDValue SelectionDAG::getConstantFP(double Val, MVT VT, bool isTarget) {
 }
 
 SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV,
-                                       MVT VT, int Offset,
+                                       MVT VT, int64_t Offset,
                                        bool isTargetGA) {
   unsigned Opc;
 
+  // Truncate (with sign-extension) the offset value to the pointer size.
+  unsigned BitWidth = VT.getSizeInBits();
+  if (BitWidth < 64)
+    Offset = (Offset << (64 - BitWidth) >> (64 - BitWidth));
+
   const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
   if (!GVar) {
     // If GV is an alias then use the aliasee for determining thread-localness.
@@ -2017,6 +2022,7 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
 bool SelectionDAG::isVerifiedDebugInfoDesc(SDValue Op) const {
   GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
   if (!GA) return false;
+  if (GA->getOffset() != 0) return false;
   GlobalVariable *GV = dyn_cast<GlobalVariable>(GA->getGlobal());
   if (!GV) return false;
   MachineModuleInfo *MMI = getMachineModuleInfo();
@@ -4726,7 +4732,7 @@ HandleSDNode::~HandleSDNode() {
 }
 
 GlobalAddressSDNode::GlobalAddressSDNode(bool isTarget, const GlobalValue *GA,
-                                         MVT VT, int o)
+                                         MVT VT, int64_t o)
   : SDNode(isa<GlobalVariable>(GA) &&
            cast<GlobalVariable>(GA)->isThreadLocal() ?
            // Thread Local
diff --git a/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 479e1380c5..1e7e847b8a 100644
--- a/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -656,6 +656,23 @@ SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table,
   return Table;
 }
 
+bool
+TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
+  // Assume that everything is safe in static mode.
+  if (getTargetMachine().getRelocationModel() == Reloc::Static)
+    return true;
+
+  // In dynamic-no-pic mode, assume that known defined values are safe.
+  if (getTargetMachine().getRelocationModel() == Reloc::DynamicNoPIC &&
+      GA &&
+      !GA->getGlobal()->isDeclaration() &&
+      !GA->getGlobal()->mayBeOverridden())
+    return true;
+
+  // Otherwise assume nothing is safe.
+  return false;
+}
+
 //===----------------------------------------------------------------------===//
 //  Optimization Methods
 //===----------------------------------------------------------------------===//
diff --git a/lib/Target/Alpha/AlphaISelLowering.cpp b/lib/Target/Alpha/AlphaISelLowering.cpp
index 33445f0fa4..7878830735 100644
--- a/lib/Target/Alpha/AlphaISelLowering.cpp
+++ b/lib/Target/Alpha/AlphaISelLowering.cpp
@@ -764,3 +764,9 @@ AlphaTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
 
   return sinkMBB;
 }
+
+bool
+AlphaTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
+  // The Alpha target isn't yet aware of offsets.
+  return false;
+}
diff --git a/lib/Target/Alpha/AlphaISelLowering.h b/lib/Target/Alpha/AlphaISelLowering.h
index 0a78997b19..3e870aff88 100644
--- a/lib/Target/Alpha/AlphaISelLowering.h
+++ b/lib/Target/Alpha/AlphaISelLowering.h
@@ -95,6 +95,8 @@ namespace llvm {
     MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
                                                    MachineBasicBlock *BB);
 
+    virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
+
   private:
     // Helpers for custom lowering.
     void LowerVAARG(SDNode *N, SDValue &Chain, SDValue &DataPtr,
diff --git a/lib/Target/CellSPU/SPUISelLowering.cpp b/lib/Target/CellSPU/SPUISelLowering.cpp
index 086a25f1d8..659ba7530d 100644
--- a/lib/Target/CellSPU/SPUISelLowering.cpp
+++ b/lib/Target/CellSPU/SPUISelLowering.cpp
@@ -3058,3 +3058,9 @@ bool SPUTargetLowering::isLegalAddressImmediate(int64_t V,
 bool SPUTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const {
   return false;
 }
+
+bool
+SPUTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
+  // The SPU target isn't yet aware of offsets.
+  return false;
+}
diff --git a/lib/Target/CellSPU/SPUISelLowering.h b/lib/Target/CellSPU/SPUISelLowering.h
index e79b1363f2..54aac66571 100644
--- a/lib/Target/CellSPU/SPUISelLowering.h
+++ b/lib/Target/CellSPU/SPUISelLowering.h
@@ -139,6 +139,8 @@ namespace llvm {
     /// as the offset of the target addressing mode.
     virtual bool isLegalAddressImmediate(int64_t V, const Type *Ty) const;
     virtual bool isLegalAddressImmediate(GlobalValue *) const;
+
+    virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
   };
 }
diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp
index 3224d73d5f..8fdc4b799b 100644
--- a/lib/Target/Mips/MipsISelLowering.cpp
+++ b/lib/Target/Mips/MipsISelLowering.cpp
@@ -1094,3 +1094,9 @@ getRegClassForInlineAsmConstraint(const std::string &Constraint,
   }
   return std::vector<unsigned>();
 }
+
+bool
+MipsTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
+  // The Mips target isn't yet aware of offsets.
+  return false;
+}
diff --git a/lib/Target/Mips/MipsISelLowering.h b/lib/Target/Mips/MipsISelLowering.h
index b19ce58d2b..5704b7ef8e 100644
--- a/lib/Target/Mips/MipsISelLowering.h
+++ b/lib/Target/Mips/MipsISelLowering.h
@@ -118,6 +118,8 @@ namespace llvm {
     std::vector<unsigned>
     getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                       MVT VT) const;
+
+    virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
   };
 }
diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp
index e81688e4e8..59e19d5466 100644
--- a/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/lib/Target/Sparc/SparcISelLowering.cpp
@@ -996,3 +996,9 @@ getRegClassForInlineAsmConstraint(const std::string &Constraint,
 
   return std::vector<unsigned>();
 }
+
+bool
+SparcTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
+  // The Sparc target isn't yet aware of offsets.
+  return false;
+}
diff --git a/lib/Target/Sparc/SparcISelLowering.h b/lib/Target/Sparc/SparcISelLowering.h
index ea3171d027..a0e3c65344 100644
--- a/lib/Target/Sparc/SparcISelLowering.h
+++ b/lib/Target/Sparc/SparcISelLowering.h
@@ -70,6 +70,8 @@ namespace llvm {
     std::vector<unsigned>
     getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                       MVT VT) const;
+
+    virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
   };
 } // end namespace llvm
diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp
index cf83e68192..6735f1d80b 100644
--- a/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -759,6 +759,7 @@ void X86DAGToDAGISel::EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) {
 /// addressing mode.
 bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM,
                                    bool isRoot, unsigned Depth) {
+  bool is64Bit = Subtarget->is64Bit();
   DOUT << "MatchAddress: "; DEBUG(AM.dump());
   // Limit recursion.
   if (Depth > 5)
@@ -768,7 +769,7 @@ bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM,
   if (AM.isRIPRel) {
     if (!AM.ES && AM.JT != -1 && N.getOpcode() == ISD::Constant) {
       int64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
-      if (isInt32(AM.Disp + Val)) {
+      if (!is64Bit || isInt32(AM.Disp + Val)) {
         AM.Disp += Val;
         return false;
       }
@@ -783,7 +784,7 @@ bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM,
   default: break;
   case ISD::Constant: {
     int64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
-    if (isInt32(AM.Disp + Val)) {
+    if (!is64Bit || isInt32(AM.Disp + Val)) {
       AM.Disp += Val;
       return false;
     }
     break;
   }
 
   case X86ISD::Wrapper: {
-DOUT << "Wrapper: 64bit " << Subtarget->is64Bit();
-DOUT << " AM "; DEBUG(AM.dump()); DOUT << "\n";
-DOUT << "AlreadySelected " << AlreadySelected << "\n";
-    bool is64Bit = Subtarget->is64Bit();
+    DOUT << "Wrapper: 64bit " << is64Bit;
+    DOUT << " AM "; DEBUG(AM.dump()); DOUT << "\n";
+    DOUT << "AlreadySelected " << AlreadySelected << "\n";
     // Under X86-64 non-small code model, GV (and friends) are 64-bits.
     // Also, base and index reg must be 0 in order to use rip as base.
     if (is64Bit && (TM.getCodeModel() != CodeModel::Small ||
@@ -808,17 +808,21 @@ DOUT << "AlreadySelected " << AlreadySelected << "\n";
     if (!AlreadySelected || (AM.Base.Reg.getNode() && AM.IndexReg.getNode())) {
       SDValue N0 = N.getOperand(0);
       if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
-        GlobalValue *GV = G->getGlobal();
-        AM.GV = GV;
-        AM.Disp += G->getOffset();
-        AM.isRIPRel = TM.symbolicAddressesAreRIPRel();
-        return false;
+        if (!is64Bit || isInt32(AM.Disp + G->getOffset())) {
+          GlobalValue *GV = G->getGlobal();
+          AM.GV = GV;
+          AM.Disp += G->getOffset();
+          AM.isRIPRel = TM.symbolicAddressesAreRIPRel();
+          return false;
+        }
       } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
-        AM.CP = CP->getConstVal();
-        AM.Align = CP->getAlignment();
-        AM.Disp += CP->getOffset();
-        AM.isRIPRel = TM.symbolicAddressesAreRIPRel();
-        return false;
+        if (!is64Bit || isInt32(AM.Disp + CP->getOffset())) {
+          AM.CP = CP->getConstVal();
+          AM.Align = CP->getAlignment();
+          AM.Disp += CP->getOffset();
+          AM.isRIPRel = TM.symbolicAddressesAreRIPRel();
+          return false;
+        }
       } else if (ExternalSymbolSDNode *S =dyn_cast<ExternalSymbolSDNode>(N0)) {
         AM.ES = S->getSymbol();
         AM.isRIPRel = TM.symbolicAddressesAreRIPRel();
@@ -862,7 +866,7 @@ DOUT << "AlreadySelected " << AlreadySelected << "\n";
         ConstantSDNode *AddVal =
           cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
         uint64_t Disp = AM.Disp + (AddVal->getZExtValue() << Val);
-        if (isInt32(Disp))
+        if (!is64Bit || isInt32(Disp))
           AM.Disp = Disp;
         else
           AM.IndexReg = ShVal;
@@ -905,7 +909,7 @@ DOUT << "AlreadySelected " << AlreadySelected << "\n";
           cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
         uint64_t Disp = AM.Disp + AddVal->getZExtValue() *
                                   CN->getZExtValue();
-        if (isInt32(Disp))
+        if (!is64Bit || isInt32(Disp))
           AM.Disp = Disp;
         else
           Reg = N.getNode()->getOperand(0);
@@ -944,7 +948,7 @@ DOUT << "AlreadySelected " << AlreadySelected << "\n";
           // Address could not have picked a GV address for the displacement.
           AM.GV == NULL &&
           // On x86-64, the resultant disp must fit in 32-bits.
-          isInt32(AM.Disp + CN->getSExtValue()) &&
+          (!is64Bit || isInt32(AM.Disp + CN->getSExtValue())) &&
          // Check to see if the LHS & C is zero.
           CurDAG->MaskedValueIsZero(N.getOperand(0), CN->getAPIntValue())) {
         AM.Disp += CN->getZExtValue();
@@ -1248,49 +1252,6 @@ SDNode *X86DAGToDAGISel::Select(SDValue N) {
     case X86ISD::GlobalBaseReg:
       return getGlobalBaseReg();
 
-    case ISD::ADD: {
-      // Turn ADD X, c to MOV32ri X+c. This cannot be done with tblgen'd
-      // code and is matched first so to prevent it from being turned into
-      // LEA32r X+c.
-      // In 64-bit small code size mode, use LEA to take advantage of
-      // RIP-relative addressing.
-      if (TM.getCodeModel() != CodeModel::Small)
-        break;
-      MVT PtrVT = TLI.getPointerTy();
-      SDValue N0 = N.getOperand(0);
-      SDValue N1 = N.getOperand(1);
-      if (N.getNode()->getValueType(0) == PtrVT &&
-          N0.getOpcode() == X86ISD::Wrapper &&
-          N1.getOpcode() == ISD::Constant) {
-        unsigned Offset = (unsigned)cast<ConstantSDNode>(N1)->getZExtValue();
-        SDValue C(0, 0);
-        // TODO: handle ExternalSymbolSDNode.
-        if (GlobalAddressSDNode *G =
-              dyn_cast<GlobalAddressSDNode>(N0.getOperand(0))) {
-          C = CurDAG->getTargetGlobalAddress(G->getGlobal(), PtrVT,
-                                             G->getOffset() + Offset);
-        } else if (ConstantPoolSDNode *CP =
-                     dyn_cast<ConstantPoolSDNode>(N0.getOperand(0))) {
-          C = CurDAG->getTargetConstantPool(CP->getConstVal(), PtrVT,
-                                            CP->getAlignment(),
-                                            CP->getOffset()+Offset);
-        }
-
-        if (C.getNode()) {
-          if (Subtarget->is64Bit()) {
-            SDValue Ops[] = { CurDAG->getRegister(0, PtrVT), getI8Imm(1),
-                              CurDAG->getRegister(0, PtrVT), C };
-            return CurDAG->SelectNodeTo(N.getNode(), X86::LEA64r,
-                                        MVT::i64, Ops, 4);
-          } else
-            return CurDAG->SelectNodeTo(N.getNode(), X86::MOV32ri, PtrVT, C);
-        }
-      }
-
-      // Other cases are handled by auto-generated code.
-      break;
-    }
-
     case X86ISD::ATOMOR64_DAG:
       return SelectAtomic64(Node, X86::ATOMOR6432);
     case X86ISD::ATOMXOR64_DAG:
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index fa467693bc..c977dbe92b 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -1705,7 +1705,8 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
     // non-JIT mode.
     if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(),
                                         getTargetMachine(), true))
-      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
+      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy(),
+                                          G->getOffset());
   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
   } else if (IsTailCall) {
@@ -4390,12 +4391,24 @@ X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) {
 
 SDValue
 X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV,
+                                      int64_t Offset,
                                       SelectionDAG &DAG) const {
-  SDValue Result = DAG.getTargetGlobalAddress(GV, getPointerTy());
+  bool IsPic = getTargetMachine().getRelocationModel() == Reloc::PIC_;
+  bool ExtraLoadRequired =
+    Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false);
+
+  // Create the TargetGlobalAddress node, folding in the constant
+  // offset if it is legal.
+  SDValue Result;
+  if (!IsPic && !ExtraLoadRequired) {
+    Result = DAG.getTargetGlobalAddress(GV, getPointerTy(), Offset);
+    Offset = 0;
+  } else
+    Result = DAG.getTargetGlobalAddress(GV, getPointerTy(), 0);
   Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
+
   // With PIC, the address is actually $g + Offset.
-  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
-      !Subtarget->isPICStyleRIPRel()) {
+  if (IsPic && !Subtarget->isPICStyleRIPRel()) {
     Result = DAG.getNode(ISD::ADD, getPointerTy(),
                          DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                          Result);
@@ -4406,17 +4419,24 @@ X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV,
   // the GlobalAddress must be in the base or index register of the address, not
   // the GV offset field. Platform check is inside GVRequiresExtraLoad() call
   // The same applies for external symbols during PIC codegen
-  if (Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false))
+  if (ExtraLoadRequired)
     Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), Result,
                          PseudoSourceValue::getGOT(), 0);
 
+  // If there was a non-zero offset that we didn't fold, create an explicit
+  // addition for it.
+  if (Offset != 0)
+    Result = DAG.getNode(ISD::ADD, getPointerTy(), Result,
+                         DAG.getConstant(Offset, getPointerTy()));
+
   return Result;
 }
 
 SDValue
 X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) {
   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
-  return LowerGlobalAddress(GV, DAG);
+  int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
+  return LowerGlobalAddress(GV, Offset, DAG);
 }
 
 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
@@ -7006,6 +7026,7 @@ bool X86TargetLowering::isGAPlusOffset(SDNode *N,
   if (N->getOpcode() == X86ISD::Wrapper) {
     if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
       GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
+      Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset();
       return true;
     }
   }
@@ -7448,7 +7469,7 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
 
     if (GA) {
       if (hasMemory)
-        Op = LowerGlobalAddress(GA->getGlobal(), DAG);
+        Op = LowerGlobalAddress(GA->getGlobal(), Offset, DAG);
       else
         Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
                                         Offset);
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index b3d165432f..b76ba845e5 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -551,7 +551,8 @@ namespace llvm {
     SDValue LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG);
     SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG);
    SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerGlobalAddress(const GlobalValue *GV, SelectionDAG &DAG) const;
+    SDValue LowerGlobalAddress(const GlobalValue *GV, int64_t Offset,
+                               SelectionDAG &DAG) const;
     SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG);
     SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG);
     SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG);
diff --git a/lib/Target/X86/X86TargetMachine.cpp b/lib/Target/X86/X86TargetMachine.cpp
index 923823b98c..ef4f897f5d 100644
--- a/lib/Target/X86/X86TargetMachine.cpp
+++ b/lib/Target/X86/X86TargetMachine.cpp
@@ -143,6 +143,15 @@ X86TargetMachine::X86TargetMachine(const Module &M, const std::string &FS,
     else
       setRelocationModel(Reloc::Static);
   }
+
+  // ELF doesn't have a distinct dynamic-no-PIC model. Dynamic-no-PIC
+  // is defined as a model for code which may be used in static or
+  // dynamic executables but not necessarily a shared library. On ELF
+  // implement this by using the Static model.
+  if (Subtarget.isTargetELF() &&
+      getRelocationModel() == Reloc::DynamicNoPIC)
+    setRelocationModel(Reloc::Static);
+
   if (Subtarget.is64Bit()) {
     // No DynamicNoPIC support under X86-64.
     if (getRelocationModel() == Reloc::DynamicNoPIC)
```