62 files changed, 728 insertions, 602 deletions
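Before the per-file hunks: this patch moves the former codegen globals (UnsafeFPMath, NoFramePointerElim, EnableFastISel, and friends) out of TargetOptions.h free variables and into a TargetOptions class that every TargetMachine carries as TM.Options, and Target::createTargetMachine gains a const TargetOptions & parameter. A minimal caller sketch of the new signature; TheTarget, TheTriple, MCPU and FeaturesStr are assumed to be set up as in the lib/ExecutionEngine/TargetSelect.cpp hunk further down, not taken verbatim from the patch:

    TargetOptions Options;                      // all fields default-initialized by the new ctor
    Options.NoFramePointerElim = 1;             // flags are now per-TargetMachine, not global cl::opts
    TargetMachine *TM =
      TheTarget->createTargetMachine(TheTriple.getTriple(), MCPU, FeaturesStr,
                                     Options, Reloc::Default, CodeModel::Default,
                                     CodeGenOpt::Default);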
diff --git a/include/llvm/CodeGen/Analysis.h b/include/llvm/CodeGen/Analysis.h index d8e64071a1..fda801cb97 100644 --- a/include/llvm/CodeGen/Analysis.h +++ b/include/llvm/CodeGen/Analysis.h @@ -70,6 +70,10 @@ bool hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos, /// ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred); +/// getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats, +/// return the equivalent code if we're allowed to assume that NaNs won't occur. +ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC); + /// getICmpCondCode - Return the ISD condition code corresponding to /// the given LLVM IR integer condition code. /// diff --git a/include/llvm/Support/TargetRegistry.h b/include/llvm/Support/TargetRegistry.h index e1ef39e5c6..ea55c91841 100644 --- a/include/llvm/Support/TargetRegistry.h +++ b/include/llvm/Support/TargetRegistry.h @@ -44,6 +44,7 @@ namespace llvm { class MCTargetAsmLexer; class MCTargetAsmParser; class TargetMachine; + class TargetOptions; class raw_ostream; class formatted_raw_ostream; @@ -86,6 +87,7 @@ namespace llvm { StringRef TT, StringRef CPU, StringRef Features, + const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL); @@ -334,13 +336,14 @@ namespace llvm { /// either the target triple from the module, or the target triple of the /// host if that does not exist. TargetMachine *createTargetMachine(StringRef Triple, StringRef CPU, - StringRef Features, + StringRef Features, const TargetOptions &Options, Reloc::Model RM = Reloc::Default, CodeModel::Model CM = CodeModel::Default, CodeGenOpt::Level OL = CodeGenOpt::Default) const { if (!TargetMachineCtorFn) return 0; - return TargetMachineCtorFn(*this, Triple, CPU, Features, RM, CM, OL); + return TargetMachineCtorFn(*this, Triple, CPU, Features, Options, + RM, CM, OL); } /// createMCAsmBackend - Create a target specific assembly parser. @@ -1017,10 +1020,11 @@ namespace llvm { private: static TargetMachine *Allocator(const Target &T, StringRef TT, StringRef CPU, StringRef FS, + const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL) { - return new TargetMachineImpl(T, TT, CPU, FS, RM, CM, OL); + return new TargetMachineImpl(T, TT, CPU, FS, Options, RM, CM, OL); } }; diff --git a/include/llvm/Target/Target.td b/include/llvm/Target/Target.td index 9714172aa4..8582015e77 100644 --- a/include/llvm/Target/Target.td +++ b/include/llvm/Target/Target.td @@ -423,7 +423,7 @@ class Predicate<string cond> { /// NoHonorSignDependentRounding - This predicate is true if support for /// sign-dependent-rounding is not enabled. def NoHonorSignDependentRounding - : Predicate<"!HonorSignDependentRoundingFPMath()">; + : Predicate<"!TM.Options.HonorSignDependentRoundingFPMath()">; class Requires<list<Predicate> preds> { list<Predicate> Predicates = preds; diff --git a/include/llvm/Target/TargetMachine.h b/include/llvm/Target/TargetMachine.h index 4a5f33e401..c169e063d0 100644 --- a/include/llvm/Target/TargetMachine.h +++ b/include/llvm/Target/TargetMachine.h @@ -14,6 +14,7 @@ #ifndef LLVM_TARGET_TARGETMACHINE_H #define LLVM_TARGET_TARGETMACHINE_H +#include "llvm/Target/TargetOptions.h" #include "llvm/MC/MCCodeGenInfo.h" #include "llvm/ADT/StringRef.h" #include <cassert> @@ -63,7 +64,7 @@ class TargetMachine { void operator=(const TargetMachine &); // DO NOT IMPLEMENT protected: // Can only create subclasses. 
TargetMachine(const Target &T, StringRef TargetTriple, - StringRef CPU, StringRef FS); + StringRef CPU, StringRef FS, const TargetOptions &Options); /// getSubtargetImpl - virtual method implemented by subclasses that returns /// a reference to that target's TargetSubtargetInfo-derived member variable. @@ -101,6 +102,8 @@ public: const StringRef getTargetCPU() const { return TargetCPU; } const StringRef getTargetFeatureString() const { return TargetFS; } + TargetOptions Options; + // Interfaces to the major aspects of target machine information: // -- Instruction opcode and operand information // -- Pipelines and scheduling information @@ -284,10 +287,20 @@ public: class LLVMTargetMachine : public TargetMachine { protected: // Can only create subclasses. LLVMTargetMachine(const Target &T, StringRef TargetTriple, - StringRef CPU, StringRef FS, + StringRef CPU, StringRef FS, TargetOptions Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL); + /// printNoVerify - Add a pass to dump the machine function, if debugging is + /// enabled. + /// + void printNoVerify(PassManagerBase &PM, const char *Banner) const; + + /// printAndVerify - Add a pass to dump then verify the machine function, if + /// those steps are enabled. + /// + void printAndVerify(PassManagerBase &PM, const char *Banner) const; + private: /// addCommonCodeGenPasses - Add standard LLVM codegen passes used for /// both emitting to assembly files or machine code output. diff --git a/include/llvm/Target/TargetOptions.h b/include/llvm/Target/TargetOptions.h index 36913413b7..30018163aa 100644 --- a/include/llvm/Target/TargetOptions.h +++ b/include/llvm/Target/TargetOptions.h @@ -15,6 +15,8 @@ #ifndef LLVM_TARGET_TARGETOPTIONS_H #define LLVM_TARGET_TARGETOPTIONS_H +#include <string> + namespace llvm { class MachineFunction; class StringRef; @@ -27,140 +29,156 @@ namespace llvm { Hard // Hard float. }; } - - /// PrintMachineCode - This flag is enabled when the -print-machineinstrs - /// option is specified on the command line, and should enable debugging - /// output from the code generator. - extern bool PrintMachineCode; - - /// NoFramePointerElim - This flag is enabled when the -disable-fp-elim is - /// specified on the command line. If the target supports the frame pointer - /// elimination optimization, this option should disable it. - extern bool NoFramePointerElim; - - /// NoFramePointerElimNonLeaf - This flag is enabled when the - /// -disable-non-leaf-fp-elim is specified on the command line. If the target - /// supports the frame pointer elimination optimization, this option should - /// disable it for non-leaf functions. - extern bool NoFramePointerElimNonLeaf; - - /// DisableFramePointerElim - This returns true if frame pointer elimination - /// optimization should be disabled for the given machine function. - extern bool DisableFramePointerElim(const MachineFunction &MF); - - /// LessPreciseFPMAD - This flag is enabled when the - /// -enable-fp-mad is specified on the command line. When this flag is off - /// (the default), the code generator is not allowed to generate mad - /// (multiply add) if the result is "less precise" than doing those operations - /// individually. - extern bool LessPreciseFPMADOption; - extern bool LessPreciseFPMAD(); - - /// NoExcessFPPrecision - This flag is enabled when the - /// -disable-excess-fp-precision flag is specified on the command line. 
When - /// this flag is off (the default), the code generator is allowed to produce - /// results that are "more precise" than IEEE allows. This includes use of - /// FMA-like operations and use of the X86 FP registers without rounding all - /// over the place. - extern bool NoExcessFPPrecision; - - /// UnsafeFPMath - This flag is enabled when the - /// -enable-unsafe-fp-math flag is specified on the command line. When - /// this flag is off (the default), the code generator is not allowed to - /// produce results that are "less precise" than IEEE allows. This includes - /// use of X86 instructions like FSIN and FCOS instead of libcalls. - /// UnsafeFPMath implies LessPreciseFPMAD. - extern bool UnsafeFPMath; - - /// NoInfsFPMath - This flag is enabled when the - /// -enable-no-infs-fp-math flag is specified on the command line. When - /// this flag is off (the default), the code generator is not allowed to - /// assume the FP arithmetic arguments and results are never +-Infs. - extern bool NoInfsFPMath; - - /// NoNaNsFPMath - This flag is enabled when the - /// -enable-no-nans-fp-math flag is specified on the command line. When - /// this flag is off (the default), the code generator is not allowed to - /// assume the FP arithmetic arguments and results are never NaNs. - extern bool NoNaNsFPMath; - - /// HonorSignDependentRoundingFPMath - This returns true when the - /// -enable-sign-dependent-rounding-fp-math is specified. If this returns - /// false (the default), the code generator is allowed to assume that the - /// rounding behavior is the default (round-to-zero for all floating point to - /// integer conversions, and round-to-nearest for all other arithmetic - /// truncations). If this is enabled (set to true), the code generator must - /// assume that the rounding mode may dynamically change. - extern bool HonorSignDependentRoundingFPMathOption; - extern bool HonorSignDependentRoundingFPMath(); - - /// UseSoftFloat - This flag is enabled when the -soft-float flag is specified - /// on the command line. When this flag is on, the code generator will - /// generate libcalls to the software floating point library instead of - /// target FP instructions. - extern bool UseSoftFloat; - - /// FloatABIType - This setting is set by -float-abi=xxx option is specfied - /// on the command line. This setting may either be Default, Soft, or Hard. - /// Default selects the target's default behavior. Soft selects the ABI for - /// UseSoftFloat, but does not inidcate that FP hardware may not be used. - /// Such a combination is unfortunately popular (e.g. arm-apple-darwin). - /// Hard presumes that the normal FP ABI is used. - extern FloatABI::ABIType FloatABIType; - - /// NoZerosInBSS - By default some codegens place zero-initialized data to - /// .bss section. This flag disables such behaviour (necessary, e.g. for - /// crt*.o compiling). - extern bool NoZerosInBSS; - - /// JITExceptionHandling - This flag indicates that the JIT should emit - /// exception handling information. - extern bool JITExceptionHandling; - - /// JITEmitDebugInfo - This flag indicates that the JIT should try to emit - /// debug information and notify a debugger about it. - extern bool JITEmitDebugInfo; - - /// JITEmitDebugInfoToDisk - This flag indicates that the JIT should write - /// the object files generated by the JITEmitDebugInfo flag to disk. This - /// flag is hidden and is only for debugging the debug info. 
- extern bool JITEmitDebugInfoToDisk; - - /// GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is - /// specified on the commandline. When the flag is on, participating targets - /// will perform tail call optimization on all calls which use the fastcc - /// calling convention and which satisfy certain target-independent - /// criteria (being at the end of a function, having the same return type - /// as their parent function, etc.), using an alternate ABI if necessary. - extern bool GuaranteedTailCallOpt; - - /// StackAlignmentOverride - Override default stack alignment for target. - extern unsigned StackAlignmentOverride; - - /// RealignStack - This flag indicates whether the stack should be - /// automatically realigned, if needed. - extern bool RealignStack; - - /// DisableJumpTables - This flag indicates jump tables should not be - /// generated. - extern bool DisableJumpTables; - - /// EnableFastISel - This flag enables fast-path instruction selection - /// which trades away generated code quality in favor of reducing - /// compile time. - extern bool EnableFastISel; - + /// StrongPHIElim - This flag enables more aggressive PHI elimination /// wth earlier copy coalescing. extern bool StrongPHIElim; - /// getTrapFunctionName - If this returns a non-empty string, this means isel - /// should lower Intrinsic::trap to a call to the specified function name - /// instead of an ISD::TRAP node. - extern StringRef getTrapFunctionName(); - - extern bool EnableSegmentedStacks; - + class TargetOptions { + public: + TargetOptions() + : PrintMachineCode(false), NoFramePointerElim(false), + NoFramePointerElimNonLeaf(false), LessPreciseFPMADOption(false), + NoExcessFPPrecision(false), UnsafeFPMath(false), NoInfsFPMath(false), + NoNaNsFPMath(false), HonorSignDependentRoundingFPMathOption(false), + UseSoftFloat(false), NoZerosInBSS(false), JITExceptionHandling(false), + JITEmitDebugInfo(false), JITEmitDebugInfoToDisk(false), + GuaranteedTailCallOpt(false), StackAlignmentOverride(0), + RealignStack(true), DisableJumpTables(false), EnableFastISel(false), + EnableSegmentedStacks(false), TrapFuncName(""), + FloatABIType(FloatABI::Default) + {} + + /// PrintMachineCode - This flag is enabled when the -print-machineinstrs + /// option is specified on the command line, and should enable debugging + /// output from the code generator. + unsigned PrintMachineCode : 1; + + /// NoFramePointerElim - This flag is enabled when the -disable-fp-elim is + /// specified on the command line. If the target supports the frame pointer + /// elimination optimization, this option should disable it. + unsigned NoFramePointerElim : 1; + + /// NoFramePointerElimNonLeaf - This flag is enabled when the + /// -disable-non-leaf-fp-elim is specified on the command line. If the + /// target supports the frame pointer elimination optimization, this option + /// should disable it for non-leaf functions. + unsigned NoFramePointerElimNonLeaf : 1; + + /// DisableFramePointerElim - This returns true if frame pointer elimination + /// optimization should be disabled for the given machine function. + bool DisableFramePointerElim(const MachineFunction &MF) const; + + /// LessPreciseFPMAD - This flag is enabled when the + /// -enable-fp-mad is specified on the command line. When this flag is off + /// (the default), the code generator is not allowed to generate mad + /// (multiply add) if the result is "less precise" than doing those + /// operations individually. 
+ unsigned LessPreciseFPMADOption : 1; + bool LessPreciseFPMAD() const; + + /// NoExcessFPPrecision - This flag is enabled when the + /// -disable-excess-fp-precision flag is specified on the command line. + /// When this flag is off (the default), the code generator is allowed to + /// produce results that are "more precise" than IEEE allows. This includes + /// use of FMA-like operations and use of the X86 FP registers without + /// rounding all over the place. + unsigned NoExcessFPPrecision : 1; + + /// UnsafeFPMath - This flag is enabled when the + /// -enable-unsafe-fp-math flag is specified on the command line. When + /// this flag is off (the default), the code generator is not allowed to + /// produce results that are "less precise" than IEEE allows. This includes + /// use of X86 instructions like FSIN and FCOS instead of libcalls. + /// UnsafeFPMath implies LessPreciseFPMAD. + unsigned UnsafeFPMath : 1; + + /// NoInfsFPMath - This flag is enabled when the + /// -enable-no-infs-fp-math flag is specified on the command line. When + /// this flag is off (the default), the code generator is not allowed to + /// assume the FP arithmetic arguments and results are never +-Infs. + unsigned NoInfsFPMath : 1; + + /// NoNaNsFPMath - This flag is enabled when the + /// -enable-no-nans-fp-math flag is specified on the command line. When + /// this flag is off (the default), the code generator is not allowed to + /// assume the FP arithmetic arguments and results are never NaNs. + unsigned NoNaNsFPMath : 1; + + /// HonorSignDependentRoundingFPMath - This returns true when the + /// -enable-sign-dependent-rounding-fp-math is specified. If this returns + /// false (the default), the code generator is allowed to assume that the + /// rounding behavior is the default (round-to-zero for all floating point + /// to integer conversions, and round-to-nearest for all other arithmetic + /// truncations). If this is enabled (set to true), the code generator must + /// assume that the rounding mode may dynamically change. + unsigned HonorSignDependentRoundingFPMathOption : 1; + bool HonorSignDependentRoundingFPMath() const; + + /// UseSoftFloat - This flag is enabled when the -soft-float flag is + /// specified on the command line. When this flag is on, the code generator + /// will generate libcalls to the software floating point library instead of + /// target FP instructions. + unsigned UseSoftFloat : 1; + + /// NoZerosInBSS - By default some codegens place zero-initialized data to + /// .bss section. This flag disables such behaviour (necessary, e.g. for + /// crt*.o compiling). + unsigned NoZerosInBSS : 1; + + /// JITExceptionHandling - This flag indicates that the JIT should emit + /// exception handling information. + unsigned JITExceptionHandling : 1; + + /// JITEmitDebugInfo - This flag indicates that the JIT should try to emit + /// debug information and notify a debugger about it. + unsigned JITEmitDebugInfo : 1; + + /// JITEmitDebugInfoToDisk - This flag indicates that the JIT should write + /// the object files generated by the JITEmitDebugInfo flag to disk. This + /// flag is hidden and is only for debugging the debug info. + unsigned JITEmitDebugInfoToDisk : 1; + + /// GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is + /// specified on the commandline. 
When the flag is on, participating targets + /// will perform tail call optimization on all calls which use the fastcc + /// calling convention and which satisfy certain target-independent + /// criteria (being at the end of a function, having the same return type + /// as their parent function, etc.), using an alternate ABI if necessary. + unsigned GuaranteedTailCallOpt : 1; + + /// StackAlignmentOverride - Override default stack alignment for target. + unsigned StackAlignmentOverride; + + /// RealignStack - This flag indicates whether the stack should be + /// automatically realigned, if needed. + unsigned RealignStack : 1; + + /// DisableJumpTables - This flag indicates jump tables should not be + /// generated. + unsigned DisableJumpTables : 1; + + /// EnableFastISel - This flag enables fast-path instruction selection + /// which trades away generated code quality in favor of reducing + /// compile time. + unsigned EnableFastISel : 1; + + unsigned EnableSegmentedStacks : 1; + + /// getTrapFunctionName - If this returns a non-empty string, this means + /// isel should lower Intrinsic::trap to a call to the specified function + /// name instead of an ISD::TRAP node. + std::string TrapFuncName; + StringRef getTrapFunctionName() const; + + /// FloatABIType - This setting is set by -float-abi=xxx option is specfied + /// on the command line. This setting may either be Default, Soft, or Hard. + /// Default selects the target's default behavior. Soft selects the ABI for + /// UseSoftFloat, but does not indicate that FP hardware may not be used. + /// Such a combination is unfortunately popular (e.g. arm-apple-darwin). + /// Hard presumes that the normal FP ABI is used. + FloatABI::ABIType FloatABIType; + }; } // End llvm namespace #endif diff --git a/lib/CodeGen/Analysis.cpp b/lib/CodeGen/Analysis.cpp index fafc01044d..fc28b21194 100644 --- a/lib/CodeGen/Analysis.cpp +++ b/lib/CodeGen/Analysis.cpp @@ -1,4 +1,4 @@ -//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities --*- C++ ------*-===// +//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities -----------------===// // // The LLVM Compiler Infrastructure // @@ -149,33 +149,40 @@ llvm::hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos, /// consideration of global floating-point math flags. 
/// ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) { - ISD::CondCode FPC, FOC; switch (Pred) { - case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break; - case FCmpInst::FCMP_OEQ: FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break; - case FCmpInst::FCMP_OGT: FOC = ISD::SETGT; FPC = ISD::SETOGT; break; - case FCmpInst::FCMP_OGE: FOC = ISD::SETGE; FPC = ISD::SETOGE; break; - case FCmpInst::FCMP_OLT: FOC = ISD::SETLT; FPC = ISD::SETOLT; break; - case FCmpInst::FCMP_OLE: FOC = ISD::SETLE; FPC = ISD::SETOLE; break; - case FCmpInst::FCMP_ONE: FOC = ISD::SETNE; FPC = ISD::SETONE; break; - case FCmpInst::FCMP_ORD: FOC = FPC = ISD::SETO; break; - case FCmpInst::FCMP_UNO: FOC = FPC = ISD::SETUO; break; - case FCmpInst::FCMP_UEQ: FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break; - case FCmpInst::FCMP_UGT: FOC = ISD::SETGT; FPC = ISD::SETUGT; break; - case FCmpInst::FCMP_UGE: FOC = ISD::SETGE; FPC = ISD::SETUGE; break; - case FCmpInst::FCMP_ULT: FOC = ISD::SETLT; FPC = ISD::SETULT; break; - case FCmpInst::FCMP_ULE: FOC = ISD::SETLE; FPC = ISD::SETULE; break; - case FCmpInst::FCMP_UNE: FOC = ISD::SETNE; FPC = ISD::SETUNE; break; - case FCmpInst::FCMP_TRUE: FOC = FPC = ISD::SETTRUE; break; - default: - llvm_unreachable("Invalid FCmp predicate opcode!"); - FOC = FPC = ISD::SETFALSE; - break; + case FCmpInst::FCMP_FALSE: return ISD::SETFALSE; + case FCmpInst::FCMP_OEQ: return ISD::SETOEQ; + case FCmpInst::FCMP_OGT: return ISD::SETOGT; + case FCmpInst::FCMP_OGE: return ISD::SETOGE; + case FCmpInst::FCMP_OLT: return ISD::SETOLT; + case FCmpInst::FCMP_OLE: return ISD::SETOLE; + case FCmpInst::FCMP_ONE: return ISD::SETONE; + case FCmpInst::FCMP_ORD: return ISD::SETO; + case FCmpInst::FCMP_UNO: return ISD::SETUO; + case FCmpInst::FCMP_UEQ: return ISD::SETUEQ; + case FCmpInst::FCMP_UGT: return ISD::SETUGT; + case FCmpInst::FCMP_UGE: return ISD::SETUGE; + case FCmpInst::FCMP_ULT: return ISD::SETULT; + case FCmpInst::FCMP_ULE: return ISD::SETULE; + case FCmpInst::FCMP_UNE: return ISD::SETUNE; + case FCmpInst::FCMP_TRUE: return ISD::SETTRUE; + default: break; + } + llvm_unreachable("Invalid FCmp predicate opcode!"); + return ISD::SETFALSE; +} + +ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) { + switch (CC) { + case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ; + case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE; + case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT; + case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE; + case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT; + case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE; + default: break; } - if (NoNaNsFPMath) - return FOC; - else - return FPC; + return CC; } /// getICmpCondCode - Return the ISD condition code corresponding to @@ -221,7 +228,8 @@ bool llvm::isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr, // longjmp on x86), it can end up causing miscompilation that has not // been fully understood. if (!Ret && - (!GuaranteedTailCallOpt || !isa<UnreachableInst>(Term))) return false; + (!TLI.getTargetMachine().Options.GuaranteedTailCallOpt || + !isa<UnreachableInst>(Term))) return false; // If I will have a chain, make sure no other instruction that will have a // chain interposes between I and the return. 
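With this change getFCmpCondCode no longer consults the NaN flag internally; callers apply the relaxation themselves via the new getFCmpCodeWithoutNaN. A small sketch of the resulting caller-side pattern, mirroring the SelectionDAGBuilder hunks below (FC is an FCmpInst *, TM the current TargetMachine):

    ISD::CondCode Cond = llvm::getFCmpCondCode(FC->getPredicate());
    if (TM.Options.NoNaNsFPMath)                 // flag now read from TargetOptions
      Cond = llvm::getFCmpCodeWithoutNaN(Cond);  // e.g. SETOEQ/SETUEQ -> SETEQ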
diff --git a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp index dc46a5874b..1a216c0ddc 100644 --- a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp +++ b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp @@ -1414,7 +1414,7 @@ void DwarfDebug::endFunction(const MachineFunction *MF) { DIE *CurFnDIE = constructScopeDIE(TheCU, FnScope); - if (!DisableFramePointerElim(*MF)) + if (!MF->getTarget().Options.DisableFramePointerElim(*MF)) TheCU->addUInt(CurFnDIE, dwarf::DW_AT_APPLE_omit_frame_ptr, dwarf::DW_FORM_flag, 1); diff --git a/lib/CodeGen/LLVMTargetMachine.cpp b/lib/CodeGen/LLVMTargetMachine.cpp index 03b5693a6a..62227fd4d6 100644 --- a/lib/CodeGen/LLVMTargetMachine.cpp +++ b/lib/CodeGen/LLVMTargetMachine.cpp @@ -41,10 +41,6 @@ #include "llvm/Support/TargetRegistry.h" using namespace llvm; -namespace llvm { - bool EnableFastISel; -} - static cl::opt<bool> DisablePostRA("disable-post-ra", cl::Hidden, cl::desc("Disable Post Regalloc")); static cl::opt<bool> DisableBranchFold("disable-branch-fold", cl::Hidden, @@ -114,9 +110,10 @@ EnableFastISelOption("fast-isel", cl::Hidden, LLVMTargetMachine::LLVMTargetMachine(const Target &T, StringRef Triple, StringRef CPU, StringRef FS, + TargetOptions Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL) - : TargetMachine(T, Triple, CPU, FS) { + : TargetMachine(T, Triple, CPU, FS, Options) { CodeGenInfo = T.createMCCodeGenInfo(Triple, RM, CM, OL); AsmInfo = T.createMCAsmInfo(Triple); // TargetSelect.h moved to a different directory between LLVM 2.9 and 3.0, @@ -275,14 +272,15 @@ bool LLVMTargetMachine::addPassesToEmitMC(PassManagerBase &PM, return false; // success! } -static void printNoVerify(PassManagerBase &PM, const char *Banner) { - if (PrintMachineCode) +void LLVMTargetMachine::printNoVerify(PassManagerBase &PM, + const char *Banner) const { + if (Options.PrintMachineCode) PM.add(createMachineFunctionPrinterPass(dbgs(), Banner)); } -static void printAndVerify(PassManagerBase &PM, - const char *Banner) { - if (PrintMachineCode) +void LLVMTargetMachine::printAndVerify(PassManagerBase &PM, + const char *Banner) const { + if (Options.PrintMachineCode) PM.add(createMachineFunctionPrinterPass(dbgs(), Banner)); if (VerifyMachineCode) @@ -380,7 +378,7 @@ bool LLVMTargetMachine::addCommonCodeGenPasses(PassManagerBase &PM, if (EnableFastISelOption == cl::BOU_TRUE || (getOptLevel() == CodeGenOpt::None && EnableFastISelOption != cl::BOU_FALSE)) - EnableFastISel = true; + Options.EnableFastISel = true; // Ask the target for an isel. if (addInstSelector(PM)) diff --git a/lib/CodeGen/PrologEpilogInserter.cpp b/lib/CodeGen/PrologEpilogInserter.cpp index 32c932552b..c8b02e8c9f 100644 --- a/lib/CodeGen/PrologEpilogInserter.cpp +++ b/lib/CodeGen/PrologEpilogInserter.cpp @@ -706,7 +706,7 @@ void PEI::insertPrologEpilogCode(MachineFunction &Fn) { // we've been asked for it. This, when linked with a runtime with support // for segmented stacks (libgcc is one), will result in allocating stack // space in small chunks instead of one large contiguous block. 
- if (EnableSegmentedStacks) + if (Fn.getTarget().Options.EnableSegmentedStacks) TFI.adjustForSegmentedStacks(Fn); } diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index d8208a4433..62c2c6ae6c 100644 --- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -361,6 +361,7 @@ CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) { /// specified expression for the same cost as the expression itself, or 2 if we /// can compute the negated form more cheaply than the expression itself. static char isNegatibleForFree(SDValue Op, bool LegalOperations, + const TargetOptions *Options, unsigned Depth = 0) { // No compile time optimizations on this type. if (Op.getValueType() == MVT::ppcf128) @@ -383,34 +384,39 @@ static char isNegatibleForFree(SDValue Op, bool LegalOperations, return LegalOperations ? 0 : 1; case ISD::FADD: // FIXME: determine better conditions for this xform. - if (!UnsafeFPMath) return 0; + if (!Options->UnsafeFPMath) return 0; // fold (fsub (fadd A, B)) -> (fsub (fneg A), B) - if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, Depth+1)) + if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, Options, + Depth + 1)) return V; // fold (fneg (fadd A, B)) -> (fsub (fneg B), A) - return isNegatibleForFree(Op.getOperand(1), LegalOperations, Depth+1); + return isNegatibleForFree(Op.getOperand(1), LegalOperations, Options, + Depth + 1); case ISD::FSUB: // We can't turn -(A-B) into B-A when we honor signed zeros. - if (!UnsafeFPMath) return 0; + if (!Options->UnsafeFPMath) return 0; // fold (fneg (fsub A, B)) -> (fsub B, A) return 1; case ISD::FMUL: case ISD::FDIV: - if (HonorSignDependentRoundingFPMath()) return 0; + if (Options->HonorSignDependentRoundingFPMath()) return 0; // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y) or (fmul X, (fneg Y)) - if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, Depth+1)) + if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, Options, + Depth + 1)) return V; - return isNegatibleForFree(Op.getOperand(1), LegalOperations, Depth+1); + return isNegatibleForFree(Op.getOperand(1), LegalOperations, Options, + Depth + 1); case ISD::FP_EXTEND: case ISD::FP_ROUND: case ISD::FSIN: - return isNegatibleForFree(Op.getOperand(0), LegalOperations, Depth+1); + return isNegatibleForFree(Op.getOperand(0), LegalOperations, Options, + Depth + 1); } } @@ -434,10 +440,11 @@ static SDValue GetNegatedExpression(SDValue Op, SelectionDAG &DAG, } case ISD::FADD: // FIXME: determine better conditions for this xform. - assert(UnsafeFPMath); + assert(DAG.getTarget().Options.UnsafeFPMath); // fold (fneg (fadd A, B)) -> (fsub (fneg A), B) - if (isNegatibleForFree(Op.getOperand(0), LegalOperations, Depth+1)) + if (isNegatibleForFree(Op.getOperand(0), LegalOperations, + &DAG.getTarget().Options, Depth+1)) return DAG.getNode(ISD::FSUB, Op.getDebugLoc(), Op.getValueType(), GetNegatedExpression(Op.getOperand(0), DAG, LegalOperations, Depth+1), @@ -449,7 +456,7 @@ static SDValue GetNegatedExpression(SDValue Op, SelectionDAG &DAG, Op.getOperand(0)); case ISD::FSUB: // We can't turn -(A-B) into B-A when we honor signed zeros. 
- assert(UnsafeFPMath); + assert(DAG.getTarget().Options.UnsafeFPMath); // fold (fneg (fsub 0, B)) -> B if (ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(Op.getOperand(0))) @@ -462,10 +469,11 @@ static SDValue GetNegatedExpression(SDValue Op, SelectionDAG &DAG, case ISD::FMUL: case ISD::FDIV: - assert(!HonorSignDependentRoundingFPMath()); + assert(!DAG.getTarget().Options.HonorSignDependentRoundingFPMath()); // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y) - if (isNegatibleForFree(Op.getOperand(0), LegalOperations, Depth+1)) + if (isNegatibleForFree(Op.getOperand(0), LegalOperations, + &DAG.getTarget().Options, Depth+1)) return DAG.getNode(Op.getOpcode(), Op.getDebugLoc(), Op.getValueType(), GetNegatedExpression(Op.getOperand(0), DAG, LegalOperations, Depth+1), @@ -5254,20 +5262,22 @@ SDValue DAGCombiner::visitFADD(SDNode *N) { if (N0CFP && !N1CFP) return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N1, N0); // fold (fadd A, 0) -> A - if (UnsafeFPMath && N1CFP && N1CFP->getValueAPF().isZero()) + if (DAG.getTarget().Options.UnsafeFPMath && N1CFP && + N1CFP->getValueAPF().isZero()) return N0; // fold (fadd A, (fneg B)) -> (fsub A, B) - if (isNegatibleForFree(N1, LegalOperations) == 2) + if (isNegatibleForFree(N1, LegalOperations, &DAG.getTarget().Options) == 2) return DAG.getNode(ISD::FSUB, N->getDebugLoc(), VT, N0, GetNegatedExpression(N1, DAG, LegalOperations)); // fold (fadd (fneg A), B) -> (fsub B, A) - if (isNegatibleForFree(N0, LegalOperations) == 2) + if (isNegatibleForFree(N0, LegalOperations, &DAG.getTarget().Options) == 2) return DAG.getNode(ISD::FSUB, N->getDebugLoc(), VT, N1, GetNegatedExpression(N0, DAG, LegalOperations)); // If allowed, fold (fadd (fadd x, c1), c2) -> (fadd x, (fadd c1, c2)) - if (UnsafeFPMath && N1CFP && N0.getOpcode() == ISD::FADD && - N0.getNode()->hasOneUse() && isa<ConstantFPSDNode>(N0.getOperand(1))) + if (DAG.getTarget().Options.UnsafeFPMath && N1CFP && + N0.getOpcode() == ISD::FADD && N0.getNode()->hasOneUse() && + isa<ConstantFPSDNode>(N0.getOperand(1))) return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0.getOperand(0), DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0.getOperand(1), N1)); @@ -5292,17 +5302,19 @@ SDValue DAGCombiner::visitFSUB(SDNode *N) { if (N0CFP && N1CFP && VT != MVT::ppcf128) return DAG.getNode(ISD::FSUB, N->getDebugLoc(), VT, N0, N1); // fold (fsub A, 0) -> A - if (UnsafeFPMath && N1CFP && N1CFP->getValueAPF().isZero()) + if (DAG.getTarget().Options.UnsafeFPMath && + N1CFP && N1CFP->getValueAPF().isZero()) return N0; // fold (fsub 0, B) -> -B - if (UnsafeFPMath && N0CFP && N0CFP->getValueAPF().isZero()) { - if (isNegatibleForFree(N1, LegalOperations)) + if (DAG.getTarget().Options.UnsafeFPMath && + N0CFP && N0CFP->getValueAPF().isZero()) { + if (isNegatibleForFree(N1, LegalOperations, &DAG.getTarget().Options)) return GetNegatedExpression(N1, DAG, LegalOperations); if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT)) return DAG.getNode(ISD::FNEG, N->getDebugLoc(), VT, N1); } // fold (fsub A, (fneg B)) -> (fadd A, B) - if (isNegatibleForFree(N1, LegalOperations)) + if (isNegatibleForFree(N1, LegalOperations, &DAG.getTarget().Options)) return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0, GetNegatedExpression(N1, DAG, LegalOperations)); @@ -5329,10 +5341,12 @@ SDValue DAGCombiner::visitFMUL(SDNode *N) { if (N0CFP && !N1CFP) return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, N1, N0); // fold (fmul A, 0) -> 0 - if (UnsafeFPMath && N1CFP && N1CFP->getValueAPF().isZero()) + if (DAG.getTarget().Options.UnsafeFPMath && 
+ N1CFP && N1CFP->getValueAPF().isZero()) return N1; // fold (fmul A, 0) -> 0, vector edition. - if (UnsafeFPMath && ISD::isBuildVectorAllZeros(N1.getNode())) + if (DAG.getTarget().Options.UnsafeFPMath && + ISD::isBuildVectorAllZeros(N1.getNode())) return N1; // fold (fmul X, 2.0) -> (fadd X, X) if (N1CFP && N1CFP->isExactlyValue(+2.0)) @@ -5343,8 +5357,10 @@ SDValue DAGCombiner::visitFMUL(SDNode *N) { return DAG.getNode(ISD::FNEG, N->getDebugLoc(), VT, N0); // fold (fmul (fneg X), (fneg Y)) -> (fmul X, Y) - if (char LHSNeg = isNegatibleForFree(N0, LegalOperations)) { - if (char RHSNeg = isNegatibleForFree(N1, LegalOperations)) { + if (char LHSNeg = isNegatibleForFree(N0, LegalOperations, + &DAG.getTarget().Options)) { + if (char RHSNeg = isNegatibleForFree(N1, LegalOperations, + &DAG.getTarget().Options)) { // Both can be negated for free, check to see if at least one is cheaper // negated. if (LHSNeg == 2 || RHSNeg == 2) @@ -5355,7 +5371,8 @@ SDValue DAGCombiner::visitFMUL(SDNode *N) { } // If allowed, fold (fmul (fmul x, c1), c2) -> (fmul x, (fmul c1, c2)) - if (UnsafeFPMath && N1CFP && N0.getOpcode() == ISD::FMUL && + if (DAG.getTarget().Options.UnsafeFPMath && + N1CFP && N0.getOpcode() == ISD::FMUL && N0.getNode()->hasOneUse() && isa<ConstantFPSDNode>(N0.getOperand(1))) return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, N0.getOperand(0), DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, @@ -5383,8 +5400,10 @@ SDValue DAGCombiner::visitFDIV(SDNode *N) { // (fdiv (fneg X), (fneg Y)) -> (fdiv X, Y) - if (char LHSNeg = isNegatibleForFree(N0, LegalOperations)) { - if (char RHSNeg = isNegatibleForFree(N1, LegalOperations)) { + if (char LHSNeg = isNegatibleForFree(N0, LegalOperations, + &DAG.getTarget().Options)) { + if (char RHSNeg = isNegatibleForFree(N1, LegalOperations, + &DAG.getTarget().Options)) { // Both can be negated for free, check to see if at least one is cheaper // negated. if (LHSNeg == 2 || RHSNeg == 2) @@ -5637,7 +5656,7 @@ SDValue DAGCombiner::visitFNEG(SDNode *N) { SDValue N0 = N->getOperand(0); EVT VT = N->getValueType(0); - if (isNegatibleForFree(N0, LegalOperations)) + if (isNegatibleForFree(N0, LegalOperations, &DAG.getTarget().Options)) return GetNegatedExpression(N0, DAG, LegalOperations); // Transform fneg(bitconvert(x)) -> bitconvert(x^sign) to avoid loading diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index aa06955689..4487a9a5d2 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -2334,7 +2334,7 @@ bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const { bool SelectionDAG::isKnownNeverNaN(SDValue Op) const { // If we're told that NaNs won't happen, assume they won't. - if (NoNaNsFPMath) + if (getTarget().Options.NoNaNsFPMath) return true; // If the value is a constant, we can obviously see if it is a NaN or not. 
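The DAG folds above now read UnsafeFPMath and related flags from DAG.getTarget().Options instead of globals, and isNegatibleForFree takes the options explicitly. A sketch of the new calling pattern, written as a hypothetical helper inside DAGCombiner.cpp where those static functions are visible; the real bodies are in the visitFADD/visitFSUB hunks above:

    static SDValue tryFoldFAddOfFNeg(SDNode *N, SelectionDAG &DAG,
                                     bool LegalOperations) {  // hypothetical helper
      SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
      const TargetOptions *Opts = &DAG.getTarget().Options;   // was a global read
      // fold (fadd A, (fneg B)) -> (fsub A, B) when the negation is strictly cheaper
      if (isNegatibleForFree(N1, LegalOperations, Opts) == 2)
        return DAG.getNode(ISD::FSUB, N->getDebugLoc(), N->getValueType(0), N0,
                           GetNegatedExpression(N1, DAG, LegalOperations));
      return SDValue();
    }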
@@ -2607,7 +2607,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, break; case ISD::FNEG: // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0 - if (UnsafeFPMath && OpOpcode == ISD::FSUB) + if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB) return getNode(ISD::FSUB, DL, VT, Operand.getNode()->getOperand(1), Operand.getNode()->getOperand(0)); if (OpOpcode == ISD::FNEG) // --X -> X @@ -2742,7 +2742,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT, case ISD::FMUL: case ISD::FDIV: case ISD::FREM: - if (UnsafeFPMath) { + if (getTarget().Options.UnsafeFPMath) { if (Opcode == ISD::FADD) { // 0+x --> x if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1)) @@ -3065,7 +3065,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT, case ISD::FMUL: case ISD::FDIV: case ISD::FREM: - if (UnsafeFPMath) + if (getTarget().Options.UnsafeFPMath) return N2; break; case ISD::MUL: diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index b3b653b9d2..a77401e359 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -1335,6 +1335,8 @@ SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond, Condition = getICmpCondCode(IC->getPredicate()); } else if (const FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) { Condition = getFCmpCondCode(FC->getPredicate()); + if (TM.Options.NoNaNsFPMath) + Condition = getFCmpCodeWithoutNaN(Condition); } else { Condition = ISD::SETEQ; // silence warning. llvm_unreachable("Unknown compare instruction"); @@ -2002,7 +2004,7 @@ bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR, } static inline bool areJTsAllowed(const TargetLowering &TLI) { - return !DisableJumpTables && + return !TLI.getTargetMachine().Options.DisableJumpTables && (TLI.isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) || TLI.isOperationLegalOrCustom(ISD::BRIND, MVT::Other)); } @@ -2625,6 +2627,8 @@ void SelectionDAGBuilder::visitFCmp(const User &I) { SDValue Op1 = getValue(I.getOperand(0)); SDValue Op2 = getValue(I.getOperand(1)); ISD::CondCode Condition = getFCmpCondCode(predicate); + if (TM.Options.NoNaNsFPMath) + Condition = getFCmpCodeWithoutNaN(Condition); EVT DestVT = TLI.getValueType(I.getType()); setValue(&I, DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Condition)); } @@ -5059,7 +5063,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) { } case Intrinsic::trap: { - StringRef TrapFuncName = getTrapFunctionName(); + StringRef TrapFuncName = TM.Options.getTrapFunctionName(); if (TrapFuncName.empty()) { DAG.setRoot(DAG.getNode(ISD::TRAP, dl,MVT::Other, getRoot())); return 0; @@ -5221,7 +5225,7 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee, // If there's a possibility that fast-isel has already selected some amount // of the current basic block, don't emit a tail call. - if (isTailCall && EnableFastISel) + if (isTailCall && TM.Options.EnableFastISel) isTailCall = false; std::pair<SDValue,SDValue> Result = @@ -6511,10 +6515,10 @@ SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) { /// isOnlyUsedInEntryBlock - If the specified argument is only used in the /// entry block, return true. This includes arguments used by switches, since /// the switch may expand into multiple basic blocks. 
-static bool isOnlyUsedInEntryBlock(const Argument *A) { +static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) { // With FastISel active, we may be splitting blocks, so force creation // of virtual registers for all non-dead arguments. - if (EnableFastISel) + if (FastISel) return A->use_empty(); const BasicBlock *Entry = A->getParent()->begin(); @@ -6704,7 +6708,7 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) { SDB->getCurDebugLoc()); SDB->setValue(I, Res); - if (!EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) { + if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) { if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(Res.getOperand(0).getNode())) if (FrameIndexSDNode *FI = @@ -6714,7 +6718,7 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) { // If this argument is live outside of the entry block, insert a copy from // wherever we got it to the vreg that other BB's will reference it as. - if (!EnableFastISel && Res.getOpcode() == ISD::CopyFromReg) { + if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::CopyFromReg) { // If we can, though, try to skip creating an unnecessary vreg. // FIXME: This isn't very clean... it would be nice to make this more // general. It's also subtly incompatible with the hacks FastISel @@ -6725,7 +6729,7 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) { continue; } } - if (!isOnlyUsedInEntryBlock(I)) { + if (!isOnlyUsedInEntryBlock(I, TM.Options.EnableFastISel)) { FuncInfo->InitializeRegForValue(I); SDB->CopyToExportRegsIfNeeded(I); } diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp index 8cecc17d12..b74142dc80 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp @@ -256,9 +256,9 @@ static void SplitCriticalSideEffectEdges(Function &Fn, Pass *SDISel) { bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) { // Do some sanity-checking on the command-line options. - assert((!EnableFastISelVerbose || EnableFastISel) && + assert((!EnableFastISelVerbose || TM.Options.EnableFastISel) && "-fast-isel-verbose requires -fast-isel"); - assert((!EnableFastISelAbort || EnableFastISel) && + assert((!EnableFastISelAbort || TM.Options.EnableFastISel) && "-fast-isel-abort requires -fast-isel"); const Function &Fn = *mf.getFunction(); @@ -823,7 +823,7 @@ static bool isFoldedOrDeadInstruction(const Instruction *I, void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) { // Initialize the Fast-ISel state, if needed. FastISel *FastIS = 0; - if (EnableFastISel) + if (TM.Options.EnableFastISel) FastIS = TLI.createFastISel(*FuncInfo); // Iterate over all basic blocks in the function. diff --git a/lib/ExecutionEngine/JIT/JITDebugRegisterer.cpp b/lib/ExecutionEngine/JIT/JITDebugRegisterer.cpp index 2e9096883b..abb70fb707 100644 --- a/lib/ExecutionEngine/JIT/JITDebugRegisterer.cpp +++ b/lib/ExecutionEngine/JIT/JITDebugRegisterer.cpp @@ -115,7 +115,7 @@ std::string JITDebugRegisterer::MakeELF(const Function *F, DebugInfo &I) { // When trying to debug why GDB isn't getting the debug info right, it's // awfully helpful to write the object file to disk so that it can be // inspected with readelf and objdump. 
- if (JITEmitDebugInfoToDisk) { + if (TM.Options.JITEmitDebugInfoToDisk) { std::string Filename; raw_string_ostream O2(Filename); O2 << "/tmp/llvm_function_" << I.FnStart << "_" << F->getName() << ".o"; diff --git a/lib/ExecutionEngine/JIT/JITEmitter.cpp b/lib/ExecutionEngine/JIT/JITEmitter.cpp index 24020ee6d6..d9fa509afb 100644 --- a/lib/ExecutionEngine/JIT/JITEmitter.cpp +++ b/lib/ExecutionEngine/JIT/JITEmitter.cpp @@ -362,10 +362,16 @@ namespace { /// Instance of the JIT JIT *TheJIT; + bool JITExceptionHandling; + + bool JITEmitDebugInfo; + public: JITEmitter(JIT &jit, JITMemoryManager *JMM, TargetMachine &TM) : SizeEstimate(0), Resolver(jit, *this), MMI(0), CurFn(0), - EmittedFunctions(this), TheJIT(&jit) { + EmittedFunctions(this), TheJIT(&jit), + JITExceptionHandling(TM.Options.JITExceptionHandling), + JITEmitDebugInfo(TM.Options.JITEmitDebugInfo) { MemMgr = JMM ? JMM : JITMemoryManager::CreateDefaultMemManager(); if (jit.getJITInfo().needsGOT()) { MemMgr->AllocateGOT(); @@ -1037,7 +1043,7 @@ void JITEmitter::deallocateMemForFunction(const Function *F) { EmittedFunctions.erase(Emitted); } - if(JITExceptionHandling) { + if (JITExceptionHandling) { TheJIT->DeregisterTable(F); } @@ -1047,7 +1053,7 @@ void JITEmitter::deallocateMemForFunction(const Function *F) { } -void* JITEmitter::allocateSpace(uintptr_t Size, unsigned Alignment) { +void *JITEmitter::allocateSpace(uintptr_t Size, unsigned Alignment) { if (BufferBegin) return JITCodeEmitter::allocateSpace(Size, Alignment); @@ -1059,7 +1065,7 @@ void* JITEmitter::allocateSpace(uintptr_t Size, unsigned Alignment) { return CurBufferPtr; } -void* JITEmitter::allocateGlobal(uintptr_t Size, unsigned Alignment) { +void *JITEmitter::allocateGlobal(uintptr_t Size, unsigned Alignment) { // Delegate this call through the memory manager. return MemMgr->allocateGlobal(Size, Alignment); } diff --git a/lib/ExecutionEngine/TargetSelect.cpp b/lib/ExecutionEngine/TargetSelect.cpp index cf2d9ff561..9a0d41d48e 100644 --- a/lib/ExecutionEngine/TargetSelect.cpp +++ b/lib/ExecutionEngine/TargetSelect.cpp @@ -86,8 +86,10 @@ TargetMachine *EngineBuilder::selectTarget(Module *Mod, } // Allocate a target... + TargetOptions Options; TargetMachine *Target = TheTarget->createTargetMachine(TheTriple.getTriple(), MCPU, FeaturesStr, + Options, RM, CM, OL); assert(Target && "Could not allocate target machine!"); return Target; diff --git a/lib/Target/ARM/ARMAsmPrinter.cpp b/lib/Target/ARM/ARMAsmPrinter.cpp index bbca228a0c..eca6039abf 100644 --- a/lib/Target/ARM/ARMAsmPrinter.cpp +++ b/lib/Target/ARM/ARMAsmPrinter.cpp @@ -739,14 +739,14 @@ void ARMAsmPrinter::emitAttributes() { } // Signal various FP modes. - if (!UnsafeFPMath) { + if (!TM.Options.UnsafeFPMath) { AttrEmitter->EmitAttribute(ARMBuildAttrs::ABI_FP_denormal, ARMBuildAttrs::Allowed); AttrEmitter->EmitAttribute(ARMBuildAttrs::ABI_FP_exceptions, ARMBuildAttrs::Allowed); } - if (NoInfsFPMath && NoNaNsFPMath) + if (TM.Options.NoInfsFPMath && TM.Options.NoNaNsFPMath) AttrEmitter->EmitAttribute(ARMBuildAttrs::ABI_FP_number_model, ARMBuildAttrs::Allowed); else @@ -759,7 +759,7 @@ void ARMAsmPrinter::emitAttributes() { AttrEmitter->EmitAttribute(ARMBuildAttrs::ABI_align8_preserved, 1); // Hard float. Use both S and D registers and conform to AAPCS-VFP. 
- if (Subtarget->isAAPCS_ABI() && FloatABIType == FloatABI::Hard) { + if (Subtarget->isAAPCS_ABI() && TM.Options.FloatABIType == FloatABI::Hard) { AttrEmitter->EmitAttribute(ARMBuildAttrs::ABI_HardFP_use, 3); AttrEmitter->EmitAttribute(ARMBuildAttrs::ABI_VFP_args, 1); } @@ -1934,4 +1934,3 @@ extern "C" void LLVMInitializeARMAsmPrinter() { RegisterAsmPrinter<ARMAsmPrinter> X(TheARMTarget); RegisterAsmPrinter<ARMAsmPrinter> Y(TheThumbTarget); } - diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/lib/Target/ARM/ARMBaseRegisterInfo.cpp index 7c42342229..8ee6ce29d9 100644 --- a/lib/Target/ARM/ARMBaseRegisterInfo.cpp +++ b/lib/Target/ARM/ARMBaseRegisterInfo.cpp @@ -631,7 +631,7 @@ bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const { // 1. Dynamic stack realignment is explicitly disabled, // 2. This is a Thumb1 function (it's not useful, so we don't bother), or // 3. There are VLAs in the function and the base pointer is disabled. - return (RealignStack && !AFI->isThumb1OnlyFunction() && + return (MF.getTarget().Options.RealignStack && !AFI->isThumb1OnlyFunction() && (!MFI->hasVarSizedObjects() || EnableBasePointer)); } @@ -649,7 +649,7 @@ needsStackRealignment(const MachineFunction &MF) const { bool ARMBaseRegisterInfo:: cannotEliminateFrame(const MachineFunction &MF) const { const MachineFrameInfo *MFI = MF.getFrameInfo(); - if (DisableFramePointerElim(MF) && MFI->adjustsStack()) + if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI->adjustsStack()) return true; return MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken() || needsStackRealignment(MF); diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp index 086eeb9ebc..f43f084c03 100644 --- a/lib/Target/ARM/ARMFastISel.cpp +++ b/lib/Target/ARM/ARMFastISel.cpp @@ -1716,7 +1716,7 @@ CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, bool Return) { // Use target triple & subtarget features to do actual dispatch. if (Subtarget->isAAPCS_ABI()) { if (Subtarget->hasVFP2() && - FloatABIType == FloatABI::Hard) + TM.Options.FloatABIType == FloatABI::Hard) return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP); else return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS); diff --git a/lib/Target/ARM/ARMFrameLowering.cpp b/lib/Target/ARM/ARMFrameLowering.cpp index 2d1de6fe8e..f2eacdc632 100644 --- a/lib/Target/ARM/ARMFrameLowering.cpp +++ b/lib/Target/ARM/ARMFrameLowering.cpp @@ -37,7 +37,8 @@ bool ARMFrameLowering::hasFP(const MachineFunction &MF) const { const MachineFrameInfo *MFI = MF.getFrameInfo(); // Always eliminate non-leaf frame pointers. 
- return ((DisableFramePointerElim(MF) && MFI->hasCalls()) || + return ((MF.getTarget().Options.DisableFramePointerElim(MF) && + MFI->hasCalls()) || RegInfo->needsStackRealignment(MF) || MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken()); diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp index 9b996460a7..e9f70d5ace 100644 --- a/lib/Target/ARM/ARMISelLowering.cpp +++ b/lib/Target/ARM/ARMISelLowering.cpp @@ -432,7 +432,8 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM) addRegisterClass(MVT::i32, ARM::tGPRRegisterClass); else addRegisterClass(MVT::i32, ARM::GPRRegisterClass); - if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) { + if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() && + !Subtarget->isThumb1Only()) { addRegisterClass(MVT::f32, ARM::SPRRegisterClass); if (!Subtarget->isFPOnlySP()) addRegisterClass(MVT::f64, ARM::DPRRegisterClass); @@ -674,7 +675,8 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM) } setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); - if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) { + if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() && + !Subtarget->isThumb1Only()) { // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR // iff target supports vfp2. setOperationAction(ISD::BITCAST, MVT::i64, Custom); @@ -712,7 +714,8 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM) setOperationAction(ISD::FCOS, MVT::f64, Expand); setOperationAction(ISD::FREM, MVT::f64, Expand); setOperationAction(ISD::FREM, MVT::f32, Expand); - if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) { + if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() && + !Subtarget->isThumb1Only()) { setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom); setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); } @@ -723,7 +726,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM) setOperationAction(ISD::FMA, MVT::f32, Expand); // Various VFP goodness - if (!UseSoftFloat && !Subtarget->isThumb1Only()) { + if (!TM.Options.UseSoftFloat && !Subtarget->isThumb1Only()) { // int <-> fp are custom expanded into bit_convert + ARMISD ops. if (Subtarget->hasVFP2()) { setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); @@ -751,7 +754,8 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM) setStackPointerRegisterToSaveRestore(ARM::SP); - if (UseSoftFloat || Subtarget->isThumb1Only() || !Subtarget->hasVFP2()) + if (TM.Options.UseSoftFloat || Subtarget->isThumb1Only() || + !Subtarget->hasVFP2()) setSchedulingPreference(Sched::RegPressure); else setSchedulingPreference(Sched::Hybrid); @@ -1092,7 +1096,8 @@ CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC, if (!Subtarget->isAAPCS_ABI()) return (Return ? RetCC_ARM_APCS : CC_ARM_APCS); else if (Subtarget->hasVFP2() && - FloatABIType == FloatABI::Hard && !isVarArg) + getTargetMachine().Options.FloatABIType == FloatABI::Hard && + !isVarArg) return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP); return (Return ? 
RetCC_ARM_AAPCS : CC_ARM_AAPCS); } @@ -2951,7 +2956,7 @@ SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64); - if (UnsafeFPMath && + if (getTargetMachine().Options.UnsafeFPMath && (CC == ISD::SETEQ || CC == ISD::SETOEQ || CC == ISD::SETNE || CC == ISD::SETUNE)) { SDValue Result = OptimizeVFPBrcond(Op, DAG); @@ -7948,7 +7953,7 @@ static SDValue PerformSELECT_CCCombine(SDNode *N, SelectionDAG &DAG, // will return -0, so vmin can only be used for unsafe math or if one of // the operands is known to be nonzero. if ((CC == ISD::SETLE || CC == ISD::SETOLE || CC == ISD::SETULE) && - !UnsafeFPMath && + !DAG.getTarget().Options.UnsafeFPMath && !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) break; Opcode = IsReversed ? ARMISD::FMAX : ARMISD::FMIN; @@ -7970,7 +7975,7 @@ static SDValue PerformSELECT_CCCombine(SDNode *N, SelectionDAG &DAG, // will return +0, so vmax can only be used for unsafe math or if one of // the operands is known to be nonzero. if ((CC == ISD::SETGE || CC == ISD::SETOGE || CC == ISD::SETUGE) && - !UnsafeFPMath && + !DAG.getTarget().Options.UnsafeFPMath && !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) break; Opcode = IsReversed ? ARMISD::FMIN : ARMISD::FMAX; diff --git a/lib/Target/ARM/ARMTargetMachine.cpp b/lib/Target/ARM/ARMTargetMachine.cpp index 6cbb24b4bd..963cc472f3 100644 --- a/lib/Target/ARM/ARMTargetMachine.cpp +++ b/lib/Target/ARM/ARMTargetMachine.cpp @@ -38,22 +38,25 @@ extern "C" void LLVMInitializeARMTarget() { /// ARMBaseTargetMachine::ARMBaseTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, + const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL) - : LLVMTargetMachine(T, TT, CPU, FS, RM, CM, OL), + : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL), Subtarget(TT, CPU, FS), JITInfo(), InstrItins(Subtarget.getInstrItineraryData()) { // Default to soft float ABI - if (FloatABIType == FloatABI::Default) - FloatABIType = FloatABI::Soft; + if (Options.FloatABIType == FloatABI::Default) + this->Options.FloatABIType = FloatABI::Soft; } ARMTargetMachine::ARMTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, + const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL) - : ARMBaseTargetMachine(T, TT, CPU, FS, RM, CM, OL), InstrInfo(Subtarget), + : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL), + InstrInfo(Subtarget), DataLayout(Subtarget.isAPCS_ABI() ? std::string("e-p:32:32-f64:32:64-i64:32:64-" "v128:32:128-v64:32:64-n32-S32") : @@ -73,9 +76,10 @@ ARMTargetMachine::ARMTargetMachine(const Target &T, StringRef TT, ThumbTargetMachine::ThumbTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, + const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL) - : ARMBaseTargetMachine(T, TT, CPU, FS, RM, CM, OL), + : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL), InstrInfo(Subtarget.hasThumb2() ? 
((ARMBaseInstrInfo*)new Thumb2InstrInfo(Subtarget)) : ((ARMBaseInstrInfo*)new Thumb1InstrInfo(Subtarget))), diff --git a/lib/Target/ARM/ARMTargetMachine.h b/lib/Target/ARM/ARMTargetMachine.h index a1f517b0f4..cd77822a33 100644 --- a/lib/Target/ARM/ARMTargetMachine.h +++ b/lib/Target/ARM/ARMTargetMachine.h @@ -41,6 +41,7 @@ private: public: ARMBaseTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, + const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL); @@ -71,6 +72,7 @@ class ARMTargetMachine : public ARMBaseTargetMachine { public: ARMTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, + const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL); @@ -112,6 +114,7 @@ class ThumbTargetMachine : public ARMBaseTargetMachine { public: ThumbTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, + const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL); diff --git a/lib/Target/CBackend/CTargetMachine.h b/lib/Target/CBackend/CTargetMachine.h index ca346af539..8b2286ed6b 100644 --- a/lib/Target/CBackend/CTargetMachine.h +++ b/lib/Target/CBackend/CTargetMachine.h @@ -21,10 +21,10 @@ namespace llvm { struct CTargetMachine : public TargetMachine { CTargetMachine(const Target &T, StringRef TT, - StringRef CPU, StringRef FS, + StringRef CPU, StringRef FS, const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL) - : TargetMachine(T, TT, CPU, FS) {} + : TargetMachine(T, TT, CPU, FS, Options) { } virtual bool addPassesToEmitFile(PassManagerBase &PM, formatted_raw_ostream &Out, diff --git a/lib/Target/CellSPU/SPUFrameLowering.cpp b/lib/Target/CellSPU/SPUFrameLowering.cpp index 093f99f287..916f9bad37 100644 --- a/lib/Target/CellSPU/SPUFrameLowering.cpp +++ b/lib/Target/CellSPU/SPUFrameLowering.cpp @@ -47,7 +47,8 @@ bool SPUFrameLowering::hasFP(const MachineFunction &MF) const { const MachineFrameInfo *MFI = MF.getFrameInfo(); return MFI->getStackSize() && - (DisableFramePointerElim(MF) || MFI->hasVarSizedObjects()); + (MF.getTarget().Options.DisableFramePointerElim(MF) || + MFI->hasVarSizedObjects()); } diff --git a/lib/Target/CellSPU/SPUTargetMachine.cpp b/lib/Target/CellSPU/SPUTargetMachine.cpp index 69403160ac..1e922a4efd 100644 --- a/lib/Target/CellSPU/SPUTargetMachine.cpp +++ b/lib/Target/CellSPU/SPUTargetMachine.cpp @@ -34,9 +34,10 @@ SPUFrameLowering::getCalleeSaveSpillSlots(unsigned &NumEntries) const { SPUTargetMachine::SPUTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, + const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL) - : LLVMTargetMachine(T, TT, CPU, FS, RM, CM, OL), + : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL), Subtarget(TT, CPU, FS), DataLayout(Subtarget.getTargetDataString()), InstrInfo(*this), diff --git a/lib/Target/CellSPU/SPUTargetMachine.h b/lib/Target/CellSPU/SPUTargetMachine.h index 909f12e4ff..0841feef32 100644 --- a/lib/Target/CellSPU/SPUTargetMachine.h +++ b/lib/Target/CellSPU/SPUTargetMachine.h @@ -39,7 +39,7 @@ class SPUTargetMachine : public LLVMTargetMachine { InstrItineraryData InstrItins; public: SPUTargetMachine(const Target &T, StringRef TT, - StringRef CPU, StringRef FS, + StringRef CPU, StringRef FS, const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL); diff --git a/lib/Target/CppBackend/CPPTargetMachine.h b/lib/Target/CppBackend/CPPTargetMachine.h index 
a3613b40bd..92bca6c3c7 100644 --- a/lib/Target/CppBackend/CPPTargetMachine.h +++ b/lib/Target/CppBackend/CPPTargetMachine.h @@ -23,10 +23,10 @@ class formatted_raw_ostream; struct CPPTargetMachine : public TargetMachine { CPPTargetMachine(const Target &T, StringRef TT, - StringRef CPU, StringRef FS, + StringRef CPU, StringRef FS, const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL) - : TargetMachine(T, TT, CPU, FS) {} + : TargetMachine(T, TT, CPU, FS, Options) {} virtual bool addPassesToEmitFile(PassManagerBase &PM, formatted_raw_ostream &Out, diff --git a/lib/Target/MBlaze/MBlazeFrameLowering.cpp b/lib/Target/MBlaze/MBlazeFrameLowering.cpp index fc3cd0204e..37919bce27 100644 --- a/lib/Target/MBlaze/MBlazeFrameLowering.cpp +++ b/lib/Target/MBlaze/MBlazeFrameLowering.cpp @@ -334,7 +334,8 @@ int MBlazeFrameLowering::getFrameIndexOffset(const MachineFunction &MF, int FI) // if frame pointer elimination is disabled. bool MBlazeFrameLowering::hasFP(const MachineFunction &MF) const { const MachineFrameInfo *MFI = MF.getFrameInfo(); - return DisableFramePointerElim(MF) || MFI->hasVarSizedObjects(); + return MF.getTarget().Options.DisableFramePointerElim(MF) || + MFI->hasVarSizedObjects(); } void MBlazeFrameLowering::emitPrologue(MachineFunction &MF) const { diff --git a/lib/Target/MBlaze/MBlazeTargetMachine.cpp b/lib/Target/MBlaze/MBlazeTargetMachine.cpp index 4ad7bd6343..5ed81dd28b 100644 --- a/lib/Target/MBlaze/MBlazeTargetMachine.cpp +++ b/lib/Target/MBlaze/MBlazeTargetMachine.cpp @@ -33,16 +33,16 @@ extern "C" void LLVMInitializeMBlazeTarget() { // an easier handling. MBlazeTargetMachine:: MBlazeTargetMachine(const Target &T, StringRef TT, - StringRef CPU, StringRef FS, + StringRef CPU, StringRef FS, const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, - CodeGenOpt::Level OL): - LLVMTargetMachine(T, TT, CPU, FS, RM, CM, OL), - Subtarget(TT, CPU, FS), - DataLayout("E-p:32:32:32-i8:8:8-i16:16:16"), - InstrInfo(*this), - FrameLowering(Subtarget), - TLInfo(*this), TSInfo(*this), ELFWriterInfo(*this), - InstrItins(Subtarget.getInstrItineraryData()) { + CodeGenOpt::Level OL) + : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL), + Subtarget(TT, CPU, FS), + DataLayout("E-p:32:32:32-i8:8:8-i16:16:16"), + InstrInfo(*this), + FrameLowering(Subtarget), + TLInfo(*this), TSInfo(*this), ELFWriterInfo(*this), + InstrItins(Subtarget.getInstrItineraryData()) { } // Install an instruction selector pass using diff --git a/lib/Target/MBlaze/MBlazeTargetMachine.h b/lib/Target/MBlaze/MBlazeTargetMachine.h index 1c1aa530f9..036f1b6cf5 100644 --- a/lib/Target/MBlaze/MBlazeTargetMachine.h +++ b/lib/Target/MBlaze/MBlazeTargetMachine.h @@ -43,6 +43,7 @@ namespace llvm { public: MBlazeTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, + const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL); diff --git a/lib/Target/MSP430/MSP430FrameLowering.cpp b/lib/Target/MSP430/MSP430FrameLowering.cpp index c99f4ab6c2..3fc7c10c36 100644 --- a/lib/Target/MSP430/MSP430FrameLowering.cpp +++ b/lib/Target/MSP430/MSP430FrameLowering.cpp @@ -29,7 +29,7 @@ using namespace llvm; bool MSP430FrameLowering::hasFP(const MachineFunction &MF) const { const MachineFrameInfo *MFI = MF.getFrameInfo(); - return (DisableFramePointerElim(MF) || + return (MF.getTarget().Options.DisableFramePointerElim(MF) || MF.getFrameInfo()->hasVarSizedObjects() || MFI->isFrameAddressTaken()); } diff --git a/lib/Target/MSP430/MSP430TargetMachine.cpp 
b/lib/Target/MSP430/MSP430TargetMachine.cpp index fe185fb4ea..a0fc3daa3c 100644 --- a/lib/Target/MSP430/MSP430TargetMachine.cpp +++ b/lib/Target/MSP430/MSP430TargetMachine.cpp @@ -28,9 +28,10 @@ MSP430TargetMachine::MSP430TargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, + const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL) - : LLVMTargetMachine(T, TT, CPU, FS, RM, CM, OL), + : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL), Subtarget(TT, CPU, FS), // FIXME: Check TargetData string. DataLayout("e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16"), diff --git a/lib/Target/MSP430/MSP430TargetMachine.h b/lib/Target/MSP430/MSP430TargetMachine.h index 4fb060f793..28d482a28f 100644 --- a/lib/Target/MSP430/MSP430TargetMachine.h +++ b/lib/Target/MSP430/MSP430TargetMachine.h @@ -39,7 +39,7 @@ class MSP430TargetMachine : public LLVMTargetMachine { public: MSP430TargetMachine(const Target &T, StringRef TT, - StringRef CPU, StringRef FS, + StringRef CPU, StringRef FS, const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL); diff --git a/lib/Target/Mips/MipsFrameLowering.cpp b/lib/Target/Mips/MipsFrameLowering.cpp index 36aef99eaf..246654580e 100644 --- a/lib/Target/Mips/MipsFrameLowering.cpp +++ b/lib/Target/Mips/MipsFrameLowering.cpp @@ -85,8 +85,8 @@ using namespace llvm; // if frame pointer elimination is disabled. bool MipsFrameLowering::hasFP(const MachineFunction &MF) const { const MachineFrameInfo *MFI = MF.getFrameInfo(); - return DisableFramePointerElim(MF) || MFI->hasVarSizedObjects() - || MFI->isFrameAddressTaken(); + return MF.getTarget().Options.DisableFramePointerElim(MF) || + MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken(); } bool MipsFrameLowering::targetHandlesStackFrameRounding() const { diff --git a/lib/Target/Mips/MipsTargetMachine.cpp b/lib/Target/Mips/MipsTargetMachine.cpp index 5d6b24f434..02887fa9a4 100644 --- a/lib/Target/Mips/MipsTargetMachine.cpp +++ b/lib/Target/Mips/MipsTargetMachine.cpp @@ -34,51 +34,51 @@ extern "C" void LLVMInitializeMipsTarget() { // Using CodeModel::Large enables different CALL behavior. MipsTargetMachine:: MipsTargetMachine(const Target &T, StringRef TT, - StringRef CPU, StringRef FS, + StringRef CPU, StringRef FS, const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL, - bool isLittle): - LLVMTargetMachine(T, TT, CPU, FS, RM, CM, OL), - Subtarget(TT, CPU, FS, isLittle), - DataLayout(isLittle ? - (Subtarget.isABI_N64() ? - "e-p:64:64:64-i8:8:32-i16:16:32-i64:64:64-f128:128:128-n32" : - "e-p:32:32:32-i8:8:32-i16:16:32-i64:64:64-n32") : - (Subtarget.isABI_N64() ? - "E-p:64:64:64-i8:8:32-i16:16:32-i64:64:64-f128:128:128-n32" : - "E-p:32:32:32-i8:8:32-i16:16:32-i64:64:64-n32")), - InstrInfo(*this), - FrameLowering(Subtarget), - TLInfo(*this), TSInfo(*this), JITInfo() { + bool isLittle) + : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL), + Subtarget(TT, CPU, FS, isLittle), + DataLayout(isLittle ? + (Subtarget.isABI_N64() ? + "e-p:64:64:64-i8:8:32-i16:16:32-i64:64:64-f128:128:128-n32" : + "e-p:32:32:32-i8:8:32-i16:16:32-i64:64:64-n32") : + (Subtarget.isABI_N64() ? 
+ "E-p:64:64:64-i8:8:32-i16:16:32-i64:64:64-f128:128:128-n32" : + "E-p:32:32:32-i8:8:32-i16:16:32-i64:64:64-n32")), + InstrInfo(*this), + FrameLowering(Subtarget), + TLInfo(*this), TSInfo(*this), JITInfo() { } MipsebTargetMachine:: MipsebTargetMachine(const Target &T, StringRef TT, - StringRef CPU, StringRef FS, + StringRef CPU, StringRef FS, const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, - CodeGenOpt::Level OL) : - MipsTargetMachine(T, TT, CPU, FS, RM, CM, OL, false) {} + CodeGenOpt::Level OL) + : MipsTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {} MipselTargetMachine:: MipselTargetMachine(const Target &T, StringRef TT, - StringRef CPU, StringRef FS, + StringRef CPU, StringRef FS, const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, - CodeGenOpt::Level OL) : - MipsTargetMachine(T, TT, CPU, FS, RM, CM, OL, true) {} + CodeGenOpt::Level OL) + : MipsTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {} Mips64ebTargetMachine:: Mips64ebTargetMachine(const Target &T, StringRef TT, - StringRef CPU, StringRef FS, + StringRef CPU, StringRef FS, const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, - CodeGenOpt::Level OL) : - MipsTargetMachine(T, TT, CPU, FS, RM, CM, OL, false) {} + CodeGenOpt::Level OL) + : MipsTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {} Mips64elTargetMachine:: Mips64elTargetMachine(const Target &T, StringRef TT, - StringRef CPU, StringRef FS, + StringRef CPU, StringRef FS, const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, - CodeGenOpt::Level OL) : - MipsTargetMachine(T, TT, CPU, FS, RM, CM, OL, true) {} + CodeGenOpt::Level OL) + : MipsTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {} // Install an instruction selector pass using // the ISelDag to gen Mips code. 
@@ -120,4 +120,3 @@ bool MipsTargetMachine::addCodeEmitter(PassManagerBase &PM, PM.add(createMipsJITCodeEmitterPass(*this, JCE)); return false; } - diff --git a/lib/Target/Mips/MipsTargetMachine.h b/lib/Target/Mips/MipsTargetMachine.h index e40d9e256d..6842373f15 100644 --- a/lib/Target/Mips/MipsTargetMachine.h +++ b/lib/Target/Mips/MipsTargetMachine.h @@ -38,7 +38,7 @@ namespace llvm { public: MipsTargetMachine(const Target &T, StringRef TT, - StringRef CPU, StringRef FS, + StringRef CPU, StringRef FS, const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL, bool isLittle); @@ -82,7 +82,7 @@ namespace llvm { class MipsebTargetMachine : public MipsTargetMachine { public: MipsebTargetMachine(const Target &T, StringRef TT, - StringRef CPU, StringRef FS, + StringRef CPU, StringRef FS, const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL); }; @@ -92,7 +92,7 @@ public: class MipselTargetMachine : public MipsTargetMachine { public: MipselTargetMachine(const Target &T, StringRef TT, - StringRef CPU, StringRef FS, + StringRef CPU, StringRef FS, const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL); }; @@ -103,6 +103,7 @@ class Mips64ebTargetMachine : public MipsTargetMachine { public: Mips64ebTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, + const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL); }; @@ -113,6 +114,7 @@ class Mips64elTargetMachine : public MipsTargetMachine { public: Mips64elTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, + const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL); }; diff --git a/lib/Target/PTX/PTXTargetMachine.cpp b/lib/Target/PTX/PTXTargetMachine.cpp index 292ea5e085..4efdc2784d 100644 --- a/lib/Target/PTX/PTXTargetMachine.cpp +++ b/lib/Target/PTX/PTXTargetMachine.cpp @@ -67,30 +67,16 @@ namespace { "e-p:32:32-i64:32:32-f64:32:32-v128:32:128-v64:32:64-n32:64"; const char* DataLayout64 = "e-p:64:64-i64:32:32-f64:32:32-v128:32:128-v64:32:64-n32:64"; - - // Copied from LLVMTargetMachine.cpp - void printNoVerify(PassManagerBase &PM, const char *Banner) { - if (PrintMachineCode) - PM.add(createMachineFunctionPrinterPass(dbgs(), Banner)); - } - - void printAndVerify(PassManagerBase &PM, - const char *Banner) { - if (PrintMachineCode) - PM.add(createMachineFunctionPrinterPass(dbgs(), Banner)); - - //if (VerifyMachineCode) - // PM.add(createMachineVerifierPass(Banner)); - } } // DataLayout and FrameLowering are filled with dummy data PTXTargetMachine::PTXTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, + const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL, bool is64Bit) - : LLVMTargetMachine(T, TT, CPU, FS, RM, CM, OL), + : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL), DataLayout(is64Bit ? 
DataLayout64 : DataLayout32), Subtarget(TT, CPU, FS, is64Bit), FrameLowering(Subtarget), @@ -101,16 +87,18 @@ PTXTargetMachine::PTXTargetMachine(const Target &T, PTX32TargetMachine::PTX32TargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, + const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL) - : PTXTargetMachine(T, TT, CPU, FS, RM, CM, OL, false) { + : PTXTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) { } PTX64TargetMachine::PTX64TargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, + const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL) - : PTXTargetMachine(T, TT, CPU, FS, RM, CM, OL, true) { + : PTXTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) { } bool PTXTargetMachine::addInstSelector(PassManagerBase &PM) { diff --git a/lib/Target/PTX/PTXTargetMachine.h b/lib/Target/PTX/PTXTargetMachine.h index 19f6c0fdd0..22911f76a1 100644 --- a/lib/Target/PTX/PTXTargetMachine.h +++ b/lib/Target/PTX/PTXTargetMachine.h @@ -35,7 +35,7 @@ class PTXTargetMachine : public LLVMTargetMachine { public: PTXTargetMachine(const Target &T, StringRef TT, - StringRef CPU, StringRef FS, + StringRef CPU, StringRef FS, const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL, bool is64Bit); @@ -94,7 +94,7 @@ class PTX32TargetMachine : public PTXTargetMachine { public: PTX32TargetMachine(const Target &T, StringRef TT, - StringRef CPU, StringRef FS, + StringRef CPU, StringRef FS, const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL); }; // class PTX32TargetMachine @@ -103,7 +103,7 @@ class PTX64TargetMachine : public PTXTargetMachine { public: PTX64TargetMachine(const Target &T, StringRef TT, - StringRef CPU, StringRef FS, + StringRef CPU, StringRef FS, const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL); }; // class PTX32TargetMachine diff --git a/lib/Target/PowerPC/PPCFrameLowering.cpp b/lib/Target/PowerPC/PPCFrameLowering.cpp index 0b85fea657..ec4231e6ff 100644 --- a/lib/Target/PowerPC/PPCFrameLowering.cpp +++ b/lib/Target/PowerPC/PPCFrameLowering.cpp @@ -244,8 +244,10 @@ bool PPCFrameLowering::needsFP(const MachineFunction &MF) const { if (MF.getFunction()->hasFnAttr(Attribute::Naked)) return false; - return DisableFramePointerElim(MF) || MFI->hasVarSizedObjects() || - (GuaranteedTailCallOpt && MF.getInfo<PPCFunctionInfo>()->hasFastCall()); + return MF.getTarget().Options.DisableFramePointerElim(MF) || + MFI->hasVarSizedObjects() || + (MF.getTarget().Options.GuaranteedTailCallOpt && + MF.getInfo<PPCFunctionInfo>()->hasFastCall()); } @@ -655,7 +657,7 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF, // Callee pop calling convention. Pop parameter/linkage area. Used for tail // call optimization - if (GuaranteedTailCallOpt && RetOpcode == PPC::BLR && + if (MF.getTarget().Options.GuaranteedTailCallOpt && RetOpcode == PPC::BLR && MF.getFunction()->getCallingConv() == CallingConv::Fast) { PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); unsigned CallerAllocatedAmt = FI->getMinReservedArea(); @@ -758,7 +760,8 @@ PPCFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF, // Reserve stack space to move the linkage area to in case of a tail call. 
int TCSPDelta = 0; - if (GuaranteedTailCallOpt && (TCSPDelta = FI->getTailCallSPDelta()) < 0) { + if (MF.getTarget().Options.GuaranteedTailCallOpt && + (TCSPDelta = FI->getTailCallSPDelta()) < 0) { MFI->CreateFixedObject(-1 * TCSPDelta, TCSPDelta, true); } @@ -863,7 +866,8 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF) // Take into account stack space reserved for tail calls. int TCSPDelta = 0; - if (GuaranteedTailCallOpt && (TCSPDelta = PFI->getTailCallSPDelta()) < 0) { + if (MF.getTarget().Options.GuaranteedTailCallOpt && + (TCSPDelta = PFI->getTailCallSPDelta()) < 0) { LowerBound = TCSPDelta; } diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp index 36d5c415f0..067daf7d4f 100644 --- a/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/lib/Target/PowerPC/PPCISelLowering.cpp @@ -1667,7 +1667,8 @@ PPCTargetLowering::LowerFormalArguments_SVR4( EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); // Potential tail calls could cause overwriting of argument stack slots. - bool isImmutable = !(GuaranteedTailCallOpt && (CallConv==CallingConv::Fast)); + bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && + (CallConv == CallingConv::Fast)); unsigned PtrByteSize = 4; // Assign locations to all of the incoming arguments. @@ -1857,7 +1858,8 @@ PPCTargetLowering::LowerFormalArguments_Darwin( EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); bool isPPC64 = PtrVT == MVT::i64; // Potential tail calls could cause overwriting of argument stack slots. - bool isImmutable = !(GuaranteedTailCallOpt && (CallConv==CallingConv::Fast)); + bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && + (CallConv == CallingConv::Fast)); unsigned PtrByteSize = isPPC64 ? 8 : 4; unsigned ArgOffset = PPCFrameLowering::getLinkageSize(isPPC64, true); @@ -2263,9 +2265,9 @@ CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG, PPCFrameLowering::getMinCallFrameSize(isPPC64, true)); // Tail call needs the stack to be aligned. - if (CC==CallingConv::Fast && GuaranteedTailCallOpt) { - unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameLowering()-> - getStackAlignment(); + if (CC == CallingConv::Fast && DAG.getTarget().Options.GuaranteedTailCallOpt){ + unsigned TargetAlign = DAG.getMachineFunction().getTarget(). + getFrameLowering()->getStackAlignment(); unsigned AlignMask = TargetAlign-1; NumBytes = (NumBytes + AlignMask) & ~AlignMask; } @@ -2299,7 +2301,7 @@ PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG& DAG) const { - if (!GuaranteedTailCallOpt) + if (!getTargetMachine().Options.GuaranteedTailCallOpt) return false; // Variable argument functions are not supported. @@ -2752,7 +2754,8 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, DebugLoc dl, // the stack. Account for this here so these bytes can be pushed back on in // PPCRegisterInfo::eliminateCallFramePseudoInstr. int BytesCalleePops = - (CallConv==CallingConv::Fast && GuaranteedTailCallOpt) ? NumBytes : 0; + (CallConv == CallingConv::Fast && + getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0; if (InFlag.getNode()) Ops.push_back(InFlag); @@ -2868,7 +2871,8 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee, // and restoring the callers stack pointer in this functions epilog. 
This is // done because by tail calling the called function might overwrite the value // in this function's (MF) stack pointer stack slot 0(SP). - if (GuaranteedTailCallOpt && CallConv==CallingConv::Fast) + if (getTargetMachine().Options.GuaranteedTailCallOpt && + CallConv == CallingConv::Fast) MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); // Count how many bytes are to be pushed on the stack, including the linkage @@ -3075,7 +3079,8 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee, // and restoring the callers stack pointer in this functions epilog. This is // done because by tail calling the called function might overwrite the value // in this function's (MF) stack pointer stack slot 0(SP). - if (GuaranteedTailCallOpt && CallConv==CallingConv::Fast) + if (getTargetMachine().Options.GuaranteedTailCallOpt && + CallConv == CallingConv::Fast) MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); unsigned nAltivecParamsAtEnd = 0; @@ -5754,7 +5759,8 @@ SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op, MachineFunction &MF = DAG.getMachineFunction(); MachineFrameInfo *MFI = MF.getFrameInfo(); MFI->setFrameAddressIsTaken(true); - bool is31 = (DisableFramePointerElim(MF) || MFI->hasVarSizedObjects()) && + bool is31 = (getTargetMachine().Options.DisableFramePointerElim(MF) || + MFI->hasVarSizedObjects()) && MFI->getStackSize() && !MF.getFunction()->hasFnAttr(Attribute::Naked); unsigned FrameReg = isPPC64 ? (is31 ? PPC::X31 : PPC::X1) : diff --git a/lib/Target/PowerPC/PPCInstrInfo.td b/lib/Target/PowerPC/PPCInstrInfo.td index 17f63e02ff..74a42ba8c4 100644 --- a/lib/Target/PowerPC/PPCInstrInfo.td +++ b/lib/Target/PowerPC/PPCInstrInfo.td @@ -349,7 +349,7 @@ def iaddroff : ComplexPattern<iPTR, 1, "SelectAddrImmOffs", [], []>; //===----------------------------------------------------------------------===// // PowerPC Instruction Predicate Definitions. -def FPContractions : Predicate<"!NoExcessFPPrecision">; +def FPContractions : Predicate<"!TM.Options.NoExcessFPPrecision">; def In32BitMode : Predicate<"!PPCSubTarget.isPPC64()">; def In64BitMode : Predicate<"PPCSubTarget.isPPC64()">; def IsBookE : Predicate<"PPCSubTarget.isBookE()">; diff --git a/lib/Target/PowerPC/PPCRegisterInfo.cpp b/lib/Target/PowerPC/PPCRegisterInfo.cpp index 3ba9260be9..47ab442ce3 100644 --- a/lib/Target/PowerPC/PPCRegisterInfo.cpp +++ b/lib/Target/PowerPC/PPCRegisterInfo.cpp @@ -301,7 +301,8 @@ PPCRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC, void PPCRegisterInfo:: eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const { - if (GuaranteedTailCallOpt && I->getOpcode() == PPC::ADJCALLSTACKUP) { + if (MF.getTarget().Options.GuaranteedTailCallOpt && + I->getOpcode() == PPC::ADJCALLSTACKUP) { // Add (actually subtract) back the amount the callee popped on return. 
if (int CalleeAmt = I->getOperand(1).getImm()) { bool is64Bit = Subtarget.isPPC64(); diff --git a/lib/Target/PowerPC/PPCTargetMachine.cpp b/lib/Target/PowerPC/PPCTargetMachine.cpp index de8fca0777..8e71c46717 100644 --- a/lib/Target/PowerPC/PPCTargetMachine.cpp +++ b/lib/Target/PowerPC/PPCTargetMachine.cpp @@ -28,10 +28,11 @@ extern "C" void LLVMInitializePowerPCTarget() { PPCTargetMachine::PPCTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, + const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL, bool is64Bit) - : LLVMTargetMachine(T, TT, CPU, FS, RM, CM, OL), + : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL), Subtarget(TT, CPU, FS, is64Bit), DataLayout(Subtarget.getTargetDataString()), InstrInfo(*this), FrameLowering(Subtarget), JITInfo(*this, is64Bit), @@ -45,17 +46,19 @@ bool PPCTargetMachine::getEnableTailMergeDefault() const { return false; } PPC32TargetMachine::PPC32TargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, + const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL) - : PPCTargetMachine(T, TT, CPU, FS, RM, CM, OL, false) { + : PPCTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) { } PPC64TargetMachine::PPC64TargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, + const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL) - : PPCTargetMachine(T, TT, CPU, FS, RM, CM, OL, true) { + : PPCTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) { } @@ -81,7 +84,7 @@ bool PPCTargetMachine::addCodeEmitter(PassManagerBase &PM, if (Subtarget.isPPC64()) // Temporary workaround for the inability of PPC64 JIT to handle jump // tables. - DisableJumpTables = true; + Options.DisableJumpTables = true; // Inform the subtarget that we are in JIT mode. FIXME: does this break macho // writing? 
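The PowerPC changes above follow the same pattern as the other backends: flags that used to be free-standing globals (GuaranteedTailCallOpt, the frame-pointer controls, DisableJumpTables) are now read off the owning TargetMachine's Options member, and the PPC64 JIT workaround writes to Options.DisableJumpTables instead of a global. As a rough sketch of how a client drives the new interface, the snippet below builds a per-instance TargetOptions and hands it to TargetRegistry; the helper name, the empty CPU/feature strings, and the explicit field assignments are illustrative only and are not part of this patch, and error handling is elided.

// Minimal sketch (not part of this patch): configuring codegen through a
// per-instance TargetOptions instead of the removed global cl::opt flags.
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <string>

static llvm::TargetMachine *createTM(const std::string &Triple) {
  std::string Error;
  const llvm::Target *T = llvm::TargetRegistry::lookupTarget(Triple, Error);
  if (!T)
    return 0; // lookup failed; Error holds the reason

  llvm::TargetOptions Options;
  // These fields correspond to the former globals; set the ones that matter
  // explicitly rather than relying on whatever the default constructor does.
  Options.NoFramePointerElim = false;
  Options.UnsafeFPMath = false;
  Options.GuaranteedTailCallOpt = false;

  // Reloc model, code model and opt level fall back to their declared defaults.
  return T->createTargetMachine(Triple, /*CPU=*/"", /*Features=*/"", Options);
}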
diff --git a/lib/Target/PowerPC/PPCTargetMachine.h b/lib/Target/PowerPC/PPCTargetMachine.h index 03b27c6ae0..042787659d 100644 --- a/lib/Target/PowerPC/PPCTargetMachine.h +++ b/lib/Target/PowerPC/PPCTargetMachine.h @@ -41,7 +41,7 @@ class PPCTargetMachine : public LLVMTargetMachine { public: PPCTargetMachine(const Target &T, StringRef TT, - StringRef CPU, StringRef FS, + StringRef CPU, StringRef FS, const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL, bool is64Bit); @@ -79,7 +79,7 @@ public: class PPC32TargetMachine : public PPCTargetMachine { public: PPC32TargetMachine(const Target &T, StringRef TT, - StringRef CPU, StringRef FS, + StringRef CPU, StringRef FS, const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL); }; @@ -89,7 +89,7 @@ public: class PPC64TargetMachine : public PPCTargetMachine { public: PPC64TargetMachine(const Target &T, StringRef TT, - StringRef CPU, StringRef FS, + StringRef CPU, StringRef FS, const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL); }; diff --git a/lib/Target/Sparc/SparcTargetMachine.cpp b/lib/Target/Sparc/SparcTargetMachine.cpp index 7dff79941d..8e16fd7948 100644 --- a/lib/Target/Sparc/SparcTargetMachine.cpp +++ b/lib/Target/Sparc/SparcTargetMachine.cpp @@ -26,10 +26,11 @@ extern "C" void LLVMInitializeSparcTarget() { /// SparcTargetMachine::SparcTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, + const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL, bool is64bit) - : LLVMTargetMachine(T, TT, CPU, FS, RM, CM, OL), + : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL), Subtarget(TT, CPU, FS, is64bit), DataLayout(Subtarget.getDataLayout()), TLInfo(*this), TSInfo(*this), InstrInfo(Subtarget), @@ -52,16 +53,20 @@ bool SparcTargetMachine::addPreEmitPass(PassManagerBase &PM){ SparcV8TargetMachine::SparcV8TargetMachine(const Target &T, StringRef TT, StringRef CPU, - StringRef FS, Reloc::Model RM, + StringRef FS, + const TargetOptions &Options, + Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL) - : SparcTargetMachine(T, TT, CPU, FS, RM, CM, OL, false) { + : SparcTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) { } SparcV9TargetMachine::SparcV9TargetMachine(const Target &T, StringRef TT, StringRef CPU, - StringRef FS, Reloc::Model RM, + StringRef FS, + const TargetOptions &Options, + Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL) - : SparcTargetMachine(T, TT, CPU, FS, RM, CM, OL, true) { + : SparcTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) { } diff --git a/lib/Target/Sparc/SparcTargetMachine.h b/lib/Target/Sparc/SparcTargetMachine.h index 63bfa5d36c..cedc1e33de 100644 --- a/lib/Target/Sparc/SparcTargetMachine.h +++ b/lib/Target/Sparc/SparcTargetMachine.h @@ -34,9 +34,9 @@ class SparcTargetMachine : public LLVMTargetMachine { SparcFrameLowering FrameLowering; public: SparcTargetMachine(const Target &T, StringRef TT, - StringRef CPU, StringRef FS, + StringRef CPU, StringRef FS, const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, - CodeGenOpt::Level OL, bool is64bit); + CodeGenOpt::Level OL, bool is64bit); virtual const SparcInstrInfo *getInstrInfo() const { return &InstrInfo; } virtual const TargetFrameLowering *getFrameLowering() const { @@ -65,6 +65,7 @@ class SparcV8TargetMachine : public SparcTargetMachine { public: SparcV8TargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, + const TargetOptions &Options, 
Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL); }; @@ -75,6 +76,7 @@ class SparcV9TargetMachine : public SparcTargetMachine { public: SparcV9TargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, + const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL); }; diff --git a/lib/Target/TargetLoweringObjectFile.cpp b/lib/Target/TargetLoweringObjectFile.cpp index 56b7b69de0..fc8b67b001 100644 --- a/lib/Target/TargetLoweringObjectFile.cpp +++ b/lib/Target/TargetLoweringObjectFile.cpp @@ -48,7 +48,7 @@ void TargetLoweringObjectFile::Initialize(MCContext &ctx, TargetLoweringObjectFile::~TargetLoweringObjectFile() { } -static bool isSuitableForBSS(const GlobalVariable *GV) { +static bool isSuitableForBSS(const GlobalVariable *GV, bool NoZerosInBSS) { const Constant *C = GV->getInitializer(); // Must have zero initializer. @@ -133,7 +133,7 @@ SectionKind TargetLoweringObjectFile::getKindForGlobal(const GlobalValue *GV, // Handle thread-local data first. if (GVar->isThreadLocal()) { - if (isSuitableForBSS(GVar)) + if (isSuitableForBSS(GVar, TM.Options.NoZerosInBSS)) return SectionKind::getThreadBSS(); return SectionKind::getThreadData(); } @@ -143,7 +143,7 @@ SectionKind TargetLoweringObjectFile::getKindForGlobal(const GlobalValue *GV, return SectionKind::getCommon(); // Variable can be easily put to BSS section. - if (isSuitableForBSS(GVar)) { + if (isSuitableForBSS(GVar, TM.Options.NoZerosInBSS)) { if (GVar->hasLocalLinkage()) return SectionKind::getBSSLocal(); else if (GVar->hasExternalLinkage()) diff --git a/lib/Target/TargetMachine.cpp b/lib/Target/TargetMachine.cpp index 805e16e817..80cc5fa75f 100644 --- a/lib/Target/TargetMachine.cpp +++ b/lib/Target/TargetMachine.cpp @@ -24,153 +24,11 @@ using namespace llvm; // namespace llvm { - bool LessPreciseFPMADOption; - bool PrintMachineCode; - bool NoFramePointerElim; - bool NoFramePointerElimNonLeaf; - bool NoExcessFPPrecision; - bool UnsafeFPMath; - bool NoInfsFPMath; - bool NoNaNsFPMath; - bool HonorSignDependentRoundingFPMathOption; - bool UseSoftFloat; - FloatABI::ABIType FloatABIType; - bool NoImplicitFloat; - bool NoZerosInBSS; - bool JITExceptionHandling; - bool JITEmitDebugInfo; - bool JITEmitDebugInfoToDisk; - bool GuaranteedTailCallOpt; - unsigned StackAlignmentOverride; - bool RealignStack; - bool DisableJumpTables; bool StrongPHIElim; bool HasDivModLibcall; bool AsmVerbosityDefault(false); - bool EnableSegmentedStacks; } -static cl::opt<bool, true> -PrintCode("print-machineinstrs", - cl::desc("Print generated machine code"), - cl::location(PrintMachineCode), cl::init(false)); -static cl::opt<bool, true> -DisableFPElim("disable-fp-elim", - cl::desc("Disable frame pointer elimination optimization"), - cl::location(NoFramePointerElim), - cl::init(false)); -static cl::opt<bool, true> -DisableFPElimNonLeaf("disable-non-leaf-fp-elim", - cl::desc("Disable frame pointer elimination optimization for non-leaf funcs"), - cl::location(NoFramePointerElimNonLeaf), - cl::init(false)); -static cl::opt<bool, true> -DisableExcessPrecision("disable-excess-fp-precision", - cl::desc("Disable optimizations that may increase FP precision"), - cl::location(NoExcessFPPrecision), - cl::init(false)); -static cl::opt<bool, true> -EnableFPMAD("enable-fp-mad", - cl::desc("Enable less precise MAD instructions to be generated"), - cl::location(LessPreciseFPMADOption), - cl::init(false)); -static cl::opt<bool, true> -EnableUnsafeFPMath("enable-unsafe-fp-math", - cl::desc("Enable optimizations that may 
decrease FP precision"), - cl::location(UnsafeFPMath), - cl::init(false)); -static cl::opt<bool, true> -EnableNoInfsFPMath("enable-no-infs-fp-math", - cl::desc("Enable FP math optimizations that assume no +-Infs"), - cl::location(NoInfsFPMath), - cl::init(false)); -static cl::opt<bool, true> -EnableNoNaNsFPMath("enable-no-nans-fp-math", - cl::desc("Enable FP math optimizations that assume no NaNs"), - cl::location(NoNaNsFPMath), - cl::init(false)); -static cl::opt<bool, true> -EnableHonorSignDependentRoundingFPMath("enable-sign-dependent-rounding-fp-math", - cl::Hidden, - cl::desc("Force codegen to assume rounding mode can change dynamically"), - cl::location(HonorSignDependentRoundingFPMathOption), - cl::init(false)); -static cl::opt<bool, true> -GenerateSoftFloatCalls("soft-float", - cl::desc("Generate software floating point library calls"), - cl::location(UseSoftFloat), - cl::init(false)); -static cl::opt<llvm::FloatABI::ABIType, true> -FloatABIForCalls("float-abi", - cl::desc("Choose float ABI type"), - cl::location(FloatABIType), - cl::init(FloatABI::Default), - cl::values( - clEnumValN(FloatABI::Default, "default", - "Target default float ABI type"), - clEnumValN(FloatABI::Soft, "soft", - "Soft float ABI (implied by -soft-float)"), - clEnumValN(FloatABI::Hard, "hard", - "Hard float ABI (uses FP registers)"), - clEnumValEnd)); -static cl::opt<bool, true> -DontPlaceZerosInBSS("nozero-initialized-in-bss", - cl::desc("Don't place zero-initialized symbols into bss section"), - cl::location(NoZerosInBSS), - cl::init(false)); -static cl::opt<bool, true> -EnableJITExceptionHandling("jit-enable-eh", - cl::desc("Emit exception handling information"), - cl::location(JITExceptionHandling), - cl::init(false)); -// In debug builds, make this default to true. 
-#ifdef NDEBUG -#define EMIT_DEBUG false -#else -#define EMIT_DEBUG true -#endif -static cl::opt<bool, true> -EmitJitDebugInfo("jit-emit-debug", - cl::desc("Emit debug information to debugger"), - cl::location(JITEmitDebugInfo), - cl::init(EMIT_DEBUG)); -#undef EMIT_DEBUG -static cl::opt<bool, true> -EmitJitDebugInfoToDisk("jit-emit-debug-to-disk", - cl::Hidden, - cl::desc("Emit debug info objfiles to disk"), - cl::location(JITEmitDebugInfoToDisk), - cl::init(false)); - -static cl::opt<bool, true> -EnableGuaranteedTailCallOpt("tailcallopt", - cl::desc("Turn fastcc calls into tail calls by (potentially) changing ABI."), - cl::location(GuaranteedTailCallOpt), - cl::init(false)); -static cl::opt<unsigned, true> -OverrideStackAlignment("stack-alignment", - cl::desc("Override default stack alignment"), - cl::location(StackAlignmentOverride), - cl::init(0)); -static cl::opt<bool, true> -EnableRealignStack("realign-stack", - cl::desc("Realign stack if needed"), - cl::location(RealignStack), - cl::init(true)); -static cl::opt<bool, true> -DisableSwitchTables(cl::Hidden, "disable-jump-tables", - cl::desc("Do not generate jump tables."), - cl::location(DisableJumpTables), - cl::init(false)); -static cl::opt<bool, true> -EnableStrongPHIElim(cl::Hidden, "strong-phi-elim", - cl::desc("Use strong PHI elimination."), - cl::location(StrongPHIElim), - cl::init(false)); -static cl::opt<std::string> -TrapFuncName("trap-func", cl::Hidden, - cl::desc("Emit a call to trap function rather than a trap instruction"), - cl::init("")); static cl::opt<bool> DataSections("fdata-sections", cl::desc("Emit data into separate sections"), @@ -179,18 +37,14 @@ static cl::opt<bool> FunctionSections("ffunction-sections", cl::desc("Emit functions into separate sections"), cl::init(false)); -static cl::opt<bool, true> -SegmentedStacks("segmented-stacks", - cl::desc("Use segmented stacks if possible."), - cl::location(EnableSegmentedStacks), - cl::init(false)); //--------------------------------------------------------------------------- // TargetMachine Class // TargetMachine::TargetMachine(const Target &T, - StringRef TT, StringRef CPU, StringRef FS) + StringRef TT, StringRef CPU, StringRef FS, + const TargetOptions &Options) : TheTarget(T), TargetTriple(TT), TargetCPU(CPU), TargetFS(FS), CodeGenInfo(0), AsmInfo(0), MCRelaxAll(false), @@ -198,11 +52,8 @@ TargetMachine::TargetMachine(const Target &T, MCSaveTempLabels(false), MCUseLoc(true), MCUseCFI(true), - MCUseDwarfDirectory(false) { - // Typically it will be subtargets that will adjust FloatABIType from Default - // to Soft or Hard. - if (UseSoftFloat) - FloatABIType = FloatABI::Soft; + MCUseDwarfDirectory(false), + Options(Options) { } TargetMachine::~TargetMachine() { @@ -258,36 +109,36 @@ void TargetMachine::setDataSections(bool V) { DataSections = V; } -namespace llvm { - /// DisableFramePointerElim - This returns true if frame pointer elimination - /// optimization should be disabled for the given machine function. - bool DisableFramePointerElim(const MachineFunction &MF) { - // Check to see if we should eliminate non-leaf frame pointers and then - // check to see if we should eliminate all frame pointers. - if (NoFramePointerElimNonLeaf && !NoFramePointerElim) { - const MachineFrameInfo *MFI = MF.getFrameInfo(); - return MFI->hasCalls(); - } - - return NoFramePointerElim; +/// DisableFramePointerElim - This returns true if frame pointer elimination +/// optimization should be disabled for the given machine function. 
+bool TargetOptions::DisableFramePointerElim(const MachineFunction &MF) const { + // Check to see if we should eliminate non-leaf frame pointers and then + // check to see if we should eliminate all frame pointers. + if (NoFramePointerElimNonLeaf && !NoFramePointerElim) { + const MachineFrameInfo *MFI = MF.getFrameInfo(); + return MFI->hasCalls(); } - /// LessPreciseFPMAD - This flag return true when -enable-fp-mad option - /// is specified on the command line. When this flag is off(default), the - /// code generator is not allowed to generate mad (multiply add) if the - /// result is "less precise" than doing those operations individually. - bool LessPreciseFPMAD() { return UnsafeFPMath || LessPreciseFPMADOption; } + return NoFramePointerElim; +} - /// HonorSignDependentRoundingFPMath - Return true if the codegen must assume - /// that the rounding mode of the FPU can change from its default. - bool HonorSignDependentRoundingFPMath() { - return !UnsafeFPMath && HonorSignDependentRoundingFPMathOption; - } +/// LessPreciseFPMAD - This flag return true when -enable-fp-mad option +/// is specified on the command line. When this flag is off(default), the +/// code generator is not allowed to generate mad (multiply add) if the +/// result is "less precise" than doing those operations individually. +bool TargetOptions::LessPreciseFPMAD() const { + return UnsafeFPMath || LessPreciseFPMADOption; +} - /// getTrapFunctionName - If this returns a non-empty string, this means isel - /// should lower Intrinsic::trap to a call to the specified function name - /// instead of an ISD::TRAP node. - StringRef getTrapFunctionName() { - return TrapFuncName; - } +/// HonorSignDependentRoundingFPMath - Return true if the codegen must assume +/// that the rounding mode of the FPU can change from its default. +bool TargetOptions::HonorSignDependentRoundingFPMath() const { + return !UnsafeFPMath && HonorSignDependentRoundingFPMathOption; +} + +/// getTrapFunctionName - If this returns a non-empty string, this means isel +/// should lower Intrinsic::trap to a call to the specified function name +/// instead of an ISD::TRAP node. +StringRef TargetOptions::getTrapFunctionName() const { + return TrapFuncName; } diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp index 00c5903b47..15894390cf 100644 --- a/lib/Target/X86/X86FastISel.cpp +++ b/lib/Target/X86/X86FastISel.cpp @@ -728,7 +728,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) { // fastcc with -tailcallopt is intended to provide a guaranteed // tail call optimization. Fastisel doesn't know how to do that. - if (CC == CallingConv::Fast && GuaranteedTailCallOpt) + if (CC == CallingConv::Fast && TM.Options.GuaranteedTailCallOpt) return false; // Let SDISel handle vararg functions. @@ -1529,7 +1529,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) { // fastcc with -tailcallopt is intended to provide a guaranteed // tail call optimization. Fastisel doesn't know how to do that. - if (CC == CallingConv::Fast && GuaranteedTailCallOpt) + if (CC == CallingConv::Fast && TM.Options.GuaranteedTailCallOpt) return false; PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType()); @@ -1543,7 +1543,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) { // Fast-isel doesn't know about callee-pop yet. 
if (X86::isCalleePop(CC, Subtarget->is64Bit(), isVarArg, - GuaranteedTailCallOpt)) + TM.Options.GuaranteedTailCallOpt)) return false; // Check whether the function can return without sret-demotion. diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp index 1053a15b5f..ed3118af05 100644 --- a/lib/Target/X86/X86FrameLowering.cpp +++ b/lib/Target/X86/X86FrameLowering.cpp @@ -47,7 +47,7 @@ bool X86FrameLowering::hasFP(const MachineFunction &MF) const { const MachineModuleInfo &MMI = MF.getMMI(); const TargetRegisterInfo *RI = TM.getRegisterInfo(); - return (DisableFramePointerElim(MF) || + return (MF.getTarget().Options.DisableFramePointerElim(MF) || RI->needsStackRealignment(MF) || MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken() || @@ -638,10 +638,10 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const { // stack pointer (we fit in the Red Zone). if (Is64Bit && !Fn->hasFnAttr(Attribute::NoRedZone) && !RegInfo->needsStackRealignment(MF) && - !MFI->hasVarSizedObjects() && // No dynamic alloca. - !MFI->adjustsStack() && // No calls. - !IsWin64 && // Win64 has no Red Zone - !EnableSegmentedStacks) { // Regular stack + !MFI->hasVarSizedObjects() && // No dynamic alloca. + !MFI->adjustsStack() && // No calls. + !IsWin64 && // Win64 has no Red Zone + !MF.getTarget().Options.EnableSegmentedStacks) { // Regular stack uint64_t MinSize = X86FI->getCalleeSavedFrameSize(); if (HasFP) MinSize += SlotSize; StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0); diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 454e1224c6..d9bd8fe156 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -256,7 +256,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) if (Subtarget->is64Bit()) { setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote); setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Expand); - } else if (!UseSoftFloat) { + } else if (!TM.Options.UseSoftFloat) { // We have an algorithm for SSE2->double, and we turn this into a // 64-bit FILD followed by conditional FADD for other targets. setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom); @@ -270,7 +270,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote); setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote); - if (!UseSoftFloat) { + if (!TM.Options.UseSoftFloat) { // SSE has no i16 to fp conversion, only i32 if (X86ScalarSSEf32) { setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote); @@ -313,7 +313,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) if (Subtarget->is64Bit()) { setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Expand); setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote); - } else if (!UseSoftFloat) { + } else if (!TM.Options.UseSoftFloat) { // Since AVX is a superset of SSE3, only check for SSE here. if (Subtarget->hasSSE1() && !Subtarget->hasSSE3()) // Expand FP_TO_UINT into a select. @@ -537,14 +537,14 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho()) setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ? MVT::i64 : MVT::i32, Custom); - else if (EnableSegmentedStacks) + else if (TM.Options.EnableSegmentedStacks) setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ? MVT::i64 : MVT::i32, Custom); else setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ? 
MVT::i64 : MVT::i32, Expand); - if (!UseSoftFloat && X86ScalarSSEf64) { + if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) { // f32 and f64 use SSE. // Set up the FP register classes. addRegisterClass(MVT::f32, X86::FR32RegisterClass); @@ -576,7 +576,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) // cases we handle. addLegalFPImmediate(APFloat(+0.0)); // xorpd addLegalFPImmediate(APFloat(+0.0f)); // xorps - } else if (!UseSoftFloat && X86ScalarSSEf32) { + } else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) { // Use SSE for f32, x87 for f64. // Set up the FP register classes. addRegisterClass(MVT::f32, X86::FR32RegisterClass); @@ -605,11 +605,11 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS - if (!UnsafeFPMath) { + if (!TM.Options.UnsafeFPMath) { setOperationAction(ISD::FSIN , MVT::f64 , Expand); setOperationAction(ISD::FCOS , MVT::f64 , Expand); } - } else if (!UseSoftFloat) { + } else if (!TM.Options.UseSoftFloat) { // f32 and f64 in x87. // Set up the FP register classes. addRegisterClass(MVT::f64, X86::RFP64RegisterClass); @@ -620,7 +620,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); - if (!UnsafeFPMath) { + if (!TM.Options.UnsafeFPMath) { setOperationAction(ISD::FSIN , MVT::f64 , Expand); setOperationAction(ISD::FCOS , MVT::f64 , Expand); } @@ -639,7 +639,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setOperationAction(ISD::FMA, MVT::f32, Expand); // Long double always uses X87. - if (!UseSoftFloat) { + if (!TM.Options.UseSoftFloat) { addRegisterClass(MVT::f80, X86::RFP80RegisterClass); setOperationAction(ISD::UNDEF, MVT::f80, Expand); setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand); @@ -658,7 +658,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) addLegalFPImmediate(TmpFlt2); // FLD1/FCHS } - if (!UnsafeFPMath) { + if (!TM.Options.UnsafeFPMath) { setOperationAction(ISD::FSIN , MVT::f80 , Expand); setOperationAction(ISD::FCOS , MVT::f80 , Expand); } @@ -748,7 +748,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) // FIXME: In order to prevent SSE instructions being expanded to MMX ones // with -msoft-float, disable use of MMX as well. - if (!UseSoftFloat && Subtarget->hasMMX()) { + if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) { addRegisterClass(MVT::x86mmx, X86::VR64RegisterClass); // No operations on x86mmx supported, everything uses intrinsics. 
} @@ -785,7 +785,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setOperationAction(ISD::BITCAST, MVT::v2i32, Expand); setOperationAction(ISD::BITCAST, MVT::v1i64, Expand); - if (!UseSoftFloat && Subtarget->hasXMM()) { + if (!TM.Options.UseSoftFloat && Subtarget->hasXMM()) { addRegisterClass(MVT::v4f32, X86::VR128RegisterClass); setOperationAction(ISD::FADD, MVT::v4f32, Legal); @@ -802,7 +802,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setOperationAction(ISD::SETCC, MVT::v4f32, Custom); } - if (!UseSoftFloat && Subtarget->hasXMMInt()) { + if (!TM.Options.UseSoftFloat && Subtarget->hasXMMInt()) { addRegisterClass(MVT::v2f64, X86::VR128RegisterClass); // FIXME: Unfortunately -soft-float and -no-implicit-float means XMM @@ -983,7 +983,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) if (Subtarget->hasSSE42orAVX()) setOperationAction(ISD::SETCC, MVT::v2i64, Custom); - if (!UseSoftFloat && Subtarget->hasAVX()) { + if (!TM.Options.UseSoftFloat && Subtarget->hasAVX()) { addRegisterClass(MVT::v32i8, X86::VR256RegisterClass); addRegisterClass(MVT::v16i16, X86::VR256RegisterClass); addRegisterClass(MVT::v8i32, X86::VR256RegisterClass); @@ -1709,7 +1709,8 @@ bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { /// FuncIsMadeTailCallSafe - Return true if the function is being made into /// a tailcall target by changing its ABI. -static bool FuncIsMadeTailCallSafe(CallingConv::ID CC) { +static bool FuncIsMadeTailCallSafe(CallingConv::ID CC, + bool GuaranteedTailCallOpt) { return GuaranteedTailCallOpt && IsTailCallConvention(CC); } @@ -1723,7 +1724,8 @@ X86TargetLowering::LowerMemArgument(SDValue Chain, unsigned i) const { // Create the nodes corresponding to a load from this parameter slot. ISD::ArgFlagsTy Flags = Ins[i].Flags; - bool AlwaysUseMutable = FuncIsMadeTailCallSafe(CallConv); + bool AlwaysUseMutable = FuncIsMadeTailCallSafe(CallConv, + getTargetMachine().Options.GuaranteedTailCallOpt); bool isImmutable = !AlwaysUseMutable && !Flags.isByVal(); EVT ValVT; @@ -1873,7 +1875,8 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain, unsigned StackSize = CCInfo.getNextStackOffset(); // Align stack specially for tail calls. - if (FuncIsMadeTailCallSafe(CallConv)) + if (FuncIsMadeTailCallSafe(CallConv, + MF.getTarget().Options.GuaranteedTailCallOpt)) StackSize = GetAlignedArgumentStackSize(StackSize, DAG); // If the function takes variable number of arguments, make a frame index for @@ -1918,9 +1921,11 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain, bool NoImplicitFloatOps = Fn->hasFnAttr(Attribute::NoImplicitFloat); assert(!(NumXMMRegs && !Subtarget->hasXMM()) && "SSE register cannot be used when SSE is disabled!"); - assert(!(NumXMMRegs && UseSoftFloat && NoImplicitFloatOps) && + assert(!(NumXMMRegs && MF.getTarget().Options.UseSoftFloat && + NoImplicitFloatOps) && "SSE register cannot be used when SSE is disabled!"); - if (UseSoftFloat || NoImplicitFloatOps || !Subtarget->hasXMM()) + if (MF.getTarget().Options.UseSoftFloat || NoImplicitFloatOps || + !Subtarget->hasXMM()) // Kernel mode asks for SSE to be disabled, so don't push them // on the stack. TotalNumXMMRegs = 0; @@ -1998,7 +2003,8 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain, } // Some CCs need callee pop. 
- if (X86::isCalleePop(CallConv, Is64Bit, isVarArg, GuaranteedTailCallOpt)) { + if (X86::isCalleePop(CallConv, Is64Bit, isVarArg, + MF.getTarget().Options.GuaranteedTailCallOpt)) { FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything. } else { FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing. @@ -2098,7 +2104,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee, // Sibcalls are automatically detected tailcalls which do not require // ABI changes. - if (!GuaranteedTailCallOpt && isTailCall) + if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall) IsSibcall = true; if (isTailCall) @@ -2126,7 +2132,8 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee, // This is a sibcall. The memory operands are available in caller's // own caller's stack. NumBytes = 0; - else if (GuaranteedTailCallOpt && IsTailCallConvention(CallConv)) + else if (getTargetMachine().Options.GuaranteedTailCallOpt && + IsTailCallConvention(CallConv)) NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG); int FPDiff = 0; @@ -2305,7 +2312,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee, int FI = 0; // Do not flag preceding copytoreg stuff together with the following stuff. InFlag = SDValue(); - if (GuaranteedTailCallOpt) { + if (getTargetMachine().Options.GuaranteedTailCallOpt) { for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { CCValAssign &VA = ArgLocs[i]; if (VA.isRegLoc()) @@ -2485,7 +2492,8 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee, // Create the CALLSEQ_END node. unsigned NumBytesForCalleeToPush; - if (X86::isCalleePop(CallConv, Is64Bit, isVarArg, GuaranteedTailCallOpt)) + if (X86::isCalleePop(CallConv, Is64Bit, isVarArg, + getTargetMachine().Options.GuaranteedTailCallOpt)) NumBytesForCalleeToPush = NumBytes; // Callee pops everything else if (!Is64Bit && !IsTailCallConvention(CallConv) && IsStructRet) // If this is a call to a struct-return function, the callee @@ -2643,7 +2651,7 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, CallingConv::ID CallerCC = CallerF->getCallingConv(); bool CCMatch = CallerCC == CalleeCC; - if (GuaranteedTailCallOpt) { + if (getTargetMachine().Options.GuaranteedTailCallOpt) { if (IsTailCallConvention(CalleeCC) && CCMatch) return true; return false; @@ -9119,7 +9127,7 @@ SDValue X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const { assert((Subtarget->isTargetCygMing() || Subtarget->isTargetWindows() || - EnableSegmentedStacks) && + getTargetMachine().Options.EnableSegmentedStacks) && "This should be used only on Windows targets or when segmented stacks " "are being used"); assert(!Subtarget->isTargetEnvMacho() && "Not implemented"); @@ -9133,7 +9141,7 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, bool Is64Bit = Subtarget->is64Bit(); EVT SPTy = Is64Bit ? MVT::i64 : MVT::i32; - if (EnableSegmentedStacks) { + if (getTargetMachine().Options.EnableSegmentedStacks) { MachineFunction &MF = DAG.getMachineFunction(); MachineRegisterInfo &MRI = MF.getRegInfo(); @@ -9269,7 +9277,7 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { if (ArgMode == 2) { // Sanity Check: Make sure using fp_offset makes sense. 
- assert(!UseSoftFloat && + assert(!getTargetMachine().Options.UseSoftFloat && !(DAG.getMachineFunction() .getFunction()->hasFnAttr(Attribute::NoImplicitFloat)) && Subtarget->hasXMM()); @@ -12144,7 +12152,7 @@ X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI, MachineBasicBlock *BB, MachineFunction *MF = BB->getParent(); const BasicBlock *LLVM_BB = BB->getBasicBlock(); - assert(EnableSegmentedStacks); + assert(getTargetMachine().Options.EnableSegmentedStacks); unsigned TlsReg = Is64Bit ? X86::FS : X86::GS; unsigned TlsOffset = Is64Bit ? 0x70 : 0x30; @@ -13024,7 +13032,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, // the operands would cause it to handle comparisons between positive // and negative zero incorrectly. if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) { - if (!UnsafeFPMath && + if (!DAG.getTarget().Options.UnsafeFPMath && !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) break; std::swap(LHS, RHS); @@ -13034,7 +13042,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, case ISD::SETOLE: // Converting this to a min would handle comparisons between positive // and negative zero incorrectly. - if (!UnsafeFPMath && + if (!DAG.getTarget().Options.UnsafeFPMath && !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) break; Opcode = X86ISD::FMIN; @@ -13052,7 +13060,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, case ISD::SETOGE: // Converting this to a max would handle comparisons between positive // and negative zero incorrectly. - if (!UnsafeFPMath && + if (!DAG.getTarget().Options.UnsafeFPMath && !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) break; Opcode = X86ISD::FMAX; @@ -13062,7 +13070,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, // the operands would cause it to handle comparisons between positive // and negative zero incorrectly. if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) { - if (!UnsafeFPMath && + if (!DAG.getTarget().Options.UnsafeFPMath && !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) break; std::swap(LHS, RHS); @@ -13088,7 +13096,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, // Converting this to a min would handle comparisons between positive // and negative zero incorrectly, and swapping the operands would // cause it to handle NaNs incorrectly. - if (!UnsafeFPMath && + if (!DAG.getTarget().Options.UnsafeFPMath && !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) { if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) break; @@ -13098,7 +13106,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, break; case ISD::SETUGT: // Converting this to a min would handle NaNs incorrectly. - if (!UnsafeFPMath && + if (!DAG.getTarget().Options.UnsafeFPMath && (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))) break; Opcode = X86ISD::FMIN; @@ -13123,7 +13131,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, // Converting this to a max would handle comparisons between positive // and negative zero incorrectly, and swapping the operands would // cause it to handle NaNs incorrectly. 
- if (!UnsafeFPMath && + if (!DAG.getTarget().Options.UnsafeFPMath && !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) { if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) break; @@ -14087,7 +14095,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG, SDValue StoredVal = St->getOperand(1); const TargetLowering &TLI = DAG.getTargetLoweringInfo(); - // If we are saving a concatination of two XMM registers, perform two stores. + // If we are saving a concatenation of two XMM registers, perform two stores. // This is better in Sandy Bridge cause one 256-bit mem op is done via two // 128-bit ones. If in the future the cost becomes only one memory access the // first version would be better. @@ -14197,7 +14205,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG, const Function *F = DAG.getMachineFunction().getFunction(); bool NoImplicitFloatOps = F->hasFnAttr(Attribute::NoImplicitFloat); - bool F64IsLegal = !UseSoftFloat && !NoImplicitFloatOps + bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps && Subtarget->hasXMMInt(); if ((VT.isVector() || (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) && diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp index 9d7863c6ba..bf1ed08841 100644 --- a/lib/Target/X86/X86RegisterInfo.cpp +++ b/lib/Target/X86/X86RegisterInfo.cpp @@ -452,7 +452,7 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const { bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const { const MachineFrameInfo *MFI = MF.getFrameInfo(); - return (RealignStack && + return (MF.getTarget().Options.RealignStack && !MFI->hasVarSizedObjects()); } diff --git a/lib/Target/X86/X86Subtarget.cpp b/lib/Target/X86/X86Subtarget.cpp index d7fa8ca574..6e092c755a 100644 --- a/lib/Target/X86/X86Subtarget.cpp +++ b/lib/Target/X86/X86Subtarget.cpp @@ -390,9 +390,6 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU, assert((!In64BitMode || HasX86_64) && "64-bit code requested on a subtarget that doesn't support it!"); - if(EnableSegmentedStacks && !isTargetELF()) - report_fatal_error("Segmented stacks are only implemented on ELF."); - // Stack alignment is 16 bytes on Darwin, FreeBSD, Linux and Solaris (both // 32 and 64 bit) and for all 64-bit targets. if (StackAlignOverride) diff --git a/lib/Target/X86/X86TargetMachine.cpp b/lib/Target/X86/X86TargetMachine.cpp index 1c9f3bd549..126042eeae 100644 --- a/lib/Target/X86/X86TargetMachine.cpp +++ b/lib/Target/X86/X86TargetMachine.cpp @@ -31,9 +31,10 @@ extern "C" void LLVMInitializeX86Target() { X86_32TargetMachine::X86_32TargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, + const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL) - : X86TargetMachine(T, TT, CPU, FS, RM, CM, OL, false), + : X86TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false), DataLayout(getSubtargetImpl()->isTargetDarwin() ? 
"e-p:32:32-f64:32:64-i64:32:64-f80:128:128-f128:128:128-" "n8:16:32-S128" : @@ -52,9 +53,10 @@ X86_32TargetMachine::X86_32TargetMachine(const Target &T, StringRef TT, X86_64TargetMachine::X86_64TargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, + const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL) - : X86TargetMachine(T, TT, CPU, FS, RM, CM, OL, true), + : X86TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true), DataLayout("e-p:64:64-s:64-f64:64:64-i64:64:64-f80:128:128-f128:128:128-" "n8:16:32:64-S128"), InstrInfo(*this), @@ -67,11 +69,12 @@ X86_64TargetMachine::X86_64TargetMachine(const Target &T, StringRef TT, /// X86TargetMachine::X86TargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, + const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL, bool is64Bit) - : LLVMTargetMachine(T, TT, CPU, FS, RM, CM, OL), - Subtarget(TT, CPU, FS, StackAlignmentOverride, is64Bit), + : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL), + Subtarget(TT, CPU, FS, Options.StackAlignmentOverride, is64Bit), FrameLowering(*this, Subtarget), ELFWriterInfo(is64Bit, true) { // Determine the PICStyle based on the target selected. @@ -95,8 +98,11 @@ X86TargetMachine::X86TargetMachine(const Target &T, StringRef TT, } // default to hard float ABI - if (FloatABIType == FloatABI::Default) - FloatABIType = FloatABI::Hard; + if (Options.FloatABIType == FloatABI::Default) + this->Options.FloatABIType = FloatABI::Hard; + + if (Options.EnableSegmentedStacks && !Subtarget.isTargetELF()) + report_fatal_error("Segmented stacks are only implemented on ELF."); } //===----------------------------------------------------------------------===// diff --git a/lib/Target/X86/X86TargetMachine.h b/lib/Target/X86/X86TargetMachine.h index 64be4585cd..3ac176937a 100644 --- a/lib/Target/X86/X86TargetMachine.h +++ b/lib/Target/X86/X86TargetMachine.h @@ -38,7 +38,7 @@ class X86TargetMachine : public LLVMTargetMachine { public: X86TargetMachine(const Target &T, StringRef TT, - StringRef CPU, StringRef FS, + StringRef CPU, StringRef FS, const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL, bool is64Bit); @@ -85,7 +85,7 @@ class X86_32TargetMachine : public X86TargetMachine { X86JITInfo JITInfo; public: X86_32TargetMachine(const Target &T, StringRef TT, - StringRef CPU, StringRef FS, + StringRef CPU, StringRef FS, const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL); virtual const TargetData *getTargetData() const { return &DataLayout; } @@ -113,7 +113,7 @@ class X86_64TargetMachine : public X86TargetMachine { X86JITInfo JITInfo; public: X86_64TargetMachine(const Target &T, StringRef TT, - StringRef CPU, StringRef FS, + StringRef CPU, StringRef FS, const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL); virtual const TargetData *getTargetData() const { return &DataLayout; } diff --git a/lib/Target/XCore/XCoreFrameLowering.cpp b/lib/Target/XCore/XCoreFrameLowering.cpp index 7f8b169819..5007d041f6 100644 --- a/lib/Target/XCore/XCoreFrameLowering.cpp +++ b/lib/Target/XCore/XCoreFrameLowering.cpp @@ -84,7 +84,8 @@ XCoreFrameLowering::XCoreFrameLowering(const XCoreSubtarget &sti) } bool XCoreFrameLowering::hasFP(const MachineFunction &MF) const { - return DisableFramePointerElim(MF) || MF.getFrameInfo()->hasVarSizedObjects(); + return MF.getTarget().Options.DisableFramePointerElim(MF) || + 
diff --git a/lib/Target/XCore/XCoreFrameLowering.cpp b/lib/Target/XCore/XCoreFrameLowering.cpp
index 7f8b169819..5007d041f6 100644
--- a/lib/Target/XCore/XCoreFrameLowering.cpp
+++ b/lib/Target/XCore/XCoreFrameLowering.cpp
@@ -84,7 +84,8 @@ XCoreFrameLowering::XCoreFrameLowering(const XCoreSubtarget &sti)
 }
 
 bool XCoreFrameLowering::hasFP(const MachineFunction &MF) const {
-  return DisableFramePointerElim(MF) || MF.getFrameInfo()->hasVarSizedObjects();
+  return MF.getTarget().Options.DisableFramePointerElim(MF) ||
+         MF.getFrameInfo()->hasVarSizedObjects();
 }
 
 void XCoreFrameLowering::emitPrologue(MachineFunction &MF) const {
diff --git a/lib/Target/XCore/XCoreTargetMachine.cpp b/lib/Target/XCore/XCoreTargetMachine.cpp
index eec3674131..c326ffbbd7 100644
--- a/lib/Target/XCore/XCoreTargetMachine.cpp
+++ b/lib/Target/XCore/XCoreTargetMachine.cpp
@@ -21,9 +21,10 @@ using namespace llvm;
 ///
 XCoreTargetMachine::XCoreTargetMachine(const Target &T, StringRef TT,
                                        StringRef CPU, StringRef FS,
+                                       const TargetOptions &Options,
                                        Reloc::Model RM, CodeModel::Model CM,
                                        CodeGenOpt::Level OL)
-  : LLVMTargetMachine(T, TT, CPU, FS, RM, CM, OL),
+  : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
     Subtarget(TT, CPU, FS),
     DataLayout("e-p:32:32:32-a0:0:32-f32:32:32-f64:32:32-i1:8:32-i8:8:32-"
                "i16:16:32-i32:32:32-i64:32:32-n32"),
diff --git a/lib/Target/XCore/XCoreTargetMachine.h b/lib/Target/XCore/XCoreTargetMachine.h
index 3f2644db98..0159b1e6b7 100644
--- a/lib/Target/XCore/XCoreTargetMachine.h
+++ b/lib/Target/XCore/XCoreTargetMachine.h
@@ -33,7 +33,7 @@ class XCoreTargetMachine : public LLVMTargetMachine {
   XCoreSelectionDAGInfo TSInfo;
 public:
   XCoreTargetMachine(const Target &T, StringRef TT,
-                     StringRef CPU, StringRef FS,
+                     StringRef CPU, StringRef FS, const TargetOptions &Options,
                      Reloc::Model RM, CodeModel::Model CM,
                      CodeGenOpt::Level OL);
 
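
The remaining hunks move the command-line flags themselves out of lib/CodeGen and into llc (see the block of cl::opt declarations below) and default-construct TargetOptions in the LTO tools. For readers unfamiliar with LLVM's option machinery, here is a small self-contained sketch of the cl::opt pattern those llc declarations use; the flag name and the main function are illustrative only, not part of this patch:

```cpp
#include "llvm/Support/CommandLine.h"
using namespace llvm;

// Illustrative flag, declared the same way as the llc options below:
// a static cl::opt registers itself and is filled in during parsing.
static cl::opt<bool>
ExampleUnsafeFPMath("example-unsafe-fp-math",
                    cl::desc("Stand-in for -enable-unsafe-fp-math"),
                    cl::init(false));

int main(int argc, char **argv) {
  cl::ParseCommandLineOptions(argc, argv);
  // After parsing, the value is ready to be copied into a TargetOptions
  // field, which is what llc's main() does further down in this patch.
  return ExampleUnsafeFPMath ? 0 : 1;
}
```
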
diff --git a/tools/llc/llc.cpp b/tools/llc/llc.cpp
index fdc6fec984..58dafca159 100644
--- a/tools/llc/llc.cpp
+++ b/tools/llc/llc.cpp
@@ -141,6 +141,134 @@ DisableRedZone("disable-red-zone",
   cl::desc("Do not emit code that uses the red zone."),
   cl::init(false));
 
+static cl::opt<bool>
+EnableFPMAD("enable-fp-mad",
+            cl::desc("Enable less precise MAD instructions to be generated"),
+            cl::init(false));
+
+static cl::opt<bool>
+PrintCode("print-machineinstrs",
+          cl::desc("Print generated machine code"),
+          cl::init(false));
+
+static cl::opt<bool>
+DisableFPElim("disable-fp-elim",
+              cl::desc("Disable frame pointer elimination optimization"),
+              cl::init(false));
+
+static cl::opt<bool>
+DisableFPElimNonLeaf("disable-non-leaf-fp-elim",
+  cl::desc("Disable frame pointer elimination optimization for non-leaf funcs"),
+  cl::init(false));
+
+static cl::opt<bool>
+DisableExcessPrecision("disable-excess-fp-precision",
+  cl::desc("Disable optimizations that may increase FP precision"),
+  cl::init(false));
+
+static cl::opt<bool>
+EnableUnsafeFPMath("enable-unsafe-fp-math",
+  cl::desc("Enable optimizations that may decrease FP precision"),
+  cl::init(false));
+
+static cl::opt<bool>
+EnableNoInfsFPMath("enable-no-infs-fp-math",
+  cl::desc("Enable FP math optimizations that assume no +-Infs"),
+  cl::init(false));
+
+static cl::opt<bool>
+EnableNoNaNsFPMath("enable-no-nans-fp-math",
+  cl::desc("Enable FP math optimizations that assume no NaNs"),
+  cl::init(false));
+
+static cl::opt<bool>
+EnableHonorSignDependentRoundingFPMath("enable-sign-dependent-rounding-fp-math",
+  cl::Hidden,
+  cl::desc("Force codegen to assume rounding mode can change dynamically"),
+  cl::init(false));
+
+static cl::opt<bool>
+GenerateSoftFloatCalls("soft-float",
+  cl::desc("Generate software floating point library calls"),
+  cl::init(false));
+
+static cl::opt<llvm::FloatABI::ABIType>
+FloatABIForCalls("float-abi",
+  cl::desc("Choose float ABI type"),
+  cl::init(FloatABI::Default),
+  cl::values(
+    clEnumValN(FloatABI::Default, "default",
+               "Target default float ABI type"),
+    clEnumValN(FloatABI::Soft, "soft",
+               "Soft float ABI (implied by -soft-float)"),
+    clEnumValN(FloatABI::Hard, "hard",
+               "Hard float ABI (uses FP registers)"),
+    clEnumValEnd));
+
+static cl::opt<bool>
+DontPlaceZerosInBSS("nozero-initialized-in-bss",
+  cl::desc("Don't place zero-initialized symbols into bss section"),
+  cl::init(false));
+
+static cl::opt<bool>
+EnableJITExceptionHandling("jit-enable-eh",
+  cl::desc("Emit exception handling information"),
+  cl::init(false));
+
+// In debug builds, make this default to true.
+#ifdef NDEBUG
+#define EMIT_DEBUG false
+#else
+#define EMIT_DEBUG true
+#endif
+static cl::opt<bool>
+EmitJitDebugInfo("jit-emit-debug",
+  cl::desc("Emit debug information to debugger"),
+  cl::init(EMIT_DEBUG));
+#undef EMIT_DEBUG
+
+static cl::opt<bool>
+EmitJitDebugInfoToDisk("jit-emit-debug-to-disk",
+  cl::Hidden,
+  cl::desc("Emit debug info objfiles to disk"),
+  cl::init(false));
+
+static cl::opt<bool>
+EnableGuaranteedTailCallOpt("tailcallopt",
+  cl::desc("Turn fastcc calls into tail calls by (potentially) changing ABI."),
+  cl::init(false));
+
+static cl::opt<unsigned>
+OverrideStackAlignment("stack-alignment",
+  cl::desc("Override default stack alignment"),
+  cl::init(0));
+
+static cl::opt<bool>
+EnableRealignStack("realign-stack",
+  cl::desc("Realign stack if needed"),
+  cl::init(true));
+
+static cl::opt<bool>
+DisableSwitchTables(cl::Hidden, "disable-jump-tables",
+  cl::desc("Do not generate jump tables."),
+  cl::init(false));
+
+static cl::opt<bool>
+EnableStrongPHIElim(cl::Hidden, "strong-phi-elim",
+  cl::desc("Use strong PHI elimination."),
+  cl::init(false));
+
+static cl::opt<std::string>
+TrapFuncName("trap-func", cl::Hidden,
+  cl::desc("Emit a call to trap function rather than a trap instruction"),
+  cl::init(""));
+
+static cl::opt<bool>
+SegmentedStacks("segmented-stacks",
+  cl::desc("Use segmented stacks if possible."),
+  cl::init(false));
+
+
 // GetFileNameRoot - Helper function to get the basename of a filename.
 static inline std::string
 GetFileNameRoot(const std::string &InputFilename) {
@@ -318,9 +446,34 @@ int main(int argc, char **argv) {
     case '3': OLvl = CodeGenOpt::Aggressive; break;
     }
 
+  TargetOptions Options;
+  Options.LessPreciseFPMADOption = EnableFPMAD;
+  Options.PrintMachineCode = PrintCode;
+  Options.NoFramePointerElim = DisableFPElim;
+  Options.NoFramePointerElimNonLeaf = DisableFPElimNonLeaf;
+  Options.NoExcessFPPrecision = DisableExcessPrecision;
+  Options.UnsafeFPMath = EnableUnsafeFPMath;
+  Options.NoInfsFPMath = EnableNoInfsFPMath;
+  Options.NoNaNsFPMath = EnableNoNaNsFPMath;
+  Options.HonorSignDependentRoundingFPMathOption =
+    EnableHonorSignDependentRoundingFPMath;
+  Options.UseSoftFloat = GenerateSoftFloatCalls;
+  if (FloatABIForCalls != FloatABI::Default)
+    Options.FloatABIType = FloatABIForCalls;
+  Options.NoZerosInBSS = DontPlaceZerosInBSS;
+  Options.JITExceptionHandling = EnableJITExceptionHandling;
+  Options.JITEmitDebugInfo = EmitJitDebugInfo;
+  Options.JITEmitDebugInfoToDisk = EmitJitDebugInfoToDisk;
+  Options.GuaranteedTailCallOpt = EnableGuaranteedTailCallOpt;
+  Options.StackAlignmentOverride = OverrideStackAlignment;
+  Options.RealignStack = EnableRealignStack;
+  Options.DisableJumpTables = DisableSwitchTables;
+  Options.TrapFuncName = TrapFuncName;
+  Options.EnableSegmentedStacks = SegmentedStacks;
+
   std::auto_ptr<TargetMachine>
     target(TheTarget->createTargetMachine(TheTriple.getTriple(),
-                                          MCPU, FeaturesStr,
+                                          MCPU, FeaturesStr, Options,
                                           RelocModel, CMModel, OLvl));
   assert(target.get() && "Could not allocate target machine!");
   TargetMachine &Target = *target.get();
@@ -334,6 +487,9 @@ int main(int argc, char **argv) {
   if (EnableDwarfDirectory)
     Target.setMCUseDwarfDirectory(true);
 
+  if (GenerateSoftFloatCalls)
+    FloatABIForCalls = FloatABI::Soft;
+
   // Disable .loc support for older OS X versions.
   if (TheTriple.isMacOSX() &&
       TheTriple.isMacOSXVersionLT(10, 6))
diff --git a/tools/lto/LTOCodeGenerator.cpp b/tools/lto/LTOCodeGenerator.cpp
index 9093073784..77d7dfe94e 100644
--- a/tools/lto/LTOCodeGenerator.cpp
+++ b/tools/lto/LTOCodeGenerator.cpp
@@ -265,7 +265,8 @@ bool LTOCodeGenerator::determineTarget(std::string& errMsg)
     SubtargetFeatures Features;
     Features.getDefaultSubtargetFeatures(llvm::Triple(Triple));
     std::string FeatureStr = Features.getString();
-    _target = march->createTargetMachine(Triple, _mCpu, FeatureStr,
+    TargetOptions Options;
+    _target = march->createTargetMachine(Triple, _mCpu, FeatureStr, Options,
                                          RelocModel);
   }
   return false;
diff --git a/tools/lto/LTOModule.cpp b/tools/lto/LTOModule.cpp
index 8ea680d53c..0b737293ae 100644
--- a/tools/lto/LTOModule.cpp
+++ b/tools/lto/LTOModule.cpp
@@ -159,7 +159,9 @@ LTOModule *LTOModule::makeLTOModule(MemoryBuffer *buffer,
   Features.getDefaultSubtargetFeatures(llvm::Triple(Triple));
   std::string FeatureStr = Features.getString();
   std::string CPU;
-  TargetMachine *target = march->createTargetMachine(Triple, CPU, FeatureStr);
+  TargetOptions Options;
+  TargetMachine *target = march->createTargetMachine(Triple, CPU, FeatureStr,
+                                                     Options);
   LTOModule *Ret = new LTOModule(m.take(), target);
   if (Ret->ParseSymbols(errMsg)) {
     delete Ret;
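
Taken together, the tool-side changes establish the new creation idiom: build a TargetOptions value (llc fills it from command-line flags, the LTO tools default-construct it) and hand it to createTargetMachine. A minimal sketch of a client using the updated TargetRegistry API; the triple, CPU string, option values, and error handling are placeholders, not prescribed by the patch:

```cpp
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <string>
using namespace llvm;

// Minimal client of the new API: codegen options travel with the
// TargetMachine instead of living in global variables.
static TargetMachine *createExampleTM() {
  InitializeAllTargets();      // register in-tree targets, as llc does
  InitializeAllTargetMCs();

  const std::string TripleStr = "x86_64-unknown-linux-gnu";  // placeholder
  std::string Err;
  const Target *T = TargetRegistry::lookupTarget(TripleStr, Err);
  if (!T)
    return 0;

  TargetOptions Options;                  // start from the defaults...
  Options.UnsafeFPMath = false;           // ...then set fields as llc does above
  Options.EnableSegmentedStacks = false;

  return T->createTargetMachine(TripleStr, "generic", "", Options,
                                Reloc::Default, CodeModel::Default,
                                CodeGenOpt::Default);
}
```

Because the RM/CM/OL parameters keep their defaults in TargetRegistry.h, callers that do not care about them (like the LTO tools in the hunks above) can simply pass the triple, CPU, feature string, and a TargetOptions value.
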