Diffstat (limited to 'lib')
-rw-r--r-- | lib/Target/ARM/ARMISelLowering.cpp | 21
-rw-r--r-- | lib/Target/ARM/ARMISelLowering.h | 7
-rw-r--r-- | lib/Target/X86/X86ISelLowering.cpp | 1
-rw-r--r-- | lib/Target/X86/X86ISelLowering.h | 13
-rw-r--r-- | lib/Target/XCore/XCoreISelLowering.cpp | 10
5 files changed, 43 insertions, 9 deletions
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 9de737be99..99135b2311 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -3121,6 +3121,27 @@ SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
   return SDValue();
 }
 
+bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
+  if (!Subtarget->hasV6Ops())
+    // Pre-v6 does not support unaligned mem access.
+    return false;
+  else if (!Subtarget->hasV7Ops()) {
+    // v6 may or may not support unaligned mem access.
+    if (!Subtarget->isTargetDarwin())
+      return false;
+  }
+
+  switch (VT.getSimpleVT().SimpleTy) {
+  default:
+    return false;
+  case MVT::i8:
+  case MVT::i16:
+  case MVT::i32:
+    return true;
+  // FIXME: VLD1 etc. with standard alignment is legal.
+  }
+}
+
 static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {
   if (V < 0)
     return false;
diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h
index 3a90ca3e62..db6d8baaad 100644
--- a/lib/Target/ARM/ARMISelLowering.h
+++ b/lib/Target/ARM/ARMISelLowering.h
@@ -166,6 +166,11 @@ namespace llvm {
     virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
                                                            MachineBasicBlock *MBB) const;
 
+    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
+    /// unaligned memory accesses of the specified type.
+    /// FIXME: Add getOptimalMemOpType to implement memcpy with NEON?
+    virtual bool allowsUnalignedMemoryAccesses(EVT VT) const;
+
     /// isLegalAddressingMode - Return true if the addressing mode represented
     /// by AM is legal for this target, for a load/store of the specified type.
     virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty)const;
@@ -193,6 +198,8 @@ namespace llvm {
                                                 APInt &KnownOne,
                                                 const SelectionDAG &DAG,
                                                 unsigned Depth) const;
+
+
     ConstraintType getConstraintType(const std::string &Constraint) const;
     std::pair<unsigned, const TargetRegisterClass*>
       getRegForInlineAsmConstraint(const std::string &Constraint,
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 1543631020..ff9f2bf443 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -957,7 +957,6 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
   maxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
   maxStoresPerMemcpy = 16; // For @llvm.memcpy -> sequence of stores
   maxStoresPerMemmove = 3; // For @llvm.memmove -> sequence of stores
-  allowUnalignedMemoryAccesses = true; // x86 supports it!
   setPrefLoopAlignment(16);
   benefitFromCodePlacementOpt = true;
 }
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index 3ac6e51bbb..f3f09f5ac9 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -389,10 +389,15 @@ namespace llvm {
     /// and store operations as a result of memset, memcpy, and memmove
     /// lowering. It returns EVT::iAny if SelectionDAG should be responsible for
     /// determining it.
-    virtual
-    EVT getOptimalMemOpType(uint64_t Size, unsigned Align,
-                            bool isSrcConst, bool isSrcStr,
-                            SelectionDAG &DAG) const;
+    virtual EVT getOptimalMemOpType(uint64_t Size, unsigned Align,
+                                    bool isSrcConst, bool isSrcStr,
+                                    SelectionDAG &DAG) const;
+
+    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
+    /// unaligned memory accesses of the specified type.
+    virtual bool allowsUnalignedMemoryAccesses(EVT VT) const {
+      return true;
+    }
 
     /// LowerOperation - Provide custom lowering hooks for some operations.
     ///
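The XCore changes below show the caller side of this interface change: the hook is now queried per access type instead of being read as a blanket flag. As a rough sketch of that pattern (the surrounding names TLI, LD, and DAG follow common SelectionDAG conventions and are assumptions here, not code from this commit), a lowering routine might guard an unaligned access like this:

    // Sketch only: expand a load when its alignment is below the ABI
    // alignment and the target cannot perform it unaligned.
    EVT MemVT = LD->getMemoryVT();
    unsigned ABIAlign = TLI.getTargetData()->getABITypeAlignment(
        MemVT.getTypeForEVT(*DAG.getContext()));
    if (LD->getAlignment() < ABIAlign &&
        !TLI.allowsUnalignedMemoryAccesses(MemVT)) {
      // Fall back to custom expansion (e.g. byte loads or a libcall)
      // rather than emitting the plain load.
    }
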
diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp
index 0174778a1d..605ed83eed 100644
--- a/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/lib/Target/XCore/XCoreISelLowering.cpp
@@ -367,9 +367,10 @@ SDValue XCoreTargetLowering::
 LowerLOAD(SDValue Op, SelectionDAG &DAG)
 {
   LoadSDNode *LD = cast<LoadSDNode>(Op);
-  assert(LD->getExtensionType() == ISD::NON_EXTLOAD && "Unexpected extension type");
+  assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
+         "Unexpected extension type");
   assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
-  if (allowsUnalignedMemoryAccesses()) {
+  if (allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
     return SDValue();
   }
   unsigned ABIAlignment = getTargetData()->
@@ -465,7 +466,7 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG)
   StoreSDNode *ST = cast<StoreSDNode>(Op);
   assert(!ST->isTruncatingStore() && "Unexpected store type");
   assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
-  if (allowsUnalignedMemoryAccesses()) {
+  if (allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
     return SDValue();
   }
   unsigned ABIAlignment = getTargetData()->
@@ -1048,7 +1049,8 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
   case ISD::STORE: {
     // Replace unaligned store of unaligned load with memmove.
     StoreSDNode *ST = cast<StoreSDNode>(N);
-    if (!DCI.isBeforeLegalize() || allowsUnalignedMemoryAccesses() ||
+    if (!DCI.isBeforeLegalize() ||
+        allowsUnalignedMemoryAccesses(ST->getMemoryVT()) ||
         ST->isVolatile() || ST->isIndexed()) {
       break;
     }
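Taken together, a target that previously set the allowUnalignedMemoryAccesses flag in its constructor (as X86 did above) now overrides the virtual predicate instead, which lets the answer vary by type. A minimal sketch for a hypothetical FooTargetLowering (the class name is illustrative, not from this commit):

    // Hypothetical override: misaligned scalar integer accesses are
    // cheap on this target, but wider types must stay aligned.
    bool FooTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
      switch (VT.getSimpleVT().SimpleTy) {
      default:
        return false;   // Vectors, f64, etc. require natural alignment.
      case MVT::i8:
      case MVT::i16:
      case MVT::i32:
        return true;
      }
    }

A target that supports unaligned access for every type can simply return true unconditionally, which is what the inline X86 override above does.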