author     Alexander Kornienko <alexfh@google.com>        2013-04-03 14:07:16 +0000
committer  Alexander Kornienko <alexfh@google.com>        2013-04-03 14:07:16 +0000
commit     e133bc868944822bf8961f825d3aa63d6fa48fb7
tree       ebbd4a8040181471467a9737d90d94dc6b58b316 /lib/Target/Hexagon
parent     647735c781c5b37061ee03d6e9e6c7dda92218e2
parent     080e3c523e87ec68ca1ea5db4cd49816028dd8bd
Updating branches/google/stable to r178511
git-svn-id: https://llvm.org/svn/llvm-project/llvm/branches/google/stable@178655 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/Target/Hexagon')
-rw-r--r--  lib/Target/Hexagon/HexagonHardwareLoops.cpp              3
-rw-r--r--  lib/Target/Hexagon/HexagonISelDAGToDAG.cpp               40
-rw-r--r--  lib/Target/Hexagon/HexagonInstrInfo.cpp                 149
-rw-r--r--  lib/Target/Hexagon/HexagonInstrInfo.h                     7
-rw-r--r--  lib/Target/Hexagon/HexagonInstrInfo.td                  525
-rw-r--r--  lib/Target/Hexagon/HexagonInstrInfoV4.td               1741
-rw-r--r--  lib/Target/Hexagon/HexagonMCInst.h                       41
-rw-r--r--  lib/Target/Hexagon/HexagonNewValueJump.cpp                2
-rw-r--r--  lib/Target/Hexagon/HexagonRegisterInfo.cpp               66
-rw-r--r--  lib/Target/Hexagon/HexagonSubtarget.cpp                  17
-rw-r--r--  lib/Target/Hexagon/HexagonTargetMachine.cpp              49
-rw-r--r--  lib/Target/Hexagon/HexagonVLIWPacketizer.cpp            177
-rw-r--r--  lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp      1
13 files changed, 807 insertions, 2011 deletions
diff --git a/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/lib/Target/Hexagon/HexagonHardwareLoops.cpp
index 62aed1353c..178662447a 100644
--- a/lib/Target/Hexagon/HexagonHardwareLoops.cpp
+++ b/lib/Target/Hexagon/HexagonHardwareLoops.cpp
@@ -701,7 +701,7 @@ CountValue *HexagonHardwareLoops::computeCount(MachineLoop *Loop,
   // If the induction variable bump is not a power of 2, quit.
   // Othwerise we'd need a general integer division.
-  if (!isPowerOf2_64(abs(IVBump)))
+  if (!isPowerOf2_64(abs64(IVBump)))
     return 0;
 
   MachineBasicBlock *PH = Loop->getLoopPreheader();
@@ -1430,7 +1430,6 @@ MachineBasicBlock *HexagonHardwareLoops::createPreheaderForLoop(
     return 0;
 
   typedef MachineBasicBlock::instr_iterator instr_iterator;
-  typedef MachineBasicBlock::pred_iterator pred_iterator;
 
   // Verify that all existing predecessors have analyzable branches
   // (or no branches at all).
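The hardware-loops hunk above swaps abs for abs64 so the induction-variable bump is not narrowed to int before the power-of-two check. A minimal standalone sketch of that hazard; the *_sketch helpers below are stand-ins written for this illustration, not LLVM's MathExtras API:

// Why a 64-bit abs matters here: a bump of -2^32, narrowed to 32 bits,
// becomes 0 and the power-of-two test gives the wrong answer.
#include <cstdint>
#include <iostream>

// Hypothetical stand-ins for llvm::abs64 and llvm::isPowerOf2_64.
static uint64_t abs64_sketch(int64_t x) {
  return x < 0 ? -static_cast<uint64_t>(x) : static_cast<uint64_t>(x);
}
static bool isPowerOf2_64_sketch(uint64_t v) {
  return v != 0 && (v & (v - 1)) == 0;
}

int main() {
  int64_t bump = -(int64_t(1) << 32);             // IV stepped by -2^32 each iteration
  int32_t narrowed = static_cast<int32_t>(bump);  // what a plain int abs() would see: 0
  std::cout << std::boolalpha
            << isPowerOf2_64_sketch(narrowed < 0 ? -narrowed : narrowed) << '\n'  // false
            << isPowerOf2_64_sketch(abs64_sketch(bump)) << '\n';                  // true
  return 0;
}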
diff --git a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
index 3a1c48bac9..8fc9ba1ee8 100644
--- a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
+++ b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
@@ -113,6 +113,46 @@ public:
   SDNode *SelectAdd(SDNode *N);
   bool isConstExtProfitable(SDNode *N) const;
 
+// XformMskToBitPosU5Imm - Returns the bit position which
+// the single bit 32 bit mask represents.
+// Used in Clr and Set bit immediate memops.
+SDValue XformMskToBitPosU5Imm(uint32_t Imm) {
+  int32_t bitPos;
+  bitPos = Log2_32(Imm);
+  assert(bitPos >= 0 && bitPos < 32 &&
+         "Constant out of range for 32 BitPos Memops");
+  return CurDAG->getTargetConstant(bitPos, MVT::i32);
+}
+
+// XformMskToBitPosU4Imm - Returns the bit position which the single bit 16 bit
+// mask represents. Used in Clr and Set bit immediate memops.
+SDValue XformMskToBitPosU4Imm(uint16_t Imm) {
+  return XformMskToBitPosU5Imm(Imm);
+}
+
+// XformMskToBitPosU3Imm - Returns the bit position which the single bit 8 bit
+// mask represents. Used in Clr and Set bit immediate memops.
+SDValue XformMskToBitPosU3Imm(uint8_t Imm) {
+  return XformMskToBitPosU5Imm(Imm);
+}
+
+// Return true if there is exactly one bit set in V, i.e., if V is one of the
+// following integers: 2^0, 2^1, ..., 2^31.
+bool ImmIsSingleBit(uint32_t v) const {
+  uint32_t c = CountPopulation_64(v);
+  // Only return true if we counted 1 bit.
+  return c == 1;
+}
+
+// XformM5ToU5Imm - Return a target constant with the specified value, of type
+// i32 where the negative literal is transformed into a positive literal for
+// use in -= memops.
+inline SDValue XformM5ToU5Imm(signed Imm) {
+  assert( (Imm >= -31 && Imm <= -1)  && "Constant out of range for Memops");
+  return CurDAG->getTargetConstant( - Imm, MVT::i32);
+}
+
 // XformU7ToU7M1Imm - Return a target constant decremented by 1, in range
 // [1..128], used in cmpb.gtu instructions.
 inline SDValue XformU7ToU7M1Imm(signed Imm) {
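The new selection helpers above massage immediates for the V4 memops: a single-bit mask becomes its bit position for the set/clr-bit forms, and a small negative literal in [-31, -1] is negated so "x -= n" can use the subtract-immediate form. A standalone sketch of those transforms, with no SelectionDAG machinery; the function names here are invented for the illustration:

#include <cassert>
#include <cstdint>

// A mask qualifies for the set/clr-bit memops only if exactly one bit is set.
static bool isSingleBitMask(uint32_t v) {
  unsigned bits = 0;
  for (uint32_t x = v; x; x &= x - 1)  // clear the lowest set bit, count iterations
    ++bits;
  return bits == 1;
}

// Map a single-bit mask such as 0x00000800 to its bit position (11 here),
// mirroring the intent of XformMskToBitPosU5Imm.
static unsigned maskToBitPos(uint32_t mask) {
  assert(isSingleBitMask(mask) && "expected exactly one bit set");
  unsigned pos = 0;
  while ((mask & 1u) == 0) {
    mask >>= 1;
    ++pos;
  }
  return pos;
}

// "x -= 5" can use the subtract-immediate memop after negating the literal
// into the unsigned range, as XformM5ToU5Imm does for [-31, -1].
static unsigned m5ToU5(int imm) {
  assert(imm >= -31 && imm <= -1 && "constant out of range for memops");
  return static_cast<unsigned>(-imm);
}

int main() {
  assert(maskToBitPos(0x00000800u) == 11);
  assert(!isSingleBitMask(0x00000c00u));   // two bits set: not a memop candidate
  assert(m5ToU5(-5) == 5);
  return 0;
}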
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.cpp b/lib/Target/Hexagon/HexagonInstrInfo.cpp
index 4c0f93c6cd..60b12ac01c 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -537,6 +537,15 @@ MachineInstr *HexagonInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
   return(0);
 }
 
+MachineInstr*
+HexagonInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
+                                           int FrameIx, uint64_t Offset,
+                                           const MDNode *MDPtr,
+                                           DebugLoc DL) const {
+  MachineInstrBuilder MIB = BuildMI(MF, DL, get(Hexagon::DBG_VALUE))
+    .addImm(0).addImm(Offset).addMetadata(MDPtr);
+  return &*MIB;
+}
 
 unsigned HexagonInstrInfo::createVR(MachineFunction* MF, MVT VT) const {
 
@@ -1881,6 +1890,13 @@ bool HexagonInstrInfo::isPredicated(const MachineInstr *MI) const {
   return ((F >> HexagonII::PredicatedPos) & HexagonII::PredicatedMask);
 }
 
+bool HexagonInstrInfo::isPredicatedNew(const MachineInstr *MI) const {
+  const uint64_t F = MI->getDesc().TSFlags;
+
+  assert(isPredicated(MI));
+  return ((F >> HexagonII::PredicatedNewPos) & HexagonII::PredicatedNewMask);
+}
+
 bool
 HexagonInstrInfo::DefinesPredicate(MachineInstr *MI,
                                    std::vector<MachineOperand> &Pred) const {
@@ -1949,6 +1965,10 @@ isValidOffset(const int Opcode, const int Offset) const {
   // the given "Opcode". If "Offset" is not in the correct range, "ADD_ri" is
   // inserted to calculate the final address. Due to this reason, the function
   // assumes that the "Offset" has correct alignment.
+  // We used to assert if the offset was not properly aligned, however,
+  // there are cases where a misaligned pointer recast can cause this
+  // problem, and we need to allow for it. The front end warns of such
+  // misaligns with respect to load size.
 
   switch(Opcode) {
 
@@ -1958,7 +1978,6 @@ isValidOffset(const int Opcode, const int Offset) const {
   case Hexagon::STriw_indexed:
   case Hexagon::STriw:
   case Hexagon::STriw_f:
-    assert((Offset % 4 == 0) && "Offset has incorrect alignment");
     return (Offset >= Hexagon_MEMW_OFFSET_MIN) &&
       (Offset <= Hexagon_MEMW_OFFSET_MAX);
 
@@ -1968,14 +1987,12 @@ isValidOffset(const int Opcode, const int Offset) const {
   case Hexagon::STrid:
   case Hexagon::STrid_indexed:
   case Hexagon::STrid_f:
-    assert((Offset % 8 == 0) && "Offset has incorrect alignment");
     return (Offset >= Hexagon_MEMD_OFFSET_MIN) &&
       (Offset <= Hexagon_MEMD_OFFSET_MAX);
 
   case Hexagon::LDrih:
   case Hexagon::LDriuh:
   case Hexagon::STrih:
-    assert((Offset % 2 == 0) && "Offset has incorrect alignment");
     return (Offset >= Hexagon_MEMH_OFFSET_MIN) &&
       (Offset <= Hexagon_MEMH_OFFSET_MAX);
 
@@ -1990,48 +2007,28 @@ isValidOffset(const int Opcode, const int Offset) const {
     return (Offset >= Hexagon_ADDI_OFFSET_MIN) &&
       (Offset <= Hexagon_ADDI_OFFSET_MAX);
 
-  case Hexagon::MEMw_ADDi_indexed_MEM_V4 :
-  case Hexagon::MEMw_SUBi_indexed_MEM_V4 :
-  case Hexagon::MEMw_ADDr_indexed_MEM_V4 :
-  case Hexagon::MEMw_SUBr_indexed_MEM_V4 :
-  case Hexagon::MEMw_ANDr_indexed_MEM_V4 :
-  case Hexagon::MEMw_ORr_indexed_MEM_V4 :
-  case Hexagon::MEMw_ADDi_MEM_V4 :
-  case Hexagon::MEMw_SUBi_MEM_V4 :
-  case Hexagon::MEMw_ADDr_MEM_V4 :
-  case Hexagon::MEMw_SUBr_MEM_V4 :
-  case Hexagon::MEMw_ANDr_MEM_V4 :
-  case Hexagon::MEMw_ORr_MEM_V4 :
-    assert ((Offset % 4) == 0 && "MEMOPw offset is not aligned correctly." );
+  case Hexagon::MemOPw_ADDi_V4 :
+  case Hexagon::MemOPw_SUBi_V4 :
+  case Hexagon::MemOPw_ADDr_V4 :
+  case Hexagon::MemOPw_SUBr_V4 :
+  case Hexagon::MemOPw_ANDr_V4 :
+  case Hexagon::MemOPw_ORr_V4 :
     return (0 <= Offset && Offset <= 255);
 
-  case Hexagon::MEMh_ADDi_indexed_MEM_V4 :
-  case Hexagon::MEMh_SUBi_indexed_MEM_V4 :
-  case Hexagon::MEMh_ADDr_indexed_MEM_V4 :
-  case Hexagon::MEMh_SUBr_indexed_MEM_V4 :
-  case Hexagon::MEMh_ANDr_indexed_MEM_V4 :
-  case Hexagon::MEMh_ORr_indexed_MEM_V4 :
-  case Hexagon::MEMh_ADDi_MEM_V4 :
-  case Hexagon::MEMh_SUBi_MEM_V4 :
-  case Hexagon::MEMh_ADDr_MEM_V4 :
-  case Hexagon::MEMh_SUBr_MEM_V4 :
-  case Hexagon::MEMh_ANDr_MEM_V4 :
-  case Hexagon::MEMh_ORr_MEM_V4 :
-    assert ((Offset % 2) == 0 && "MEMOPh offset is not aligned correctly." );
+  case Hexagon::MemOPh_ADDi_V4 :
+  case Hexagon::MemOPh_SUBi_V4 :
+  case Hexagon::MemOPh_ADDr_V4 :
+  case Hexagon::MemOPh_SUBr_V4 :
+  case Hexagon::MemOPh_ANDr_V4 :
+  case Hexagon::MemOPh_ORr_V4 :
     return (0 <= Offset && Offset <= 127);
 
-  case Hexagon::MEMb_ADDi_indexed_MEM_V4 :
-  case Hexagon::MEMb_SUBi_indexed_MEM_V4 :
-  case Hexagon::MEMb_ADDr_indexed_MEM_V4 :
-  case Hexagon::MEMb_SUBr_indexed_MEM_V4 :
-  case Hexagon::MEMb_ANDr_indexed_MEM_V4 :
-  case Hexagon::MEMb_ORr_indexed_MEM_V4 :
-  case Hexagon::MEMb_ADDi_MEM_V4 :
-  case Hexagon::MEMb_SUBi_MEM_V4 :
-  case Hexagon::MEMb_ADDr_MEM_V4 :
-  case Hexagon::MEMb_SUBr_MEM_V4 :
-  case Hexagon::MEMb_ANDr_MEM_V4 :
-  case Hexagon::MEMb_ORr_MEM_V4 :
+  case Hexagon::MemOPb_ADDi_V4 :
+  case Hexagon::MemOPb_SUBi_V4 :
+  case Hexagon::MemOPb_ADDr_V4 :
+  case Hexagon::MemOPb_SUBr_V4 :
+  case Hexagon::MemOPb_ANDr_V4 :
+  case Hexagon::MemOPb_ORr_V4 :
     return (0 <= Offset && Offset <= 63);
 
   // LDri_pred and STriw_pred are pseudo operations, so it has to take offset of
@@ -2087,44 +2084,33 @@ isMemOp(const MachineInstr *MI) const {
   switch (MI->getOpcode())
   {
     default: return false;
-    case Hexagon::MEMw_ADDi_indexed_MEM_V4 :
-    case Hexagon::MEMw_SUBi_indexed_MEM_V4 :
-    case Hexagon::MEMw_ADDr_indexed_MEM_V4 :
-    case Hexagon::MEMw_SUBr_indexed_MEM_V4 :
-    case Hexagon::MEMw_ANDr_indexed_MEM_V4 :
-    case Hexagon::MEMw_ORr_indexed_MEM_V4 :
-    case Hexagon::MEMw_ADDi_MEM_V4 :
-    case Hexagon::MEMw_SUBi_MEM_V4 :
-    case Hexagon::MEMw_ADDr_MEM_V4 :
-    case Hexagon::MEMw_SUBr_MEM_V4 :
-    case Hexagon::MEMw_ANDr_MEM_V4 :
-    case Hexagon::MEMw_ORr_MEM_V4 :
-    case Hexagon::MEMh_ADDi_indexed_MEM_V4 :
-    case Hexagon::MEMh_SUBi_indexed_MEM_V4 :
-    case Hexagon::MEMh_ADDr_indexed_MEM_V4 :
-    case Hexagon::MEMh_SUBr_indexed_MEM_V4 :
-    case Hexagon::MEMh_ANDr_indexed_MEM_V4 :
-    case Hexagon::MEMh_ORr_indexed_MEM_V4 :
-    case Hexagon::MEMh_ADDi_MEM_V4 :
-    case Hexagon::MEMh_SUBi_MEM_V4 :
-    case Hexagon::MEMh_ADDr_MEM_V4 :
-    case Hexagon::MEMh_SUBr_MEM_V4 :
-    case Hexagon::MEMh_ANDr_MEM_V4 :
-    case Hexagon::MEMh_ORr_MEM_V4 :
-    case Hexagon::MEMb_ADDi_indexed_MEM_V4 :
-    case Hexagon::MEMb_SUBi_indexed_MEM_V4 :
-    case Hexagon::MEMb_ADDr_indexed_MEM_V4 :
-    case Hexagon::MEMb_SUBr_indexed_MEM_V4 :
-    case Hexagon::MEMb_ANDr_indexed_MEM_V4 :
-    case Hexagon::MEMb_ORr_indexed_MEM_V4 :
-    case Hexagon::MEMb_ADDi_MEM_V4 :
-    case Hexagon::MEMb_SUBi_MEM_V4 :
-    case Hexagon::MEMb_ADDr_MEM_V4 :
-    case Hexagon::MEMb_SUBr_MEM_V4 :
-    case Hexagon::MEMb_ANDr_MEM_V4 :
-    case Hexagon::MEMb_ORr_MEM_V4 :
-    return true;
+    case Hexagon::MemOPw_ADDi_V4 :
+    case Hexagon::MemOPw_SUBi_V4 :
+    case Hexagon::MemOPw_ADDr_V4 :
+    case Hexagon::MemOPw_SUBr_V4 :
+    case Hexagon::MemOPw_ANDr_V4 :
+    case Hexagon::MemOPw_ORr_V4 :
+    case Hexagon::MemOPh_ADDi_V4 :
+    case Hexagon::MemOPh_SUBi_V4 :
+    case Hexagon::MemOPh_ADDr_V4 :
+    case Hexagon::MemOPh_SUBr_V4 :
+    case Hexagon::MemOPh_ANDr_V4 :
+    case Hexagon::MemOPh_ORr_V4 :
+    case Hexagon::MemOPb_ADDi_V4 :
+    case Hexagon::MemOPb_SUBi_V4 :
+    case Hexagon::MemOPb_ADDr_V4 :
+    case Hexagon::MemOPb_SUBr_V4 :
+    case Hexagon::MemOPb_ANDr_V4 :
+    case Hexagon::MemOPb_ORr_V4 :
+    case Hexagon::MemOPb_SETBITi_V4:
+    case Hexagon::MemOPh_SETBITi_V4:
+    case Hexagon::MemOPw_SETBITi_V4:
+    case Hexagon::MemOPb_CLRBITi_V4:
+    case Hexagon::MemOPh_CLRBITi_V4:
+    case Hexagon::MemOPw_CLRBITi_V4:
+    return true;
   }
+  return false;
 }
 
@@ -2383,6 +2369,13 @@ isConditionalStore (const MachineInstr* MI) const {
   }
 }
 
+// Returns true, if any one of the operands is a dot new
+// insn, whether it is predicated dot new or register dot new.
+bool HexagonInstrInfo::isDotNewInst (const MachineInstr* MI) const {
+  return (isNewValueInst(MI) ||
+     (isPredicated(MI) && isPredicatedNew(MI)));
+}
+
 unsigned HexagonInstrInfo::getAddrMode(const MachineInstr* MI) const {
   const uint64_t F = MI->getDesc().TSFlags;
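The new isPredicatedNew above uses the same TSFlags idiom as isPredicated: shift the per-instruction flags word right by the field's position and mask off its width, and isDotNewInst then combines that with the new-value check. A standalone sketch of that shift-and-mask pattern; the field positions below are made up for the illustration and are not Hexagon's actual TSFlags layout:

#include <cassert>
#include <cstdint>

namespace sketch {
// Hypothetical field layout, one bit per flag.
enum : uint64_t {
  PredicatedPos    = 6, PredicatedMask    = 0x1,
  PredicatedNewPos = 7, PredicatedNewMask = 0x1,
};

inline bool isPredicated(uint64_t TSFlags) {
  return (TSFlags >> PredicatedPos) & PredicatedMask;
}

inline bool isPredicatedNew(uint64_t TSFlags) {
  // Meaningful only for predicated instructions, mirroring the assert above.
  assert(isPredicated(TSFlags));
  return (TSFlags >> PredicatedNewPos) & PredicatedNewMask;
}
} // namespace sketch

int main() {
  uint64_t flags = (1ull << sketch::PredicatedPos) |
                   (1ull << sketch::PredicatedNewPos);
  assert(sketch::isPredicated(flags));
  assert(sketch::isPredicatedNew(flags));  // predicated on a .new predicate
  return 0;
}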
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.h b/lib/Target/Hexagon/HexagonInstrInfo.h
index d2f059aa79..5df13a88b5 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.h
+++ b/lib/Target/Hexagon/HexagonInstrInfo.h
@@ -127,6 +127,7 @@ public:
                             const BranchProbability &Probability) const;
 
   virtual bool isPredicated(const MachineInstr *MI) const;
+  virtual bool isPredicatedNew(const MachineInstr *MI) const;
   virtual bool DefinesPredicate(MachineInstr *MI,
                                 std::vector<MachineOperand> &Pred) const;
   virtual bool
@@ -140,6 +141,11 @@ public:
   isProfitableToDupForIfCvt(MachineBasicBlock &MBB,unsigned NumCycles,
                             const BranchProbability &Probability) const;
 
+  virtual MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF,
+                                                 int FrameIx,
+                                                 uint64_t Offset,
+                                                 const MDNode *MDPtr,
+                                                 DebugLoc DL) const;
   virtual DFAPacketizer*
   CreateTargetScheduleState(const TargetMachine *TM,
                             const ScheduleDAG *DAG) const;
@@ -170,6 +176,7 @@ public:
   bool isConditionalLoad (const MachineInstr* MI) const;
   bool isConditionalStore(const MachineInstr* MI) const;
   bool isNewValueInst(const MachineInstr* MI) const;
+  bool isDotNewInst(const MachineInstr* MI) const;
   bool isDeallocRet(const MachineInstr *MI) const;
   unsigned getInvertedPredicatedOpcode(const int Opc) const;
   bool isExtendable(const MachineInstr* MI) const;
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.td b/lib/Target/Hexagon/HexagonInstrInfo.td
index d7bab200f9..74dc0ca72a 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.td
+++ b/lib/Target/Hexagon/HexagonInstrInfo.td
@@ -446,38 +446,58 @@ def MUX_ii : ALU32_ii<(outs IntRegs:$dst), (ins PredRegs:$src1, s8Ext:$src2,
                                       s8ExtPred:$src2,
                                       s8ImmPred:$src3)))]>;
 
-// Shift halfword.
-let isPredicable = 1 in
-def ASLH : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1),
-           "$dst = aslh($src1)",
-           [(set (i32 IntRegs:$dst), (shl 16, (i32 IntRegs:$src1)))]>;
+// ALU32 - aslh, asrh, sxtb, sxth, zxtb, zxth
+multiclass ALU32_2op_Pbase<string mnemonic, bit isNot, bit isPredNew> {
+  let isPredicatedNew = isPredNew in
+  def NAME : ALU32Inst<(outs IntRegs:$dst),
+            (ins PredRegs:$src1, IntRegs:$src2),
+            !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew,".new) $dst = ",
+            ") $dst = ")#mnemonic#"($src2)">,
+            Requires<[HasV4T]>;
+}
 
-let isPredicable = 1 in
-def ASRH : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1),
-           "$dst = asrh($src1)",
-           [(set (i32 IntRegs:$dst), (sra 16, (i32 IntRegs:$src1)))]>;
+multiclass ALU32_2op_Pred<string mnemonic, bit PredNot> {
+  let isPredicatedFalse = PredNot in {
+    defm _c#NAME : ALU32_2op_Pbase<mnemonic, PredNot, 0>;
+    // Predicate new
+    defm _cdn#NAME : ALU32_2op_Pbase<mnemonic, PredNot, 1>;
+  }
+}
 
-// Sign extend.
-let isPredicable = 1 in
-def SXTB : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1),
-           "$dst = sxtb($src1)",
-           [(set (i32 IntRegs:$dst), (sext_inreg (i32 IntRegs:$src1), i8))]>;
+multiclass ALU32_2op_base<string mnemonic> {
+  let BaseOpcode = mnemonic in {
+    let isPredicable = 1, neverHasSideEffects = 1 in
+    def NAME : ALU32Inst<(outs IntRegs:$dst),
+              (ins IntRegs:$src1),
+              "$dst = "#mnemonic#"($src1)">;
 
-let isPredicable = 1 in
-def SXTH : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1),
-           "$dst = sxth($src1)",
-           [(set (i32 IntRegs:$dst), (sext_inreg (i32 IntRegs:$src1), i16))]>;
-
-// Zero extend.
-let isPredicable = 1, neverHasSideEffects = 1 in
-def ZXTB : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1),
-           "$dst = zxtb($src1)",
-           []>;
+    let Predicates = [HasV4T], validSubTargets = HasV4SubT, isPredicated = 1,
+    neverHasSideEffects = 1 in {
+      defm Pt_V4 : ALU32_2op_Pred<mnemonic, 0>;
+      defm NotPt_V4 : ALU32_2op_Pred<mnemonic, 1>;
+    }
+  }
+}
+
+defm ASLH : ALU32_2op_base<"aslh">, PredNewRel;
+defm ASRH : ALU32_2op_base<"asrh">, PredNewRel;
+defm SXTB : ALU32_2op_base<"sxtb">, PredNewRel;
+defm SXTH : ALU32_2op_base<"sxth">, PredNewRel;
+defm ZXTB : ALU32_2op_base<"zxtb">, PredNewRel;
+defm ZXTH : ALU32_2op_base<"zxth">, PredNewRel;
+
+def : Pat <(shl (i32 IntRegs:$src1), (i32 16)),
+      (ASLH IntRegs:$src1)>;
+
+def : Pat <(sra (i32 IntRegs:$src1), (i32 16)),
+      (ASRH IntRegs:$src1)>;
+
+def : Pat <(sext_inreg (i32 IntRegs:$src1), i8),
+      (SXTB IntRegs:$src1)>;
+
+def : Pat <(sext_inreg (i32 IntRegs:$src1), i16),
+      (SXTH IntRegs:$src1)>;
 
-let isPredicable = 1, neverHasSideEffects = 1 in
-def ZXTH : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1),
-            "$dst = zxth($src1)",
-            []>;
 //===----------------------------------------------------------------------===//
 // ALU32/PERM -
 //===----------------------------------------------------------------------===//
@@ -488,29 +508,30 @@ def ZXTH : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1),
 //===----------------------------------------------------------------------===//
 
 // Conditional combine.
-let neverHasSideEffects = 1, isPredicated = 1 in
+let neverHasSideEffects = 1, isPredicated = 1 in {
 def COMBINE_rr_cPt : ALU32_rr<(outs DoubleRegs:$dst),
                               (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
                               "if ($src1) $dst = combine($src2, $src3)",
                               []>;
 
-let neverHasSideEffects = 1, isPredicated = 1 in
+let isPredicatedFalse = 1 in
 def COMBINE_rr_cNotPt : ALU32_rr<(outs DoubleRegs:$dst),
                                  (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
                                  "if (!$src1) $dst = combine($src2, $src3)",
                                  []>;
 
-let neverHasSideEffects = 1, isPredicated = 1 in
+let isPredicatedNew = 1 in
 def COMBINE_rr_cdnPt : ALU32_rr<(outs DoubleRegs:$dst),
                                 (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
                                 "if ($src1.new) $dst = combine($src2, $src3)",
                                 []>;
 
-let neverHasSideEffects = 1, isPredicated = 1 in
+let isPredicatedNew = 1, isPredicatedFalse = 1 in
 def COMBINE_rr_cdnNotPt : ALU32_rr<(outs DoubleRegs:$dst),
                                    (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
                                    "if (!$src1.new) $dst = combine($src2, $src3)",
                                    []>;
+}
 
 // Compare.
 defm CMPGTU : CMP32_rr_ri_u9<"cmp.gtu", "CMPGTU", setugt>, ImmRegRel;
@@ -1009,20 +1030,6 @@ def : Pat < (i64 (load (add IntRegs:$src1, s11_3ExtPred:$offset))),
             (LDrid_indexed IntRegs:$src1, s11_3ExtPred:$offset) >;
 }
 
-let neverHasSideEffects = 1 in
-def LDrid_GP : LDInst2<(outs DoubleRegs:$dst),
-            (ins globaladdress:$global, u16Imm:$offset),
-            "$dst = memd(#$global+$offset)",
-            []>,
-            Requires<[NoV4T]>;
-
-let neverHasSideEffects = 1, validSubTargets = NoV4SubT in
-def LDd_GP : LDInst2<(outs DoubleRegs:$dst),
-            (ins globaladdress:$global),
-            "$dst = memd(#$global)",
-            []>,
-            Requires<[NoV4T]>;
-
 //===----------------------------------------------------------------------===//
 // Post increment load
 // Make sure that in post increment load, the first operand is always the post
@@ -1095,27 +1102,6 @@ let AddedComplexity = 20 in
 def : Pat < (i32 (extloadi8 (add IntRegs:$src1, s11_0ImmPred:$offset))),
             (i32 (LDrib_indexed IntRegs:$src1, s11_0ImmPred:$offset)) >;
 
-let neverHasSideEffects = 1 in
-def LDrib_GP : LDInst2<(outs IntRegs:$dst),
-            (ins globaladdress:$global, u16Imm:$offset),
-            "$dst = memb(#$global+$offset)",
-            []>,
-            Requires<[NoV4T]>;
-
-let neverHasSideEffects = 1, validSubTargets = NoV4SubT in
-def LDb_GP : LDInst2<(outs IntRegs:$dst),
-            (ins globaladdress:$global),
-            "$dst = memb(#$global)",
-            []>,
-            Requires<[NoV4T]>;
-
-let neverHasSideEffects = 1, validSubTargets = NoV4SubT in
-def LDub_GP : LDInst2<(outs IntRegs:$dst),
-            (ins globaladdress:$global),
-            "$dst = memub(#$global)",
-            []>,
-            Requires<[NoV4T]>;
-
 def : Pat < (i32 (extloadi16 ADDRriS11_1:$addr)),
             (i32 (LDrih ADDRriS11_1:$addr))>;
 
@@ -1123,27 +1109,6 @@ let AddedComplexity = 20 in
 def : Pat < (i32 (extloadi16 (add IntRegs:$src1, s11_1ImmPred:$offset))),
             (i32 (LDrih_indexed IntRegs:$src1, s11_1ImmPred:$offset)) >;
 
-let neverHasSideEffects = 1 in
-def LDrih_GP : LDInst2<(outs IntRegs:$dst),
-            (ins globaladdress:$global, u16Imm:$offset),
-            "$dst = memh(#$global+$offset)",
-            []>,
-            Requires<[NoV4T]>;
-
-let neverHasSideEffects = 1, validSubTargets = NoV4SubT in
-def LDh_GP : LDInst2<(outs IntRegs:$dst),
-            (ins globaladdress:$global),
-            "$dst = memh(#$global)",
-            []>,
-            Requires<[NoV4T]>;
-
-let neverHasSideEffects = 1, validSubTargets = NoV4SubT in
-def LDuh_GP : LDInst2<(outs IntRegs:$dst),
-            (ins globaladdress:$global),
-            "$dst = memuh(#$global)",
-            []>,
-            Requires<[NoV4T]>;
-
 let AddedComplexity = 10 in
 def : Pat < (i32 (zextloadi1 ADDRriS11_0:$addr)),
             (i32 (LDriub ADDRriS11_0:$addr))>;
@@ -1152,21 +1117,6 @@ let AddedComplexity = 20 in
 def : Pat < (i32 (zextloadi1 (add IntRegs:$src1, s11_0ImmPred:$offset))),
             (i32 (LDriub_indexed IntRegs:$src1, s11_0ImmPred:$offset))>;
 
-let neverHasSideEffects = 1 in
-def LDriub_GP : LDInst2<(outs IntRegs:$dst),
-            (ins globaladdress:$global, u16Imm:$offset),
-            "$dst = memub(#$global+$offset)",
-            []>,
-            Requires<[NoV4T]>;
-
-// Load unsigned halfword.
-let neverHasSideEffects = 1 in
-def LDriuh_GP : LDInst2<(outs IntRegs:$dst),
-            (ins globaladdress:$global, u16Imm:$offset),
-            "$dst = memuh(#$global+$offset)",
-            []>,
-            Requires<[NoV4T]>;
-
 // Load predicate.
 let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 13,
     isPseudo = 1, Defs = [R10,R11,D5], neverHasSideEffects = 1 in
@@ -1175,21 +1125,6 @@ def LDriw_pred : LDInst2<(outs PredRegs:$dst),
             "Error; should not emit",
             []>;
 
-// Indexed load.
-let neverHasSideEffects = 1 in
-def LDriw_GP : LDInst2<(outs IntRegs:$dst),
-            (ins globaladdress:$global, u16Imm:$offset),
-            "$dst = memw(#$global+$offset)",
-            []>,
-            Requires<[NoV4T]>;
-
-let neverHasSideEffects = 1, validSubTargets = NoV4SubT in
-def LDw_GP : LDInst2<(outs IntRegs:$dst),
-            (ins globaladdress:$global),
-            "$dst = memw(#$global)",
-            []>,
-            Requires<[NoV4T]>;
-
 // Deallocate stack frame.
 let Defs = [R29, R30, R31], Uses = [R29], neverHasSideEffects = 1 in {
   def DEALLOCFRAME : LDInst2<(outs), (ins),
@@ -1423,28 +1358,8 @@ def SUBri_acc : MInst_acc<(outs IntRegs: $dst), (ins IntRegs:$src1,
 // ST +
 //===----------------------------------------------------------------------===//
 ///
-/// Assumptions::: ****** DO NOT IGNORE ********
-/// 1. Make sure that in post increment store, the zero'th operand is always the
-///    post increment operand.
-/// 2. Make sure that the store value operand(Rt/Rtt) in a store is always the
-///    last operand.
-///
 // Store doubleword.
-let neverHasSideEffects = 1 in
-def STrid_GP : STInst2<(outs),
-            (ins globaladdress:$global, u16Imm:$offset, DoubleRegs:$src),
-            "memd(#$global+$offset) = $src",
-            []>,
-            Requires<[NoV4T]>;
-
-let neverHasSideEffects = 1, validSubTargets = NoV4SubT in
-def STd_GP : STInst2<(outs),
-            (ins globaladdress:$global, DoubleRegs:$src),
-            "memd(#$global) = $src",
-            []>,
-            Requires<[NoV4T]>;
-
 //===----------------------------------------------------------------------===//
 // Post increment store
 //===----------------------------------------------------------------------===//
@@ -1655,36 +1570,6 @@ def : Pat<(store (i64 DoubleRegs:$src1), (add IntRegs:$src2,
                       (i64 DoubleRegs:$src1))>;
 }
 
-// memb(gp+#u16:0)=Rt
-let neverHasSideEffects = 1 in
-def STrib_GP : STInst2<(outs),
-            (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src),
-            "memb(#$global+$offset) = $src",
-            []>,
-            Requires<[NoV4T]>;
-
-// memb(#global)=Rt
-let neverHasSideEffects = 1, validSubTargets = NoV4SubT in
-def STb_GP : STInst2<(outs),
-            (ins globaladdress:$global, IntRegs:$src),
-            "memb(#$global) = $src",
-            []>,
-            Requires<[NoV4T]>;
-
-let neverHasSideEffects = 1 in
-def STrih_GP : STInst2<(outs),
-            (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src),
-            "memh(#$global+$offset) = $src",
-            []>,
-            Requires<[NoV4T]>;
-
-let neverHasSideEffects = 1, validSubTargets = NoV4SubT in
-def STh_GP : STInst2<(outs),
-            (ins globaladdress:$global, IntRegs:$src),
-            "memh(#$global) = $src",
-            []>,
-            Requires<[NoV4T]>;
-
 // memh(Rx++#s4:1)=Rt.H
 
 // Store word.
@@ -1695,20 +1580,6 @@ def STriw_pred : STInst2<(outs),
             "Error; should not emit",
             []>;
 
-let neverHasSideEffects = 1 in
-def STriw_GP : STInst2<(outs),
-            (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src),
-            "memw(#$global+$offset) = $src",
-            []>,
-            Requires<[NoV4T]>;
-
-let neverHasSideEffects = 1, validSubTargets = NoV4SubT in
-def STw_GP : STInst2<(outs),
-            (ins globaladdress:$global, IntRegs:$src),
-            "memw(#$global) = $src",
-            []>,
-            Requires<[NoV4T]>;
-
 // Allocate stack frame.
 let Defs = [R29, R30], Uses = [R31, R30], neverHasSideEffects = 1 in {
   def ALLOCFRAME : STInst2<(outs),
@@ -2183,68 +2054,26 @@ def : Pat<(HexagonTCRet (i32 IntRegs:$dst)),
 // Atomic load and store support
 // 8 bit atomic load
-def : Pat<(atomic_load_8 (HexagonCONST32_GP tglobaladdr:$global)),
-          (i32 (LDub_GP tglobaladdr:$global))>,
-          Requires<[NoV4T]>;
-
-def : Pat<(atomic_load_8 (add (HexagonCONST32_GP tglobaladdr:$global),
-                              u16ImmPred:$offset)),
-          (i32 (LDriub_GP tglobaladdr:$global, u16ImmPred:$offset))>,
-          Requires<[NoV4T]>;
-
 def : Pat<(atomic_load_8 ADDRriS11_0:$src1),
           (i32 (LDriub ADDRriS11_0:$src1))>;
 
 def : Pat<(atomic_load_8 (add (i32 IntRegs:$src1), s11_0ImmPred:$offset)),
           (i32 (LDriub_indexed (i32 IntRegs:$src1), s11_0ImmPred:$offset))>;
-
-
 // 16 bit atomic load
-def : Pat<(atomic_load_16 (HexagonCONST32_GP tglobaladdr:$global)),
-          (i32 (LDuh_GP tglobaladdr:$global))>,
-          Requires<[NoV4T]>;
-
-def : Pat<(atomic_load_16 (add (HexagonCONST32_GP tglobaladdr:$global),
-                               u16ImmPred:$offset)),
-          (i32 (LDriuh_GP tglobaladdr:$global, u16ImmPred:$offset))>,
-          Requires<[NoV4T]>;
-
 def : Pat<(atomic_load_16 ADDRriS11_1:$src1),
           (i32 (LDriuh ADDRriS11_1:$src1))>;
 
 def : Pat<(atomic_load_16 (add (i32 IntRegs:$src1), s11_1ImmPred:$offset)),
           (i32 (LDriuh_indexed (i32 IntRegs:$src1), s11_1ImmPred:$offset))>;
-
-
-// 32 bit atomic load
-def : Pat<(atomic_load_32 (HexagonCONST32_GP tglobaladdr:$global)),
-          (i32 (LDw_GP tglobaladdr:$global))>,
-          Requires<[NoV4T]>;
-
-def : Pat<(atomic_load_32 (add (HexagonCONST32_GP tglobaladdr:$global),
-                               u16ImmPred:$offset)),
-          (i32 (LDriw_GP tglobaladdr:$global, u16ImmPred:$offset))>,
-          Requires<[NoV4T]>;
-
 def : Pat<(atomic_load_32 ADDRriS11_2:$src1),
           (i32 (LDriw ADDRriS11_2:$src1))>;
 
 def : Pat<(atomic_load_32 (add (i32 IntRegs:$src1), s11_2ImmPred:$offset)),
           (i32 (LDriw_indexed (i32 IntRegs:$src1), s11_2ImmPred:$offset))>;
-
 // 64 bit atomic load
-def : Pat<(atomic_load_64 (HexagonCONST32_GP tglobaladdr:$global)),
-          (i64 (LDd_GP tglobaladdr:$global))>,
-          Requires<[NoV4T]>;
-
-def : Pat<(atomic_load_64 (add (HexagonCONST32_GP tglobaladdr:$global),
-                               u16ImmPred:$offset)),
-          (i64 (LDrid_GP tglobaladdr:$global, u16ImmPred:$offset))>,
-          Requires<[NoV4T]>;
-
 def : Pat<(atomic_load_64 ADDRriS11_3:$src1),
           (i64 (LDrid ADDRriS11_3:$src1))>;
 
@@ -2252,30 +2081,6 @@ def : Pat<(atomic_load_64 (add (i32 IntRegs:$src1), s11_3ImmPred:$offset)),
           (i64 (LDrid_indexed (i32 IntRegs:$src1), s11_3ImmPred:$offset))>;
 
-// 64 bit atomic store
-def : Pat<(atomic_store_64 (HexagonCONST32_GP tglobaladdr:$global),
-                           (i64 DoubleRegs:$src1)),
-          (STd_GP tglobaladdr:$global, (i64 DoubleRegs:$src1))>,
-          Requires<[NoV4T]>;
-
-def : Pat<(atomic_store_64 (add (HexagonCONST32_GP tglobaladdr:$global),
-                                u16ImmPred:$offset),
-                           (i64 DoubleRegs:$src1)),
-          (STrid_GP tglobaladdr:$global, u16ImmPred:$offset,
-                    (i64 DoubleRegs:$src1))>, Requires<[NoV4T]>;
-
-// 8 bit atomic store
-def : Pat<(atomic_store_8 (HexagonCONST32_GP tglobaladdr:$global),
-                          (i32 IntRegs:$src1)),
-          (STb_GP tglobaladdr:$global, (i32 IntRegs:$src1))>,
-          Requires<[NoV4T]>;
-
-def : Pat<(atomic_store_8 (add (HexagonCONST32_GP tglobaladdr:$global),
-                               u16ImmPred:$offset),
-                          (i32 IntRegs:$src1)),
-          (STrib_GP tglobaladdr:$global, u16ImmPred:$offset,
-                    (i32 IntRegs:$src1))>, Requires<[NoV4T]>;
-
 def : Pat<(atomic_store_8 ADDRriS11_0:$src2, (i32 IntRegs:$src1)),
           (STrib ADDRriS11_0:$src2, (i32 IntRegs:$src1))>;
 
@@ -2285,18 +2090,6 @@ def : Pat<(atomic_store_8 (add (i32 IntRegs:$src2), s11_0ImmPred:$offset),
                           (i32 IntRegs:$src1))>;
 
-// 16 bit atomic store
-def : Pat<(atomic_store_16 (HexagonCONST32_GP tglobaladdr:$global),
-                           (i32 IntRegs:$src1)),
-          (STh_GP tglobaladdr:$global, (i32 IntRegs:$src1))>,
-          Requires<[NoV4T]>;
-
-def : Pat<(atomic_store_16 (add (HexagonCONST32_GP tglobaladdr:$global),
-                                u16ImmPred:$offset),
-                           (i32 IntRegs:$src1)),
-          (STrih_GP tglobaladdr:$global, u16ImmPred:$offset,
-                    (i32 IntRegs:$src1))>, Requires<[NoV4T]>;
-
 def : Pat<(atomic_store_16 ADDRriS11_1:$src2, (i32 IntRegs:$src1)),
           (STrih ADDRriS11_1:$src2, (i32 IntRegs:$src1))>;
 
@@ -2305,20 +2098,6 @@ def : Pat<(atomic_store_16
                            (i32 IntRegs:$src1),
           (STrih_indexed (i32 IntRegs:$src2), s11_1ImmPred:$offset,
                          (i32 IntRegs:$src1))>;
-
-// 32 bit atomic store
-def : Pat<(atomic_store_32 (HexagonCONST32_GP tglobaladdr:$global),
-                           (i32 IntRegs:$src1)),
-          (STw_GP tglobaladdr:$global, (i32 IntRegs:$src1))>,
-          Requires<[NoV4T]>;
-
-def : Pat<(atomic_store_32 (add (HexagonCONST32_GP tglobaladdr:$global),
-                                u16ImmPred:$offset),
-                           (i32 IntRegs:$src1)),
-          (STriw_GP tglobaladdr:$global, u16ImmPred:$offset,
-                    (i32 IntRegs:$src1))>,
-          Requires<[NoV4T]>;
-
 def : Pat<(atomic_store_32 ADDRriS11_2:$src2, (i32 IntRegs:$src1)),
           (STriw ADDRriS11_2:$src2, (i32 IntRegs:$src1))>;
 
@@ -2387,198 +2166,8 @@ def : Pat <(brcond (not PredRegs:$src1), bb:$offset),
 
 def : Pat <(and PredRegs:$src1, (not PredRegs:$src2)),
            (i1 (AND_pnotp (i1 PredRegs:$src1), (i1 PredRegs:$src2)))>;
 
-// Map from store(globaladdress + x) -> memd(#foo + x).
-let AddedComplexity = 100 in
-def : Pat <(store (i64 DoubleRegs:$src1),
-                  (add (HexagonCONST32_GP tglobaladdr:$global),
-                       u16ImmPred:$offset)),
-           (STrid_GP tglobaladdr:$global, u16ImmPred:$offset,
-                     (i64 DoubleRegs:$src1))>, Requires<[NoV4T]>;
-
-// Map from store(globaladdress) -> memd(#foo).
-let AddedComplexity = 100 in
-def : Pat <(store (i64 DoubleRegs:$src1),
-                  (HexagonCONST32_GP tglobaladdr:$global)),
-           (STd_GP tglobaladdr:$global, (i64 DoubleRegs:$src1))>,
-           Requires<[NoV4T]>;
-
-// Map from store(globaladdress + x) -> memw(#foo + x).
-let AddedComplexity = 100 in
-def : Pat <(store (i32 IntRegs:$src1),
-                  (add (HexagonCONST32_GP tglobaladdr:$global),
-                       u16ImmPred:$offset)),
-           (STriw_GP tglobaladdr:$global, u16ImmPred:$offset, (i32 IntRegs:$src1))>,
-           Requires<[NoV4T]>;
-
-// Map fro