| author | Sirish Pande <spande@codeaurora.org> | 2012-04-16 17:05:06 +0000 |
|---|---|---|
| committer | Sirish Pande <spande@codeaurora.org> | 2012-04-16 17:05:06 +0000 |
| commit | 87eb92d913c2e3cdeb08b0a22250cd6c3214a3ff (patch) | |
| tree | 91864143b0763c9812c345e6e1c694dc29e4a333 /lib/Target/Hexagon | |
| parent | 57ca13ecc4bde825fce2987132a078c4b6b68ed5 (diff) | |
Hexagon V5 (Floating Point) Support.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@154829 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/Target/Hexagon')
19 files changed, 3365 insertions, 1468 deletions
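The diff below wires single- and double-precision floating point into the Hexagon backend for V5: f32/f64 values join the calling convention (R0–R5 / D0–D2), floating-point constants get native selection, and the V5 subtarget marks FP operations Legal instead of expanding them to runtime library calls. As a rough illustration of the user-visible effect — a hedged sketch, not part of the patch — source like the following no longer has to go through helper routines such as `__hexagon_addsf3` when built for a V5 core, while pre-V5 cores keep the libcall path configured in HexagonISelLowering.cpp:

```c
/* Illustrative only -- not part of this commit.  With a V5 subtarget the
 * backend keeps float values in integer (R) registers and no longer needs
 * to expand these operations to library calls; on pre-V5 cores the same
 * arithmetic is still lowered to calls such as __hexagon_mulsf3 and
 * __hexagon_addsf3, as set up in the else-branch of the constructor below. */
float scale(float x, float y) {
  return x * y + 1.0f;
}
```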
diff --git a/lib/Target/Hexagon/HexagonCallingConv.td b/lib/Target/Hexagon/HexagonCallingConv.td index bd9608bdb0..e61b2a7a58 100644 --- a/lib/Target/Hexagon/HexagonCallingConv.td +++ b/lib/Target/Hexagon/HexagonCallingConv.td @@ -17,8 +17,8 @@ // Hexagon 32-bit C return-value convention. def RetCC_Hexagon32 : CallingConv<[ - CCIfType<[i32], CCAssignToReg<[R0, R1, R2, R3, R4, R5]>>, - CCIfType<[i64], CCAssignToReg<[D0, D1, D2]>>, + CCIfType<[i32, f32], CCAssignToReg<[R0, R1, R2, R3, R4, R5]>>, + CCIfType<[i64, f64], CCAssignToReg<[D0, D1, D2]>>, // Alternatively, they are assigned to the stack in 4-byte aligned units. CCAssignToStack<4, 4> @@ -27,8 +27,8 @@ def RetCC_Hexagon32 : CallingConv<[ // Hexagon 32-bit C Calling convention. def CC_Hexagon32 : CallingConv<[ // All arguments get passed in integer registers if there is space. - CCIfType<[i32, i16, i8], CCAssignToReg<[R0, R1, R2, R3, R4, R5]>>, - CCIfType<[i64], CCAssignToReg<[D0, D1, D2]>>, + CCIfType<[f32, i32, i16, i8], CCAssignToReg<[R0, R1, R2, R3, R4, R5]>>, + CCIfType<[f64, i64], CCAssignToReg<[D0, D1, D2]>>, // Alternatively, they are assigned to the stack in 4-byte aligned units. CCAssignToStack<4, 4> diff --git a/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp b/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp index 2100474460..a96da9a3ce 100644 --- a/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp +++ b/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp @@ -7,9 +7,9 @@ // //===----------------------------------------------------------------------===// // The Hexagon processor has no instructions that load or store predicate -// registers directly. So, when these registers must be spilled a general -// purpose register must be found and the value copied to/from it from/to -// the predicate register. This code currently does not use the register +// registers directly. So, when these registers must be spilled a general +// purpose register must be found and the value copied to/from it from/to +// the predicate register. This code currently does not use the register // scavenger mechanism available in the allocator. There are two registers // reserved to allow spilling/restoring predicate registers. One is used to // hold the predicate value. 
The other is used when stack frame offsets are @@ -84,7 +84,7 @@ bool HexagonExpandPredSpillCode::runOnMachineFunction(MachineFunction &Fn) { int SrcReg = MI->getOperand(2).getReg(); assert(Hexagon::PredRegsRegClass.contains(SrcReg) && "Not a predicate register"); - if (!TII->isValidOffset(Hexagon::STriw, Offset)) { + if (!TII->isValidOffset(Hexagon::STriw_indexed, Offset)) { if (!TII->isValidOffset(Hexagon::ADD_ri, Offset)) { BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::CONST32_Int_Real), @@ -95,7 +95,7 @@ bool HexagonExpandPredSpillCode::runOnMachineFunction(MachineFunction &Fn) { BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_RsPd), HEXAGON_RESERVED_REG_2).addReg(SrcReg); BuildMI(*MBB, MII, MI->getDebugLoc(), - TII->get(Hexagon::STriw)) + TII->get(Hexagon::STriw_indexed)) .addReg(HEXAGON_RESERVED_REG_1) .addImm(0).addReg(HEXAGON_RESERVED_REG_2); } else { @@ -103,7 +103,7 @@ bool HexagonExpandPredSpillCode::runOnMachineFunction(MachineFunction &Fn) { HEXAGON_RESERVED_REG_1).addReg(FP).addImm(Offset); BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_RsPd), HEXAGON_RESERVED_REG_2).addReg(SrcReg); - BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::STriw)) + BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::STriw_indexed)) .addReg(HEXAGON_RESERVED_REG_1) .addImm(0) .addReg(HEXAGON_RESERVED_REG_2); @@ -111,7 +111,7 @@ bool HexagonExpandPredSpillCode::runOnMachineFunction(MachineFunction &Fn) { } else { BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_RsPd), HEXAGON_RESERVED_REG_2).addReg(SrcReg); - BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::STriw)). + BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::STriw_indexed)). addReg(FP).addImm(Offset).addReg(HEXAGON_RESERVED_REG_2); } MII = MBB->erase(MI); diff --git a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp index 9df965efc1..5b9512ffbb 100644 --- a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp +++ b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp @@ -90,7 +90,9 @@ public: SDNode *SelectMul(SDNode *N); SDNode *SelectZeroExtend(SDNode *N); SDNode *SelectIntrinsicWOChain(SDNode *N); + SDNode *SelectIntrinsicWChain(SDNode *N); SDNode *SelectConstant(SDNode *N); + SDNode *SelectConstantFP(SDNode *N); SDNode *SelectAdd(SDNode *N); // Include the pieces autogenerated from the target description. @@ -318,6 +320,8 @@ SDNode *HexagonDAGToDAGISel::SelectBaseOffsetLoad(LoadSDNode *LD, DebugLoc dl) { else if (LoadedVT == MVT::i32) Opcode = Hexagon::LDriw_indexed; else if (LoadedVT == MVT::i16) Opcode = Hexagon::LDrih_indexed; else if (LoadedVT == MVT::i8) Opcode = Hexagon::LDrib_indexed; + else if (LoadedVT == MVT::f32) Opcode = Hexagon::LDriw_indexed_f; + else if (LoadedVT == MVT::f64) Opcode = Hexagon::LDrid_indexed_f; else assert (0 && "unknown memory type"); // Build indexed load. @@ -375,7 +379,7 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedLoadSignExtend64(LoadSDNode *LD, }; ReplaceUses(Froms, Tos, 3); return Result_2; - } + } SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32); SDValue TargetConstVal = CurDAG->getTargetConstant(Val, MVT::i32); SDNode *Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::i32, @@ -636,7 +640,7 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, DebugLoc dl) { // Figure out the opcode. 
if (StoredVT == MVT::i64) Opcode = Hexagon::STrid; - else if (StoredVT == MVT::i32) Opcode = Hexagon::STriw; + else if (StoredVT == MVT::i32) Opcode = Hexagon::STriw_indexed; else if (StoredVT == MVT::i16) Opcode = Hexagon::STrih; else if (StoredVT == MVT::i8) Opcode = Hexagon::STrib; else assert (0 && "unknown memory type"); @@ -693,6 +697,8 @@ SDNode *HexagonDAGToDAGISel::SelectBaseOffsetStore(StoreSDNode *ST, else if (StoredVT == MVT::i32) Opcode = Hexagon::STriw_indexed; else if (StoredVT == MVT::i16) Opcode = Hexagon::STrih_indexed; else if (StoredVT == MVT::i8) Opcode = Hexagon::STrib_indexed; + else if (StoredVT == MVT::f32) Opcode = Hexagon::STriw_indexed_f; + else if (StoredVT == MVT::f64) Opcode = Hexagon::STrid_indexed_f; else assert (0 && "unknown memory type"); SDValue Ops[] = {SDValue(NewBase,0), @@ -723,7 +729,7 @@ SDNode *HexagonDAGToDAGISel::SelectStore(SDNode *N) { if (AM != ISD::UNINDEXED) { return SelectIndexedStore(ST, dl); } - + return SelectBaseOffsetStore(ST, dl); } @@ -752,7 +758,7 @@ SDNode *HexagonDAGToDAGISel::SelectMul(SDNode *N) { if (MulOp0.getOpcode() == ISD::SIGN_EXTEND) { SDValue Sext0 = MulOp0.getOperand(0); if (Sext0.getNode()->getValueType(0) != MVT::i32) { - SelectCode(N); + return SelectCode(N); } OP0 = Sext0; @@ -761,7 +767,7 @@ SDNode *HexagonDAGToDAGISel::SelectMul(SDNode *N) { if (LD->getMemoryVT() != MVT::i32 || LD->getExtensionType() != ISD::SEXTLOAD || LD->getAddressingMode() != ISD::UNINDEXED) { - SelectCode(N); + return SelectCode(N); } SDValue Chain = LD->getChain(); @@ -1158,6 +1164,25 @@ SDNode *HexagonDAGToDAGISel::SelectIntrinsicWOChain(SDNode *N) { return SelectCode(N); } +// +// Map floating point constant values. +// +SDNode *HexagonDAGToDAGISel::SelectConstantFP(SDNode *N) { + DebugLoc dl = N->getDebugLoc(); + ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N); + APFloat APF = CN->getValueAPF(); + if (N->getValueType(0) == MVT::f32) { + return CurDAG->getMachineNode(Hexagon::TFRI_f, dl, MVT::f32, + CurDAG->getTargetConstantFP(APF.convertToFloat(), MVT::f32)); + } + else if (N->getValueType(0) == MVT::f64) { + return CurDAG->getMachineNode(Hexagon::CONST64_Float_Real, dl, MVT::f64, + CurDAG->getTargetConstantFP(APF.convertToDouble(), MVT::f64)); + } + + return SelectCode(N); +} + // // Map predicate true (encoded as -1 in LLVM) to a XOR. 
@@ -1234,6 +1259,9 @@ SDNode *HexagonDAGToDAGISel::Select(SDNode *N) { case ISD::Constant: return SelectConstant(N); + case ISD::ConstantFP: + return SelectConstantFP(N); + case ISD::ADD: return SelectAdd(N); diff --git a/lib/Target/Hexagon/HexagonISelLowering.cpp b/lib/Target/Hexagon/HexagonISelLowering.cpp index d6da0d0911..9639dafa09 100644 --- a/lib/Target/Hexagon/HexagonISelLowering.cpp +++ b/lib/Target/Hexagon/HexagonISelLowering.cpp @@ -101,12 +101,12 @@ CC_Hexagon_VarArg (unsigned ValNo, MVT ValVT, State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo)); return false; } - if (LocVT == MVT::i32) { + if (LocVT == MVT::i32 || LocVT == MVT::f32) { ofst = State.AllocateStack(4, 4); State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo)); return false; } - if (LocVT == MVT::i64) { + if (LocVT == MVT::i64 || LocVT == MVT::f64) { ofst = State.AllocateStack(8, 8); State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo)); return false; @@ -140,12 +140,12 @@ CC_Hexagon (unsigned ValNo, MVT ValVT, LocInfo = CCValAssign::AExt; } - if (LocVT == MVT::i32) { + if (LocVT == MVT::i32 || LocVT == MVT::f32) { if (!CC_Hexagon32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State)) return false; } - if (LocVT == MVT::i64) { + if (LocVT == MVT::i64 || LocVT == MVT::f64) { if (!CC_Hexagon64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State)) return false; } @@ -215,12 +215,12 @@ static bool RetCC_Hexagon(unsigned ValNo, MVT ValVT, LocInfo = CCValAssign::AExt; } - if (LocVT == MVT::i32) { + if (LocVT == MVT::i32 || LocVT == MVT::f32) { if (!RetCC_Hexagon32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State)) return false; } - if (LocVT == MVT::i64) { + if (LocVT == MVT::i64 || LocVT == MVT::f64) { if (!RetCC_Hexagon64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State)) return false; } @@ -232,7 +232,7 @@ static bool RetCC_Hexagon32(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State) { - if (LocVT == MVT::i32) { + if (LocVT == MVT::i32 || LocVT == MVT::f32) { if (unsigned Reg = State.AllocateReg(Hexagon::R0)) { State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); return false; @@ -247,7 +247,7 @@ static bool RetCC_Hexagon32(unsigned ValNo, MVT ValVT, static bool RetCC_Hexagon64(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State) { - if (LocVT == MVT::i64) { + if (LocVT == MVT::i64 || LocVT == MVT::f64) { if (unsigned Reg = State.AllocateReg(Hexagon::D0)) { State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); return false; @@ -837,12 +837,13 @@ const { // 1. int, long long, ptr args that get allocated in register. // 2. Large struct that gets an register to put its address in. 
EVT RegVT = VA.getLocVT(); - if (RegVT == MVT::i8 || RegVT == MVT::i16 || RegVT == MVT::i32) { + if (RegVT == MVT::i8 || RegVT == MVT::i16 || + RegVT == MVT::i32 || RegVT == MVT::f32) { unsigned VReg = RegInfo.createVirtualRegister(Hexagon::IntRegsRegisterClass); RegInfo.addLiveIn(VA.getLocReg(), VReg); InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT)); - } else if (RegVT == MVT::i64) { + } else if (RegVT == MVT::i64 || RegVT == MVT::f64) { unsigned VReg = RegInfo.createVirtualRegister(Hexagon::DoubleRegsRegisterClass); RegInfo.addLiveIn(VA.getLocReg(), VReg); @@ -916,14 +917,34 @@ HexagonTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { SDValue HexagonTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { + EVT VT = Op.getValueType(); + SDValue LHS = Op.getOperand(0); + SDValue RHS = Op.getOperand(1); + SDValue CC = Op.getOperand(4); + SDValue TrueVal = Op.getOperand(2); + SDValue FalseVal = Op.getOperand(3); + DebugLoc dl = Op.getDebugLoc(); SDNode* OpNode = Op.getNode(); + EVT SVT = OpNode->getValueType(0); - SDValue Cond = DAG.getNode(ISD::SETCC, Op.getDebugLoc(), MVT::i1, - Op.getOperand(2), Op.getOperand(3), - Op.getOperand(4)); - return DAG.getNode(ISD::SELECT, Op.getDebugLoc(), OpNode->getValueType(0), - Cond, Op.getOperand(0), - Op.getOperand(1)); + SDValue Cond = DAG.getNode(ISD::SETCC, dl, MVT::i1, LHS, RHS, CC); + return DAG.getNode(ISD::SELECT, dl, SVT, Cond, TrueVal, FalseVal); +} + +SDValue +HexagonTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const { + EVT ValTy = Op.getValueType(); + + DebugLoc dl = Op.getDebugLoc(); + ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); + SDValue Res; + if (CP->isMachineConstantPoolEntry()) + Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), ValTy, + CP->getAlignment()); + else + Res = DAG.getTargetConstantPool(CP->getConstVal(), ValTy, + CP->getAlignment()); + return DAG.getNode(HexagonISD::CONST32, dl, ValTy, Res); } SDValue @@ -1008,8 +1029,16 @@ HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine : TargetLowering(targetmachine, new HexagonTargetObjectFile()), TM(targetmachine) { + const HexagonRegisterInfo* QRI = TM.getRegisterInfo(); + // Set up the register classes. 
addRegisterClass(MVT::i32, Hexagon::IntRegsRegisterClass); + + if (QRI->Subtarget.hasV5TOps()) { + addRegisterClass(MVT::f32, Hexagon::IntRegsRegisterClass); + addRegisterClass(MVT::f64, Hexagon::DoubleRegsRegisterClass); + } + addRegisterClass(MVT::i64, Hexagon::DoubleRegsRegisterClass); addRegisterClass(MVT::i1, Hexagon::PredRegsRegisterClass); @@ -1026,32 +1055,16 @@ HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine // // Library calls for unsupported operations // - setLibcallName(RTLIB::OGT_F64, "__hexagon_gtdf2"); - setLibcallName(RTLIB::SINTTOFP_I64_F64, "__hexagon_floatdidf"); setLibcallName(RTLIB::SINTTOFP_I128_F64, "__hexagon_floattidf"); setLibcallName(RTLIB::SINTTOFP_I128_F32, "__hexagon_floattisf"); - setLibcallName(RTLIB::UINTTOFP_I32_F32, "__hexagon_floatunsisf"); - setLibcallName(RTLIB::UINTTOFP_I64_F32, "__hexagon_floatundisf"); - setLibcallName(RTLIB::SINTTOFP_I64_F32, "__hexagon_floatdisf"); - setLibcallName(RTLIB::UINTTOFP_I64_F64, "__hexagon_floatundidf"); - setLibcallName(RTLIB::FPTOUINT_F32_I32, "__hexagon_fixunssfsi"); - setLibcallName(RTLIB::FPTOUINT_F32_I64, "__hexagon_fixunssfdi"); setLibcallName(RTLIB::FPTOUINT_F32_I128, "__hexagon_fixunssfti"); - - setLibcallName(RTLIB::FPTOUINT_F64_I32, "__hexagon_fixunsdfsi"); - setLibcallName(RTLIB::FPTOUINT_F64_I64, "__hexagon_fixunsdfdi"); setLibcallName(RTLIB::FPTOUINT_F64_I128, "__hexagon_fixunsdfti"); - setLibcallName(RTLIB::UINTTOFP_I32_F64, "__hexagon_floatunsidf"); - setLibcallName(RTLIB::FPTOSINT_F32_I64, "__hexagon_fixsfdi"); setLibcallName(RTLIB::FPTOSINT_F32_I128, "__hexagon_fixsfti"); - setLibcallName(RTLIB::FPTOSINT_F64_I64, "__hexagon_fixdfdi"); setLibcallName(RTLIB::FPTOSINT_F64_I128, "__hexagon_fixdfti"); - setLibcallName(RTLIB::OGT_F64, "__hexagon_gtdf2"); - setLibcallName(RTLIB::SDIV_I32, "__hexagon_divsi3"); setOperationAction(ISD::SDIV, MVT::i32, Expand); setLibcallName(RTLIB::SREM_I32, "__hexagon_umodsi3"); @@ -1080,92 +1093,184 @@ HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine setLibcallName(RTLIB::DIV_F64, "__hexagon_divdf3"); setOperationAction(ISD::FDIV, MVT::f64, Expand); - setLibcallName(RTLIB::FPEXT_F32_F64, "__hexagon_extendsfdf2"); - setOperationAction(ISD::FP_EXTEND, MVT::f32, Expand); + setOperationAction(ISD::FSQRT, MVT::f32, Expand); + setOperationAction(ISD::FSQRT, MVT::f64, Expand); + setOperationAction(ISD::FSIN, MVT::f32, Expand); + setOperationAction(ISD::FSIN, MVT::f64, Expand); + + if (QRI->Subtarget.hasV5TOps()) { + // Hexagon V5 Support. 
+ setOperationAction(ISD::FADD, MVT::f32, Legal); + setOperationAction(ISD::FADD, MVT::f64, Legal); + setOperationAction(ISD::FP_EXTEND, MVT::f32, Legal); + setCondCodeAction(ISD::SETOEQ, MVT::f32, Legal); + setCondCodeAction(ISD::SETOEQ, MVT::f64, Legal); + setCondCodeAction(ISD::SETUEQ, MVT::f32, Legal); + setCondCodeAction(ISD::SETUEQ, MVT::f64, Legal); + + setCondCodeAction(ISD::SETOGE, MVT::f32, Legal); + setCondCodeAction(ISD::SETOGE, MVT::f64, Legal); + setCondCodeAction(ISD::SETUGE, MVT::f32, Legal); + setCondCodeAction(ISD::SETUGE, MVT::f64, Legal); + + setCondCodeAction(ISD::SETOGT, MVT::f32, Legal); + setCondCodeAction(ISD::SETOGT, MVT::f64, Legal); + setCondCodeAction(ISD::SETUGT, MVT::f32, Legal); + setCondCodeAction(ISD::SETUGT, MVT::f64, Legal); + + setCondCodeAction(ISD::SETOLE, MVT::f32, Legal); + setCondCodeAction(ISD::SETOLE, MVT::f64, Legal); + setCondCodeAction(ISD::SETOLT, MVT::f32, Legal); + setCondCodeAction(ISD::SETOLT, MVT::f64, Legal); + + setOperationAction(ISD::ConstantFP, MVT::f32, Legal); + setOperationAction(ISD::ConstantFP, MVT::f64, Legal); + + setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote); + setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote); + setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote); + setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote); + + setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote); + setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote); + setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote); + setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote); + + setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote); + setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote); + setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote); + setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote); + + setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal); + setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal); + setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal); + setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal); + + setOperationAction(ISD::FP_TO_UINT, MVT::i64, Legal); + setOperationAction(ISD::FP_TO_SINT, MVT::i64, Legal); + setOperationAction(ISD::UINT_TO_FP, MVT::i64, Legal); + setOperationAction(ISD::SINT_TO_FP, MVT::i64, Legal); + + setOperationAction(ISD::FABS, MVT::f32, Legal); + setOperationAction(ISD::FABS, MVT::f64, Expand); + + setOperationAction(ISD::FNEG, MVT::f32, Legal); + setOperationAction(ISD::FNEG, MVT::f64, Expand); + } else { - setLibcallName(RTLIB::SINTTOFP_I32_F32, "__hexagon_floatsisf"); - setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand); + // Expand fp<->uint. 
+ setOperationAction(ISD::FP_TO_SINT, MVT::i32, Expand); + setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand); - setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3"); - setOperationAction(ISD::FADD, MVT::f64, Expand); + setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand); + setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand); - setLibcallName(RTLIB::ADD_F32, "__hexagon_addsf3"); - setOperationAction(ISD::FADD, MVT::f32, Expand); + setLibcallName(RTLIB::SINTTOFP_I64_F32, "__hexagon_floatdisf"); + setLibcallName(RTLIB::UINTTOFP_I64_F32, "__hexagon_floatundisf"); - setLibcallName(RTLIB::ADD_F32, "__hexagon_addsf3"); - setOperationAction(ISD::FADD, MVT::f32, Expand); + setLibcallName(RTLIB::UINTTOFP_I32_F32, "__hexagon_floatunsisf"); + setLibcallName(RTLIB::SINTTOFP_I32_F32, "__hexagon_floatsisf"); - setLibcallName(RTLIB::OEQ_F32, "__hexagon_eqsf2"); - setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand); + setLibcallName(RTLIB::SINTTOFP_I64_F64, "__hexagon_floatdidf"); + setLibcallName(RTLIB::UINTTOFP_I64_F64, "__hexagon_floatundidf"); - setLibcallName(RTLIB::FPTOSINT_F64_I32, "__hexagon_fixdfsi"); - setOperationAction(ISD::FP_TO_SINT, MVT::f64, Expand); + setLibcallName(RTLIB::UINTTOFP_I32_F64, "__hexagon_floatunsidf"); + setLibcallName(RTLIB::SINTTOFP_I32_F64, "__hexagon_floatsidf"); - setLibcallName(RTLIB::FPTOSINT_F32_I32, "__hexagon_fixsfsi"); - setOperationAction(ISD::FP_TO_SINT, MVT::f32, Expand); + setLibcallName(RTLIB::FPTOUINT_F32_I32, "__hexagon_fixunssfsi"); + setLibcallName(RTLIB::FPTOUINT_F32_I64, "__hexagon_fixunssfdi"); - setLibcallName(RTLIB::SINTTOFP_I32_F64, "__hexagon_floatsidf"); - setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand); + setLibcallName(RTLIB::FPTOSINT_F64_I64, "__hexagon_fixdfdi"); + setLibcallName(RTLIB::FPTOSINT_F32_I64, "__hexagon_fixsfdi"); - setLibcallName(RTLIB::OGE_F64, "__hexagon_gedf2"); - setCondCodeAction(ISD::SETOGE, MVT::f64, Expand); + setLibcallName(RTLIB::FPTOUINT_F64_I32, "__hexagon_fixunsdfsi"); + setLibcallName(RTLIB::FPTOUINT_F64_I64, "__hexagon_fixunsdfdi"); - setLibcallName(RTLIB::OGE_F32, "__hexagon_gesf2"); - setCondCodeAction(ISD::SETOGE, MVT::f32, Expand); + setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3"); + setOperationAction(ISD::FADD, MVT::f64, Expand); - setLibcallName(RTLIB::OGT_F32, "__hexagon_gtsf2"); - setCondCodeAction(ISD::SETOGT, MVT::f32, Expand); + setLibcallName(RTLIB::ADD_F32, "__hexagon_addsf3"); + setOperationAction(ISD::FADD, MVT::f32, Expand); - setLibcallName(RTLIB::OLE_F64, "__hexagon_ledf2"); - setCondCodeAction(ISD::SETOLE, MVT::f64, Expand); + setLibcallName(RTLIB::FPEXT_F32_F64, "__hexagon_extendsfdf2"); + setOperationAction(ISD::FP_EXTEND, MVT::f32, Expand); - setLibcallName(RTLIB::OLE_F32, "__hexagon_lesf2"); - setCondCodeAction(ISD::SETOLE, MVT::f32, Expand); + setLibcallName(RTLIB::OEQ_F32, "__hexagon_eqsf2"); + setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand); - setLibcallName(RTLIB::OLT_F64, "__hexagon_ltdf2"); - setCondCodeAction(ISD::SETOLT, MVT::f64, Expand); + setLibcallName(RTLIB::OEQ_F64, "__hexagon_eqdf2"); + setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand); - setLibcallName(RTLIB::OLT_F32, "__hexagon_ltsf2"); - setCondCodeAction(ISD::SETOLT, MVT::f32, Expand); + setLibcallName(RTLIB::OGE_F32, "__hexagon_gesf2"); + setCondCodeAction(ISD::SETOGE, MVT::f32, Expand); - setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3"); - setOperationAction(ISD::SREM, MVT::i32, Expand); + setLibcallName(RTLIB::OGE_F64, "__hexagon_gedf2"); + setCondCodeAction(ISD::SETOGE, MVT::f64, Expand); + + 
setLibcallName(RTLIB::OGT_F32, "__hexagon_gtsf2"); + setCondCodeAction(ISD::SETOGT, MVT::f32, Expand); + + setLibcallName(RTLIB::OGT_F64, "__hexagon_gtdf2"); + setCondCodeAction(ISD::SETOGT, MVT::f64, Expand); + + setLibcallName(RTLIB::FPTOSINT_F64_I32, "__hexagon_fixdfsi"); + setOperationAction(ISD::FP_TO_SINT, MVT::f64, Expand); + + setLibcallName(RTLIB::FPTOSINT_F32_I32, "__hexagon_fixsfsi"); + setOperationAction(ISD::FP_TO_SINT, MVT::f32, Expand); - setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3"); - setOperationAction(ISD::FMUL, MVT::f64, Expand); + setLibcallName(RTLIB::OLE_F64, "__hexagon_ledf2"); + setCondCodeAction(ISD::SETOLE, MVT::f64, Expand); - setLibcallName(RTLIB::MUL_F32, "__hexagon_mulsf3"); - setOperationAction(ISD::MUL, MVT::f32, Expand); + setLibcallName(RTLIB::OLE_F32, "__hexagon_lesf2"); + setCondCodeAction(ISD::SETOLE, MVT::f32, Expand); - setLibcallName(RTLIB::UNE_F64, "__hexagon_nedf2"); - setCondCodeAction(ISD::SETUNE, MVT::f64, Expand); + setLibcallName(RTLIB::OLT_F64, "__hexagon_ltdf2"); + setCondCodeAction(ISD::SETOLT, MVT::f64, Expand); - setLibcallName(RTLIB::UNE_F32, "__hexagon_nesf2"); + setLibcallName(RTLIB::OLT_F32, "__hexagon_ltsf2"); + setCondCodeAction(ISD::SETOLT, MVT::f32, Expand); + setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3"); + setOperationAction(ISD::FMUL, MVT::f64, Expand); - setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3"); - setOperationAction(ISD::SUB, MVT::f64, Expand); + setLibcallName(RTLIB::MUL_F32, "__hexagon_mulsf3"); + setOperationAction(ISD::MUL, MVT::f32, Expand); - setLibcallName(RTLIB::SUB_F32, "__hexagon_subsf3"); - setOperationAction(ISD::SUB, MVT::f32, Expand); + setLibcallName(RTLIB::UNE_F64, "__hexagon_nedf2"); + setCondCodeAction(ISD::SETUNE, MVT::f64, Expand); - setLibcallName(RTLIB::FPROUND_F64_F32, "__hexagon_truncdfsf2"); - setOperationAction(ISD::FP_ROUND, MVT::f64, Expand); + setLibcallName(RTLIB::UNE_F32, "__hexagon_nesf2"); - setLibcallName(RTLIB::UO_F64, "__hexagon_unorddf2"); - setCondCodeAction(ISD::SETUO, MVT::f64, Expand); + setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3"); + setOperationAction(ISD::SUB, MVT::f64, Expand); - setLibcallName(RTLIB::O_F64, "__hexagon_unorddf2"); - setCondCodeAction(ISD::SETO, MVT::f64, Expand); + setLibcallName(RTLIB::SUB_F32, "__hexagon_subsf3"); + setOperationAction(ISD::SUB, MVT::f32, Expand); - setLibcallName(RTLIB::OEQ_F64, "__hexagon_eqdf2"); - setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand); + setLibcallName(RTLIB::FPROUND_F64_F32, "__hexagon_truncdfsf2"); + setOperationAction(ISD::FP_ROUND, MVT::f64, Expand); - setLibcallName(RTLIB::O_F32, "__hexagon_unordsf2"); - setCondCodeAction(ISD::SETO, MVT::f32, Expand); + setLibcallName(RTLIB::UO_F64, "__hexagon_unorddf2"); + setCondCodeAction(ISD::SETUO, MVT::f64, Expand); - setLibcallName(RTLIB::UO_F32, "__hexagon_unordsf2"); - setCondCodeAction(ISD::SETUO, MVT::f32, Expand); + setLibcallName(RTLIB::O_F64, "__hexagon_unorddf2"); + setCondCodeAction(ISD::SETO, MVT::f64, Expand); + + setLibcallName(RTLIB::O_F32, "__hexagon_unordsf2"); + setCondCodeAction(ISD::SETO, MVT::f32, Expand); + + setLibcallName(RTLIB::UO_F32, "__hexagon_unordsf2"); + setCondCodeAction(ISD::SETUO, MVT::f32, Expand); + + setOperationAction(ISD::FABS, MVT::f32, Expand); + setOperationAction(ISD::FABS, MVT::f64, Expand); + setOperationAction(ISD::FNEG, MVT::f32, Expand); + setOperationAction(ISD::FNEG, MVT::f64, Expand); + } + + setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3"); + setOperationAction(ISD::SREM, MVT::i32, Expand); 
setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal); setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal); @@ -1206,20 +1311,33 @@ HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine setOperationAction(ISD::BSWAP, MVT::i64, Expand); - // Expand fp<->uint. - setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand); - setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand); - - // Hexagon has no select or setcc: expand to SELECT_CC. - setOperationAction(ISD::SELECT, MVT::f32, Expand); - setOperationAction(ISD::SELECT, MVT::f64, Expand); - // Lower SELECT_CC to SETCC and SELECT. setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); setOperationAction(ISD::SELECT_CC, MVT::i64, Custom); - // This is a workaround documented in DAGCombiner.cpp:2892 We don't - // support SELECT_CC on every type. - setOperationAction(ISD::SELECT_CC, MVT::Other, Expand); + + if (QRI->Subtarget.hasV5TOps()) { + + // We need to make the operation type of SELECT node to be Custom, + // such that we don't go into the infinite loop of + // select -> setcc -> select_cc -> select loop. + setOperationAction(ISD::SELECT, MVT::f32, Custom); + setOperationAction(ISD::SELECT, MVT::f64, Custom); + + setOperationAction(ISD::SELECT_CC, MVT::f32, Expand); + setOperationAction(ISD::SELECT_CC, MVT::f64, Expand); + setOperationAction(ISD::SELECT_CC, MVT::Other, Expand); + + } else { + + // Hexagon has no select or setcc: expand to SELECT_CC. + setOperationAction(ISD::SELECT, MVT::f32, Expand); + setOperationAction(ISD::SELECT, MVT::f64, Expand); + + // This is a workaround documented in DAGCombiner.cpp:2892 We don't + // support SELECT_CC on every type. + setOperationAction(ISD::SELECT_CC, MVT::Other, Expand); + + } setOperationAction(ISD::BR_CC, MVT::Other, Expand); setOperationAction(ISD::BRIND, MVT::Other, Expand); @@ -1305,22 +1423,22 @@ const char* HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const { switch (Opcode) { default: return 0; - case HexagonISD::CONST32: return "HexagonISD::CONST32"; + case HexagonISD::CONST32: return "HexagonISD::CONST32"; case HexagonISD::ADJDYNALLOC: return "HexagonISD::ADJDYNALLOC"; - case HexagonISD::CMPICC: return "HexagonISD::CMPICC"; - case HexagonISD::CMPFCC: return "HexagonISD::CMPFCC"; - case HexagonISD::BRICC: return "HexagonISD::BRICC"; - case HexagonISD::BRFCC: return "HexagonISD::BRFCC"; - case HexagonISD::SELECT_ICC: return "HexagonISD::SELECT_ICC"; - case HexagonISD::SELECT_FCC: return "HexagonISD::SELECT_FCC"; - case HexagonISD::Hi: return "HexagonISD::Hi"; - case HexagonISD::Lo: return "HexagonISD::Lo"; - case HexagonISD::FTOI: return "HexagonISD::FTOI"; - case HexagonISD::ITOF: return "HexagonISD::ITOF"; - case HexagonISD::CALL: return "HexagonISD::CALL"; - case HexagonISD::RET_FLAG: return "HexagonISD::RET_FLAG"; - case HexagonISD::BR_JT: return "HexagonISD::BR_JT"; - case HexagonISD::TC_RETURN: return "HexagonISD::TC_RETURN"; + case HexagonISD::CMPICC: return "HexagonISD::CMPICC"; + case HexagonISD::CMPFCC: return "HexagonISD::CMPFCC"; + case HexagonISD::BRICC: return "HexagonISD::BRICC"; + case HexagonISD::BRFCC: return "HexagonISD::BRFCC"; + case HexagonISD::SELECT_ICC: return "HexagonISD::SELECT_ICC"; + case HexagonISD::SELECT_FCC: return "HexagonISD::SELECT_FCC"; + case HexagonISD::Hi: return "HexagonISD::Hi"; + case HexagonISD::Lo: return "HexagonISD::Lo"; + case HexagonISD::FTOI: return "HexagonISD::FTOI"; + case HexagonISD::ITOF: return "HexagonISD::ITOF"; + case HexagonISD::CALL: return "HexagonISD::CALL"; + case HexagonISD::RET_FLAG: return 
"HexagonISD::RET_FLAG"; + case HexagonISD::BR_JT: return "HexagonISD::BR_JT"; + case HexagonISD::TC_RETURN: return "HexagonISD::TC_RETURN"; } } @@ -1345,9 +1463,10 @@ SDValue HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { switch (Op.getOpcode()) { default: llvm_unreachable("Should not custom lower this!"); + case ISD::ConstantPool: return LowerConstantPool(Op, DAG); // Frame & Return address. Currently unimplemented. - case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); - case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); + case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); + case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); case ISD::GlobalTLSAddress: llvm_unreachable("TLS not implemented for Hexagon."); case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG); @@ -1357,9 +1476,10 @@ HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { case ISD::BR_JT: return LowerBR_JT(Op, DAG); case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); - case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); + case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); + case ISD::SELECT: return Op; case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); - case ISD::INLINEASM: return LowerINLINEASM(Op, DAG); + case ISD::INLINEASM: return LowerINLINEASM(Op, DAG); } } @@ -1402,8 +1522,10 @@ HexagonTargetLowering::getRegForInlineAsmConstraint(const case MVT::i32: case MVT::i16: case MVT::i8: + case MVT::f32: return std::make_pair(0U, Hexagon::IntRegsRegisterClass); case MVT::i64: + case MVT::f64: return std::make_pair(0U, Hexagon::DoubleRegsRegisterClass); } default: @@ -1414,6 +1536,14 @@ HexagonTargetLowering::getRegForInlineAsmConstraint(const return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); } +/// isFPImmLegal - Returns true if the target can instruction select the +/// specified FP immediate natively. If false, the legalizer will +/// materialize the FP immediate as a load from a constant pool. +bool HexagonTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { + const HexagonRegisterInfo* QRI = TM.getRegisterInfo(); + return QRI->Subtarget.hasV5TOps(); +} + /// isLegalAddressingMode - Return true if the addressing mode represented by /// AM is legal for this target, for a load/store of the specified type. bool HexagonTargetLowering::isLegalAddressingMode(const AddrMode &AM, diff --git a/lib/Target/Hexagon/HexagonISelLowering.h b/lib/Target/Hexagon/HexagonISelLowering.h index 4208bcb2fd..b7e5055998 100644 --- a/lib/Target/Hexagon/HexagonISelLowering.h +++ b/lib/Target/Hexagon/HexagonISelLowering.h @@ -27,6 +27,7 @@ namespace llvm { CONST32, CONST32_GP, // For marking data present in GP. + FCONST32, SETCC, ADJDYNALLOC, ARGEXTEND, @@ -48,6 +49,7 @@ namespace llvm { BR_JT, // Jump table. BARRIER, // Memory barrier. WrapperJT, + WrapperCP, TC_RETURN }; } @@ -128,6 +130,7 @@ namespace llvm { MachineBasicBlock *BB) const; SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const; virtual EVT getSetCCResultType(EVT VT) const { return MVT::i1; } @@ -150,6 +153,7 @@ namespace llvm { /// mode is legal for a load/store of any legal type. /// TODO: Handle pre/postinc as well. 
virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const; + virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const; /// isLegalICmpImmediate - Return true if the specified immediate is legal /// icmp immediate, that is the target has icmp instructions which can diff --git a/lib/Target/Hexagon/HexagonInstrFormats.td b/lib/Target/Hexagon/HexagonInstrFormats.td index 48f0f01bb4..fc6c763cdb 100644 --- a/lib/Target/Hexagon/HexagonInstrFormats.td +++ b/lib/Target/Hexagon/HexagonInstrFormats.td @@ -166,7 +166,7 @@ class MInst<dag outs, dag ins, string asmstr, list<dag> pattern> // Definition of the instruction class NOT CHANGED. // Name of the Instruction Class changed from M to XTYPE from V2/V3 to V4. class MInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern, - string cstr> + string cstr> : InstHexagon<outs, ins, asmstr, pattern, cstr, M, TypeXTYPE> { bits<5> rd; bits<5> rs; @@ -189,7 +189,7 @@ class SInst<dag outs, dag ins, string asmstr, list<dag> pattern> // Definition of the instruction class NOT CHANGED. // Name of the Instruction Class changed from S to XTYPE from V2/V3 to V4. class SInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern, - string cstr> + string cstr> : InstHexagon<outs, ins, asmstr, pattern, cstr, S, TypeXTYPE> { // : InstHexagon<outs, ins, asmstr, pattern, cstr, S> { // : InstHexagon<outs, ins, asmstr, pattern, cstr, !if(V4T, XTYPE_V4, S)> { diff --git a/lib/Target/Hexagon/HexagonInstrInfo.cpp b/lib/Target/Hexagon/HexagonInstrInfo.cpp index 1dfdff0e34..9640cad285 100644 --- a/lib/Target/Hexagon/HexagonInstrInfo.cpp +++ b/lib/Target/Hexagon/HexagonInstrInfo.cpp @@ -34,23 +34,23 @@ using namespace llvm; /// Constants for Hexagon instructions. /// const int Hexagon_MEMW_OFFSET_MAX = 4095; -const int Hexagon_MEMW_OFFSET_MIN = 4096; +const int Hexagon_MEMW_OFFSET_MIN = -4096; const int Hexagon_MEMD_OFFSET_MAX = 8191; -const int Hexagon_MEMD_OFFSET_MIN = 8192; +const int Hexagon_MEMD_OFFSET_MIN = -8192; const int Hexagon_MEMH_OFFSET_MAX = 2047; -const int Hexagon_MEMH_OFFSET_MIN = 2048; +const int Hexagon_MEMH_OFFSET_MIN = -2048; const int Hexagon_MEMB_OFFSET_MAX = 1023; -const int Hexagon_MEMB_OFFSET_MIN = 1024; +const int Hexagon_MEMB_OFFSET_MIN = -1024; const int Hexagon_ADDI_OFFSET_MAX = 32767; -const int Hexagon_ADDI_OFFSET_MIN = 32768; +const int Hexagon_ADDI_OFFSET_MIN = -32768; const int Hexagon_MEMD_AUTOINC_MAX = 56; -const int Hexagon_MEMD_AUTOINC_MIN = 64; +const int Hexagon_MEMD_AUTOINC_MIN = -64; const int Hexagon_MEMW_AUTOINC_MAX = 28; -const int Hexagon_MEMW_AUTOINC_MIN = 32; +const int Hexagon_MEMW_AUTOINC_MIN = -32; const int Hexagon_MEMH_AUTOINC_MAX = 14; -const int Hexagon_MEMH_AUTOINC_MIN = 16; +const int Hexagon_MEMH_AUTOINC_MIN = -16; const int Hexagon_MEMB_AUTOINC_MAX = 7; -const int Hexagon_MEMB_AUTOINC_MIN = 8; +const int Hexagon_MEMB_AUTOINC_MIN = -8; @@ -415,7 +415,6 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MachineMemOperand::MOLoad, MFI.getObjectSize(FI), Align); - if (RC == Hexagon::IntRegsRegisterClass) { BuildMI(MBB, I, DL, get(Hexagon::LDriw), DestReg) .addFrameIndex(FI).addImm(0).addMemOperand(MMO); @@ -454,9 +453,9 @@ unsigned HexagonInstrInfo::createVR(MachineFunction* MF, MVT VT) const { const TargetRegisterClass *TRC; if (VT == MVT::i1) { TRC = Hexagon::PredRegsRegisterClass; - } else if (VT == MVT::i32) { + } else if (VT == MVT::i32 || VT == MVT::f32) { TRC = Hexagon::IntRegsRegisterClass; - } else if (VT == MVT::i64) { + } else if (VT == MVT::i64 || VT == MVT::f64) { 
TRC = Hexagon::DoubleRegsRegisterClass; } else { llvm_unreachable("Cannot handle this register class"); @@ -727,6 +726,12 @@ bool HexagonInstrInfo::isExtended(const MachineInstr *MI) const { // TFR_FI case Hexagon::TFR_FI_immext_V4: + + // TFRI_F + case Hexagon::TFRI_f: + case Hexagon::TFRI_cPt_f: + case Hexagon::TFRI_cNotPt_f: + case Hexagon::CONST64_Float_Real: return true; default: @@ -2059,9 +2064,6 @@ getMatchingCondBranchOpcode(int Opc, bool invertPredicate) const { case Hexagon::LDriub: return !invertPredicate ? Hexagon::LDriub_cPt : Hexagon::LDriub_cNotPt; - case Hexagon::LDriubit: - return !invertPredicate ? Hexagon::LDriub_cPt : - Hexagon::LDriub_cNotPt; // Load Indexed. case Hexagon::LDrid_indexed: return !invertPredicate ? Hexagon::LDrid_indexed_cPt : @@ -2254,13 +2256,17 @@ isValidOffset(const int Opcode, const int Offset) const { switch(Opcode) { case Hexagon::LDriw: + case Hexagon::LDriw_f: case Hexagon::STriw: + case Hexagon::STriw_f: assert((Offset % 4 == 0) && "Offset has incorrect alignment"); return (Offset >= Hexagon_MEMW_OFFSET_MIN) && (Offset <= Hexagon_MEMW_OFFSET_MAX); case Hexagon::LDrid: + case Hexagon::LDrid_f: case Hexagon::STrid: + case Hexagon::STrid_f: assert((Offset % 8 == 0) && "Offset has incorrect alignment"); return (Offset >= Hexagon_MEMD_OFFSET_MIN) && (Offset <= Hexagon_MEMD_OFFSET_MAX); @@ -2268,7 +2274,6 @@ isValidOffset(const int Opcode, const int Offset) const { case Hexagon::LDrih: case Hexagon::LDriuh: case Hexagon::STrih: - case Hexagon::LDrih_ae: assert((Offset % 2 == 0) && "Offset has incorrect alignment"); return (Offset >= Hexagon_MEMH_OFFSET_MIN) && (Offset <= Hexagon_MEMH_OFFSET_MAX); @@ -2276,9 +2281,6 @@ isValidOffset(const int Opcode, const int Offset) const { case Hexagon::LDrib: case Hexagon::STrib: case Hexagon::LDriub: - case Hexagon::LDriubit: - case Hexagon::LDrib_ae: - case Hexagon::LDriub_ae: return (Offset >= Hexagon_MEMB_OFFSET_MIN) && (Offset <= Hexagon_MEMB_OFFSET_MAX); diff --git a/lib/Target/Hexagon/HexagonInstrInfo.h b/lib/Target/Hexagon/HexagonInstrInfo.h index 1f2c6cbfa5..9682c05ea1 100644 --- a/lib/Target/Hexagon/HexagonInstrInfo.h +++ b/lib/Target/Hexagon/HexagonInstrInfo.h @@ -107,6 +107,8 @@ public: unsigned createVR(MachineFunction* MF, MVT VT) const; + virtual bool isExtendable(const MachineInstr* MI) const; + virtual bool isExtended(const MachineInstr* MI) const; virtual bool isPredicable(MachineInstr *MI) const; virtual bool PredicateInstruction(MachineInstr *MI, @@ -136,6 +138,10 @@ public: isProfitableToDupForIfCvt(MachineBasicBlock &MBB,unsigned NumCycles, const BranchProbability &Probability) const; + unsigned getInvertedPredicatedOpcode(const int Opcode) const; + unsigned getImmExtForm(const MachineInstr* MI) const; + unsigned getNormalBranchForm(const MachineInstr* MI) const; + virtual DFAPacketizer* CreateTargetScheduleState(const TargetMachine *TM, const ScheduleDAG *DAG) const; @@ -160,21 +166,16 @@ public: bool isS8_Immediate(const int value) const; bool isS6_Immediate(const int value) const; - bool isSaveCalleeSavedRegsCall(const MachineInstr* MI) const; bool isConditionalTransfer(const MachineInstr* MI) const; - bool isConditionalALU32 (const MachineInstr* MI) const; - bool isConditionalLoad (const MachineInstr* MI) const; + bool isConditionalALU32(const MachineInstr* MI) const; + bool isConditionalLoad(const MachineInstr* MI) const; bool isConditionalStore(const MachineInstr* MI) const; bool isDeallocRet(const MachineInstr *MI) const; - unsigned getInvertedPredicatedOpcode(const int Opc) const; - 
bool isExtendable(const MachineInstr* MI) const; - bool isExtended(const MachineInstr* MI) const; - bool isPostIncrement(const MachineInstr* MI) const; - bool isNewValueStore(const MachineInstr* MI) const; - bool isNewValueJump(const MachineInstr* MI) const; bool isNewValueJumpCandidate(const MachineInstr *MI) const; - unsigned getImmExtForm(const MachineInstr* MI) const; - unsigned getNormalBranchForm(const MachineInstr* MI) const; + bool isNewValueJump(const MachineInstr* MI) const; + bool isNewValueStore(const MachineInstr* MI) const; + bool isPostIncrement(const MachineInstr* MI) const; + bool isSaveCalleeSavedRegsCall(const MachineInstr* MI) const; private: int getMatchingCondBranchOpcode(int Opc, bool sense) const; diff --git a/lib/Target/Hexagon/HexagonInstrInfo.td b/lib/Target/Hexagon/HexagonInstrInfo.td index 80390dc966..9a98d4848d 100644 --- a/lib/Target/Hexagon/HexagonInstrInfo.td +++ b/lib/Target/Hexagon/HexagonInstrInfo.td @@ -25,7 +25,10 @@ def HasV3TOnly : Predicate<"Subtarget.hasV3TOpsOnly()">; def NoV3T : Predicate<"!Subtarget.hasV3TOps()">; def HasV4T : Predicate<"Subtarget.hasV4TOps()">; def NoV4T : Predicate<"!Subtarget.hasV4TOps()">; +def HasV5T : Predicate<"Subtarget.hasV5TOps()">; +def NoV5T : Predicate<"!Subtarget.hasV5TOps()">; def UseMEMOP : Predicate<"Subtarget.useMemOps()">; +def IEEERndNearV5T : Predicate<"Subtarget.modeIEEERndNear()">; // Addressing modes. def ADDRrr : ComplexPattern<i32, 2, "SelectADDRrr", [], []>; @@ -84,10 +87,12 @@ def symbolLo32 : Operand<i32> { multiclass ALU32_rr_ri<string OpcStr, SDNode OpNode> { def rr : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$b, IntRegs:$c), !strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")), - [(set IntRegs:$dst, (OpNode IntRegs:$b, IntRegs:$c))]>; + [(set (i32 IntRegs:$dst), (OpNode (i32 IntRegs:$b), + (i32 IntRegs:$c)))]>; def ri : ALU32_ri<(outs IntRegs:$dst), (ins s10Imm:$b, IntRegs:$c), !strconcat("$dst = ", !strconcat(OpcStr, "(#$b, $c)")), - [(set IntRegs:$dst, (OpNode s10Imm:$b, IntRegs:$c))]>; + [(set (i32 IntRegs:$dst), (OpNode s10Imm:$b, + (i32 IntRegs:$c)))]>; } // Multi-class for compare ops. 
@@ -95,42 +100,50 @@ let isCompare = 1 in { multiclass CMP64_rr<string OpcStr, PatFrag OpNode> { def rr : ALU64_rr<(outs PredRegs:$dst), (ins DoubleRegs:$b, DoubleRegs:$c), !strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")), - [(set PredRegs:$dst, (OpNode DoubleRegs:$b, DoubleRegs:$c))]>; + [(set (i1 PredRegs:$dst), + (OpNode (i64 DoubleRegs:$b), (i64 DoubleRegs:$c)))]>; } multiclass CMP32_rr<string OpcStr, PatFrag OpNode> { def rr : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$b, IntRegs:$c), !strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")), - [(set PredRegs:$dst, (OpNode IntRegs:$b, IntRegs:$c))]>; + [(set (i1 PredRegs:$dst), + (OpNode (i32 IntRegs:$b), (i32 IntRegs:$c)))]>; } multiclass CMP32_rr_ri_s10<string OpcStr, PatFrag OpNode> { def rr : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$b, IntRegs:$c), !strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")), - [(set PredRegs:$dst, (OpNode IntRegs:$b, IntRegs:$c))]>; + [(set (i1 PredRegs:$dst), + (OpNode (i32 IntRegs:$b), (i32 IntRegs:$c)))]>; def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, s10Imm:$c), !strconcat("$dst = ", !strconcat(OpcStr, "($b, #$c)")), - [(set PredRegs:$dst, (OpNode IntRegs:$b, s10ImmPred:$c))]>; + [(set (i1 PredRegs:$dst), + (OpNode (i32 IntRegs:$b), s10ImmPred:$c))]>; } multiclass CMP32_rr_ri_u9<string OpcStr, PatFrag OpNode> { def rr : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$b, IntRegs:$c), !strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")), - [(set PredRegs:$dst, (OpNode IntRegs:$b, IntRegs:$c))]>; + [(set (i1 PredRegs:$dst), + (OpNode (i32 IntRegs:$b), (i32 IntRegs:$c)))]>; def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, u9Imm:$c), !strconcat("$dst = ", !strconcat(OpcStr, "($b, #$c)")), - [(set PredRegs:$dst, (OpNode IntRegs:$b, u9ImmPred:$c))]>; + [(set (i1 PredRegs:$dst), + (OpNode (i32 IntRegs:$b), u9ImmPred:$c))]>; } multiclass CMP32_ri_u8<string OpcStr, PatFrag OpNode> { def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, u8Imm:$c), !strconcat("$dst = ", !strconcat(OpcStr, "($b, #$c)")), - [(set PredRegs:$dst, (OpNode IntRegs:$b, u8ImmPred:$c))]>; + [(set (i1 PredRegs:$dst), (OpNode (i32 IntRegs:$b), + u8ImmPred:$c))]>; } multiclass CMP32_ri_s8<string OpcStr, PatFrag OpNode> { def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, s8Imm:$c), !strconcat("$dst = ", !strconcat(OpcStr, "($b, #$c)")), - [(set PredRegs:$dst, (OpNode IntRegs:$b, s8ImmPred:$c))]>; + [(set (i1 PredRegs:$dst), (OpNode (i32 IntRegs:$b), + s8ImmPred:$c))]>; } } @@ -150,56 +163,63 @@ multiclass CMP32_ri_s8<string OpcStr, PatFrag OpNode> { // ALU32/ALU + //===----------------------------------------------------------------------===// // Add. -let isPredicable = 1 in +let isCommutable = 1, isPredicable = 1 in def ADD_rr : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = add($src1, $src2)", - [(set IntRegs:$dst, (add IntRegs:$src1, IntRegs:$src2))]>; + [(set (i32 IntRegs:$dst), (add (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))]>; let isPredicable = 1 in def ADD_ri : ALU32_ri<(outs IntRegs:$dst), (ins IntRegs:$src1, s16Imm:$src2), "$dst = add($src1, #$src2)", - [(set IntRegs:$dst, (add IntRegs:$src1, s16ImmPred:$src2))]>; + [(set (i32 IntRegs:$dst), (add (i32 IntRegs:$src1), + s16ImmPred:$src2))]>; // Logical operations. 
let isPredicable = 1 in def XOR_rr : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = xor($src1, $src2)", - [(set IntRegs:$dst, (xor IntRegs:$src1, IntRegs:$src2))]>; + [(set (i32 IntRegs:$dst), (xor (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))]>; -let isPredicable = 1 in +let isCommutable = 1, isPredicable = 1 in def AND_rr : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = and($src1, $src2)", - [(set IntRegs:$dst, (and IntRegs:$src1, IntRegs:$src2))]>; + [(set (i32 IntRegs:$dst), (and (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))]>; def OR_ri : ALU32_ri<(outs IntRegs:$dst), - (ins IntRegs:$src1, s8Imm:$src2), + (ins IntRegs:$src1, s10Imm:$src2), "$dst = or($src1, #$src2)", - [(set IntRegs:$dst, (or IntRegs:$src1, s8ImmPred:$src2))]>; + [(set (i32 IntRegs:$dst), (or (i32 IntRegs:$src1), + s10ImmPred:$src2))]>; def NOT_rr : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1), "$dst = not($src1)", - [(set IntRegs:$dst, (not IntRegs:$src1))]>; + [(set (i32 IntRegs:$dst), (not (i32 IntRegs:$src1)))]>; def AND_ri : ALU32_ri<(outs IntRegs:$dst), (ins IntRegs:$src1, s10Imm:$src2), "$dst = and($src1, #$src2)", - [(set IntRegs:$dst, (and IntRegs:$src1, s10ImmPred:$src2))]>; + [(set (i32 IntRegs:$dst), (and (i32 IntRegs:$src1), + s10ImmPred:$src2))]>; -let isPredicable = 1 in +let isCommutable = 1, isPredicable = 1 in def OR_rr : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = or($src1, $src2)", - [(set IntRegs:$dst, (or IntRegs:$src1, IntRegs:$src2))]>; + [(set (i32 IntRegs:$dst), (or (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))]>; // Negate. def NEG : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1), "$dst = neg($src1)", - [(set IntRegs:$dst, (ineg IntRegs:$src1))]>; + [(set (i32 IntRegs:$dst), (ineg (i32 IntRegs:$src1)))]>; // Nop. let neverHasSideEffects = 1 in def NOP : ALU32_rr<(outs), (ins), @@ -211,13 +231,20 @@ let isPredicable = 1 in def SUB_rr : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = sub($src1, $src2)", - [(set IntRegs:$dst, (sub IntRegs:$src1, IntRegs:$src2))]>; + [(set (i32 IntRegs:$dst), (sub (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))]>; + +// Rd32=sub(#s10,Rs32) +def SUB_ri : ALU32_ri<(outs IntRegs:$dst), + (ins s10Imm:$src1, IntRegs:$src2), + "$dst = sub(#$src1, $src2)", + [(set IntRegs:$dst, (sub s10ImmPred:$src1, IntRegs:$src2))]>; // Transfer immediate. -let isReMaterializable = 1, isPredicable = 1 in +let isMoveImm = 1, isReMaterializable = 1, isPredicable = 1 in def TFRI : ALU32_ri<(outs IntRegs:$dst), (ins s16Imm:$src1), "$dst = #$src1", - [(set IntRegs:$dst, s16ImmPred:$src1)]>; + [(set (i32 IntRegs:$dst), s16ImmPred:$src1)]>; // Transfer register. let neverHasSideEffects = 1, isPredicable = 1 in @@ -225,6 +252,12 @@ def TFR : ALU32_ri<(outs IntRegs:$dst), (ins IntRegs:$src1), "$dst = $src1", []>; +let neverHasSideEffects = 1, isPredicable = 1 in +def TFR64 : ALU32_ri<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1), + "$dst = $src1", + []>; + + // Transfer control register. 
let neverHasSideEffects = 1 in def TFCR : CRInst<(outs CRRegs:$dst), (ins IntRegs:$src1), @@ -262,48 +295,52 @@ def VMUX_prr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins PredRegs:$src1, def MUX_rr : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), "$dst = mux($src1, $src2, $src3)", - [(set IntRegs:$dst, (select PredRegs:$src1, IntRegs:$src2, - IntRegs:$src3))]>; + [(set (i32 IntRegs:$dst), (i32 (select (i1 PredRegs:$src1), + (i32 IntRegs:$src2), + (i32 IntRegs:$src3))))]>; def MUX_ir : ALU32_ir<(outs IntRegs:$dst), (ins PredRegs:$src1, s8Imm:$src2, IntRegs:$src3), "$dst = mux($src1, #$src2, $src3)", - [(set IntRegs:$dst, (select PredRegs:$src1, - s8ImmPred:$src2, IntRegs:$src3))]>; + [(set (i32 IntRegs:$dst), (i32 (select (i1 PredRegs:$src1), + s8ImmPred:$src2, + (i32 IntRegs:$src3))))]>; def MUX_ri : ALU32_ri<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, s8Imm:$src3), "$dst = mux($src1, $src2, #$src3)", - [(set IntRegs:$dst, (select PredRegs:$src1, IntRegs:$src2, - s8ImmPred:$src3))]>; + [(set (i32 IntRegs:$dst), (i32 (select (i1 PredRegs:$src1), + (i32 IntRegs:$src2), + s8ImmPred:$src3)))]>; def MUX_ii : ALU32_ii<(outs IntRegs:$dst), (ins PredRegs:$src1, s8Imm:$src2, s8Imm:$src3), "$dst = mux($src1, #$src2, #$src3)", - [(set IntRegs:$dst, (select PredRegs:$src1, s8ImmPred:$src2, - s8ImmPred:$src3))]>; + [(set (i32 IntRegs:$dst), (i32 (select (i1 PredRegs:$src1), + s8ImmPred:$src2, + s8ImmPred:$src3)))]>; // Shift halfword. let isPredicable = 1 in def ASLH : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1), "$dst = aslh($src1)", - [(set IntRegs:$dst, (shl 16, IntRegs:$src1))]>; + [(set (i32 IntRegs:$dst), (shl 16, (i32 IntRegs:$src1)))]>; let isPredicable = 1 in def ASRH : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1), "$dst = asrh($src1)", - [(set IntRegs:$dst, (sra 16, IntRegs:$src1))]>; + [(set (i32 IntRegs:$dst), (sra 16, (i32 IntRegs:$src1)))]>; // Sign extend. let isPredicable = 1 in def SXTB : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1), "$dst = sxtb($src1)", - [(set IntRegs:$dst, (sext_inreg IntRegs:$src1, i8))]>; + [(set (i32 IntRegs:$dst), (sext_inreg (i32 IntRegs:$src1), i8))]>; let isPredicable = 1 in def SXTH : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1), "$dst = sxth($src1)", - [(set IntRegs:$dst, (sext_inreg IntRegs:$src1, i16))]>; + [(set (i32 IntRegs:$dst), (sext_inreg (i32 IntRegs:$src1), i16))]>; // Zero extend. let isPredicable = 1, neverHasSideEffects = 1 in @@ -503,7 +540,6 @@ def SUB_rr_cdnNotPt : ALU32_rr<(outs IntRegs:$dst), // Conditional transfer. 
- let neverHasSideEffects = 1, isPredicated = 1 in def TFR_cPt : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2), "if ($src1) $dst = $src2", @@ -515,6 +551,19 @@ def TFR_cNotPt : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1, "if (!$src1) $dst = $src2", []>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def TFR64_cPt : ALU32_rr<(outs DoubleRegs:$dst), (ins PredRegs:$src1, + DoubleRegs:$src2), + "if ($src1) $dst = $src2", + []>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def TFR64_cNotPt : ALU32_rr<(outs DoubleRegs:$dst), (ins PredRegs:$src1, + DoubleRegs:$src2), + "if (!$src1) $dst = $src2", + []>; + let neverHasSideEffects = 1, isPredicated = 1 in def TFRI_cPt : ALU32_ri<(outs IntRegs:$dst), (ins PredRegs:$src1, s12Imm:$src2), "if ($src1) $dst = #$src2", @@ -582,8 +631,8 @@ defm CMPGEU : CMP32_ri_u8<"cmp.geu", setuge>; def ADD64_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2), "$dst = add($src1, $src2)", - [(set DoubleRegs:$dst, (add DoubleRegs:$src1, - DoubleRegs:$src2))]>; + [(set (i64 DoubleRegs:$dst), (add (i64 DoubleRegs:$src1), + (i64 DoubleRegs:$src2)))]>; // Add halfword. @@ -596,40 +645,43 @@ defm CMPGTU64 : CMP64_rr<"cmp.gtu", setugt>; def AND_rr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2), "$dst = and($src1, $src2)", - [(set DoubleRegs:$dst, (and DoubleRegs:$src1, - DoubleRegs:$src2))]>; + [(set (i64 DoubleRegs:$dst), (and (i64 DoubleRegs:$src1), + (i64 DoubleRegs:$src2)))]>; def OR_rr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2), "$dst = or($src1, $src2)", - [(set DoubleRegs:$dst, (or DoubleRegs:$src1, DoubleRegs:$src2))]>; + [(set (i64 DoubleRegs:$dst), (or (i64 DoubleRegs:$src1), + (i64 DoubleRegs:$src2)))]>; def XOR_rr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2), "$dst = xor($src1, $src2)", - [(set DoubleRegs:$dst, (xor DoubleRegs:$src1, - DoubleRegs:$src2))]>; + [(set (i64 DoubleRegs:$dst), (xor (i64 DoubleRegs:$src1), + (i64 DoubleRegs:$src2)))]>; // Maximum. def MAXw_rr : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = max($src2, $src1)", - [(set IntRegs:$dst, (select (i1 (setlt IntRegs:$src2, - IntRegs:$src1)), - IntRegs:$src1, IntRegs:$src2))]>; + [(set (i32 IntRegs:$dst), + (i32 (select (i1 (setlt (i32 IntRegs:$src2), + (i32 IntRegs:$src1))), + (i32 IntRegs:$src1), (i32 IntRegs:$src2))))]>; // Minimum. def MINw_rr : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = min($src2, $src1)", - [(set IntRegs:$dst, (select (i1 (setgt IntRegs:$src2, - IntRegs:$src1)), - IntRegs:$src1, IntRegs:$src2))]>; + [(set (i32 IntRegs:$dst), + (i32 (select (i1 (setgt (i32 IntRegs:$src2), + (i32 IntRegs:$src1))), + (i32 IntRegs:$src1), (i32 IntRegs:$src2))))]>; // Subtract. def SUB64_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2), "$dst = sub($src1, $src2)", - [(set DoubleRegs:$dst, (sub DoubleRegs:$src1, - DoubleRegs:$src2))]>; + [(set (i64 DoubleRegs:$dst), (sub (i64 DoubleRegs:$src1), + (i64 DoubleRegs:$src2)))]>; // Subtract halfword. @@ -694,7 +746,8 @@ def TFR_64 : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1), // Logical operations on predicates. 
def AND_pp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1, PredRegs:$src2), "$dst = and($src1, $src2)", - [(set PredRegs:$dst, (and PredRegs:$src1, PredRegs:$src2))]>; + [(set (i1 PredRegs:$dst), (and (i1 PredRegs:$src1), + (i1 PredRegs:$src2)))]>; let neverHasSideEffects = 1 in def AND_pnotp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1, @@ -733,15 +786,17 @@ def MASK_p : SInst<(outs DoubleRegs:$dst), (ins PredRegs:$src1), def NOT_p : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1), "$dst = not($src1)", - [(set PredRegs:$dst, (not PredRegs:$src1))]>; + [(set (i1 PredRegs:$dst), (not (i1 PredRegs:$src1)))]>; def OR_pp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1, PredRegs:$src2), "$dst = or($src1, $src2)", - [(set PredRegs:$dst, (or PredRegs:$src1, PredRegs:$src2))]>; + [(set (i1 PredRegs:$dst), (or (i1 PredRegs:$src1), + (i1 PredRegs:$src2)))]>; def XOR_pp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1, PredRegs:$src2), "$dst = xor($src1, $src2)", - [(set PredRegs:$dst, (xor PredRegs:$src1, PredRegs:$src2))]>; + [(set (i1 PredRegs:$dst), (xor (i1 PredRegs:$src1), + (i1 PredRegs:$src2)))]>; // User control register transfer. @@ -767,7 +822,7 @@ let isBranch = 1, isTerminator=1, Defs = [PC], def JMP_c : JInst< (outs), (ins PredRegs:$src, brtarget:$offset), "if ($src) jump $offset", - [(brcond PredRegs:$src, bb:$offset)]>; + [(brcond (i1 PredRegs:$src), bb:$offset)]>; } // if (!p0) jump @@ -833,7 +888,7 @@ def retflag : SDNode<"HexagonISD::RET_FLAG", SDTNone, [SDNPHasChain, SDNPOptInGlue]>; // Jump to address from register. -let isReturn = 1, isTerminator = 1, isBarrier = 1, +let isPredicable =1, isReturn = 1, isTerminator = 1, isBarrier = 1, Defs = [PC], Uses = [R31] in { def JMPR: JRInst<(outs), (ins), "jumpr r31", @@ -841,7 +896,7 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1, } // Jump to address from register. -let isReturn = 1, isTerminator = 1, isBarrier = 1, +let isReturn = 1, isTerminator = 1, isBarrier = 1, isPredicated = 1, Defs = [PC], Uses = [R31] in { def JMPR_cPt: JRInst<(outs), (ins PredRegs:$src1), "if ($src1) jumpr r31", @@ -849,7 +904,7 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1, } // Jump to address from register. 
-let isReturn = 1, isTerminator = 1, isBarrier = 1, +let isReturn = 1, isTerminator = 1, isBarrier = 1, isPredicated = 1, Defs = [PC], Uses = [R31] in { def JMPR_cNotPt: JRInst<(outs), (ins PredRegs:$src1), "if (!$src1) jumpr r31", @@ -872,26 +927,29 @@ let isPredicable = 1 in def LDrid : LDInst<(outs DoubleRegs:$dst), (ins MEMri:$addr), "$dst = memd($addr)", - [(set DoubleRegs:$dst, (load ADDRriS11_3:$addr))]>; + [(set (i64 DoubleRegs:$dst), (i64 (load ADDRriS11_3:$addr)))]>; let isPredicable = 1, AddedComplexity = 20 in def LDrid_indexed : LDInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, s11_3Imm:$offset), - "$dst=memd($src1+#$offset)", - [(set DoubleRegs:$dst, (load (add IntRegs:$src1, - s11_3ImmPred:$offset)))]>; + "$dst = memd($src1+#$offset)", + [(set (i64 DoubleRegs:$dst), + (i64 (load (add (i32 IntRegs:$src1), + s11_3ImmPred:$offset))))]>; let mayLoad = 1, neverHasSideEffects = 1 in def LDrid_GP : LDInst<(outs DoubleRegs:$dst), (ins globaladdress:$global, u16Imm:$offset), - "$dst=memd(#$global+$offset)", - []>; + "$dst = memd(#$global+$offset)", + []>, + Requires<[NoV4T]>; let mayLoad = 1, neverHasSideEffects = 1 in def LDd_GP : LDInst<(outs DoubleRegs:$dst), (ins globaladdress:$global), - "$dst=memd(#$global)", - []>; + "$dst = memd(#$global)", + []>, + Requires<[NoV4T]>; let isPredicable = 1, mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in def POST_LDrid : LDInstPI<(outs DoubleRegs:$dst, IntRegs:$dst2), @@ -901,67 +959,67 @@ def POST_LDrid : LDInstPI<(outs DoubleRegs:$dst, IntRegs:$dst2), "$src1 = $dst2">; // Load doubleword conditionally. -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDrid_cPt : LDInst<(outs DoubleRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if ($src1) $dst = memd($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDrid_cNotPt : LDInst<(outs DoubleRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if (!$src1) $dst = memd($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDrid_indexed_cPt : LDInst<(outs DoubleRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3), - "if ($src1) $dst=memd($src2+#$src3)", + "if ($src1) $dst = memd($src2+#$src3)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDrid_indexed_cNotPt : LDInst<(outs DoubleRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3), - "if (!$src1) $dst=memd($src2+#$src3)", + "if (!$src1) $dst = memd($src2+#$src3)", []>; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in +let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in def POST_LDrid_cPt : LDInstPI<(outs DoubleRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_3Imm:$src3), "if ($src1) $dst1 = memd($src2++#$src3)", [], "$src2 = $dst2">; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in +let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in def POST_LDrid_cNotPt : LDInstPI<(outs DoubleRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_3Imm:$src3), "if (!$src1) $dst1 = memd($src2++#$src3)", [], "$src2 = $dst2">; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDrid_cdnPt : LDInst<(outs DoubleRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if ($src1.new) $dst = memd($addr)", []>; -let 
mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDrid_cdnNotPt : LDInst<(outs DoubleRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if (!$src1.new) $dst = memd($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDrid_indexed_cdnPt : LDInst<(outs DoubleRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3), - "if ($src1.new) $dst=memd($src2+#$src3)", + "if ($src1.new) $dst = memd($src2+#$src3)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDrid_indexed_cdnNotPt : LDInst<(outs DoubleRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3), - "if (!$src1.new) $dst=memd($src2+#$src3)", + "if (!$src1.new) $dst = memd($src2+#$src3)", []>; @@ -970,47 +1028,46 @@ let isPredicable = 1 in def LDrib : LDInst<(outs IntRegs:$dst), (ins MEMri:$addr), "$dst = memb($addr)", - [(set IntRegs:$dst, (sextloadi8 ADDRriS11_0:$addr))]>; + [(set (i32 IntRegs:$dst), (i32 (sextloadi8 ADDRriS11_0:$addr)))]>; -def LDrib_ae : LDInst<(outs IntRegs:$dst), - (ins MEMri:$addr), - "$dst = memb($addr)", - [(set IntRegs:$dst, (extloadi8 ADDRriS11_0:$addr))]>; +// Load byte any-extend +def : Pat < (i32 (extloadi8 ADDRriS11_0:$addr)), + (i32 (LDrib ADDRriS11_0:$addr)) >; // Indexed load byte. let isPredicable = 1, AddedComplexity = 20 in def LDrib_indexed : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, s11_0Imm:$offset), - "$dst=memb($src1+#$offset)", - [(set IntRegs:$dst, (sextloadi8 (add IntRegs:$src1, - s11_0ImmPred:$offset)))]>; - + "$dst = memb($src1+#$offset)", + [(set (i32 IntRegs:$dst), + (i32 (sextloadi8 (add (i32 IntRegs:$src1), + s11_0ImmPred:$offset))))]>; // Indexed load byte any-extend. let AddedComplexity = 20 in -def LDrib_ae_indexed : LDInst<(outs IntRegs:$dst), - (ins IntRegs:$src1, s11_0Imm:$offset), - "$dst=memb($src1+#$offset)", - [(set IntRegs:$dst, (extloadi8 (add IntRegs:$src1, - s11_0ImmPred:$offset)))]>; +def : Pat < (i32 (extloadi8 (add IntRegs:$src1, s11_0ImmPred:$offset))), + (i32 (LDrib_indexed IntRegs:$src1, s11_0ImmPred:$offset)) >; let mayLoad = 1, neverHasSideEffects = 1 in def LDrib_GP : LDInst<(outs IntRegs:$dst), (ins globaladdress:$global, u16Imm:$offset), - "$dst=memb(#$global+$offset)", - []>; + "$dst = memb(#$global+$offset)", + []>, + Requires<[NoV4T]>; let mayLoad = 1, neverHasSideEffects = 1 in def LDb_GP : LDInst<(outs IntRegs:$dst), (ins globaladdress:$global), - "$dst=memb(#$global)", - []>; + "$dst = memb(#$global)", + []>, + Requires<[NoV4T]>; let mayLoad = 1, neverHasSideEffects = 1 in def LDub_GP : LDInst<(outs IntRegs:$dst), (ins globaladdress:$global), - "$dst=memub(#$global)", - []>; + "$dst = memub(#$global)", + []>, + Requires<[NoV4T]>; let isPredicable = 1, mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in def POST_LDrib : LDInstPI<(outs IntRegs:$dst, IntRegs:$dst2), @@ -1020,63 +1077,63 @@ def POST_LDrib : LDInstPI<(outs IntRegs:$dst, IntRegs:$dst2), "$src1 = $dst2">; // Load byte conditionally. 
-let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDrib_cPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if ($src1) $dst = memb($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDrib_cNotPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if (!$src1) $dst = memb($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDrib_indexed_cPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3), "if ($src1) $dst = memb($src2+#$src3)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDrib_indexed_cNotPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3), "if (!$src1) $dst = memb($src2+#$src3)", []>; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in +let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in def POST_LDrib_cPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_0Imm:$src3), "if ($src1) $dst1 = memb($src2++#$src3)", [], "$src2 = $dst2">; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in +let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in def POST_LDrib_cNotPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_0Imm:$src3), "if (!$src1) $dst1 = memb($src2++#$src3)", [], "$src2 = $dst2">; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDrib_cdnPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if ($src1.new) $dst = memb($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDrib_cdnNotPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if (!$src1.new) $dst = memb($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDrib_indexed_cdnPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3), "if ($src1.new) $dst = memb($src2+#$src3)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDrib_indexed_cdnNotPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3), "if (!$src1.new) $dst = memb($src2+#$src3)", @@ -1088,45 +1145,43 @@ let isPredicable = 1 in def LDrih : LDInst<(outs IntRegs:$dst), (ins MEMri:$addr), "$dst = memh($addr)", - [(set IntRegs:$dst, (sextloadi16 ADDRriS11_1:$addr))]>; + [(set (i32 IntRegs:$dst), (i32 (sextloadi16 ADDRriS11_1:$addr)))]>; let isPredicable = 1, AddedComplexity = 20 in def LDrih_indexed : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, s11_1Imm:$offset), - "$dst=memh($src1+#$offset)", - [(set IntRegs:$dst, (sextloadi16 (add IntRegs:$src1, - s11_1ImmPred:$offset)))] >; + "$dst = memh($src1+#$offset)", + [(set (i32 IntRegs:$dst), + (i32 (sextloadi16 (add (i32 IntRegs:$src1), + s11_1ImmPred:$offset))))] >; -def LDrih_ae : LDInst<(outs IntRegs:$dst), - (ins MEMri:$addr), - "$dst = memh($addr)", - [(set IntRegs:$dst, (extloadi16 ADDRriS11_1:$addr))]>; +def : Pat < (i32 (extloadi16 ADDRriS11_1:$addr)), + (i32 (LDrih ADDRriS11_1:$addr))>; let AddedComplexity = 20 in -def LDrih_ae_indexed : LDInst<(outs 
IntRegs:$dst), - (ins IntRegs:$src1, s11_1Imm:$offset), - "$dst=memh($src1+#$offset)", - [(set IntRegs:$dst, (extloadi16 (add IntRegs:$src1, - s11_1ImmPred:$offset)))] >; +def : Pat < (i32 (extloadi16 (add IntRegs:$src1, s11_1ImmPred:$offset))), + (i32 (LDrih_indexed IntRegs:$src1, s11_1ImmPred:$offset)) >; let mayLoad = 1, neverHasSideEffects = 1 in def LDrih_GP : LDInst<(outs IntRegs:$dst), (ins globaladdress:$global, u16Imm:$offset), - "$dst=memh(#$global+$offset)", - []>; + "$dst = memh(#$global+$offset)", + []>, + Requires<[NoV4T]>; let mayLoad = 1, neverHasSideEffects = 1 in def LDh_GP : LDInst<(outs IntRegs:$dst), (ins globaladdress:$global), - "$dst=memh(#$global)", - []>; + "$dst = memh(#$global)", + []>, + Requires<[NoV4T]>; let mayLoad = 1, neverHasSideEffects = 1 in def LDuh_GP : LDInst<(outs IntRegs:$dst), (ins globaladdress:$global), - "$dst=memuh(#$global)", - []>; - + "$dst = memuh(#$global)", + []>, + Requires<[NoV4T]>; let isPredicable = 1, mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in def POST_LDrih : LDInstPI<(outs IntRegs:$dst, IntRegs:$dst2), @@ -1136,63 +1191,63 @@ def POST_LDrih : LDInstPI<(outs IntRegs:$dst, IntRegs:$dst2), "$src1 = $dst2">; // Load halfword conditionally. -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDrih_cPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if ($src1) $dst = memh($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDrih_cNotPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if (!$src1) $dst = memh($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDrih_indexed_cPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3), "if ($src1) $dst = memh($src2+#$src3)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDrih_indexed_cNotPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3), "if (!$src1) $dst = memh($src2+#$src3)", []>; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in +let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in def POST_LDrih_cPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_1Imm:$src3), "if ($src1) $dst1 = memh($src2++#$src3)", [], "$src2 = $dst2">; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in +let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in def POST_LDrih_cNotPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_1Imm:$src3), "if (!$src1) $dst1 = memh($src2++#$src3)", [], "$src2 = $dst2">; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDrih_cdnPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if ($src1.new) $dst = memh($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDrih_cdnNotPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if (!$src1.new) $dst = memh($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDrih_indexed_cdnPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3), "if ($src1.new) $dst = 
memh($src2+#$src3)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDrih_indexed_cdnNotPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3), "if (!$src1.new) $dst = memh($src2+#$src3)", @@ -1203,46 +1258,29 @@ let isPredicable = 1 in def LDriub : LDInst<(outs IntRegs:$dst), (ins MEMri:$addr), "$dst = memub($addr)", - [(set IntRegs:$dst, (zextloadi8 ADDRriS11_0:$addr))]>; + [(set (i32 IntRegs:$dst), (i32 (zextloadi8 ADDRriS11_0:$addr)))]>; -let isPredicable = 1 in -def LDriubit : LDInst<(outs IntRegs:$dst), - (ins MEMri:$addr), - "$dst = memub($addr)", - [(set IntRegs:$dst, (zextloadi1 ADDRriS11_0:$addr))]>; +def : Pat < (i32 (zextloadi1 ADDRriS11_0:$addr)), + (i32 (LDriub ADDRriS11_0:$addr))>; let isPredicable = 1, AddedComplexity = 20 in def LDriub_indexed : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, s11_0Imm:$offset), - "$dst=memub($src1+#$offset)", - [(set IntRegs:$dst, (zextloadi8 (add IntRegs:$src1, - s11_0ImmPred:$offset)))]>; + "$dst = memub($src1+#$offset)", + [(set (i32 IntRegs:$dst), + (i32 (zextloadi8 (add (i32 IntRegs:$src1), + s11_0ImmPred:$offset))))]>; let AddedComplexity = 20 in -def LDriubit_indexed : LDInst<(outs IntRegs:$dst), - (ins IntRegs:$src1, s11_0Imm:$offset), - "$dst=memub($src1+#$offset)", - [(set IntRegs:$dst, (zextloadi1 (add IntRegs:$src1, - s11_0ImmPred:$offset)))]>; - -def LDriub_ae : LDInst<(outs IntRegs:$dst), - (ins MEMri:$addr), - "$dst = memub($addr)", - [(set IntRegs:$dst, (extloadi8 ADDRriS11_0:$addr))]>; - - -let AddedComplexity = 20 in -def LDriub_ae_indexed : LDInst<(outs IntRegs:$dst), - (ins IntRegs:$src1, s11_0Imm:$offset), - "$dst=memub($src1+#$offset)", - [(set IntRegs:$dst, (extloadi8 (add IntRegs:$src1, - s11_0ImmPred:$offset)))]>; +def : Pat < (i32 (zextloadi1 (add IntRegs:$src1, s11_0ImmPred:$offset))), + (i32 (LDriub_indexed IntRegs:$src1, s11_0ImmPred:$offset))>; let mayLoad = 1, neverHasSideEffects = 1 in def LDriub_GP : LDInst<(outs IntRegs:$dst), (ins globaladdress:$global, u16Imm:$offset), - "$dst=memub(#$global+$offset)", - []>; + "$dst = memub(#$global+$offset)", + []>, + Requires<[NoV4T]>; let isPredicable = 1, mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in def POST_LDriub : LDInstPI<(outs IntRegs:$dst, IntRegs:$dst2), @@ -1252,63 +1290,63 @@ def POST_LDriub : LDInstPI<(outs IntRegs:$dst, IntRegs:$dst2), "$src1 = $dst2">; // Load unsigned byte conditionally. 
-let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDriub_cPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if ($src1) $dst = memub($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDriub_cNotPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if (!$src1) $dst = memub($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDriub_indexed_cPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3), "if ($src1) $dst = memub($src2+#$src3)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDriub_indexed_cNotPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3), "if (!$src1) $dst = memub($src2+#$src3)", []>; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in +let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in def POST_LDriub_cPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_0Imm:$src3), "if ($src1) $dst1 = memub($src2++#$src3)", [], "$src2 = $dst2">; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in +let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in def POST_LDriub_cNotPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_0Imm:$src3), "if (!$src1) $dst1 = memub($src2++#$src3)", [], "$src2 = $dst2">; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDriub_cdnPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if ($src1.new) $dst = memub($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDriub_cdnNotPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if (!$src1.new) $dst = memub($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDriub_indexed_cdnPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3), "if ($src1.new) $dst = memub($src2+#$src3)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDriub_indexed_cdnNotPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3), "if (!$src1.new) $dst = memub($src2+#$src3)", @@ -1319,35 +1357,23 @@ let isPredicable = 1 in def LDriuh : LDInst<(outs IntRegs:$dst), (ins MEMri:$addr), "$dst = memuh($addr)", - [(set IntRegs:$dst, (zextloadi16 ADDRriS11_1:$addr))]>; + [(set (i32 IntRegs:$dst), (i32 (zextloadi16 ADDRriS11_1:$addr)))]>; // Indexed load unsigned halfword. let isPredicable = 1, AddedComplexity = 20 in def LDriuh_indexed : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, s11_1Imm:$offset), - "$dst=memuh($src1+#$offset)", - [(set IntRegs:$dst, (zextloadi16 (add IntRegs:$src1, - s11_1ImmPred:$offset)))]>; - -def LDriuh_ae : LDInst<(outs IntRegs:$dst), - (ins MEMri:$addr), - "$dst = memuh($addr)", - [(set IntRegs:$dst, (extloadi16 ADDRriS11_1:$addr))]>; - - -// Indexed load unsigned halfword any-extend. 
-let AddedComplexity = 20 in -def LDriuh_ae_indexed : LDInst<(outs IntRegs:$dst), - (ins IntRegs:$src1, s11_1Imm:$offset), - "$dst=memuh($src1+#$offset)", - [(set IntRegs:$dst, (extloadi16 (add IntRegs:$src1, - s11_1ImmPred:$offset)))] >; + "$dst = memuh($src1+#$offset)", + [(set (i32 IntRegs:$dst), + (i32 (zextloadi16 (add (i32 IntRegs:$src1), + s11_1ImmPred:$offset))))]>; let mayLoad = 1, neverHasSideEffects = 1 in def LDriuh_GP : LDInst<(outs IntRegs:$dst), (ins globaladdress:$global, u16Imm:$offset), - "$dst=memuh(#$global+$offset)", - []>; + "$dst = memuh(#$global+$offset)", + []>, + Requires<[NoV4T]>; let isPredicable = 1, mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in def POST_LDriuh : LDInstPI<(outs IntRegs:$dst, IntRegs:$dst2), @@ -1357,63 +1383,63 @@ def POST_LDriuh : LDInstPI<(outs IntRegs:$dst, IntRegs:$dst2), "$src1 = $dst2">; // Load unsigned halfword conditionally. -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDriuh_cPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if ($src1) $dst = memuh($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDriuh_cNotPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if (!$src1) $dst = memuh($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDriuh_indexed_cPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3), "if ($src1) $dst = memuh($src2+#$src3)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDriuh_indexed_cNotPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3), "if (!$src1) $dst = memuh($src2+#$src3)", []>; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in +let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in def POST_LDriuh_cPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_1Imm:$src3), "if ($src1) $dst1 = memuh($src2++#$src3)", [], "$src2 = $dst2">; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in +let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in def POST_LDriuh_cNotPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_1Imm:$src3), "if (!$src1) $dst1 = memuh($src2++#$src3)", [], "$src2 = $dst2">; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDriuh_cdnPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if ($src1.new) $dst = memuh($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDriuh_cdnNotPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if (!$src1.new) $dst = memuh($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDriuh_indexed_cdnPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3), "if ($src1.new) $dst = memuh($src2+#$src3)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDriuh_indexed_cdnNotPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3), "if (!$src1.new) $dst = memuh($src2+#$src3)", @@ -1424,10 
+1450,10 @@ def LDriuh_indexed_cdnNotPt : LDInst<(outs IntRegs:$dst), let isPredicable = 1 in def LDriw : LDInst<(outs IntRegs:$dst), (ins MEMri:$addr), "$dst = memw($addr)", - [(set IntRegs:$dst, (load ADDRriS11_2:$addr))]>; + [(set IntRegs:$dst, (i32 (load ADDRriS11_2:$addr)))]>; // Load predicate. -let mayLoad = 1, Defs = [R10,R11] in +let mayLoad = 1, Defs = [R10,R11,D5], neverHasSideEffects = 1 in def LDriw_pred : LDInst<(outs PredRegs:$dst), (ins MEMri:$addr), "Error; should not emit", @@ -1437,21 +1463,23 @@ def LDriw_pred : LDInst<(outs PredRegs:$dst), let isPredicable = 1, AddedComplexity = 20 in def LDriw_indexed : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, s11_2Imm:$offset), - "$dst=memw($src1+#$offset)", - [(set IntRegs:$dst, (load (add IntRegs:$src1, - s11_2ImmPred:$offset)))]>; + "$dst = memw($src1+#$offset)", + [(set IntRegs:$dst, (i32 (load (add IntRegs:$src1, + s11_2ImmPred:$offset))))]>; let mayLoad = 1, neverHasSideEffects = 1 in def LDriw_GP : LDInst<(outs IntRegs:$dst), (ins globaladdress:$global, u16Imm:$offset), - "$dst=memw(#$global+$offset)", - []>; + "$dst = memw(#$global+$offset)", + []>, + Requires<[NoV4T]>; let mayLoad = 1, neverHasSideEffects = 1 in def LDw_GP : LDInst<(outs IntRegs:$dst), (ins globaladdress:$global), - "$dst=memw(#$global)", - []>; + "$dst = memw(#$global)", + []>, + Requires<[NoV4T]>; let isPredicable = 1, mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in def POST_LDriw : LDInstPI<(outs IntRegs:$dst, IntRegs:$dst2), @@ -1462,66 +1490,66 @@ def POST_LDriw : LDInstPI<(outs IntRegs:$dst, IntRegs:$dst2), // Load word conditionally. -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDriw_cPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if ($src1) $dst = memw($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDriw_cNotPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if (!$src1) $dst = memw($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDriw_indexed_cPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3), - "if ($src1) $dst=memw($src2+#$src3)", + "if ($src1) $dst = memw($src2+#$src3)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDriw_indexed_cNotPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3), - "if (!$src1) $dst=memw($src2+#$src3)", + "if (!$src1) $dst = memw($src2+#$src3)", []>; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in +let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in def POST_LDriw_cPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_2Imm:$src3), "if ($src1) $dst1 = memw($src2++#$src3)", [], "$src2 = $dst2">; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in +let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in def POST_LDriw_cNotPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_2Imm:$src3), "if (!$src1) $dst1 = memw($src2++#$src3)", [], "$src2 = $dst2">; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDriw_cdnPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if ($src1.new) $dst = memw($addr)", []>; 
-let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDriw_cdnNotPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if (!$src1.new) $dst = memw($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDriw_indexed_cdnPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3), - "if ($src1.new) $dst=memw($src2+#$src3)", + "if ($src1.new) $dst = memw($src2+#$src3)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in def LDriw_indexed_cdnNotPt : LDInst<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3), - "if (!$src1.new) $dst=memw($src2+#$src3)", + "if (!$src1.new) $dst = memw($src2+#$src3)", []>; // Deallocate stack frame. @@ -1557,13 +1585,14 @@ let Defs = [R29, R30, R31], Uses = [R29], neverHasSideEffects = 1 in { // Rd=+mpyi(Rs,#u8) def MPYI_riu : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u8Imm:$src2), "$dst =+ mpyi($src1, #$src2)", - [(set IntRegs:$dst, (mul IntRegs:$src1, u8ImmPred:$src2))]>; + [(set (i32 IntRegs:$dst), (mul (i32 IntRegs:$src1), + u8ImmPred:$src2))]>; // Rd=-mpyi(Rs,#u8) def MPYI_rin : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, n8Imm:$src2), "$dst =- mpyi($src1, #$src2)", - [(set IntRegs:$dst, - (mul IntRegs:$src1, n8ImmPred:$src2))]>; + [(set (i32 IntRegs:$dst), (mul (i32 IntRegs:$src1), + n8ImmPred:$src2))]>; // Rd=mpyi(Rs,#m9) // s9 is NOT the same as m9 - but it works.. so far. @@ -1571,35 +1600,40 @@ def MPYI_rin : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, n8Imm:$src2), // depending on the value of m9. See Arch Spec. def MPYI_ri : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, s9Imm:$src2), "$dst = mpyi($src1, #$src2)", - [(set IntRegs:$dst, (mul IntRegs:$src1, s9ImmPred:$src2))]>; + [(set (i32 IntRegs:$dst), (mul (i32 IntRegs:$src1), + s9ImmPred:$src2))]>; // Rd=mpyi(Rs,Rt) def MPYI : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = mpyi($src1, $src2)", - [(set IntRegs:$dst, (mul IntRegs:$src1, IntRegs:$src2))]>; + [(set (i32 IntRegs:$dst), (mul (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))]>; // Rx+=mpyi(Rs,#u8) def MPYI_acc_ri : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, u8Imm:$src3), "$dst += mpyi($src2, #$src3)", - [(set IntRegs:$dst, - (add (mul IntRegs:$src2, u8ImmPred:$src3), IntRegs:$src1))], + [(set (i32 IntRegs:$dst), + (add (mul (i32 IntRegs:$src2), u8ImmPred:$src3), + (i32 IntRegs:$src1)))], "$src1 = $dst">; // Rx+=mpyi(Rs,Rt) def MPYI_acc_rr : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3), "$dst += mpyi($src2, $src3)", - [(set IntRegs:$dst, - (add (mul IntRegs:$src2, IntRegs:$src3), IntRegs:$src1))], + [(set (i32 IntRegs:$dst), + (add (mul (i32 IntRegs:$src2), (i32 IntRegs:$src3)), + (i32 IntRegs:$src1)))], "$src1 = $dst">; // Rx-=mpyi(Rs,#u8) def MPYI_sub_ri : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, u8Imm:$src3), "$dst -= mpyi($src2, #$src3)", - [(set IntRegs:$dst, - (sub IntRegs:$src1, (mul IntRegs:$src2, u8ImmPred:$src3)))], + [(set (i32 IntRegs:$dst), + (sub (i32 IntRegs:$src1), (mul (i32 IntRegs:$src2), + u8ImmPred:$src3)))], "$src1 = $dst">; // Multiply and use upper result. 
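// ("Upper result" means the high 32 bits of the full 64-bit product; this is what
// the mulhs/mulhu patterns below select. A rough sketch with generic registers:
//   mpy(r1, r2)  is (sext_i64(r1) * sext_i64(r2)) >> 32   (signed, MPY/mulhs)
//   mpyu(r1, r2) is (zext_i64(r1) * zext_i64(r2)) >> 32   (unsigned, MPYU/mulhu).)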
@@ -1608,27 +1642,30 @@ def MPYI_sub_ri : MInst_acc<(outs IntRegs:$dst), // Rd=mpy(Rs,Rt) def MPY : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = mpy($src1, $src2)", - [(set IntRegs:$dst, (mulhs IntRegs:$src1, IntRegs:$src2))]>; + [(set (i32 IntRegs:$dst), (mulhs (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))]>; // Rd=mpy(Rs,Rt):rnd // Rd=mpyu(Rs,Rt) def MPYU : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = mpyu($src1, $src2)", - [(set IntRegs:$dst, (mulhu IntRegs:$src1, IntRegs:$src2))]>; + [(set (i32 IntRegs:$dst), (mulhu (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))]>; // Multiply and use full result. // Rdd=mpyu(Rs,Rt) def MPYU64 : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = mpyu($src1, $src2)", - [(set DoubleRegs:$dst, (mul (i64 (anyext IntRegs:$src1)), - (i64 (anyext IntRegs:$src2))))]>; + [(set (i64 DoubleRegs:$dst), + (mul (i64 (anyext (i32 IntRegs:$src1))), + (i64 (anyext (i32 IntRegs:$src2)))))]>; // Rdd=mpy(Rs,Rt) def MPY64 : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = mpy($src1, $src2)", - [(set DoubleRegs:$dst, (mul (i64 (sext IntRegs:$src1)), - (i64 (sext IntRegs:$src2))))]>; - + [(set (i64 DoubleRegs:$dst), + (mul (i64 (sext (i32 IntRegs:$src1))), + (i64 (sext (i32 IntRegs:$src2)))))]>; // Multiply and accumulate, use full result. // Rxx[+-]=mpy(Rs,Rt) @@ -1636,18 +1673,20 @@ def MPY64 : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), def MPY64_acc : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3), "$dst += mpy($src2, $src3)", - [(set DoubleRegs:$dst, - (add (mul (i64 (sext IntRegs:$src2)), (i64 (sext IntRegs:$src3))), - DoubleRegs:$src1))], + [(set (i64 DoubleRegs:$dst), + (add (mul (i64 (sext (i32 IntRegs:$src2))), + (i64 (sext (i32 IntRegs:$src3)))), + (i64 DoubleRegs:$src1)))], "$src1 = $dst">; // Rxx-=mpy(Rs,Rt) def MPY64_sub : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3), "$dst -= mpy($src2, $src3)", - [(set DoubleRegs:$dst, - (sub DoubleRegs:$src1, - (mul (i64 (sext IntRegs:$src2)), (i64 (sext IntRegs:$src3)))))], + [(set (i64 DoubleRegs:$dst), + (sub (i64 DoubleRegs:$src1), + (mul (i64 (sext (i32 IntRegs:$src2))), + (i64 (sext (i32 IntRegs:$src3))))))], "$src1 = $dst">; // Rxx[+-]=mpyu(Rs,Rt) @@ -1655,47 +1694,52 @@ def MPY64_sub : MInst_acc<(outs DoubleRegs:$dst), def MPYU64_acc : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3), "$dst += mpyu($src2, $src3)", - [(set DoubleRegs:$dst, (add (mul (i64 (anyext IntRegs:$src2)), - (i64 (anyext IntRegs:$src3))), - DoubleRegs:$src1))],"$src1 = $dst">; + [(set (i64 DoubleRegs:$dst), + (add (mul (i64 (anyext (i32 IntRegs:$src2))), + (i64 (anyext (i32 IntRegs:$src3)))), + (i64 DoubleRegs:$src1)))], "$src1 = $dst">; // Rxx-=mpyu(Rs,Rt) def MPYU64_sub : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3), "$dst += mpyu($src2, $src3)", - [(set DoubleRegs:$dst, - (sub DoubleRegs:$src1, - (mul (i64 (anyext IntRegs:$src2)), - (i64 (anyext IntRegs:$src3)))))], + [(set (i64 DoubleRegs:$dst), + (sub (i64 DoubleRegs:$src1), + (mul (i64 (anyext (i32 IntRegs:$src2))), + (i64 (anyext (i32 IntRegs:$src3))))))], "$src1 = $dst">; def ADDrr_acc : MInst_acc<(outs IntRegs: $dst), (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3), "$dst += add($src2, $src3)", - [(set IntRegs:$dst, (add (add IntRegs:$src2, IntRegs:$src3), - IntRegs:$src1))], + [(set (i32 IntRegs:$dst), (add 
(add (i32 IntRegs:$src2), + (i32 IntRegs:$src3)), + (i32 IntRegs:$src1)))], "$src1 = $dst">; def ADDri_acc : MInst_acc<(outs IntRegs: $dst), (ins IntRegs:$src1, IntRegs:$src2, s8Imm:$src3), "$dst += add($src2, #$src3)", - [(set IntRegs:$dst, (add (add IntRegs:$src2, s8ImmPred:$src3), - IntRegs:$src1))], + [(set (i32 IntRegs:$dst), (add (add (i32 IntRegs:$src2), + s8ImmPred:$src3), + (i32 IntRegs:$src1)))], "$src1 = $dst">; def SUBrr_acc : MInst_acc<(outs IntRegs: $dst), (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3), "$dst -= add($src2, $src3)", - [(set IntRegs:$dst, (sub IntRegs:$src1, (add IntRegs:$src2, - IntRegs:$src3)))], + [(set (i32 IntRegs:$dst), + (sub (i32 IntRegs:$src1), (add (i32 IntRegs:$src2), + (i32 IntRegs:$src3))))], "$src1 = $dst">; def SUBri_acc : MInst_acc<(outs IntRegs: $dst), (ins IntRegs:$src1, IntRegs:$src2, s8Imm:$src3), "$dst -= add($src2, #$src3)", - [(set IntRegs:$dst, (sub IntRegs:$src1, - (add IntRegs:$src2, s8ImmPred:$src3)))], + [(set (i32 IntRegs:$dst), (sub (i32 IntRegs:$src1), + (add (i32 IntRegs:$src2), + s8ImmPred:$src3)))], "$src1 = $dst">; //===----------------------------------------------------------------------===// @@ -1738,48 +1782,60 @@ let isPredicable = 1 in def STrid : STInst<(outs), (ins MEMri:$addr, DoubleRegs:$src1), "memd($addr) = $src1", - [(store DoubleRegs:$src1, ADDRriS11_3:$addr)]>; + [(store (i64 DoubleRegs:$src1), ADDRriS11_3:$addr)]>; // Indexed store double word. let AddedComplexity = 10, isPredicable = 1 in def STrid_indexed : STInst<(outs), (ins IntRegs:$src1, s11_3Imm:$src2, DoubleRegs:$src3), "memd($src1+#$src2) = $src3", - [(store DoubleRegs:$src3, - (add IntRegs:$src1, s11_3ImmPred:$src2))]>; + [(store (i64 DoubleRegs:$src3), + (add (i32 IntRegs:$src1), s11_3ImmPred:$src2))]>; let mayStore = 1, neverHasSideEffects = 1 in def STrid_GP : STInst<(outs), (ins globaladdress:$global, u16Imm:$offset, DoubleRegs:$src), "memd(#$global+$offset) = $src", - []>; + []>, + Requires<[NoV4T]>; + +let mayStore = 1, neverHasSideEffects = 1 in +def STd_GP : STInst<(outs), + (ins globaladdress:$global, DoubleRegs:$src), + "memd(#$global) = $src", + []>, + Requires<[NoV4T]>; let hasCtrlDep = 1, isPredicable = 1 in def POST_STdri : STInstPI<(outs IntRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2, s4Imm:$offset), "memd($src2++#$offset) = $src1", [(set IntRegs:$dst, - (post_store DoubleRegs:$src1, IntRegs:$src2, s4_3ImmPred:$offset))], + (post_store (i64 DoubleRegs:$src1), (i32 IntRegs:$src2), + s4_3ImmPred:$offset))], "$src2 = $dst">; // Store doubleword conditionally. 
// if ([!]Pv) memd(Rs+#u6:3)=Rtt // if (Pv) memd(Rs+#u6:3)=Rtt -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in +let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1, + isPredicated = 1 in def STrid_cPt : STInst<(outs), (ins PredRegs:$src1, MEMri:$addr, DoubleRegs:$src2), "if ($src1) memd($addr) = $src2", []>; // if (!Pv) memd(Rs+#u6:3)=Rtt -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in +let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1, + isPredicated = 1 in def STrid_cNotPt : STInst<(outs), (ins PredRegs:$src1, MEMri:$addr, DoubleRegs:$src2), "if (!$src1) memd($addr) = $src2", []>; // if (Pv) memd(Rs+#u6:3)=Rtt -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in +let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1, + isPredicated = 1 in def STrid_indexed_cPt : STInst<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3, DoubleRegs:$src4), @@ -1787,7 +1843,8 @@ def STrid_indexed_cPt : STInst<(outs), []>; // if (!Pv) memd(Rs+#u6:3)=Rtt -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in +let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1, + isPredicated = 1 in def STrid_indexed_cNotPt : STInst<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3, DoubleRegs:$src4), @@ -1796,7 +1853,8 @@ def STrid_indexed_cNotPt : STInst<(outs), // if ([!]Pv) memd(Rx++#s4:3)=Rtt // if (Pv) memd(Rx++#s4:3)=Rtt -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in +let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1, + isPredicated = 1 in def POST_STdri_cPt : STInstPI<(outs IntRegs:$dst), (ins PredRegs:$src1, DoubleRegs:$src2, IntRegs:$src3, s4_3Imm:$offset), @@ -1821,27 +1879,29 @@ let isPredicable = 1 in def STrib : STInst<(outs), (ins MEMri:$addr, IntRegs:$src1), "memb($addr) = $src1", - [(truncstorei8 IntRegs:$src1, ADDRriS11_0:$addr)]>; + [(truncstorei8 (i32 IntRegs:$src1), ADDRriS11_0:$addr)]>; let AddedComplexity = 10, isPredicable = 1 in def STrib_indexed : STInst<(outs), (ins IntRegs:$src1, s11_0Imm:$src2, IntRegs:$src3), "memb($src1+#$src2) = $src3", - [(truncstorei8 IntRegs:$src3, (add IntRegs:$src1, - s11_0ImmPred:$src2))]>; + [(truncstorei8 (i32 IntRegs:$src3), (add (i32 IntRegs:$src1), + s11_0ImmPred:$src2))]>; // memb(gp+#u16:0)=Rt let mayStore = 1, neverHasSideEffects = 1 in def STrib_GP : STInst<(outs), (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src), "memb(#$global+$offset) = $src", - []>; + []>, + Requires<[NoV4T]>; let mayStore = 1, neverHasSideEffects = 1 in -def STb_GP : STInst<(outs), +def STb_GP : STInst<(outs), (ins globaladdress:$global, IntRegs:$src), "memb(#$global) = $src", - []>; + []>, + Requires<[NoV4T]>; // memb(Rx++#s4:0)=Rt let hasCtrlDep = 1, isPredicable = 1 in @@ -1850,35 +1910,35 @@ def POST_STbri : STInstPI<(outs IntRegs:$dst), (ins IntRegs:$src1, s4Imm:$offset), "memb($src2++#$offset) = $src1", [(set IntRegs:$dst, - (post_truncsti8 IntRegs:$src1, IntRegs:$src2, + (post_truncsti8 (i32 IntRegs:$src1), (i32 IntRegs:$src2), s4_0ImmPred:$offset))], "$src2 = $dst">; // Store byte conditionally. 
// if ([!]Pv) memb(Rs+#u6:0)=Rt // if (Pv) memb(Rs+#u6:0)=Rt -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in def STrib_cPt : STInst<(outs), (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2), "if ($src1) memb($addr) = $src2", []>; // if (!Pv) memb(Rs+#u6:0)=Rt -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in def STrib_cNotPt : STInst<(outs), (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2), "if (!$src1) memb($addr) = $src2", []>; // if (Pv) memb(Rs+#u6:0)=Rt -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in def STrib_indexed_cPt : STInst<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4), "if ($src1) memb($src2+#$src3) = $src4", []>; // if (!Pv) memb(Rs+#u6:0)=Rt -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in def STrib_indexed_cNotPt : STInst<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4), "if (!$src1) memb($src2+#$src3) = $src4", @@ -1906,27 +1966,29 @@ let isPredicable = 1 in def STrih : STInst<(outs), (ins MEMri:$addr, IntRegs:$src1), "memh($addr) = $src1", - [(truncstorei16 IntRegs:$src1, ADDRriS11_1:$addr)]>; + [(truncstorei16 (i32 IntRegs:$src1), ADDRriS11_1:$addr)]>; let AddedComplexity = 10, isPredicable = 1 in def STrih_indexed : STInst<(outs), (ins IntRegs:$src1, s11_1Imm:$src2, IntRegs:$src3), "memh($src1+#$src2) = $src3", - [(truncstorei16 IntRegs:$src3, (add IntRegs:$src1, - s11_1ImmPred:$src2))]>; + [(truncstorei16 (i32 IntRegs:$src3), (add (i32 IntRegs:$src1), + s11_1ImmPred:$src2))]>; let mayStore = 1, neverHasSideEffects = 1 in def STrih_GP : STInst<(outs), (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src), "memh(#$global+$offset) = $src", - []>; + []>, + Requires<[NoV4T]>; let mayStore = 1, neverHasSideEffects = 1 in def STh_GP : STInst<(outs), (ins globaladdress:$global, IntRegs:$src), "memh(#$global) = $src", - []>; + []>, + Requires<[NoV4T]>; // memh(Rx++#s4:1)=Rt.H // memh(Rx++#s4:1)=Rt @@ -1935,35 +1997,35 @@ def POST_SThri : STInstPI<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, s4Imm:$offset), "memh($src2++#$offset) = $src1", [(set IntRegs:$dst, - (post_truncsti16 IntRegs:$src1, IntRegs:$src2, + (post_truncsti16 (i32 IntRegs:$src1), (i32 IntRegs:$src2), s4_1ImmPred:$offset))], "$src2 = $dst">; // Store halfword conditionally. 
// if ([!]Pv) memh(Rs+#u6:1)=Rt // if (Pv) memh(Rs+#u6:1)=Rt -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in def STrih_cPt : STInst<(outs), (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2), "if ($src1) memh($addr) = $src2", []>; // if (!Pv) memh(Rs+#u6:1)=Rt -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in def STrih_cNotPt : STInst<(outs), (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2), "if (!$src1) memh($addr) = $src2", []>; // if (Pv) memh(Rs+#u6:1)=Rt -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in def STrih_indexed_cPt : STInst<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4), "if ($src1) memh($src2+#$src3) = $src4", []>; // if (!Pv) memh(Rs+#u6:1)=Rt -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in def STrih_indexed_cNotPt : STInst<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4), "if (!$src1) memh($src2+#$src3) = $src4", @@ -1987,7 +2049,7 @@ def POST_SThri_cNotPt : STInstPI<(outs IntRegs:$dst), // Store word. // Store predicate. -let Defs = [R10,R11] in +let mayStore = 1, Defs = [R10,R11,D5], neverHasSideEffects = 1 in def STriw_pred : STInst<(outs), (ins MEMri:$addr, PredRegs:$src1), "Error; should not emit", @@ -1998,53 +2060,63 @@ let isPredicable = 1 in def STriw : STInst<(outs), (ins MEMri:$addr, IntRegs:$src1), "memw($addr) = $src1", - [(store IntRegs:$src1, ADDRriS11_2:$addr)]>; + [(store (i32 IntRegs:$src1), ADDRriS11_2:$addr)]>; let AddedComplexity = 10, isPredicable = 1 in def STriw_indexed : STInst<(outs), (ins IntRegs:$src1, s11_2Imm:$src2, IntRegs:$src3), "memw($src1+#$src2) = $src3", - [(store IntRegs:$src3, (add IntRegs:$src1, s11_2ImmPred:$src2))]>; + [(store (i32 IntRegs:$src3), + (add (i32 IntRegs:$src1), s11_2ImmPred:$src2))]>; let mayStore = 1, neverHasSideEffects = 1 in def STriw_GP : STInst<(outs), (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src), "memw(#$global+$offset) = $src", - []>; + []>, + Requires<[NoV4T]>; + +let mayStore = 1, neverHasSideEffects = 1 in +def STw_GP : STInst<(outs), + (ins globaladdress:$global, IntRegs:$src), + "memw(#$global) = $src", + []>, + Requires<[NoV4T]>; let hasCtrlDep = 1, isPredicable = 1 in def POST_STwri : STInstPI<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, s4Imm:$offset), "memw($src2++#$offset) = $src1", [(set IntRegs:$dst, - (post_store IntRegs:$src1, IntRegs:$src2, s4_2ImmPred:$offset))], + (post_store (i32 IntRegs:$src1), (i32 IntRegs:$src2), + s4_2ImmPred:$offset))], "$src2 = $dst">; // Store word conditionally. 
// if ([!]Pv) memw(Rs+#u6:2)=Rt // if (Pv) memw(Rs+#u6:2)=Rt -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in def STriw_cPt : STInst<(outs), (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2), "if ($src1) memw($addr) = $src2", []>; // if (!Pv) memw(Rs+#u6:2)=Rt -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in def STriw_cNotPt : STInst<(outs), (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2), "if (!$src1) memw($addr) = $src2", []>; // if (Pv) memw(Rs+#u6:2)=Rt -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in def STriw_indexed_cPt : STInst<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4), "if ($src1) memw($src2+#$src3) = $src4", []>; // if (!Pv) memw(Rs+#u6:2)=Rt -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in def STriw_indexed_cNotPt : STInst<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4), "if (!$src1) memw($src2+#$src3) = $src4", @@ -2084,13 +2156,13 @@ let Defs = [R29, R30], Uses = [R31, R30], neverHasSideEffects = 1 in { // Logical NOT. def NOT_rr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1), "$dst = not($src1)", - [(set DoubleRegs:$dst, (not DoubleRegs:$src1))]>; + [(set (i64 DoubleRegs:$dst), (not (i64 DoubleRegs:$src1)))]>; // Sign extend word to doubleword. def SXTW : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src1), "$dst = sxtw($src1)", - [(set DoubleRegs:$dst, (sext IntRegs:$src1))]>; + [(set (i64 DoubleRegs:$dst), (sext (i32 IntRegs:$src1)))]>; //===----------------------------------------------------------------------===// // STYPE/ALU - //===----------------------------------------------------------------------===// @@ -2098,6 +2170,70 @@ def SXTW : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src1), //===----------------------------------------------------------------------===// // STYPE/BIT + //===----------------------------------------------------------------------===// +// clrbit. +def CLRBIT : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2), + "$dst = clrbit($src1, #$src2)", + [(set (i32 IntRegs:$dst), (and (i32 IntRegs:$src1), + (not + (shl 1, u5ImmPred:$src2))))]>; + +def CLRBIT_31 : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2), + "$dst = clrbit($src1, #$src2)", + []>; + +// Map from r0 = and(r1, 2147483647) to r0 = clrbit(r1, #31). +def : Pat <(and (i32 IntRegs:$src1), 2147483647), + (CLRBIT_31 (i32 IntRegs:$src1), 31)>; + +// setbit. +def SETBIT : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2), + "$dst = setbit($src1, #$src2)", + [(set (i32 IntRegs:$dst), (or (i32 IntRegs:$src1), + (shl 1, u5ImmPred:$src2)))]>; + +// Map from r0 = or(r1, -2147483648) to r0 = setbit(r1, #31). +def SETBIT_31 : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2), + "$dst = setbit($src1, #$src2)", + []>; + +def : Pat <(or (i32 IntRegs:$src1), -2147483648), + (SETBIT_31 (i32 IntRegs:$src1), 31)>; + +// togglebit. +def TOGBIT : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2), + "$dst = setbit($src1, #$src2)", + [(set (i32 IntRegs:$dst), (xor (i32 IntRegs:$src1), + (shl 1, u5ImmPred:$src2)))]>; + +// Map from r0 = xor(r1, -2147483648) to r0 = togglebit(r1, #31). 
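// (The immediates above pick out bit 31: 2147483647 is 0x7fffffff, every bit set
// except bit 31, and -2147483648 is 0x80000000, only bit 31 set. So and/or/xor
// with these constants clears, sets, and toggles bit 31 respectively, which is why
// they map to the #31 forms of clrbit/setbit/togglebit.)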
+def TOGBIT_31 : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2), + "$dst = togglebit($src1, #$src2)", + []>; + +def : Pat <(xor (i32 IntRegs:$src1), -2147483648), + (TOGBIT_31 (i32 IntRegs:$src1), 31)>; + +//===----------------------------------------------------------------------===// +// STYPE/BIT - +//===----------------------------------------------------------------------===// + + +//===----------------------------------------------------------------------===// +// STYPE/COMPLEX + +//===----------------------------------------------------------------------===// +//===----------------------------------------------------------------------===// +// STYPE/COMPLEX - +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// STYPE/PERM + +//===----------------------------------------------------------------------===// +//===----------------------------------------------------------------------===// +// STYPE/PERM - +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// STYPE/PRED + //===----------------------------------------------------------------------===// // STYPE/BIT - //===----------------------------------------------------------------------===// @@ -2123,12 +2259,12 @@ def SXTW : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src1), // Predicate transfer. let neverHasSideEffects = 1 in def TFR_RsPd : SInst<(outs IntRegs:$dst), (ins PredRegs:$src1), - "$dst = $src1 // Should almost never emit this", + "$dst = $src1 /* Should almost never emit this. */", []>; def TFR_PdRs : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1), - "$dst = $src1 // Should almost never emit!", - [(set PredRegs:$dst, (trunc IntRegs:$src1))]>; + "$dst = $src1 /* Should almost never emit this. */", + [(set (i1 PredRegs:$dst), (trunc (i32 IntRegs:$src1)))]>; //===----------------------------------------------------------------------===// // STYPE/PRED - //===----------------------------------------------------------------------===// @@ -2139,23 +2275,33 @@ def TFR_PdRs : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1), // Shift by immediate. 
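// (Background note on the shift mnemonics that follow, not specific to this patch:
// asr is an arithmetic right shift, sign bits shift in, matching ISD sra; lsr is a
// logical right shift, zeros shift in, matching srl; asl and lsl are left shifts,
// matching shl. The immediate forms take #u5 for 32-bit and #u6 for 64-bit
// operands; the _rr forms shift by a register.)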
def ASR_ri : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2), "$dst = asr($src1, #$src2)", - [(set IntRegs:$dst, (sra IntRegs:$src1, u5ImmPred:$src2))]>; + [(set (i32 IntRegs:$dst), (sra (i32 IntRegs:$src1), + u5ImmPred:$src2))]>; def ASRd_ri : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2), "$dst = asr($src1, #$src2)", - [(set DoubleRegs:$dst, (sra DoubleRegs:$src1, u6ImmPred:$src2))]>; + [(set (i64 DoubleRegs:$dst), (sra (i64 DoubleRegs:$src1), + u6ImmPred:$src2))]>; def ASL : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2), "$dst = asl($src1, #$src2)", - [(set IntRegs:$dst, (shl IntRegs:$src1, u5ImmPred:$src2))]>; + [(set (i32 IntRegs:$dst), (shl (i32 IntRegs:$src1), + u5ImmPred:$src2))]>; + +def ASLd_ri : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2), + "$dst = asl($src1, #$src2)", + [(set (i64 DoubleRegs:$dst), (shl (i64 DoubleRegs:$src1), + u6ImmPred:$src2))]>; def LSR_ri : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2), "$dst = lsr($src1, #$src2)", - [(set IntRegs:$dst, (srl IntRegs:$src1, u5ImmPred:$src2))]>; + [(set (i32 IntRegs:$dst), (srl (i32 IntRegs:$src1), + u5ImmPred:$src2))]>; def LSRd_ri : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2), "$dst = lsr($src1, #$src2)", - [(set DoubleRegs:$dst, (srl DoubleRegs:$src1, u6ImmPred:$src2))]>; + [(set (i64 DoubleRegs:$dst), (srl (i64 DoubleRegs:$src1), + u6ImmPred:$src2))]>; def LSRd_ri_acc : SInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2, @@ -2174,40 +2320,56 @@ def ASR_rr_acc : SInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, [], "$src1 = $dst">; // Shift by immediate and add. +let AddedComplexity = 100 in def ADDASL : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, u3Imm:$src3), "$dst = addasl($src1, $src2, #$src3)", - [(set IntRegs:$dst, (add IntRegs:$src1, - (shl IntRegs:$src2, - u3ImmPred:$src3)))]>; + [(set (i32 IntRegs:$dst), (add (i32 IntRegs:$src1), + (shl (i32 IntRegs:$src2), + u3ImmPred:$src3)))]>; // Shift by register. 
def ASL_rr : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = asl($src1, $src2)", - [(set IntRegs:$dst, (shl IntRegs:$src1, IntRegs:$src2))]>; + [(set (i32 IntRegs:$dst), (shl (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))]>; def ASR_rr : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = asr($src1, $src2)", - [(set IntRegs:$dst, (sra IntRegs:$src1, IntRegs:$src2))]>; + [(set (i32 IntRegs:$dst), (sra (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))]>; +def LSL_rr : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), + "$dst = lsl($src1, $src2)", + [(set (i32 IntRegs:$dst), (shl (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))]>; def LSR_rr : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = lsr($src1, $src2)", - [(set IntRegs:$dst, (srl IntRegs:$src1, IntRegs:$src2))]>; + [(set (i32 IntRegs:$dst), (srl (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))]>; + +def ASLd : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2), + "$dst = asl($src1, $src2)", + [(set (i64 DoubleRegs:$dst), (shl (i64 DoubleRegs:$src1), + (i32 IntRegs:$src2)))]>; def LSLd : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2), "$dst = lsl($src1, $src2)", - [(set DoubleRegs:$dst, (shl DoubleRegs:$src1, IntRegs:$src2))]>; + [(set (i64 DoubleRegs:$dst), (shl (i64 DoubleRegs:$src1), + (i32 IntRegs:$src2)))]>; def ASRd_rr : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2), "$dst = asr($src1, $src2)", - [(set DoubleRegs:$dst, (sra DoubleRegs:$src1, IntRegs:$src2))]>; + [(set (i64 DoubleRegs:$dst), (sra (i64 DoubleRegs:$src1), + (i32 IntRegs:$src2)))]>; def LSRd_rr : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2), "$dst = lsr($src1, $src2)", - [(set DoubleRegs:$dst, (srl DoubleRegs:$src1, IntRegs:$src2))]>; + [(set (i64 DoubleRegs:$dst), (srl (i64 DoubleRegs:$src1), + (i32 IntRegs:$src2)))]>; //===----------------------------------------------------------------------===// // STYPE/SHIFT - @@ -2238,8 +2400,8 @@ def SDHexagonBARRIER: SDTypeProfile<0, 0, []>; def HexagonBARRIER: SDNode<"HexagonISD::BARRIER", SDHexagonBARRIER, [SDNPHasChain]>; -let hasSideEffects = 1 in -def BARRIER : STInst<(outs), (ins), +let hasSideEffects = 1, isHexagonSolo = 1 in +def BARRIER : SYSInst<(outs), (ins), "barrier", [(HexagonBARRIER)]>; @@ -2251,47 +2413,50 @@ def BARRIER : STInst<(outs), (ins), let isReMaterializable = 1 in def TFRI64 : ALU64_rr<(outs DoubleRegs:$dst), (ins s8Imm64:$src1), "$dst = #$src1", - [(set DoubleRegs:$dst, s8Imm64Pred:$src1)]>; + [(set (i64 DoubleRegs:$dst), s8Imm64Pred:$src1)]>; // Pseudo instruction to encode a set of conditional transfers. // This instruction is used instead of a mux and trades-off codesize // for performance. We conduct this transformation optimistically in // the hope that these instructions get promoted to dot-new transfers. 
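// (A rough illustration of the trade-off just described, with generic registers
// that are not taken from this patch: a select such as r0 = select(p0, r2, r3)
// could be emitted as the single instruction
//   r0 = mux(p0, r2, r3)
// but TFR_condset_* stands for the pair of conditional transfers
//   if (p0) r0 = r2
//   if (!p0) r0 = r3
// which later passes may promote to the dot-new forms, e.g. if (p0.new) r0 = r2,
// at the price of encoding two instructions instead of one.)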
-let AddedComplexity = 100 in +let AddedComplexity = 100, isPredicated = 1 in def TFR_condset_rr : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), "Error; should not emit", - [(set IntRegs:$dst, (select PredRegs:$src1, IntRegs:$src2, - IntRegs:$src3))]>; - -let AddedComplexity = 100 in + [(set (i32 IntRegs:$dst), + (i32 (select (i1 PredRegs:$src1), + (i32 IntRegs:$src2), + (i32 IntRegs:$src3))))]>; +let AddedComplexity = 100, isPredicated = 1 in def TFR_condset_ri : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, s12Imm:$src3), "Error; should not emit", - [(set IntRegs:$dst, - (select PredRegs:$src1, IntRegs:$src2, s12ImmPred:$src3))]>; + [(set (i32 IntRegs:$dst), + (i32 (select (i1 PredRegs:$src1), (i32 IntRegs:$src2), + s12ImmPred:$src3)))]>; -let AddedComplexity = 100 in +let AddedComplexity = 100, isPredicated = 1 in def TFR_condset_ir : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1, s12Imm:$src2, IntRegs:$src3), "Error; should not emit", - [(set IntRegs:$dst, - (select PredRegs:$src1, s12ImmPred:$src2, IntRegs:$src3))]>; + [(set (i32 IntRegs:$dst), + (i32 (select (i1 PredRegs:$src1), s12ImmPred:$src2, + (i32 IntRegs:$src3))))]>; -let AddedComplexity = 100 in +let AddedComplexity = 100, isPredicated = 1 in def TFR_condset_ii : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1, s12Imm:$src2, s12Imm:$src3), "Error; should not emit", - [(set IntRegs:$dst, (select PredRegs:$src1, - s12ImmPred:$src2, - s12ImmPred:$src3))]>; + [(set (i32 IntRegs:$dst), + (i32 (select (i1 PredRegs:$src1), s12ImmPred:$src2, + s12ImmPred:$src3)))]>; // Generate frameindex addresses. let isReMaterializable = 1 in def TFR_FI : ALU32_ri<(outs IntRegs:$dst), (ins FrameIndex:$src1), "$dst = add($src1)", - [(set IntRegs:$dst, ADDRri:$src1)]>; + [(set (i32 IntRegs:$dst), ADDRri:$src1)]>; // // CR - Type. @@ -2309,70 +2474,116 @@ def LOOP0_r : CRInst<(outs), (ins brtarget:$offset, IntRegs:$src2), } let isBranch = 1, isTerminator = 1, neverHasSideEffects = 1, - Defs = [PC, LC0], Uses = [SA0, LC0] in { -def ENDLOOP0 : CRInst<(outs), (ins brtarget:$offset), + Defs = [PC, LC0], Uses = [SA0, LC0] in { +def ENDLOOP0 : Marker<(outs), (ins brtarget:$offset), ":endloop0", []>; } // Support for generating global address. // Taken from X86InstrInfo.td. 
-def SDTHexagonCONST32 : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, - SDTCisPtrTy<0>]>; +def SDTHexagonCONST32 : SDTypeProfile<1, 1, [ + SDTCisVT<0, i32>, + SDTCisVT<1, i32>, + SDTCisPtrTy<0>]>; def HexagonCONST32 : SDNode<"HexagonISD::CONST32", SDTHexagonCONST32>; def HexagonCONST32_GP : SDNode<"HexagonISD::CONST32_GP", SDTHexagonCONST32>; +// HI/LO Instructions +let isReMaterializable = 1, mayLoad = 1, neverHasSideEffects = 1 in +def LO : LDInst<(outs IntRegs:$dst), (ins globaladdress:$global), + "$dst.l = #LO($global)", + []>; + +let isReMaterializable = 1, mayLoad = 1, neverHasSideEffects = 1 in +def HI : LDInst<(outs IntRegs:$dst), (ins globaladdress:$global), + "$dst.h = #HI($global)", + []>; + +let isReMaterializable = 1, mayLoad = 1, neverHasSideEffects = 1 in +def LOi : LDInst<(outs IntRegs:$dst), (ins i32imm:$imm_value), + "$dst.l = #LO($imm_value)", + []>; + + +let isReMaterializable = 1, mayLoad = 1, neverHasSideEffects = 1 in +def HIi : LDInst<(outs IntRegs:$dst), (ins i32imm:$imm_value), + "$dst.h = #HI($imm_value)", + []>; + +let isReMaterializable = 1, mayLoad = 1, neverHasSideEffects = 1 in +def LO_jt : LDInst<(outs IntRegs:$dst), (ins jumptablebase:$jt), + "$dst.l = #LO($jt)", + []>; + +let isReMaterializable = 1, mayLoad = 1, neverHasSideEffects = 1 in +def HI_jt : LDInst<(outs IntRegs:$dst), (ins jumptablebase:$jt), + "$dst.h = #HI($jt)", + []>; + + +let isReMaterializable = 1, mayLoad = 1, neverHasSideEffects = 1 in +def LO_label : LDInst<(outs IntRegs:$dst), (ins bblabel:$label), + "$dst.l = #LO($label)", + []>; + +let isReMaterializable = 1, mayLoad = 1 , neverHasSideEffects = 1 in +def HI_label : LDInst<(outs IntRegs:$dst), (ins bblabel:$label), + "$dst.h = #HI($label)", + []>; // This pattern is incorrect. When we add small data, we should change // this pattern to use memw(#foo). +// This is for sdata. let isMoveImm = 1 in def CONST32 : LDInst<(outs IntRegs:$dst), (ins globaladdress:$global), "$dst = CONST32(#$global)", - [(set IntRegs:$dst, - (load (HexagonCONST32 tglobaltlsaddr:$global)))]>; + [(set (i32 IntRegs:$dst), + (load (HexagonCONST32 tglobaltlsaddr:$global)))]>; +// This is for non-sdata. 
let isReMaterializable = 1, isMoveImm = 1 in def CONST32_set : LDInst<(outs IntRegs:$dst), (ins globaladdress:$global), "$dst = CONST32(#$global)", - [(set IntRegs:$dst, - (HexagonCONST32 tglobaladdr:$global))]>; + [(set (i32 IntRegs:$dst), + (HexagonCONST32 tglobaladdr:$global))]>; let isReMaterializable = 1, isMoveImm = 1 in def CONST32_set_jt : LDInst<(outs IntRegs:$dst), (ins jumptablebase:$jt), "$dst = CONST32(#$jt)", - [(set IntRegs:$dst, - (HexagonCONST32 tjumptable:$jt))]>; + [(set (i32 IntRegs:$dst), + (HexagonCONST32 tjumptable:$jt))]>; let isReMaterializable = 1, isMoveImm = 1 in def CONST32GP_set : LDInst<(outs IntRegs:$dst), (ins globaladdress:$global), "$dst = CONST32(#$global)", - [(set IntRegs:$dst, - (HexagonCONST32_GP tglobaladdr:$global))]>; + [(set (i32 IntRegs:$dst), + (HexagonCONST32_GP tglobaladdr:$global))]>; let isReMaterializable = 1, isMoveImm = 1 in def CONST32_Int_Real : LDInst<(outs IntRegs:$dst), (ins i32imm:$global), "$dst = CONST32(#$global)", - [(set IntRegs:$dst, imm:$global) ]>; + [(set (i32 IntRegs:$dst), imm:$global) ]>; let isReMaterializable = 1, isMoveImm = 1 in def CONST32_Label : LDInst<(outs IntRegs:$dst), (ins bblabel:$label), "$dst = CONST32($label)", - [(set IntRegs:$dst, (HexagonCONST32 bbl:$label))]>; + [(set (i32 IntRegs:$dst), (HexagonCONST32 bbl:$label))]>; let isReMaterializable = 1, isMoveImm = 1 in def CONST64_Int_Real : LDInst<(outs DoubleRegs:$dst), (ins i64imm:$global), "$dst = CONST64(#$global)", - [(set DoubleRegs:$dst, imm:$global) ]>; + [(set (i64 DoubleRegs:$dst), imm:$global) ]>; def TFR_PdFalse : SInst<(outs PredRegs:$dst), (ins), "$dst = xor($dst, $dst)", - [(set PredRegs:$dst, 0)]>; + [(set (i1 PredRegs:$dst), 0)]>; def MPY_trsext : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), - "$dst = mpy($src1, $src2)", - [(set IntRegs:$dst, - (trunc (i64 (srl (i64 (mul (i64 (sext IntRegs:$src1)), - (i64 (sext IntRegs:$src2)))), - (i32 32)))))]>; + "$dst = mpy($src1, $src2)", + [(set (i32 IntRegs:$dst), + (trunc (i64 (srl (i64 (mul (i64 (sext (i32 IntRegs:$src1))), + (i64 (sext (i32 IntRegs:$src2))))), + (i32 32)))))]>; // Pseudo instructions. def SDT_SPCallSeqStart : SDCallSeqStart<[ SDTCisVT<0, i32> ]>; @@ -2446,8 +2657,8 @@ let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1, "jumpr $dst // TAILCALL", []>; } // Map call instruction. 
-def : Pat<(call IntRegs:$dst), - (CALLR IntRegs:$dst)>, Requires<[HasV2TOnly]>; +def : Pat<(call (i32 IntRegs:$dst)), + (CALLR (i32 IntRegs:$dst))>, Requires<[HasV2TOnly]>; def : Pat<(call tglobaladdr:$dst), (CALL tglobaladdr:$dst)>, Requires<[HasV2TOnly]>; def : Pat<(call texternalsym:$dst), @@ -2457,309 +2668,515 @@ def : Pat<(HexagonTCRet tglobaladdr:$dst), (TCRETURNtg tglobaladdr:$dst)>; def : Pat<(HexagonTCRet texternalsym:$dst), (TCRETURNtext texternalsym:$dst)>; -def : Pat<(HexagonTCRet IntRegs:$dst), - (TCRETURNR IntRegs:$dst)>; +def : Pat<(HexagonTCRet (i32 IntRegs:$dst)), + (TCRETURNR (i32 IntRegs:$dst))>; + +// Atomic load and store support +// 8 bit atomic load +def : Pat<(atomic_load_8 (HexagonCONST32_GP tglobaladdr:$global)), + (i32 (LDub_GP tglobaladdr:$global))>, + Requires<[NoV4T]>; + +def : Pat<(atomic_load_8 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset)), + (i32 (LDriub_GP tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[NoV4T]>; + +def : Pat<(atomic_load_8 ADDRriS11_0:$src1), + (i32 (LDriub ADDRriS11_0:$src1))>; + +def : Pat<(atomic_load_8 (add (i32 IntRegs:$src1), s11_0ImmPred:$offset)), + (i32 (LDriub_indexed (i32 IntRegs:$src1), s11_0ImmPred:$offset))>; + + + +// 16 bit atomic load +def : Pat<(atomic_load_16 (HexagonCONST32_GP tglobaladdr:$global)), + (i32 (LDuh_GP tglobaladdr:$global))>, + Requires<[NoV4T]>; + +def : Pat<(atomic_load_16 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset)), + (i32 (LDriuh_GP tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[NoV4T]>; + +def : Pat<(atomic_load_16 ADDRriS11_1:$src1), + (i32 (LDriuh ADDRriS11_1:$src1))>; + +def : Pat<(atomic_load_16 (add (i32 IntRegs:$src1), s11_1ImmPred:$offset)), + (i32 (LDriuh_indexed (i32 IntRegs:$src1), s11_1ImmPred:$offset))>; + + + +// 32 bit atomic load +def : Pat<(atomic_load_32 (HexagonCONST32_GP tglobaladdr:$global)), + (i32 (LDw_GP tglobaladdr:$global))>, + Requires<[NoV4T]>; + +def : Pat<(atomic_load_32 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset)), + (i32 (LDriw_GP tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[NoV4T]>; + +def : Pat<(atomic_load_32 ADDRriS11_2:$src1), + (i32 (LDriw ADDRriS11_2:$src1))>; + +def : Pat<(atomic_load_32 (add (i32 IntRegs:$src1), s11_2ImmPred:$offset)), + (i32 (LDriw_indexed (i32 IntRegs:$src1), s11_2ImmPred:$offset))>; + + +// 64 bit atomic load +def : Pat<(atomic_load_64 (HexagonCONST32_GP tglobaladdr:$global)), + (i64 (LDd_GP tglobaladdr:$global))>, + Requires<[NoV4T]>; + +def : Pat<(atomic_load_64 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset)), + (i64 (LDrid_GP tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[NoV4T]>; + +def : Pat<(atomic_load_64 ADDRriS11_3:$src1), + (i64 (LDrid ADDRriS11_3:$src1))>; + +def : Pat<(atomic_load_64 (add (i32 IntRegs:$src1), s11_3ImmPred:$offset)), + (i64 (LDrid_indexed (i32 IntRegs:$src1), s11_3ImmPred:$offset))>; + + +// 64 bit atomic store +def : Pat<(atomic_store_64 (HexagonCONST32_GP tglobaladdr:$global), + (i64 DoubleRegs:$src1)), + (STd_GP tglobaladdr:$global, (i64 DoubleRegs:$src1))>, + Requires<[NoV4T]>; + +def : Pat<(atomic_store_64 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset), + (i64 DoubleRegs:$src1)), + (STrid_GP tglobaladdr:$global, u16ImmPred:$offset, + (i64 DoubleRegs:$src1))>, Requires<[NoV4T]>; + +// 8 bit atomic store +def : Pat<(atomic_store_8 (HexagonCONST32_GP tglobaladdr:$global), + (i32 IntRegs:$src1)), + (STb_GP tglobaladdr:$global, (i32 IntRegs:$src1))>, + 
Requires<[NoV4T]>; + +def : Pat<(atomic_store_8 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset), + (i32 IntRegs:$src1)), + (STrib_GP tglobaladdr:$global, u16ImmPred:$offset, + (i32 IntRegs:$src1))>, Requires<[NoV4T]>; + +def : Pat<(atomic_store_8 ADDRriS11_0:$src2, (i32 IntRegs:$src1)), + (STrib ADDRriS11_0:$src2, (i32 IntRegs:$src1))>; + +def : Pat<(atomic_store_8 (add (i32 IntRegs:$src2), s11_0ImmPred:$offset), + (i32 IntRegs:$src1)), + (STrib_indexed (i32 IntRegs:$src2), s11_0ImmPred:$offset, + (i32 IntRegs:$src1))>; + -// Map from r0 = and(r1, 65535) to r0 = zxth(r1). -def : Pat <(and IntRegs:$src1, 65535), - (ZXTH IntRegs:$src1)>; +// 16 bit atomic store +def : Pat<(atomic_store_16 (HexagonCONST32_GP tglobaladdr:$global), + (i32 IntRegs:$src1)), + (STh_GP tglobaladdr:$global, (i32 IntRegs:$src1))>, + Requires<[NoV4T]>; + +def : Pat<(atomic_store_16 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset), + (i32 IntRegs:$src1)), + (STrih_GP tglobaladdr:$global, u16ImmPred:$offset, + (i32 IntRegs:$src1))>, Requires<[NoV4T]>; + +def : Pat<(atomic_store_16 ADDRriS11_1:$src2, (i32 IntRegs:$src1)), + (STrih ADDRriS11_1:$src2, (i32 IntRegs:$src1))>; + +def : Pat<(atomic_store_16 (i32 IntRegs:$src1), + (add (i32 IntRegs:$src2), s11_1ImmPred:$offset)), + (STrih_indexed (i32 IntRegs:$src2), s11_1ImmPred:$offset, + (i32 IntRegs:$src1))>; + + +// 32 bit atomic store +def : Pat<(atomic_store_32 (HexagonCONST32_GP tglobaladdr:$global), + (i32 IntRegs:$src1)), + (STw_GP tglobaladdr:$global, (i32 IntRegs:$src1))>, + Requires<[NoV4T]>; + +def : Pat<(atomic_store_32 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset), + (i32 IntRegs:$src1)), + (STriw_GP tglobaladdr:$global, u16ImmPred:$offset, (i32 IntRegs:$src1))>, + Requires<[NoV4T]>; + +def : Pat<(atomic_store_32 ADDRriS11_2:$src2, (i32 IntRegs:$src1)), + (STriw ADDRriS11_2:$src2, (i32 IntRegs:$src1))>; + +def : Pat<(atomic_store_32 (add (i32 IntRegs:$src2), s11_2ImmPred:$offset), + (i32 IntRegs:$src1)), + (STriw_indexed (i32 IntRegs:$src2), s11_2ImmPred:$offset, + (i32 IntRegs:$src1))>; + + + + +def : Pat<(atomic_store_64 ADDRriS11_3:$src2, (i64 DoubleRegs:$src1)), + (STrid ADDRriS11_3:$src2, (i64 DoubleRegs:$src1))>; + +def : Pat<(atomic_store_64 (add (i32 IntRegs:$src2), s11_3ImmPred:$offset), + (i64 DoubleRegs:$src1)), + (STrid_indexed (i32 IntRegs:$src2), s11_3ImmPred:$offset, + (i64 DoubleRegs:$src1))>; + +// Map from r0 = and(r1, 65535) to r0 = zxth(r1) +def : Pat <(and (i32 IntRegs:$src1), 65535), + (ZXTH (i32 IntRegs:$src1))>; // Map from r0 = and(r1, 255) to r0 = zxtb(r1). -def : Pat <(and IntRegs:$src1, 255), - (ZXTB IntRegs:$src1)>; +def : Pat <(and (i32 IntRegs:$src1), 255), + (ZXTB (i32 IntRegs:$src1))>; // Map Add(p1, true) to p1 = not(p1). // Add(p1, false) should never be produced, // if it does, it got to be mapped to NOOP. -def : Pat <(add PredRegs:$src1, -1), - (NOT_p PredRegs:$src1)>; +def : Pat <(add (i1 PredRegs:$src1), -1), + (NOT_p (i1 PredRegs:$src1))>; // Map from p0 = setlt(r0, r1) r2 = mux(p0, r3, r4) => // p0 = cmp.lt(r0, r1), r0 = mux(p0, r2, r1). 
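The mux mappings that follow turn a select on an integer compare into a compare that produces a predicate register plus a conditional transfer. A minimal C++ sketch of the kind of source involved (the function name is invented for illustration):

  #include <cstdint>

  // Roughly: a predicate is produced by the compare, then a conditional
  // transfer picks between c and d; not a claim about the exact opcodes.
  int32_t pick(int32_t a, int32_t b, int32_t c, int32_t d) {
    return a < b ? c : d;
  }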
-def : Pat <(select (i1 (setlt IntRegs:$src1, IntRegs:$src2)), IntRegs:$src3, - IntRegs:$src4), - (TFR_condset_rr (CMPLTrr IntRegs:$src1, IntRegs:$src2), IntRegs:$src4, - IntRegs:$src3)>, Requires<[HasV2TOnly]>; +def : Pat <(select (i1 (setlt (i32 IntRegs:$src1), (i32 IntRegs:$src2))), + (i32 IntRegs:$src3), + (i32 IntRegs:$src4)), + (i32 (TFR_condset_rr (CMPLTrr (i32 IntRegs:$src1), (i32 IntRegs:$src2)), + (i32 IntRegs:$src4), (i32 IntRegs:$src3)))>, + Requires<[HasV2TOnly]>; // Map from p0 = pnot(p0); r0 = mux(p0, #i, #j) => r0 = mux(p0, #j, #i). -def : Pat <(select (not PredRegs:$src1), s8ImmPred:$src2, s8ImmPred:$src3), - (TFR_condset_ii PredRegs:$src1, s8ImmPred:$src3, s8ImmPred:$src2)>; +def : Pat <(select (not (i1 PredRegs:$src1)), s8ImmPred:$src2, s8ImmPred:$src3), + (i32 (TFR_condset_ii (i1 PredRegs:$src1), s8ImmPred:$src3, + s8ImmPred:$src2))>; + +// Map from p0 = pnot(p0); r0 = select(p0, #i, r1) +// => r0 = TFR_condset_ri(p0, r1, #i) +def : Pat <(select (not (i1 PredRegs:$src1)), s12ImmPred:$src2, + (i32 IntRegs:$src3)), + (i32 (TFR_condset_ri (i1 PredRegs:$src1), (i32 IntRegs:$src3), + s12ImmPred:$src2))>; + +// Map from p0 = pnot(p0); r0 = mux(p0, r1, #i) +// => r0 = TFR_condset_ir(p0, #i, r1) +def : Pat <(select (not PredRegs:$src1), IntRegs:$src2, s12ImmPred:$src3), + (i32 (TFR_condset_ir (i1 PredRegs:$src1), s12ImmPred:$src3, + (i32 IntRegs:$src2)))>; // Map from p0 = pnot(p0); if (p0) jump => if (!p0) jump. def : Pat <(brcond (not PredRegs:$src1), bb:$offset), - (JMP_cNot PredRegs:$src1, bb:$offset)>; + (JMP_cNot (i1 PredRegs:$src1), bb:$offset)>; // Map from p2 = pnot(p2); p1 = and(p0, p2) => p1 = and(p0, !p2). def : Pat <(and PredRegs:$src1, (not PredRegs:$src2)), - (AND_pnotp PredRegs:$src1, PredRegs:$src2)>; + (i1 (AND_pnotp (i1 PredRegs:$src1), (i1 PredRegs:$src2)))>; // Map from store(globaladdress + x) -> memd(#foo + x). let AddedComplexity = 100 in -def : Pat <(store DoubleRegs:$src1, +def : Pat <(store (i64 DoubleRegs:$src1), (add (HexagonCONST32_GP tglobaladdr:$global), u16ImmPred:$offset)), - (STrid_GP tglobaladdr:$global, u16ImmPred:$offset, DoubleRegs:$src1)>; + (STrid_GP tglobaladdr:$global, u16ImmPred:$offset, + (i64 DoubleRegs:$src1))>, Requires<[NoV4T]>; -// Map from store(globaladdress) -> memd(#foo + 0). +// Map from store(globaladdress) -> memd(#foo). let AddedComplexity = 100 in -def : Pat <(store DoubleRegs:$src1, (HexagonCONST32_GP tglobaladdr:$global)), - (STrid_GP tglobaladdr:$global, 0, DoubleRegs:$src1)>; +def : Pat <(store (i64 DoubleRegs:$src1), + (HexagonCONST32_GP tglobaladdr:$global)), + (STd_GP tglobaladdr:$global, (i64 DoubleRegs:$src1))>, + Requires<[NoV4T]>; // Map from store(globaladdress + x) -> memw(#foo + x). let AddedComplexity = 100 in -def : Pat <(store IntRegs:$src1, (add (HexagonCONST32_GP tglobaladdr:$global), +def : Pat <(store (i32 IntRegs:$src1), + (add (HexagonCONST32_GP tglobaladdr:$global), u16ImmPred:$offset)), - (STriw_GP tglobaladdr:$global, u16ImmPred:$offset, IntRegs:$src1)>; + (STriw_GP tglobaladdr:$global, u16ImmPred:$offset, (i32 IntRegs:$src1))>, + Requires<[NoV4T]>; // Map from store(globaladdress) -> memw(#foo + 0). let AddedComplexity = 100 in -def : Pat <(store IntRegs:$src1, (HexagonCONST32_GP tglobaladdr:$global)), - (STriw_GP tglobaladdr:$global, 0, IntRegs:$src1)>; +def : Pat <(store (i32 IntRegs:$src1), (HexagonCONST32_GP tglobaladdr:$global)), + (STriw_GP tglobaladdr:$global, 0, (i32 IntRegs:$src1))>; -// Map from store(globaladdress) -> memw(#foo + 0). +// Map from store(globaladdress) -> memw(#foo). 
let AddedComplexity = 100 in -def : Pat <(store IntRegs:$src1, (HexagonCONST32_GP tglobaladdr:$global)), - (STriw_GP tglobaladdr:$global, 0, IntRegs:$src1)>; +def : Pat <(store (i32 IntRegs:$src1), (HexagonCONST32_GP tglobaladdr:$global)), + (STriw_GP tglobaladdr:$global, 0, (i32 IntRegs:$src1))>, + Requires<[NoV4T]>; // Map from store(globaladdress + x) -> memh(#foo + x). let AddedComplexity = 100 in -def : Pat <(truncstorei16 IntRegs:$src1, +def : Pat <(truncstorei16 (i32 IntRegs:$src1), (add (HexagonCONST32_GP tglobaladdr:$global), u16ImmPred:$offset)), - (STrih_GP tglobaladdr:$global, u16ImmPred:$offset, IntRegs:$src1)>; + (STrih_GP tglobaladdr:$global, u16ImmPred:$offset, (i32 IntRegs:$src1))>, + Requires<[NoV4T]>; // Map from store(globaladdress) -> memh(#foo). let AddedComplexity = 100 in -def : Pat <(truncstorei16 IntRegs:$src1, +def : Pat <(truncstorei16 (i32 IntRegs:$src1), (HexagonCONST32_GP tglobaladdr:$global)), - (STh_GP tglobaladdr:$global, IntRegs:$src1)>; + (STh_GP tglobaladdr:$global, (i32 IntRegs:$src1))>, + Requires<[NoV4T]>; // Map from store(globaladdress + x) -> memb(#foo + x). let AddedComplexity = 100 in -def : Pat <(truncstorei8 IntRegs:$src1, +def : Pat <(truncstorei8 (i32 IntRegs:$src1), (add (HexagonCONST32_GP tglobaladdr:$global), u16ImmPred:$offset)), - (STrib_GP tglobaladdr:$global, u16ImmPred:$offset, IntRegs:$src1)>; + (STrib_GP tglobaladdr:$global, u16ImmPred:$offset, (i32 IntRegs:$src1))>, + Requires<[NoV4T]>; // Map from store(globaladdress) -> memb(#foo). let AddedComplexity = 100 in -def : Pat <(truncstorei8 IntRegs:$src1, +def : Pat <(truncstorei8 (i32 IntRegs:$src1), (HexagonCONST32_GP tglobaladdr:$global)), - (STb_GP tglobaladdr:$global, IntRegs:$src1)>; + (STb_GP tglobaladdr:$global, (i32 IntRegs:$src1))>, + Requires<[NoV4T]>; // Map from load(globaladdress + x) -> memw(#foo + x). let AddedComplexity = 100 in -def : Pat <(load (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset)), - (LDriw_GP tglobaladdr:$global, u16ImmPred:$offset)>; +def : Pat <(i32 (load (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset))), + (i32 (LDriw_GP tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[NoV4T]>; -// Map from load(globaladdress) -> memw(#foo + 0). +// Map from load(globaladdress) -> memw(#foo). let AddedComplexity = 100 in -def : Pat <(load (HexagonCONST32_GP tglobaladdr:$global)), - (LDw_GP tglobaladdr:$global)>; +def : Pat <(i32 (load (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDw_GP tglobaladdr:$global))>, + Requires<[NoV4T]>; // Map from load(globaladdress + x) -> memd(#foo + x). let AddedComplexity = 100 in def : Pat <(i64 (load (add (HexagonCONST32_GP tglobaladdr:$global), u16ImmPred:$offset))), - (LDrid_GP tglobaladdr:$global, u16ImmPred:$offset)>; + (i64 (LDrid_GP tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[NoV4T]>; // Map from load(globaladdress) -> memw(#foo + 0). let AddedComplexity = 100 in def : Pat <(i64 (load (HexagonCONST32_GP tglobaladdr:$global))), - (LDd_GP tglobaladdr:$global)>; - + (i64 (LDd_GP tglobaladdr:$global))>, + Requires<[NoV4T]>; -// Map from Pd = load(globaladdress) -> Rd = memb(globaladdress + 0), Pd = Rd. +// Map from Pd = load(globaladdress) -> Rd = memb(globaladdress), Pd = Rd. let AddedComplexity = 100 in def : Pat <(i1 (load (HexagonCONST32_GP tglobaladdr:$global))), - (TFR_PdRs (LDrib_GP tglobaladdr:$global, 0))>; + (i1 (TFR_PdRs (i32 (LDb_GP tglobaladdr:$global))))>, + Requires<[NoV4T]>; // Map from load(globaladdress + x) -> memh(#foo + x). 
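These absolute-addressed forms fold the global's address, and for struct fields a small constant offset, straight into the memory operand, and they are now guarded by Requires<[NoV4T]>. A hedged C++ sketch of code that would exercise them; the globals and names are invented:

  #include <cstdint>

  struct State { int32_t a; int32_t b; };

  int64_t g_counter;   // memd(#g_counter) style accesses
  State   g_state;     // field accesses use the "+ offset" forms

  void bump(int32_t v) {
    g_counter += 1;    // 64-bit global load followed by a store
    g_state.b  = v;    // store to #g_state plus the offset of 'b'
  }

  int32_t read_b() {
    return g_state.b;  // memw(#g_state + 4) style load
  }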
let AddedComplexity = 100 in -def : Pat <(sextloadi16 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset)), - (LDrih_GP tglobaladdr:$global, u16ImmPred:$offset)>; +def : Pat <(i32 (extloadi16 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset))), + (i32 (LDrih_GP tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[NoV4T]>; -// Map from load(globaladdress) -> memh(#foo + 0). +// Map from load(globaladdress + x) -> memh(#foo + x). let AddedComplexity = 100 in -def : Pat <(sextloadi16 (HexagonCONST32_GP tglobaladdr:$global)), - (LDrih_GP tglobaladdr:$global, 0)>; +def : Pat <(i32 (sextloadi16 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDrih_GP tglobaladdr:$global, 0))>, + Requires<[NoV4T]>; // Map from load(globaladdress + x) -> memuh(#foo + x). let AddedComplexity = 100 in -def : Pat <(zextloadi16 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset)), - (LDriuh_GP tglobaladdr:$global, u16ImmPred:$offset)>; +def : Pat <(i32 (zextloadi16 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset))), + (i32 (LDriuh_GP tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[NoV4T]>; -// Map from load(globaladdress) -> memuh(#foo + 0). +// Map from load(globaladdress) -> memuh(#foo). let AddedComplexity = 100 in -def : Pat <(zextloadi16 (HexagonCONST32_GP tglobaladdr:$global)), - (LDriuh_GP tglobaladdr:$global, 0)>; +def : Pat <(i32 (zextloadi16 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDriuh_GP tglobaladdr:$global, 0))>, + Requires<[NoV4T]>; -// Map from load(globaladdress + x) -> memuh(#foo + x). +// Map from load(globaladdress) -> memh(#foo). let AddedComplexity = 100 in -def : Pat <(extloadi16 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset)), - (LDriuh_GP tglobaladdr:$global, u16ImmPred:$offset)>; +def : Pat <(i32 (sextloadi16 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDh_GP tglobaladdr:$global))>, + Requires<[NoV4T]>; -// Map from load(globaladdress) -> memuh(#foo + 0). -let AddedComplexity = 100 in -def : Pat <(extloadi16 (HexagonCONST32_GP tglobaladdr:$global)), - (LDriuh_GP tglobaladdr:$global, 0)>; -// Map from load(globaladdress + x) -> memub(#foo + x). +// Map from load(globaladdress) -> memuh(#foo). let AddedComplexity = 100 in -def : Pat <(zextloadi8 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset)), - (LDriub_GP tglobaladdr:$global, u16ImmPred:$offset)>; +def : Pat <(i32 (zextloadi16 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDuh_GP tglobaladdr:$global))>, + Requires<[NoV4T]>; -// Map from load(globaladdress) -> memuh(#foo + 0). +// Map from load(globaladdress + x) -> memb(#foo + x). let AddedComplexity = 100 in -def : Pat <(zextloadi8 (HexagonCONST32_GP tglobaladdr:$global)), - (LDriub_GP tglobaladdr:$global, 0)>; +def : Pat <(i32 (extloadi8 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset))), + (i32 (LDrib_GP tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[NoV4T]>; // Map from load(globaladdress + x) -> memb(#foo + x). let AddedComplexity = 100 in -def : Pat <(sextloadi8 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset)), - (LDrib_GP tglobaladdr:$global, u16ImmPred:$offset)>; +def : Pat <(i32 (sextloadi8 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset))), + (i32 (LDrib_GP tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[NoV4T]>; + +// Map from load(globaladdress + x) -> memub(#foo + x). 
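Which of the memb/memub and memh/memuh forms gets picked depends only on whether the load is sign- or zero-extended, so the signedness of a narrow global in the source is what drives selection. An illustrative sketch (names invented):

  #include <cstdint>

  int8_t   g_s8;    // sign-extending memb loads
  uint8_t  g_u8;    // zero-extending memub loads
  int16_t  g_s16;   // sign-extending memh loads
  uint16_t g_u16;   // zero-extending memuh loads

  int32_t widen_all() {
    // Each operand is widened to 32 bits as part of the load itself.
    return g_s8 + g_u8 + g_s16 + g_u16;
  }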
+let AddedComplexity = 100 in +def : Pat <(i32 (zextloadi8 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset))), + (i32 (LDriub_GP tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[NoV4T]>; // Map from load(globaladdress) -> memb(#foo). let AddedComplexity = 100 in -def : Pat <(extloadi8 (HexagonCONST32_GP tglobaladdr:$global)), - (LDb_GP tglobaladdr:$global)>; +def : Pat <(i32 (extloadi8 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDb_GP tglobaladdr:$global))>, + Requires<[NoV4T]>; // Map from load(globaladdress) -> memb(#foo). let AddedComplexity = 100 in -def : Pat <(sextloadi8 (HexagonCONST32_GP tglobaladdr:$global)), - (LDb_GP tglobaladdr:$global)>; +def : Pat <(i32 (sextloadi8 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDb_GP tglobaladdr:$global))>, + Requires<[NoV4T]>; // Map from load(globaladdress) -> memub(#foo). let AddedComplexity = 100 in -def : Pat <(zextloadi8 (HexagonCONST32_GP tglobaladdr:$global)), - (LDub_GP tglobaladdr:$global)>; +def : Pat <(i32 (zextloadi8 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDub_GP tglobaladdr:$global))>, + Requires<[NoV4T]>; // When the Interprocedural Global Variable optimizer realizes that a // certain global variable takes only two constant values, it shrinks the // global to a boolean. Catch those loads here in the following 3 patterns. let AddedComplexity = 100 in -def : Pat <(extloadi1 (HexagonCONST32_GP tglobaladdr:$global)), - (LDb_GP tglobaladdr:$global)>; - -let AddedComplexity = 100 in -def : Pat <(sextloadi1 (HexagonCONST32_GP tglobaladdr:$global)), - (LDb_GP tglobaladdr:$global)>; - -let AddedComplexity = 100 in -def : Pat <(zextloadi1 (HexagonCONST32_GP tglobaladdr:$global)), - (LDub_GP tglobaladdr:$global)>; - -// Map from load(globaladdress) -> memh(#foo). -let AddedComplexity = 100 in -def : Pat <(extloadi16 (HexagonCONST32_GP tglobaladdr:$global)), - (LDh_GP tglobaladdr:$global)>; +def : Pat <(i32 (extloadi1 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDb_GP tglobaladdr:$global))>, + Requires<[NoV4T]>; -// Map from load(globaladdress) -> memh(#foo). let AddedComplexity = 100 in -def : Pat <(sextloadi16 (HexagonCONST32_GP tglobaladdr:$global)), - (LDh_GP tglobaladdr:$global)>; +def : Pat <(i32 (sextloadi1 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDb_GP tglobaladdr:$global))>, + Requires<[NoV4T]>; -// Map from load(globaladdress) -> memuh(#foo). let AddedComplexity = 100 in -def : Pat <(zextloadi16 (HexagonCONST32_GP tglobaladdr:$global)), - (LDuh_GP tglobaladdr:$global)>; +def : Pat <(i32 (zextloadi1 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDub_GP tglobaladdr:$global))>, + Requires<[NoV4T]>; // Map from i1 loads to 32 bits. This assumes that the i1* is byte aligned. def : Pat <(i32 (zextloadi1 ADDRriS11_0:$addr)), - (AND_rr (LDrib ADDRriS11_0:$addr), (TFRI 0x1))>; + (i32 (AND_rr (i32 (LDrib ADDRriS11_0:$addr)), (TFRI 0x1)))>; // Map from Rdd = sign_extend_inreg(Rss, i32) -> Rdd = SXTW(Rss.lo). -def : Pat <(i64 (sext_inreg DoubleRegs:$src1, i32)), - (i64 (SXTW (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg)))>; +def : Pat <(i64 (sext_inreg (i64 DoubleRegs:$src1), i32)), + (i64 (SXTW (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_loreg))))>; // Map from Rdd = sign_extend_inreg(Rss, i16) -> Rdd = SXTW(SXTH(Rss.lo)). 
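sign_extend_inreg shows up when a 64-bit value is truncated to a narrower type and sign-extended back; the patterns below rebuild it from sxtb/sxth on the low word followed by sxtw. For example (function name invented):

  #include <cstdint>

  // Keeps only the low 16 bits of x, sign-extended back to 64 bits;
  // roughly sxth on the low word, then sxtw.
  int64_t keep_low16(int64_t x) {
    return static_cast<int64_t>(static_cast<int16_t>(x));
  }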
-def : Pat <(i64 (sext_inreg DoubleRegs:$src1, i16)), - (i64 (SXTW (SXTH (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg))))>; +def : Pat <(i64 (sext_inreg (i64 DoubleRegs:$src1), i16)), + (i64 (SXTW (i32 (SXTH (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), + subreg_loreg))))))>; // Map from Rdd = sign_extend_inreg(Rss, i8) -> Rdd = SXTW(SXTB(Rss.lo)). -def : Pat <(i64 (sext_inreg DoubleRegs:$src1, i8)), - (i64 (SXTW (SXTB (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg))))>; +def : Pat <(i64 (sext_inreg (i64 DoubleRegs:$src1), i8)), + (i64 (SXTW (i32 (SXTB (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), + subreg_loreg))))))>; // We want to prevent emiting pnot's as much as possible. // Map brcond with an unsupported setcc to a JMP_cNot. -def : Pat <(brcond (i1 (setne IntRegs:$src1, IntRegs:$src2)), bb:$offset), - (JMP_cNot (CMPEQrr IntRegs:$src1, IntRegs:$src2), bb:$offset)>; +def : Pat <(brcond (i1 (setne (i32 IntRegs:$src1), (i32 IntRegs:$src2))), + bb:$offset), + (JMP_cNot (CMPEQrr (i32 IntRegs:$src1), (i32 IntRegs:$src2)), + bb:$offset)>; -def : Pat <(brcond (i1 (setne IntRegs:$src1, s10ImmPred:$src2)), bb:$offset), - (JMP_cNot (CMPEQri IntRegs:$src1, s10ImmPred:$src2), bb:$offset)>; +def : Pat <(brcond (i1 (setne (i32 IntRegs:$src1), s10ImmPred:$src2)), + bb:$offset), + (JMP_cNot (CMPEQri (i32 IntRegs:$src1), s10ImmPred:$src2), bb:$offset)>; -def : Pat <(brcond (i1 (setne PredRegs:$src1, (i1 -1))), bb:$offset), - (JMP_cNot PredRegs:$src1, bb:$offset)>; +def : Pat <(brcond (i1 (setne (i1 PredRegs:$src1), (i1 -1))), bb:$offset), + (JMP_cNot (i1 PredRegs:$src1), bb:$offset)>; -def : Pat <(brcond (i1 (setne PredRegs:$src1, (i1 0))), bb:$offset), - (JMP_c PredRegs:$src1, bb:$offset)>; +def : Pat <(brcond (i1 (setne (i1 PredRegs:$src1), (i1 0))), bb:$offset), + (JMP_c (i1 PredRegs:$src1), bb:$offset)>; -def : Pat <(brcond (i1 (setlt IntRegs:$src1, s8ImmPred:$src2)), bb:$offset), - (JMP_cNot (CMPGEri IntRegs:$src1, s8ImmPred:$src2), bb:$offset)>; +def : Pat <(brcond (i1 (setlt (i32 IntRegs:$src1), s8ImmPred:$src2)), + bb:$offset), + (JMP_cNot (CMPGEri (i32 IntRegs:$src1), s8ImmPred:$src2), bb:$offset)>; -def : Pat <(brcond (i1 (setlt IntRegs:$src1, IntRegs:$src2)), bb:$offset), - (JMP_c (CMPLTrr IntRegs:$src1, IntRegs:$src2), bb:$offset)>; +def : Pat <(brcond (i1 (setlt (i32 IntRegs:$src1), (i32 IntRegs:$src2))), + bb:$offset), + (JMP_c (CMPLTrr (i32 IntRegs:$src1), (i32 IntRegs:$src2)), bb:$offset)>; -def : Pat <(brcond (i1 (setuge DoubleRegs:$src1, DoubleRegs:$src2)), +def : Pat <(brcond (i1 (setuge (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), bb:$offset), - (JMP_cNot (CMPGTU64rr DoubleRegs:$src2, DoubleRegs:$src1), + (JMP_cNot (CMPGTU64rr (i64 DoubleRegs:$src2), (i64 DoubleRegs:$src1)), bb:$offset)>; -def : Pat <(brcond (i1 (setule IntRegs:$src1, IntRegs:$src2)), bb:$offset), - (JMP_cNot (CMPGTUrr IntRegs:$src1, IntRegs:$src2), bb:$offset)>; +def : Pat <(brcond (i1 (setule (i32 IntRegs:$src1), (i32 IntRegs:$src2))), + bb:$offset), + (JMP_cNot (CMPGTUrr (i32 IntRegs:$src1), (i32 IntRegs:$src2)), + bb:$offset)>; -def : Pat <(brcond (i1 (setule DoubleRegs:$src1, DoubleRegs:$src2)), +def : Pat <(brcond (i1 (setule (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), bb:$offset), - (JMP_cNot (CMPGTU64rr DoubleRegs:$src1, DoubleRegs:$src2), - bb:$offset)>; + (JMP_cNot (CMPGTU64rr (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2)), + bb:$offset)>; // Map from a 64-bit select to an emulated 64-bit mux. // Hexagon does not support 64-bit MUXes; so emulate with combines. 
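As the comment above says, there is no 64-bit mux, so the pattern below splits the select into two 32-bit MUX_rr operations on the register halves and recombines them. A small sketch of source that produces such a select (name invented):

  #include <cstdint>

  // Roughly: two 32-bit muxes on the high and low halves, then a combine.
  int64_t pick64(bool p, int64_t a, int64_t b) {
    return p ? a : b;
  }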
-def : Pat <(select PredRegs:$src1, DoubleRegs:$src2, DoubleRegs:$src3), - (COMBINE_rr - (MUX_rr PredRegs:$src1, - (EXTRACT_SUBREG DoubleRegs:$src2, subreg_hireg), - (EXTRACT_SUBREG DoubleRegs:$src3, subreg_hireg)), - (MUX_rr PredRegs:$src1, - (EXTRACT_SUBREG DoubleRegs:$src2, subreg_loreg), - (EXTRACT_SUBREG DoubleRegs:$src3, subreg_loreg)))>; +def : Pat <(select (i1 PredRegs:$src1), (i64 DoubleRegs:$src2), + (i64 DoubleRegs:$src3)), + (i64 (COMBINE_rr (i32 (MUX_rr (i1 PredRegs:$src1), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), + subreg_hireg)), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src3), + subreg_hireg)))), + (i32 (MUX_rr (i1 PredRegs:$src1), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), + subreg_loreg)), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src3), + subreg_loreg))))))>; // Map from a 1-bit select to logical ops. // From LegalizeDAG.cpp: (B1 ? B2 : B3) <=> (B1 & B2)|(!B1&B3). -def : Pat <(select PredRegs:$src1, PredRegs:$src2, PredRegs:$src3), - (OR_pp (AND_pp PredRegs:$src1, PredRegs:$src2), - (AND_pp (NOT_p PredRegs:$src1), PredRegs:$src3))>; +def : Pat <(select (i1 PredRegs:$src1), (i1 PredRegs:$src2), + (i1 PredRegs:$src3)), + (OR_pp (AND_pp (i1 PredRegs:$src1), (i1 PredRegs:$src2)), + (AND_pp (NOT_p (i1 PredRegs:$src1)), (i1 PredRegs:$src3)))>; // Map Pd = load(addr) -> Rs = load(addr); Pd = Rs. def : Pat<(i1 (load ADDRriS11_2:$addr)), (i1 (TFR_PdRs (i32 (LDrib ADDRriS11_2:$addr))))>; // Map for truncating from 64 immediates to 32 bit immediates. -def : Pat<(i32 (trunc DoubleRegs:$src)), - (i32 (EXTRACT_SUBREG DoubleRegs:$src, subreg_loreg))>; +def : Pat<(i32 (trunc (i64 DoubleRegs:$src))), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src), subreg_loreg))>; // Map for truncating from i64 immediates to i1 bit immediates. -def : Pat<(i1 (trunc DoubleRegs:$src)), - (i1 (TFR_PdRs (i32(EXTRACT_SUBREG DoubleRegs:$src, subreg_loreg))))>; +def : Pat<(i1 (trunc (i64 DoubleRegs:$src))), + (i1 (TFR_PdRs (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src), + subreg_loreg))))>; // Map memb(Rs) = Rdd -> memb(Rs) = Rt. -def : Pat<(truncstorei8 DoubleRegs:$src, ADDRriS11_0:$addr), - (STrib ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG DoubleRegs:$src, +def : Pat<(truncstorei8 (i64 DoubleRegs:$src), ADDRriS11_0:$addr), + (STrib ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src), subreg_loreg)))>; // Map memh(Rs) = Rdd -> memh(Rs) = Rt. -def : Pat<(truncstorei16 DoubleRegs:$src, ADDRriS11_0:$addr), - (STrih ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG DoubleRegs:$src, +def : Pat<(truncstorei16 (i64 DoubleRegs:$src), ADDRriS11_0:$addr), + (STrih ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src), + subreg_loreg)))>; +// Map memw(Rs) = Rdd -> memw(Rs) = Rt +def : Pat<(truncstorei32 (i64 DoubleRegs:$src), ADDRriS11_0:$addr), + (STriw ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src), subreg_loreg)))>; // Map memw(Rs) = Rdd -> memw(Rs) = Rt. -def : Pat<(truncstorei32 DoubleRegs:$src, ADDRriS11_0:$addr), - (STriw ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG DoubleRegs:$src, +def : Pat<(truncstorei32 (i64 DoubleRegs:$src), ADDRriS11_0:$addr), + (STriw ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src), subreg_loreg)))>; // Map from i1 = constant<-1>; memw(addr) = i1 -> r0 = 1; memw(addr) = r0. 
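Storing a boolean goes through a general register, since predicate registers cannot be stored directly: the constant-true case materializes #1 and stores a byte, and storing a computed predicate value is mapped through a 0/1 mux in the next hunk. A trivial sketch (names invented):

  // The constant-true store becomes "transfer #1 into a register, store byte";
  // a computed boolean may be converted to 0/1 with a mux before the store.
  void set_flag(bool *p)         { *p = true; }
  void copy_flag(bool *p, int v) { *p = (v != 0); }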
@@ -2770,118 +3187,134 @@ let AddedComplexity = 100 in // Map from i1 = constant<-1>; memw(CONST32(#foo)) = i1 -> r0 = 1; // memw(#foo) = r0 def : Pat<(store (i1 -1), (HexagonCONST32_GP tglobaladdr:$global)), - (STb_GP tglobaladdr:$global, (TFRI 1))>; - + (STb_GP tglobaladdr:$global, (TFRI 1))>, + Requires<[NoV4T]>; // Map from i1 = constant<-1>; store i1 -> r0 = 1; store r0. def : Pat<(store (i1 -1), ADDRriS11_2:$addr), (STrib ADDRriS11_2:$addr, (TFRI 1))>; // Map from memb(Rs) = Pd -> Rt = mux(Pd, #0, #1); store Rt. -def : Pat<(store PredRegs:$src1, ADDRriS11_2:$addr), - (STrib ADDRriS11_2:$addr, (i32 (MUX_ii PredRegs:$src1, 1, 0)) )>; +def : Pat<(store (i1 PredRegs:$src1), ADDRriS11_2:$addr), + (STrib ADDRriS11_2:$addr, (i32 (MUX_ii (i1 PredRegs:$src1), 1, 0)) )>; // Map Rdd = anyext(Rs) -> Rdd = sxtw(Rs). // Hexagon_TODO: We can probably use combine but that will cost 2 instructions. // Better way to do this? -def : Pat<(i64 (anyext IntRegs:$src1)), - (i64 (SXTW IntRegs:$src1))>; +def : Pat<(i64 (anyext (i32 IntRegs:$src1))), + (i64 (SXTW (i32 IntRegs:$src1)))>; // Map cmple -> cmpgt. // rs <= rt -> !(rs > rt). -def : Pat<(i1 (setle IntRegs:$src1, s10ImmPred:$src2)), - (i1 (NOT_p (CMPGTri IntRegs:$src1, s10ImmPred:$src2)))>; +def : Pat<(i1 (setle (i32 IntRegs:$src1), s10ImmPred:$src2)), + (i1 (NOT_p (CMPGTri (i32 IntRegs:$src1), s10ImmPred:$src2)))>; // rs <= rt -> !(rs > rt). -def : Pat<(i1 (setle IntRegs:$src1, IntRegs:$src2)), - (i1 (NOT_p (CMPGTrr IntRegs:$src1, IntRegs:$src2)))>; +def : Pat<(i1 (setle (i32 IntRegs:$src1), (i32 IntRegs:$src2))), + (i1 (NOT_p (CMPGTrr (i32 IntRegs:$src1), (i32 IntRegs:$src2))))>; // Rss <= Rtt -> !(Rss > Rtt). -def : Pat<(i1 (setle DoubleRegs:$src1, DoubleRegs:$src2)), - (i1 (NOT_p (CMPGT64rr DoubleRegs:$src1, DoubleRegs:$src2)))>; +def : Pat<(i1 (setle (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), + (i1 (NOT_p (CMPGT64rr (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))))>; // Map cmpne -> cmpeq. // Hexagon_TODO: We should improve on this. // rs != rt -> !(rs == rt). -def : Pat <(i1 (setne IntRegs:$src1, s10ImmPred:$src2)), - (i1 (NOT_p(i1 (CMPEQri IntRegs:$src1, s10ImmPred:$src2))))>; +def : Pat <(i1 (setne (i32 IntRegs:$src1), s10ImmPred:$src2)), + (i1 (NOT_p(i1 (CMPEQri (i32 IntRegs:$src1), s10ImmPred:$src2))))>; // Map cmpne(Rs) -> !cmpeqe(Rs). // rs != rt -> !(rs == rt). -def : Pat <(i1 (setne IntRegs:$src1, IntRegs:$src2)), - (i1 (NOT_p(i1 (CMPEQrr IntRegs:$src1, IntRegs:$src2))))>; +def : Pat <(i1 (setne (i32 IntRegs:$src1), (i32 IntRegs:$src2))), + (i1 (NOT_p (i1 (CMPEQrr (i32 IntRegs:$src1), (i32 IntRegs:$src2)))))>; // Convert setne back to xor for hexagon since we compute w/ pred registers. -def : Pat <(i1 (setne PredRegs:$src1, PredRegs:$src2)), - (i1 (XOR_pp PredRegs:$src1, PredRegs:$src2))>; +def : Pat <(i1 (setne (i1 PredRegs:$src1), (i1 PredRegs:$src2))), + (i1 (XOR_pp (i1 PredRegs:$src1), (i1 PredRegs:$src2)))>; // Map cmpne(Rss) -> !cmpew(Rss). // rs != rt -> !(rs == rt). -def : Pat <(i1 (setne DoubleRegs:$src1, DoubleRegs:$src2)), - (i1 (NOT_p(i1 (CMPEHexagon4rr DoubleRegs:$src1, DoubleRegs:$src2))))>; +def : Pat <(i1 (setne (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), + (i1 (NOT_p (i1 (CMPEHexagon4rr (i64 DoubleRegs:$src1), + (i64 DoubleRegs:$src2)))))>; // Map cmpge(Rs, Rt) -> !(cmpgt(Rs, Rt). // rs >= rt -> !(rt > rs). 
-def : Pat <(i1 (setge IntRegs:$src1, IntRegs:$src2)), - (i1 (NOT_p(i1 (CMPGTrr IntRegs:$src2, IntRegs:$src1))))>; +def : Pat <(i1 (setge (i32 IntRegs:$src1), (i32 IntRegs:$src2))), + (i1 (NOT_p (i1 (CMPGTrr (i32 IntRegs:$src2), (i32 IntRegs:$src1)))))>; -def : Pat <(i1 (setge IntRegs:$src1, s8ImmPred:$src2)), - (i1 (CMPGEri IntRegs:$src1, s8ImmPred:$src2))>; +def : Pat <(i1 (setge (i32 IntRegs:$src1), s8ImmPred:$src2)), + (i1 (CMPGEri (i32 IntRegs:$src1), s8ImmPred:$src2))>; // Map cmpge(Rss, Rtt) -> !cmpgt(Rtt, Rss). // rss >= rtt -> !(rtt > rss). -def : Pat <(i1 (setge DoubleRegs:$src1, DoubleRegs:$src2)), - (i1 (NOT_p(i1 (CMPGT64rr DoubleRegs:$src2, DoubleRegs:$src1))))>; +def : Pat <(i1 (setge (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), + (i1 (NOT_p (i1 (CMPGT64rr (i64 DoubleRegs:$src2), + (i64 DoubleRegs:$src1)))))>; // Map cmplt(Rs, Imm) -> !cmpge(Rs, Imm). // rs < rt -> !(rs >= rt). -def : Pat <(i1 (setlt IntRegs:$src1, s8ImmPred:$src2)), - (i1 (NOT_p (CMPGEri IntRegs:$src1, s8ImmPred:$src2)))>; +def : Pat <(i1 (setlt (i32 IntRegs:$src1), s8ImmPred:$src2)), + (i1 (NOT_p (CMPGEri (i32 IntRegs:$src1), s8ImmPred:$src2)))>; -// Map cmplt(Rs, Rt) -> cmplt(Rs, Rt). -// rs < rt -> rs < rt. Let assembler map it. -def : Pat <(i1 (setlt IntRegs:$src1, IntRegs:$src2)), - (i1 (CMPLTrr IntRegs:$src2, IntRegs:$src1))>; +// Map cmplt(Rs, Rt) -> cmpgt(Rt, Rs). +// rs < rt -> rt > rs. +// We can let assembler map it, or we can do in the compiler itself. +def : Pat <(i1 (setlt (i32 IntRegs:$src1), (i32 IntRegs:$src2))), + (i1 (CMPGTrr (i32 IntRegs:$src2), (i32 IntRegs:$src1)))>; // Map cmplt(Rss, Rtt) -> cmpgt(Rtt, Rss). // rss < rtt -> (rtt > rss). -def : Pat <(i1 (setlt DoubleRegs:$src1, DoubleRegs:$src2)), - (i1 (CMPGT64rr DoubleRegs:$src2, DoubleRegs:$src1))>; +def : Pat <(i1 (setlt (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), + (i1 (CMPGT64rr (i64 DoubleRegs:$src2), (i64 DoubleRegs:$src1)))>; -// Map from cmpltu(Rs, Rd) -> !cmpgtu(Rs, Rd - 1). +// Map from cmpltu(Rs, Rd) -> cmpgtu(Rd, Rs) // rs < rt -> rt > rs. -def : Pat <(i1 (setult IntRegs:$src1, IntRegs:$src2)), - (i1 (CMPGTUrr IntRegs:$src2, IntRegs:$src1))>; +// We can let assembler map it, or we can do in the compiler itself. +def : Pat <(i1 (setult (i32 IntRegs:$src1), (i32 IntRegs:$src2))), + (i1 (CMPGTUrr (i32 IntRegs:$src2), (i32 IntRegs:$src1)))>; -// Map from cmpltu(Rss, Rdd) -> !cmpgtu(Rss, Rdd - 1). +// Map from cmpltu(Rss, Rdd) -> cmpgtu(Rdd, Rss). // rs < rt -> rt > rs. -def : Pat <(i1 (setult DoubleRegs:$src1, DoubleRegs:$src2)), - (i1 (CMPGTU64rr DoubleRegs:$src2, DoubleRegs:$src1))>; +def : Pat <(i1 (setult (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), + (i1 (CMPGTU64rr (i64 DoubleRegs:$src2), (i64 DoubleRegs:$src1)))>; + +// Generate cmpgeu(Rs, #u8) +def : Pat <(i1 (setuge (i32 IntRegs:$src1), u8ImmPred:$src2)), + (i1 (CMPGEUri (i32 IntRegs:$src1), u8ImmPred:$src2))>; + +// Generate cmpgtu(Rs, #u9) +def : Pat <(i1 (setugt (i32 IntRegs:$src1), u9ImmPred:$src2)), + (i1 (CMPGTUri (i32 IntRegs:$src1), u9ImmPred:$src2))>; // Map from Rs >= Rt -> !(Rt > Rs). // rs >= rt -> !(rt > rs). -def : Pat <(i1 (setuge IntRegs:$src1, IntRegs:$src2)), - (i1 (NOT_p (CMPGTUrr IntRegs:$src2, IntRegs:$src1)))>; +def : Pat <(i1 (setuge (i32 IntRegs:$src1), (i32 IntRegs:$src2))), + (i1 (NOT_p (CMPGTUrr (i32 IntRegs:$src2), (i32 IntRegs:$src1))))>; // Map from Rs >= Rt -> !(Rt > Rs). // rs >= rt -> !(rt > rs). 
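These mappings canonicalize <, <=, and >= onto the cmp.gt/cmp.gtu family, either swapping operands or negating the resulting predicate, and the immediate forms use cmp.gtu/cmp.geu directly when the constant fits. In rough C++ terms (helper names invented):

  #include <cstdint>

  bool lt_s (int32_t a, int32_t b)   { return a < b;   } // cmp.gt(b, a)
  bool ge_s (int32_t a, int32_t b)   { return a >= b;  } // !cmp.gt(b, a)
  bool lt_u (uint32_t a, uint32_t b) { return a < b;   } // cmp.gtu(b, a)
  bool gt_ui(uint32_t a)             { return a > 100; } // cmp.gtu(a, #100)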
-def : Pat <(i1 (setuge DoubleRegs:$src1, DoubleRegs:$src2)), - (i1 (NOT_p (CMPGTU64rr DoubleRegs:$src2, DoubleRegs:$src1)))>; +def : Pat <(i1 (setuge (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), + (i1 (NOT_p (CMPGTU64rr (i64 DoubleRegs:$src2), (i64 DoubleRegs:$src1))))>; // Map from cmpleu(Rs, Rs) -> !cmpgtu(Rs, Rs). // Map from (Rs <= Rt) -> !(Rs > Rt). -def : Pat <(i1 (setule IntRegs:$src1, IntRegs:$src2)), - (i1 (NOT_p (CMPGTUrr IntRegs:$src1, IntRegs:$src2)))>; +def : Pat <(i1 (setule (i32 IntRegs:$src1), (i32 IntRegs:$src2))), + (i1 (NOT_p (CMPGTUrr (i32 IntRegs:$src1), (i32 IntRegs:$src2))))>; // Map from cmpleu(Rss, Rtt) -> !cmpgtu(Rss, Rtt-1). // Map from (Rs <= Rt) -> !(Rs > Rt). -def : Pat <(i1 (setule DoubleRegs:$src1, DoubleRegs:$src2)), - (i1 (NOT_p (CMPGTU64rr DoubleRegs:$src1, DoubleRegs:$src2)))>; +def : Pat <(i1 (setule (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), + (i1 (NOT_p (CMPGTU64rr (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))))>; // Sign extends. // i1 -> i32 -def : Pat <(i32 (sext PredRegs:$src1)), - (i32 (MUX_ii PredRegs:$src1, -1, 0))>; +def : Pat <(i32 (sext (i1 PredRegs:$src1))), + (i32 (MUX_ii (i1 PredRegs:$src1), -1, 0))>; + +// i1 -> i64 +def : Pat <(i64 (sext (i1 PredRegs:$src1))), + (i64 (COMBINE_rr (TFRI -1), (MUX_ii (i1 PredRegs:$src1), -1, 0)))>; // Convert sign-extended load back to load and sign extend. // i8 -> i64 @@ -2906,16 +3339,16 @@ def: Pat <(i64 (sextloadi32 ADDRriS11_2:$src1)), // Zero extends. // i1 -> i32 -def : Pat <(i32 (zext PredRegs:$src1)), - (i32 (MUX_ii PredRegs:$src1, 1, 0))>; +def : Pat <(i32 (zext (i1 PredRegs:$src1))), + (i32 (MUX_ii (i1 PredRegs:$src1), 1, 0))>; // i1 -> i64 -def : Pat <(i64 (zext PredRegs:$src1)), - (i64 (COMBINE_rr (TFRI 0), (MUX_ii PredRegs:$src1, 1, 0)))>; +def : Pat <(i64 (zext (i1 PredRegs:$src1))), + (i64 (COMBINE_rr (TFRI 0), (MUX_ii (i1 PredRegs:$src1), 1, 0)))>; // i32 -> i64 -def : Pat <(i64 (zext IntRegs:$src1)), - (i64 (COMBINE_rr (TFRI 0), IntRegs:$src1))>; +def : Pat <(i64 (zext (i32 IntRegs:$src1))), + (i64 (COMBINE_rr (TFRI 0), (i32 IntRegs:$src1)))>; // i8 -> i64 def: Pat <(i64 (zextloadi8 ADDRriS11_0:$src1)), @@ -2933,16 +3366,16 @@ def: Pat <(i32 (zextloadi1 ADDRriS11_0:$src1)), (i32 (LDriw ADDRriS11_0:$src1))>; // Map from Rs = Pd to Pd = mux(Pd, #1, #0) -def : Pat <(i32 (zext PredRegs:$src1)), - (i32 (MUX_ii PredRegs:$src1, 1, 0))>; +def : Pat <(i32 (zext (i1 PredRegs:$src1))), + (i32 (MUX_ii (i1 PredRegs:$src1), 1, 0))>; // Map from Rs = Pd to Pd = mux(Pd, #1, #0) -def : Pat <(i32 (anyext PredRegs:$src1)), - (i32 (MUX_ii PredRegs:$src1, 1, 0))>; +def : Pat <(i32 (anyext (i1 PredRegs:$src1))), + (i32 (MUX_ii (i1 PredRegs:$src1), 1, 0))>; // Map from Rss = Pd to Rdd = sxtw (mux(Pd, #1, #0)) -def : Pat <(i64 (anyext PredRegs:$src1)), - (i64 (SXTW (i32 (MUX_ii PredRegs:$src1, 1, 0))))>; +def : Pat <(i64 (anyext (i1 PredRegs:$src1))), + (i64 (SXTW (i32 (MUX_ii (i1 PredRegs:$src1), 1, 0))))>; // Any extended 64-bit load. @@ -2955,75 +3388,104 @@ def: Pat <(i64 (extloadi16 ADDRriS11_2:$src1)), (i64 (COMBINE_rr (TFRI 0), (LDrih ADDRriS11_2:$src1)))>; // Map from Rdd = zxtw(Rs) -> Rdd = combine(0, Rs). -def : Pat<(i64 (zext IntRegs:$src1)), - (i64 (COMBINE_rr (TFRI 0), IntRegs:$src1))>; +def : Pat<(i64 (zext (i32 IntRegs:$src1))), + (i64 (COMBINE_rr (TFRI 0), (i32 IntRegs:$src1)))>; // Multiply 64-bit unsigned and use upper result. 
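The expansion below builds the upper 64 bits of a 64x64 multiply out of 32x32 partial products, chaining MPYU64/MPY64_acc with 32-bit right shifts. The sketch here is not a line-for-line transcription of the pattern, only the textbook limb decomposition it is built from:

  #include <cstdint>

  // Upper 64 bits of the 128-bit product of a and b, computed from
  // four 32x32->64 partial products (a portable sketch of the idea).
  uint64_t mulhu64(uint64_t a, uint64_t b) {
    uint64_t a_lo = static_cast<uint32_t>(a), a_hi = a >> 32;
    uint64_t b_lo = static_cast<uint32_t>(b), b_hi = b >> 32;

    uint64_t lo_lo = a_lo * b_lo;
    uint64_t hi_lo = a_hi * b_lo;
    uint64_t lo_hi = a_lo * b_hi;
    uint64_t hi_hi = a_hi * b_hi;

    uint64_t mid = (lo_lo >> 32) + static_cast<uint32_t>(hi_lo)
                                 + static_cast<uint32_t>(lo_hi);
    return hi_hi + (hi_lo >> 32) + (lo_hi >> 32) + (mid >> 32);
  }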
-def : Pat <(mulhu DoubleRegs:$src1, DoubleRegs:$src2), - (MPYU64_acc(COMBINE_rr (TFRI 0), - (EXTRACT_SUBREG - (LSRd_ri(MPYU64_acc(MPYU64_acc(COMBINE_rr (TFRI 0), - (EXTRACT_SUBREG (LSRd_ri(MPYU64 - (EXTRACT_SUBREG DoubleRegs:$src1, - subreg_loreg), - (EXTRACT_SUBREG DoubleRegs:$src2, - subreg_loreg)), - 32) ,subreg_loreg)), - (EXTRACT_SUBREG DoubleRegs:$src1, - subreg_hireg), - (EXTRACT_SUBREG DoubleRegs:$src2, - subreg_loreg)), - (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg), - (EXTRACT_SUBREG DoubleRegs:$src2, subreg_hireg)), - 32),subreg_loreg)), - (EXTRACT_SUBREG DoubleRegs:$src1, subreg_hireg), - (EXTRACT_SUBREG DoubleRegs:$src2, subreg_hireg) - )>; +def : Pat <(mulhu (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2)), + (i64 + (MPYU64_acc + (i64 + (COMBINE_rr + (TFRI 0), + (i32 + (EXTRACT_SUBREG + (i64 + (LSRd_ri + (i64 + (MPYU64_acc + (i64 + (MPYU64_acc + (i64 + (COMBINE_rr (TFRI 0), + (i32 + (EXTRACT_SUBREG + (i64 + (LSRd_ri + (i64 + (MPYU64 (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), + subreg_loreg)), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), + subreg_loreg)))), 32)), + subreg_loreg)))), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_hireg)), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_loreg)))), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_loreg)), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_hireg)))), + 32)), subreg_loreg)))), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_hireg)), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_hireg))))>; // Multiply 64-bit signed and use upper result. -def : Pat <(mulhs DoubleRegs:$src1, DoubleRegs:$src2), - (MPY64_acc(COMBINE_rr (TFRI 0), - (EXTRACT_SUBREG - (LSRd_ri(MPY64_acc(MPY64_acc(COMBINE_rr (TFRI 0), - (EXTRACT_SUBREG (LSRd_ri(MPYU64 - (EXTRACT_SUBREG DoubleRegs:$src1, - subreg_loreg), - (EXTRACT_SUBREG DoubleRegs:$src2, - subreg_loreg)), - 32) ,subreg_loreg)), - (EXTRACT_SUBREG DoubleRegs:$src1, - subreg_hireg), - (EXTRACT_SUBREG DoubleRegs:$src2, - subreg_loreg)), - (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg), - (EXTRACT_SUBREG DoubleRegs:$src2, subreg_hireg)), - 32),subreg_loreg)), - (EXTRACT_SUBREG DoubleRegs:$src1, subreg_hireg), - (EXTRACT_SUBREG DoubleRegs:$src2, subreg_hireg) - )>; +def : Pat <(mulhs (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2)), + (i64 + (MPY64_acc + (i64 + (COMBINE_rr (TFRI 0), + (i32 + (EXTRACT_SUBREG + (i64 + (LSRd_ri + (i64 + (MPY64_acc + (i64 + (MPY64_acc + (i64 + (COMBINE_rr (TFRI 0), + (i32 + (EXTRACT_SUBREG + (i64 + (LSRd_ri + (i64 + (MPYU64 (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), + subreg_loreg)), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), + subreg_loreg)))), 32)), + subreg_loreg)))), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_hireg)), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_loreg)))), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_loreg)), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_hireg)))), + 32)), subreg_loreg)))), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_hireg)), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_hireg))))>; // Hexagon specific ISD nodes. -def SDTHexagonADJDYNALLOC : SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>]>; +//def SDTHexagonADJDYNALLOC : SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>]>; +def SDTHexagonADJDYNALLOC : SDTypeProfile<1, 2, + [SDTCisVT<0, i32>, SDTCisVT<1, i32>]>; def Hexagon_ADJDYNALLOC : SDNode<"HexagonISD::ADJDYNALLOC", - SDTHexagonADJDYNALLOC>; + SDTHexagonADJDYNALLOC>; // Needed to tag these instructions for stack layout. 
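ADJDYNALLOC models the stack-pointer adjustment for a dynamically sized stack allocation, which is why the instruction below needs to be tagged for stack layout. A hedged example of source that creates one; __builtin_alloca is the GCC/Clang builtin, used here only for illustration:

  #include <cstring>

  // A runtime-sized stack buffer forces a dynamic stack adjustment.
  void zero_on_stack(unsigned n) {
    char *buf = static_cast<char *>(__builtin_alloca(n));
    std::memset(buf, 0, n);
  }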
let usesCustomInserter = 1 in def ADJDYNALLOC : ALU32_ri<(outs IntRegs:$dst), (ins IntRegs:$src1, s16Imm:$src2), "$dst = add($src1, #$src2)", - [(set IntRegs:$dst, (Hexagon_ADJDYNALLOC IntRegs:$src1, - s16ImmPred:$src2))]>; + [(set (i32 IntRegs:$dst), + (Hexagon_ADJDYNALLOC (i32 IntRegs:$src1), + s16ImmPred:$src2))]>; -def SDTHexagonARGEXTEND : SDTypeProfile<1, 1, []>; +def SDTHexagonARGEXTEND : SDTypeProfile<1, 1, [SDTCisVT<0, i32>]>; def Hexagon_ARGEXTEND : SDNode<"HexagonISD::ARGEXTEND", SDTHexagonARGEXTEND>; +let neverHasSideEffects = 1 in def ARGEXTEND : ALU32_rr <(outs IntRegs:$dst), (ins IntRegs:$src1), "$dst = $src1", - [(set IntRegs:$dst, (Hexagon_ARGEXTEND IntRegs:$src1))]>; + [(set (i32 IntRegs:$dst), + (Hexagon_ARGEXTEND (i32 IntRegs:$src1)))]>; let AddedComplexity = 100 in -def : Pat<(i32 (sext_inreg (Hexagon_ARGEXTEND IntRegs:$src1), i16)), - (TFR IntRegs:$src1)>; - +def : Pat<(i32 (sext_inreg (Hexagon_ARGEXTEND (i32 IntRegs:$src1)), i16)), + (COPY (i32 IntRegs:$src1))>; def SDHexagonBR_JT: SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>; def HexagonBR_JT: SDNode<"HexagonISD::BR_JT", SDHexagonBR_JT, [SDNPHasChain]>; @@ -3031,12 +3493,94 @@ def HexagonBR_JT: SDNode<"HexagonISD::BR_JT", SDHexagonBR_JT, [SDNPHasChain]>; let isBranch=1, isIndirectBranch=1, isTerminator=1, isBarrier = 1 in def BR_JT : JRInst<(outs), (ins IntRegs:$src), "jumpr $src", - [(HexagonBR_JT IntRegs:$src)]>; + [(HexagonBR_JT (i32 IntRegs:$src))]>; + def HexagonWrapperJT: SDNode<"HexagonISD::WrapperJT", SDTIntUnaryOp>; +def HexagonWrapperCP: SDNode<"HexagonISD::WrapperCP", SDTIntUnaryOp>; def : Pat<(HexagonWrapperJT tjumptable:$dst), - (CONST32_set_jt tjumptable:$dst)>; + (i32 (CONST32_set_jt tjumptable:$dst))>; +def : Pat<(HexagonWrapperCP tconstpool :$dst), + (i32 (CONST32_set_jt tconstpool:$dst))>; + +// XTYPE/SHIFT + +// Multi-class for logical operators : +// Shift by immediate/register and accumulate/logical +multiclass xtype_imm<string OpcStr, SDNode OpNode1, SDNode OpNode2> { + def _ri : SInst_acc<(outs IntRegs:$dst), + (ins IntRegs:$src1, IntRegs:$src2, u5Imm:$src3), + !strconcat("$dst ", !strconcat(OpcStr, "($src2, #$src3)")), + [(set (i32 IntRegs:$dst), + (OpNode2 (i32 IntRegs:$src1), + (OpNode1 (i32 IntRegs:$src2), + u5ImmPred:$src3)))], + "$src1 = $dst">; + def d_ri : SInst_acc<(outs DoubleRegs:$dst), + (ins DoubleRegs:$src1, DoubleRegs:$src2, u6Imm:$src3), + !strconcat("$dst ", !strconcat(OpcStr, "($src2, #$src3)")), + [(set (i64 DoubleRegs:$dst), (OpNode2 (i64 DoubleRegs:$src1), + (OpNode1 (i64 DoubleRegs:$src2), u6ImmPred:$src3)))], + "$src1 = $dst">; +} + +// Multi-class for logical operators : +// Shift by register and accumulate/logical (32/64 bits) +multiclass xtype_reg<string OpcStr, SDNode OpNode1, SDNode OpNode2> { + def _rr : SInst_acc<(outs IntRegs:$dst), + (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3), + !strconcat("$dst ", !strconcat(OpcStr, "($src2, $src3)")), + [(set (i32 IntRegs:$dst), + (OpNode2 (i32 IntRegs:$src1), + (OpNode1 (i32 IntRegs:$src2), + (i32 IntRegs:$src3))))], + "$src1 = $dst">; + + def d_rr : SInst_acc<(outs DoubleRegs:$dst), + (ins DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3), + !strconcat("$dst ", !strconcat(OpcStr, "($src2, $src3)")), + [(set (i64 DoubleRegs:$dst), + (OpNode2 (i64 DoubleRegs:$src1), + (OpNode1 (i64 DoubleRegs:$src2), + (i32 IntRegs:$src3))))], + "$src1 = $dst">; + +} + +multiclass basic_xtype_imm<string OpcStr, SDNode OpNode> { +let AddedComplexity = 100 in + defm _ADD : xtype_imm< !strconcat("+= ", OpcStr), OpNode, add>; + defm _SUB : 
xtype_imm< !strconcat("-= ", OpcStr), OpNode, sub>; + defm _AND : xtype_imm< !strconcat("&= ", OpcStr), OpNode, and>; + defm _OR : xtype_imm< !strconcat("|= ", OpcStr), OpNode, or>; +} + +multiclass basic_xtype_reg<string OpcStr, SDNode OpNode> { +let AddedComplexity = 100 in + defm _ADD : xtype_reg< !strconcat("+= ", OpcStr), OpNode, add>; + defm _SUB : xtype_reg< !strconcat("-= ", OpcStr), OpNode, sub>; + defm _AND : xtype_reg< !strconcat("&= ", OpcStr), OpNode, and>; + defm _OR : xtype_reg< !strconcat("|= ", OpcStr), OpNode, or>; +} + +multiclass xtype_xor_imm<string OpcStr, SDNode OpNode> { +let AddedComplexity = 100 in + defm _XOR : xtype_imm< !strconcat("^= ", OpcStr), OpNode, xor>; +} + +defm ASL : basic_xtype_imm<"asl", shl>, basic_xtype_reg<"asl", shl>, + xtype_xor_imm<"asl", shl>; + +defm LSR : basic_xtype_imm<"lsr", srl>, basic_xtype_reg<"lsr", srl>, + xtype_xor_imm<"lsr", srl>; + +defm ASR : basic_xtype_imm<"asr", sra>, basic_xtype_reg<"asr", sra>; +defm LSL : basic_xtype_reg<"lsl", shl>; + +// Change the sign of the immediate for Rd=-mpyi(Rs,#u8) +def : Pat <(mul (i32 IntRegs:$src1), (ineg n8ImmPred:$src2)), + (i32 (MPYI_rin (i32 IntRegs:$src1), u8ImmPred:$src2))>; //===----------------------------------------------------------------------===// // V3 Instructions + @@ -3057,3 +3601,13 @@ include "HexagonInstrInfoV4.td" //===----------------------------------------------------------------------===// // V4 Instructions - //===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// V5 Instructions + +//===----------------------------------------------------------------------===// + +include "HexagonInstrInfoV5.td" + +//===----------------------------------------------------------------------===// +// V5 Instructions - +//===----------------------------------------------------------------------===// diff --git a/lib/Target/Hexagon/HexagonInstrInfoV5.td b/lib/Target/Hexagon/HexagonInstrInfoV5.td new file mode 100644 index 0000000000..92d098cc04 --- /dev/null +++ b/lib/Target/Hexagon/HexagonInstrInfoV5.td @@ -0,0 +1,626 @@ +def SDTHexagonFCONST32 : SDTypeProfile<1, 1, [ + SDTCisVT<0, f32>, + SDTCisPtrTy<1>]>; +def HexagonFCONST32 : SDNode<"HexagonISD::FCONST32", SDTHexagonFCONST32>; + +let isReMaterializable = 1, isMoveImm = 1 in +def FCONST32_nsdata : LDInst<(outs IntRegs:$dst), (ins globaladdress:$global), + "$dst = CONST32(#$global)", + [(set (f32 IntRegs:$dst), + (HexagonFCONST32 tglobaladdr:$global))]>, + Requires<[HasV5T]>; + +let isReMaterializable = 1, isMoveImm = 1 in +def CONST64_Float_Real : LDInst<(outs DoubleRegs:$dst), (ins f64imm:$src1), + "$dst = CONST64(#$src1)", + [(set DoubleRegs:$dst, fpimm:$src1)]>, + Requires<[HasV5T]>; + +let isReMaterializable = 1, isMoveImm = 1 in +def CONST32_Float_Real : LDInst<(outs IntRegs:$dst), (ins f32imm:$src1), + "$dst = CONST32(#$src1)", + [(set IntRegs:$dst, fpimm:$src1)]>, + Requires<[HasV5T]>; + +// Transfer immediate float. +// Only works with single precision fp value. +// For double precision, use CONST64_float_real, as 64bit transfer +// can only hold 40-bit values - 32 from const ext + 8 bit immediate. 
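In practical terms, using a floating-point literal is what exercises these immediate-materialization forms: a single-precision constant can be moved with the constant-extended transfer defined next, while a double-precision constant is materialized through CONST64_Float_Real. A trivial sketch:

  // Single-precision literal: candidate for the TFRI_f style transfer.
  float  half_turn() { return 0.5f; }

  // Double-precision literal: materialized via a 64-bit constant load.
  double quarter()   { return 0.25; }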
+let isMoveImm = 1, isReMaterializable = 1, isPredicable = 1 in +def TFRI_f : ALU32_ri<(outs IntRegs:$dst), (ins f32imm:$src1), + "$dst = ##$src1", + [(set IntRegs:$dst, fpimm:$src1)]>, + Requires<[HasV5T]>; + +def TFRI_cPt_f : ALU32_ri<(outs IntRegs:$dst), + (ins PredRegs:$src1, f32imm:$src2), + "if ($src1) $dst = ##$src2", + []>, + Requires<[HasV5T]>; + +let isPredicated = 1 in +def TFRI_cNotPt_f : ALU32_ri<(outs IntRegs:$dst), + (ins PredRegs:$src1, f32imm:$src2), + "if (!$src1) $dst = ##$src2", + []>, + Requires<[HasV5T]>; + +// Convert single precision to double precision and vice-versa. +def CONVERT_sf2df : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src), + "$dst = convert_sf2df($src)", + [(set DoubleRegs:$dst, (fextend IntRegs:$src))]>, + Requires<[HasV5T]>; + +def CONVERT_df2sf : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src), + "$dst = convert_df2sf($src)", + [(set IntRegs:$dst, (fround DoubleRegs:$src))]>, + Requires<[HasV5T]>; + + +// Load. +def LDrid_f : LDInst<(outs DoubleRegs:$dst), + (ins MEMri:$addr), + "$dst = memd($addr)", + [(set DoubleRegs:$dst, (f64 (load ADDRriS11_3:$addr)))]>, + Requires<[HasV5T]>; + + +let AddedComplexity = 20 in +def LDrid_indexed_f : LDInst<(outs DoubleRegs:$dst), + (ins IntRegs:$src1, s11_3Imm:$offset), + "$dst = memd($src1+#$offset)", + [(set DoubleRegs:$dst, (f64 (load (add IntRegs:$src1, + s11_3ImmPred:$offset))))]>, + Requires<[HasV5T]>; + +def LDriw_f : LDInst<(outs IntRegs:$dst), + (ins MEMri:$addr), "$dst = memw($addr)", + [(set IntRegs:$dst, (f32 (load ADDRriS11_2:$addr)))]>, + Requires<[HasV5T]>; + + +let AddedComplexity = 20 in +def LDriw_indexed_f : LDInst<(outs IntRegs:$dst), + (ins IntRegs:$src1, s11_2Imm:$offset), + "$dst = memw($src1+#$offset)", + [(set IntRegs:$dst, (f32 (load (add IntRegs:$src1, + s11_2ImmPred:$offset))))]>, + Requires<[HasV5T]>; + +// Store. +def STriw_f : STInst<(outs), + (ins MEMri:$addr, IntRegs:$src1), + "memw($addr) = $src1", + [(store (f32 IntRegs:$src1), ADDRriS11_2:$addr)]>, + Requires<[HasV5T]>; + +let AddedComplexity = 10 in +def STriw_indexed_f : STInst<(outs), + (ins IntRegs:$src1, s11_2Imm:$src2, IntRegs:$src3), + "memw($src1+#$src2) = $src3", + [(store (f32 IntRegs:$src3), + (add IntRegs:$src1, s11_2ImmPred:$src2))]>, + Requires<[HasV5T]>; + +def STrid_f : STInst<(outs), + (ins MEMri:$addr, DoubleRegs:$src1), + "memd($addr) = $src1", + [(store (f64 DoubleRegs:$src1), ADDRriS11_2:$addr)]>, + Requires<[HasV5T]>; + +// Indexed store double word. 
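The f32/f64 load and store forms mirror the integer memw/memd instructions, and constant-offset field or element accesses can use the indexed (base + #offset) variants defined alongside them. For example (names invented):

  struct Sample { float x; double y; };

  // Field accesses with constant offsets can use the indexed f32/f64 forms.
  double combine(const Sample *s) {
    return s->x + s->y;
  }

  void reset(Sample *s) {
    s->x = 0.0f;   // memw-style store of an f32
    s->y = 0.0;    // memd-style store of an f64
  }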
+let AddedComplexity = 10 in +def STrid_indexed_f : STInst<(outs), + (ins IntRegs:$src1, s11_3Imm:$src2, DoubleRegs:$src3), + "memd($src1+#$src2) = $src3", + [(store (f64 DoubleRegs:$src3), + (add IntRegs:$src1, s11_3ImmPred:$src2))]>, + Requires<[HasV5T]>; + + +// Add +let isCommutable = 1 in +def fADD_rr : ALU64_rr<(outs IntRegs:$dst), + (ins IntRegs:$src1, IntRegs:$src2), + "$dst = sfadd($src1, $src2)", + [(set IntRegs:$dst, (fadd IntRegs:$src1, IntRegs:$src2))]>, + Requires<[HasV5T]>; + +let isCommutable = 1 in +def fADD64_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, + DoubleRegs:$src2), + "$dst = dfadd($src1, $src2)", + [(set DoubleRegs:$dst, (fadd DoubleRegs:$src1, + DoubleRegs:$src2))]>, + Requires<[HasV5T]>; + +def fSUB_rr : ALU64_rr<(outs IntRegs:$dst), + (ins IntRegs:$src1, IntRegs:$src2), + "$dst = sfsub($src1, $src2)", + [(set IntRegs:$dst, (fsub IntRegs:$src1, IntRegs:$src2))]>, + Requires<[HasV5T]>; + +def fSUB64_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, + DoubleRegs:$src2), + "$dst = dfsub($src1, $src2)", + [(set DoubleRegs:$dst, (fsub DoubleRegs:$src1, + DoubleRegs:$src2))]>, + Requires<[HasV5T]>; + +let isCommutable = 1 in +def fMUL_rr : ALU64_rr<(outs IntRegs:$dst), + (ins IntRegs:$src1, IntRegs:$src2), + "$dst = sfmpy($src1, $src2)", + [(set IntRegs:$dst, (fmul IntRegs:$src1, IntRegs:$src2))]>, + Requires<[HasV5T]>; + +let isCommutable = 1 in +def fMUL64_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, + DoubleRegs:$src2), + "$dst = dfmpy($src1, $src2)", + [(set DoubleRegs:$dst, (fmul DoubleRegs:$src1, + DoubleRegs:$src2))]>, + Requires<[HasV5T]>; + +// Compare. +let isCompare = 1 in { +multiclass FCMP64_rr<string OpcStr, PatFrag OpNode> { + def _rr : ALU64_rr<(outs PredRegs:$dst), (ins DoubleRegs:$b, DoubleRegs:$c), + !strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")), + [(set PredRegs:$dst, + (OpNode (f64 DoubleRegs:$b), (f64 DoubleRegs:$c)))]>, + Requires<[HasV5T]>; +} + +multiclass FCMP32_rr<string OpcStr, PatFrag OpNode> { + def _rr : ALU64_rr<(outs PredRegs:$dst), (ins IntRegs:$b, IntRegs:$c), + !strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")), + [(set PredRegs:$dst, + (OpNode (f32 IntRegs:$b), (f32 IntRegs:$c)))]>, + Requires<[HasV5T]>; +} +} + +defm FCMPOEQ64 : FCMP64_rr<"dfcmp.eq", setoeq>; +defm FCMPUEQ64 : FCMP64_rr<"dfcmp.eq", setueq>; +defm FCMPOGT64 : FCMP64_rr<"dfcmp.gt", setogt>; +defm FCMPUGT64 : FCMP64_rr<"dfcmp.gt", setugt>; +defm FCMPOGE64 : FCMP64_rr<"dfcmp.ge", setoge>; +defm FCMPUGE64 : FCMP64_rr<"dfcmp.ge", setuge>; + +defm FCMPOEQ32 : FCMP32_rr<"sfcmp.eq", setoeq>; +defm FCMPUEQ32 : FCMP32_rr<"sfcmp.eq", setueq>; +defm FCMPOGT32 : FCMP32_rr<"sfcmp.gt", setogt>; +defm FCMPUGT32 : FCMP32_rr<"sfcmp.gt", setugt>; +defm FCMPOGE32 : FCMP32_rr<"sfcmp.ge", setoge>; +defm FCMPUGE32 : FCMP32_rr<"sfcmp.ge", setuge>; + +// olt. +def : Pat <(i1 (setolt (f32 IntRegs:$src1), (f32 IntRegs:$src2))), + (i1 (FCMPOGT32_rr IntRegs:$src2, IntRegs:$src1))>, + Requires<[HasV5T]>; + +def : Pat <(i1 (setolt (f32 IntRegs:$src1), (fpimm:$src2))), + (i1 (FCMPOGT32_rr (f32 (TFRI_f fpimm:$src2)), (f32 IntRegs:$src1)))>, + Requires<[HasV5T]>; + +def : Pat <(i1 (setolt (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))), + (i1 (FCMPOGT64_rr DoubleRegs:$src2, DoubleRegs:$src1))>, + Requires<[HasV5T]>; + +def : Pat <(i1 (setolt (f64 DoubleRegs:$src1), (fpimm:$src2))), + (i1 (FCMPOGT64_rr (f64 (CONST64_Float_Real fpimm:$src2)), + (f64 DoubleRegs:$src1)))>, + Requires<[HasV5T]>; + +// gt. 
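Only the eq/gt/ge compare instructions are defined, so the remaining predicates are obtained by swapping operands, as the setolt patterns above do, or by negating the result, as the setone/setune patterns further below do. A small sketch at the C++ level (names invented):

  // Commented mappings are approximate, not a claim about exact codegen.
  bool flt(float a, float b)   { return a < b;  } // roughly sfcmp.gt(b, a)
  bool fle(float a, float b)   { return a <= b; } // roughly sfcmp.ge(b, a)
  bool dlt(double a, double b) { return a < b;  } // roughly dfcmp.gt(b, a)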
+def : Pat <(i1 (setugt (f64 DoubleRegs:$src1), (fpimm:$src2))), + (i1 (FCMPUGT64_rr (f64 DoubleRegs:$src1), + (f64 (CONST64_Float_Real fpimm:$src2))))>, + Requires<[HasV5T]>; + +def : Pat <(i1 (setugt (f32 IntRegs:$src1), (fpimm:$src2))), + (i1 (FCMPUGT32_rr (f32 IntRegs:$src1), (f32 (TFRI_f fpimm:$src2))))>, + Requires<[HasV5T]>; + +// ult. +def : Pat <(i1 (setult (f32 IntRegs:$src1), (f32 IntRegs:$src2))), + (i1 (FCMPUGT32_rr IntRegs:$src2, IntRegs:$src1))>, + Requires<[HasV5T]>; + +def : Pat <(i1 (setult (f32 IntRegs:$src1), (fpimm:$src2))), + (i1 (FCMPUGT32_rr (f32 (TFRI_f fpimm:$src2)), (f32 IntRegs:$src1)))>, + Requires<[HasV5T]>; + +def : Pat <(i1 (setult (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))), + (i1 (FCMPUGT64_rr DoubleRegs:$src2, DoubleRegs:$src1))>, + Requires<[HasV5T]>; + +def : Pat <(i1 (setult (f64 DoubleRegs:$src1), (fpimm:$src2))), + (i1 (FCMPUGT64_rr (f64 (CONST64_Float_Real fpimm:$src2)), + (f64 DoubleRegs:$src1)))>, + Requires<[HasV5T]>; + +// le. +// rs <= rt -> rt >= rs. +def : Pat<(i1 (setole (f32 IntRegs:$src1), (f32 IntRegs:$src2))), + (i1 (FCMPOGE32_rr IntRegs:$src2, IntRegs:$src1))>, + Requires<[HasV5T]>; + +def : Pat<(i1 (setole (f32 IntRegs:$src1), (fpimm:$src2))), + (i1 (FCMPOGE32_rr (f32 (TFRI_f fpimm:$src2)), IntRegs:$src1))>, + Requires<[HasV5T]>; + + +// Rss <= Rtt -> Rtt >= Rss. +def : Pat<(i1 (setole (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))), + (i1 (FCMPOGE64_rr DoubleRegs:$src2, DoubleRegs:$src1))>, + Requires<[HasV5T]>; + +def : Pat<(i1 (setole (f64 DoubleRegs:$src1), (fpimm:$src2))), + (i1 (FCMPOGE64_rr (f64 (CONST64_Float_Real fpimm:$src2)), + DoubleRegs:$src1))>, + Requires<[HasV5T]>; + +// rs <= rt -> rt >= rs. +def : Pat<(i1 (setule (f32 IntRegs:$src1), (f32 IntRegs:$src2))), + (i1 (FCMPUGE32_rr IntRegs:$src2, IntRegs:$src1))>, + Requires<[HasV5T]>; + +def : Pat<(i1 (setule (f32 IntRegs:$src1), (fpimm:$src2))), + (i1 (FCMPUGE32_rr (f32 (TFRI_f fpimm:$src2)), IntRegs:$src1))>, + Requires<[HasV5T]>; + +// Rss <= Rtt -> Rtt >= Rss. +def : Pat<(i1 (setule (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))), + (i1 (FCMPUGE64_rr DoubleRegs:$src2, DoubleRegs:$src1))>, + Requires<[HasV5T]>; + +def : Pat<(i1 (setule (f64 DoubleRegs:$src1), (fpimm:$src2))), + (i1 (FCMPUGE64_rr (f64 (CONST64_Float_Real fpimm:$src2)), + DoubleRegs:$src1))>, + Requires<[HasV5T]>; + +// ne. 
+def : Pat<(i1 (setone (f32 IntRegs:$src1), (f32 IntRegs:$src2))), + (i1 (NOT_p (FCMPOEQ32_rr IntRegs:$src1, IntRegs:$src2)))>, + Requires<[HasV5T]>; + +def : Pat<(i1 (setone (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))), + (i1 (NOT_p (FCMPOEQ64_rr DoubleRegs:$src1, DoubleRegs:$src2)))>, + Requires<[HasV5T]>; + +def : Pat<(i1 (setune (f32 IntRegs:$src1), (f32 IntRegs:$src2))), + (i1 (NOT_p (FCMPUEQ32_rr IntRegs:$src1, IntRegs:$src2)))>, + Requires<[HasV5T]>; + +def : Pat<(i1 (setune (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))), + (i1 (NOT_p (FCMPUEQ64_rr DoubleRegs:$src1, DoubleRegs:$src2)))>, + Requires<[HasV5T]>; + +def : Pat<(i1 (setone (f32 IntRegs:$src1), (fpimm:$src2))), + (i1 (NOT_p (FCMPOEQ32_rr IntRegs:$src1, (f32 (TFRI_f fpimm:$src2)))))>, + Requires<[HasV5T]>; + +def : Pat<(i1 (setone (f64 DoubleRegs:$src1), (fpimm:$src2))), + (i1 (NOT_p (FCMPOEQ64_rr DoubleRegs:$src1, + (f64 (CONST64_Float_Real fpimm:$src2)))))>, + Requires<[HasV5T]>; + +def : Pat<(i1 (setune (f32 IntRegs:$src1), (fpimm:$src2))), + (i1 (NOT_p (FCMPUEQ32_rr IntRegs:$src1, (f32 (TFRI_f fpimm:$src2)))))>, + Requires<[HasV5T]>; + +def : Pat<(i1 (setune (f64 DoubleRegs:$src1), (fpimm:$src2))), + (i1 (NOT_p (FCMPUEQ64_rr DoubleRegs:$src1, + (f64 (CONST64_Float_Real fpimm:$src2)))))>, + Requires<[HasV5T]>; + +// Convert Integer to Floating Point. +def CONVERT_d2sf : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src), + "$dst = convert_d2sf($src)", + [(set (f32 IntRegs:$dst), (sint_to_fp (i64 DoubleRegs:$src)))]>, + Requires<[HasV5T]>; + +def CONVERT_ud2sf : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src), + "$dst = convert_ud2sf($src)", + [(set (f32 IntRegs:$dst), (uint_to_fp (i64 DoubleRegs:$src)))]>, + Requires<[HasV5T]>; + +def CONVERT_uw2sf : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src), + "$dst = convert_uw2sf($src)", + [(set (f32 IntRegs:$dst), (uint_to_fp (i32 IntRegs:$src)))]>, + Requires<[HasV5T]>; + +def CONVERT_w2sf : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src), + "$dst = convert_w2sf($src)", + [(set (f32 IntRegs:$dst), (sint_to_fp (i32 IntRegs:$src)))]>, + Requires<[HasV5T]>; + +def CONVERT_d2df : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src), + "$dst = convert_d2df($src)", + [(set (f64 DoubleRegs:$dst), (sint_to_fp (i64 DoubleRegs:$src)))]>, + Requires<[HasV5T]>; + +def CONVERT_ud2df : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src), + "$dst = convert_ud2df($src)", + [(set (f64 DoubleRegs:$dst), (uint_to_fp (i64 DoubleRegs:$src)))]>, + Requires<[HasV5T]>; + +def CONVERT_uw2df : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src), + "$dst = convert_uw2df($src)", + [(set (f64 DoubleRegs:$dst), (uint_to_fp (i32 IntRegs:$src)))]>, + Requires<[HasV5T]>; + +def CONVERT_w2df : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src), + "$dst = convert_w2df($src)", + [(set (f64 DoubleRegs:$dst), (sint_to_fp (i32 IntRegs:$src)))]>, + Requires<[HasV5T]>; + +// Convert Floating Point to Integer - default. 
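Both directions are ordinary casts at the source level: integer-to-float uses the convert_*2sf/2df forms above, while float-to-integer defaults to the :chop (truncate-toward-zero) forms below, which matches C/C++ cast semantics; the non-chop forms are only selected when the IEEERndNearV5T feature is set. A sketch (names invented):

  #include <cstdint>

  float    from_i32(int32_t v)  { return static_cast<float>(v);    } // convert_w2sf
  double   from_u64(uint64_t v) { return static_cast<double>(v);   } // convert_ud2df
  int32_t  to_i32(float f)      { return static_cast<int32_t>(f);  } // convert_sf2w (:chop)
  uint64_t to_u64(double d)     { return static_cast<uint64_t>(d); } // convert_df2ud (:chop)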
+def CONVERT_df2uw : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src), + "$dst = convert_df2uw($src):chop", + [(set (i32 IntRegs:$dst), (fp_to_uint (f64 DoubleRegs:$src)))]>, + Requires<[HasV5T]>; + +def CONVERT_df2w : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src), + "$dst = convert_df2w($src):chop", + [(set (i32 IntRegs:$dst), (fp_to_sint (f64 DoubleRegs:$src)))]>, + Requires<[HasV5T]>; + +def CONVERT_sf2uw : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src), + "$dst = convert_sf2uw($src):chop", + [(set (i32 IntRegs:$dst), (fp_to_uint (f32 IntRegs:$src)))]>, + Requires<[HasV5T]>; + +def CONVERT_sf2w : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src), + "$dst = convert_sf2w($src):chop", + [(set (i32 IntRegs:$dst), (fp_to_sint (f32 IntRegs:$src)))]>, + Requires<[HasV5T]>; + +def CONVERT_df2d : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src), + "$dst = convert_df2d($src):chop", + [(set (i64 DoubleRegs:$dst), (fp_to_sint (f64 DoubleRegs:$src)))]>, + Requires<[HasV5T]>; + +def CONVERT_df2ud : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src), + "$dst = convert_df2ud($src):chop", + [(set (i64 DoubleRegs:$dst), (fp_to_uint (f64 DoubleRegs:$src)))]>, + Requires<[HasV5T]>; + +def CONVERT_sf2d : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src), + "$dst = convert_sf2d($src):chop", + [(set (i64 DoubleRegs:$dst), (fp_to_sint (f32 IntRegs:$src)))]>, + Requires<[HasV5T]>; + +def CONVERT_sf2ud : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src), + "$dst = convert_sf2ud($src):chop", + [(set (i64 DoubleRegs:$dst), (fp_to_uint (f32 IntRegs:$src)))]>, + Requires<[HasV5T]>; + +// Convert Floating Point to Integer: non-chopped. +let AddedComplexity = 20 in +def CONVERT_df2uw_nchop : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src), + "$dst = convert_df2uw($src)", + [(set (i32 IntRegs:$dst), (fp_to_uint (f64 DoubleRegs:$src)))]>, + Requires<[HasV5T, IEEERndNearV5T]>; + +let AddedComplexity = 20 in +def CONVERT_df2w_nchop : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src), + "$dst = convert_df2w($src)", + [(set (i32 IntRegs:$dst), (fp_to_sint (f64 DoubleRegs:$src)))]>, + Requires<[HasV5T, IEEERndNearV5T]>; + +let AddedComplexity = 20 in +def CONVERT_sf2uw_nchop : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src), + "$dst = convert_sf2uw($src)", + [(set (i32 IntRegs:$dst), (fp_to_uint (f32 IntRegs:$src)))]>, + Requires<[HasV5T, IEEERndNearV5T]>; + +let AddedComplexity = 20 in +def CONVERT_sf2w_nchop : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src), + "$dst = convert_sf2w($src)", + [(set (i32 IntRegs:$dst), (fp_to_sint (f32 IntRegs:$src)))]>, + Requires<[HasV5T, IEEERndNearV5T]>; + +let AddedComplexity = 20 in +def CONVERT_df2d_nchop : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src), + "$dst = convert_df2d($src)", + [(set (i64 DoubleRegs:$dst), (fp_to_sint (f64 DoubleRegs:$src)))]>, + Requires<[HasV5T, IEEERndNearV5T]>; + +let AddedComplexity = 20 in +def CONVERT_df2ud_nchop : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src), + "$dst = convert_df2ud($src)", + [(set (i64 DoubleRegs:$dst), (fp_to_uint (f64 DoubleRegs:$src)))]>, + Requires<[HasV5T, IEEERndNearV5T]>; + +let AddedComplexity = 20 in +def CONVERT_sf2d_nchop : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src), + "$dst = convert_sf2d($src)", + [(set (i64 DoubleRegs:$dst), (fp_to_sint (f32 IntRegs:$src)))]>, + Requires<[HasV5T, IEEERndNearV5T]>; + +let AddedComplexity = 20 in +def CONVERT_sf2ud_nchop : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src), + "$dst = convert_sf2ud($src)", + [(set (i64 
DoubleRegs:$dst), (fp_to_uint (f32 IntRegs:$src)))]>, + Requires<[HasV5T, IEEERndNearV5T]>; + + + +// Bitcast is different than [fp|sint|uint]_to_[sint|uint|fp]. +def : Pat <(i32 (bitconvert (f32 IntRegs:$src))), + (i32 (TFR IntRegs:$src))>, + Requires<[HasV5T]>; + +def : Pat <(f32 (bitconvert (i32 IntRegs:$src))), + (f32 (TFR IntRegs:$src))>, + Requires<[HasV5T]>; + +def : Pat <(i64 (bitconvert (f64 DoubleRegs:$src))), + (i64 (TFR64 DoubleRegs:$src))>, + Requires<[HasV5T]>; + +def : Pat <(f64 (bitconvert (i64 DoubleRegs:$src))), + (f64 (TFR64 DoubleRegs:$src))>, + Requires<[HasV5T]>; + +// Floating point fused multiply-add. +def FMADD_dp : ALU64_acc<(outs DoubleRegs:$dst), + (ins DoubleRegs:$src1, DoubleRegs:$src2, DoubleRegs:$src3), + "$dst += dfmpy($src2, $src3)", + [(set (f64 DoubleRegs:$dst), + (fma DoubleRegs:$src2, DoubleRegs:$src3, DoubleRegs:$src1))], + "$src1 = $dst">, + Requires<[HasV5T]>; + +def FMADD_sp : ALU64_acc<(outs IntRegs:$dst), + (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3), + "$dst += sfmpy($src2, $src3)", + [(set (f32 IntRegs:$dst), + (fma IntRegs:$src2, IntRegs:$src3, IntRegs:$src1))], + "$src1 = $dst">, + Requires<[HasV5T]>; + + +// Floating point max/min. +let AddedComplexity = 100 in +def FMAX_dp : ALU64_rr<(outs DoubleRegs:$dst), + (ins DoubleRegs:$src1, DoubleRegs:$src2), + "$dst = dfmax($src1, $src2)", + [(set DoubleRegs:$dst, (f64 (select (i1 (setolt DoubleRegs:$src2, + DoubleRegs:$src1)), + DoubleRegs:$src1, + DoubleRegs:$src2)))]>, + Requires<[HasV5T]>; + +let AddedComplexity = 100 in +def FMAX_sp : ALU64_rr<(outs IntRegs:$dst), + (ins IntRegs:$src1, IntRegs:$src2), + "$dst = sfmax($src1, $src2)", + [(set IntRegs:$dst, (f32 (select (i1 (setolt IntRegs:$src2, + IntRegs:$src1)), + IntRegs:$src1, + IntRegs:$src2)))]>, + Requires<[HasV5T]>; + +let AddedComplexity = 100 in +def FMIN_dp : ALU64_rr<(outs DoubleRegs:$dst), + (ins DoubleRegs:$src1, DoubleRegs:$src2), + "$dst = dfmin($src1, $src2)", + [(set DoubleRegs:$dst, (f64 (select (i1 (setogt DoubleRegs:$src2, + DoubleRegs:$src1)), + DoubleRegs:$src1, + DoubleRegs:$src2)))]>, + Requires<[HasV5T]>; + +let AddedComplexity = 100 in +def FMIN_sp : ALU64_rr<(outs IntRegs:$dst), + (ins IntRegs:$src1, IntRegs:$src2), + "$dst = sfmin($src1, $src2)", + [(set IntRegs:$dst, (f32 (select (i1 (setogt IntRegs:$src2, + IntRegs:$src1)), + IntRegs:$src1, + IntRegs:$src2)))]>, + Requires<[HasV5T]>; + +// Pseudo instruction to encode a set of conditional transfers. +// This instruction is used instead of a mux and trades-off codesize +// for performance. We conduct this transformation optimistically in +// the hope that these instructions get promoted to dot-new transfers. 
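FMADD_sp/FMADD_dp above match the target-independent fma node, and the sfmax/sfmin/dfmax/dfmin definitions match a select keyed on an ordered compare of the same two operands rather than a dedicated min/max node. A short C sketch of those idioms, assuming the usual libm entry points; which pattern actually fires for a given source construct is up to instruction selection.

#include <math.h>

/* Illustrative idioms for the fma and min/max patterns above (assumed,
 * not verified against emitted assembly). */
float  fmadd_sp(float acc, float x, float y)    { return fmaf(x, y, acc); } /* fma node: acc += x*y  */
double fmadd_dp(double acc, double x, double y) { return fma(x, y, acc);  } /* 64-bit form, FMADD_dp */
float  fmax_sp(float a, float b) { return (b < a) ? a : b; } /* select(setolt(b,a), a, b) -> sfmax */
float  fmin_sp(float a, float b) { return (b > a) ? a : b; } /* select(setogt(b,a), a, b) -> sfmin */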
+let AddedComplexity = 100, isPredicated = 1 in +def TFR_condset_rr_f : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1, + IntRegs:$src2, + IntRegs:$src3), + "Error; should not emit", + [(set IntRegs:$dst, (f32 (select PredRegs:$src1, + IntRegs:$src2, + IntRegs:$src3)))]>, + Requires<[HasV5T]>; + +let AddedComplexity = 100, isPredicated = 1 in +def TFR_condset_rr64_f : ALU32_rr<(outs DoubleRegs:$dst), (ins PredRegs:$src1, + DoubleRegs:$src2, + DoubleRegs:$src3), + "Error; should not emit", + [(set DoubleRegs:$dst, (f64 (select PredRegs:$src1, + DoubleRegs:$src2, + DoubleRegs:$src3)))]>, + Requires<[HasV5T]>; + + + +let AddedComplexity = 100, isPredicated = 1 in +def TFR_condset_ri_f : ALU32_rr<(outs IntRegs:$dst), + (ins PredRegs:$src1, IntRegs:$src2, f32imm:$src3), + "Error; should not emit", + [(set IntRegs:$dst, + (f32 (select PredRegs:$src1, IntRegs:$src2, fpimm:$src3)))]>, + Requires<[HasV5T]>; + +let AddedComplexity = 100, isPredicated = 1 in +def TFR_condset_ir_f : ALU32_rr<(outs IntRegs:$dst), + (ins PredRegs:$src1, f32imm:$src2, IntRegs:$src3), + "Error; should not emit", + [(set IntRegs:$dst, + (f32 (select PredRegs:$src1, fpimm:$src2, IntRegs:$src3)))]>, + Requires<[HasV5T]>; + +let AddedComplexity = 100, isPredicated = 1 in +def TFR_condset_ii_f : ALU32_rr<(outs IntRegs:$dst), + (ins PredRegs:$src1, f32imm:$src2, f32imm:$src3), + "Error; should not emit", + [(set IntRegs:$dst, (f32 (select PredRegs:$src1, + fpimm:$src2, + fpimm:$src3)))]>, + Requires<[HasV5T]>; + + +def : Pat <(select (i1 (setult (f32 IntRegs:$src1), (f32 IntRegs:$src2))), + (f32 IntRegs:$src3), + (f32 IntRegs:$src4)), + (TFR_condset_rr_f (FCMPUGT32_rr IntRegs:$src2, IntRegs:$src1), IntRegs:$src4, + IntRegs:$src3)>, Requires<[HasV5T]>; + +def : Pat <(select (i1 (setult (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))), + (f64 DoubleRegs:$src3), + (f64 DoubleRegs:$src4)), + (TFR_condset_rr64_f (FCMPUGT64_rr DoubleRegs:$src2, DoubleRegs:$src1), + DoubleRegs:$src4, DoubleRegs:$src3)>, Requires<[HasV5T]>; + +// Map from p0 = pnot(p0); r0 = mux(p0, #i, #j) => r0 = mux(p0, #j, #i). 
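The TFR_condset_*_f definitions are selection-only pseudos (hence the "Error; should not emit" assembly string): they keep a floating-point select in one instruction so a later pass can expand it into predicated transfers, as the preceding comment explains. Below is a hedged C sketch of selects these patterns are written against (register/register, register/immediate, immediate/immediate); the mapping is inferred from the patterns, not verified.

/* Illustrative FP selects for the TFR_condset_*_f pseudos above. */
float sel_rr(int p, float a, float b) { return p ? a : b;       }
float sel_ri(int p, float a)          { return p ? a : 0.5f;    }
float sel_ii(int p)                   { return p ? 2.0f : 3.0f; }
double sel_cmp(double a, double b, double x, double y) {
  return (a < b) ? x : y;  /* select keyed on an FP compare; cf. the setult-select patterns above */
}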
+def : Pat <(select (not PredRegs:$src1), fpimm:$src2, fpimm:$src3), + (TFR_condset_ii_f PredRegs:$src1, fpimm:$src3, fpimm:$src2)>; + +// Map from p0 = pnot(p0); r0 = select(p0, #i, r1) +// => r0 = TFR_condset_ri(p0, r1, #i) +def : Pat <(select (not PredRegs:$src1), fpimm:$src2, IntRegs:$src3), + (TFR_condset_ri_f PredRegs:$src1, IntRegs:$src3, fpimm:$src2)>; + +// Map from p0 = pnot(p0); r0 = mux(p0, r1, #i) +// => r0 = TFR_condset_ir(p0, #i, r1) +def : Pat <(select (not PredRegs:$src1), IntRegs:$src2, fpimm:$src3), + (TFR_condset_ir_f PredRegs:$src1, fpimm:$src3, IntRegs:$src2)>; + +def : Pat <(i32 (fp_to_sint (f64 DoubleRegs:$src1))), + (i32 (EXTRACT_SUBREG (i64 (CONVERT_df2d (f64 DoubleRegs:$src1))), subreg_loreg))>, + Requires<[HasV5T]>; + +def : Pat <(fabs (f32 IntRegs:$src1)), + (CLRBIT_31 (f32 IntRegs:$src1), 31)>, + Requires<[HasV5T]>; + +def : Pat <(fneg (f32 IntRegs:$src1)), + (TOGBIT_31 (f32 IntRegs:$src1), 31)>, + Requires<[HasV5T]>; + +/* +def : Pat <(fabs (f64 DoubleRegs:$src1)), + (CLRBIT_31 (f32 (EXTRACT_SUBREG DoubleRegs:$src1, subreg_hireg)), 31)>, + Requires<[HasV5T]>; + +def : Pat <(fabs (f64 DoubleRegs:$src1)), + (CLRBIT_31 (f32 (EXTRACT_SUBREG DoubleRegs:$src1, subreg_hireg)), 31)>, + Requires<[HasV5T]>; + */ diff --git a/lib/Target/Hexagon/HexagonIntrinsics.td b/lib/Target/Hexagon/HexagonIntrinsics.td index b15e293fdf..1ffdc41cd1 100644 --- a/lib/Target/Hexagon/HexagonIntrinsics.td +++ b/lib/Target/Hexagon/HexagonIntrinsics.td @@ -551,13 +551,6 @@ class di_SInst_diu6u6<string opc, Intrinsic IntID> [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2, imm:$src3))]>; -class di_SInst_didisi<string opc, Intrinsic IntID> - : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2, - IntRegs:$src3), - !strconcat("$dst = ", !strconcat(opc , "($src1, $src2, $src3)")), - [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2, - IntRegs:$src3))]>; - class di_SInst_didiqi<string opc, Intrinsic IntID> : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3), @@ -952,6 +945,17 @@ class si_SInst_sisi_sat<string opc, Intrinsic IntID> !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):sat")), [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>; +class si_SInst_didi_sat<string opc, Intrinsic IntID> + : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2), + !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):sat")), + [(set IntRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2))]>; + +class si_SInst_disi_s1_rnd_sat<string opc, Intrinsic IntID> + : MInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2), + !strconcat("$dst = ", !strconcat(opc , + "($src1, $src2):<<1:rnd:sat")), + [(set IntRegs:$dst, (IntID DoubleRegs:$src1, IntRegs:$src2))]>; + class si_MInst_sisi_s1_rnd_sat<string opc, Intrinsic IntID> : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), !strconcat("$dst = ", !strconcat(opc , @@ -1612,6 +1616,18 @@ class di_MInst_dididi_acc_rnd_sat<string opc, Intrinsic IntID> DoubleRegs:$src2))], "$dst2 = $dst">; +class di_MInst_dididi_acc_s1<string opc, Intrinsic IntID> + : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, + DoubleRegs:$src1, + DoubleRegs:$src2), + !strconcat("$dst += ", + !strconcat(opc , "($src1, $src2):<<1")), + [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, + DoubleRegs:$src1, + DoubleRegs:$src2))], + "$dst2 = $dst">; + + class di_MInst_dididi_acc_s1_sat<string opc, Intrinsic IntID> : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, 
DoubleRegs:$src1, @@ -1822,53 +1838,63 @@ class si_MInst_didi<string opc, Intrinsic IntID> !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")), [(set IntRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2))]>; +// +// LDInst classes. +// +let mayLoad = 1, neverHasSideEffects = 1 in +class di_LDInstPI_diu4<string opc, Intrinsic IntID> + : LDInstPI<(outs IntRegs:$dst, DoubleRegs:$dst2), + (ins IntRegs:$src1, IntRegs:$src2, CRRegs:$src3, s4Imm:$offset), + "$dst2 = memd($src1++#$offset:circ($src3))", + [], + "$src1 = $dst">; /******************************************************************** * ALU32/ALU * *********************************************************************/ // ALU32 / ALU / Add. -def Hexagon_A2_add: +def HEXAGON_A2_add: si_ALU32_sisi <"add", int_hexagon_A2_add>; -def Hexagon_A2_addi: +def HEXAGON_A2_addi: si_ALU32_sis16 <"add", int_hexagon_A2_addi>; // ALU32 / ALU / Logical operations. -def Hexagon_A2_and: +def HEXAGON_A2_and: si_ALU32_sisi <"and", int_hexagon_A2_and>; -def Hexagon_A2_andir: +def HEXAGON_A2_andir: si_ALU32_sis10 <"and", int_hexagon_A2_andir>; -def Hexagon_A2_not: +def HEXAGON_A2_not: si_ALU32_si <"not", int_hexagon_A2_not>; -def Hexagon_A2_or: +def HEXAGON_A2_or: si_ALU32_sisi <"or", int_hexagon_A2_or>; -def Hexagon_A2_orir: +def HEXAGON_A2_orir: si_ALU32_sis10 <"or", int_hexagon_A2_orir>; -def Hexagon_A2_xor: +def HEXAGON_A2_xor: si_ALU32_sisi <"xor", int_hexagon_A2_xor>; // ALU32 / ALU / Negate. -def Hexagon_A2_neg: +def HEXAGON_A2_neg: si_ALU32_si <"neg", int_hexagon_A2_neg>; // ALU32 / ALU / Subtract. -def Hexagon_A2_sub: +def HEXAGON_A2_sub: si_ALU32_sisi <"sub", int_hexagon_A2_sub>; -def Hexagon_A2_subri: +def HEXAGON_A2_subri: si_ALU32_s10si <"sub", int_hexagon_A2_subri>; // ALU32 / ALU / Transfer Immediate. -def Hexagon_A2_tfril: +def HEXAGON_A2_tfril: si_lo_ALU32_siu16 <"", int_hexagon_A2_tfril>; -def Hexagon_A2_tfrih: +def HEXAGON_A2_tfrih: si_hi_ALU32_siu16 <"", int_hexagon_A2_tfrih>; -def Hexagon_A2_tfrsi: +def HEXAGON_A2_tfrsi: si_ALU32_s16 <"", int_hexagon_A2_tfrsi>; -def Hexagon_A2_tfrpi: +def HEXAGON_A2_tfrpi: di_ALU32_s8 <"", int_hexagon_A2_tfrpi>; // ALU32 / ALU / Transfer Register. -def Hexagon_A2_tfr: +def HEXAGON_A2_tfr: si_ALU32_si_tfr <"", int_hexagon_A2_tfr>; /******************************************************************** @@ -1876,45 +1902,45 @@ def Hexagon_A2_tfr: *********************************************************************/ // ALU32 / PERM / Combine. -def Hexagon_A2_combinew: +def HEXAGON_A2_combinew: di_ALU32_sisi <"combine", int_hexagon_A2_combinew>; -def Hexagon_A2_combine_hh: +def HEXAGON_A2_combine_hh: si_MInst_sisi_hh <"combine", int_hexagon_A2_combine_hh>; -def Hexagon_A2_combine_lh: +def HEXAGON_A2_combine_lh: si_MInst_sisi_lh <"combine", int_hexagon_A2_combine_lh>; -def Hexagon_A2_combine_hl: +def HEXAGON_A2_combine_hl: si_MInst_sisi_hl <"combine", int_hexagon_A2_combine_hl>; -def Hexagon_A2_combine_ll: +def HEXAGON_A2_combine_ll: si_MInst_sisi_ll <"combine", int_hexagon_A2_combine_ll>; -def Hexagon_A2_combineii: +def HEXAGON_A2_combineii: di_MInst_s8s8 <"combine", int_hexagon_A2_combineii>; // ALU32 / PERM / Mux. 
-def Hexagon_C2_mux: +def HEXAGON_C2_mux: si_ALU32_qisisi <"mux", int_hexagon_C2_mux>; -def Hexagon_C2_muxri: +def HEXAGON_C2_muxri: si_ALU32_qis8si <"mux", int_hexagon_C2_muxri>; -def Hexagon_C2_muxir: +def HEXAGON_C2_muxir: si_ALU32_qisis8 <"mux", int_hexagon_C2_muxir>; -def Hexagon_C2_muxii: +def HEXAGON_C2_muxii: si_ALU32_qis8s8 <"mux", int_hexagon_C2_muxii>; // ALU32 / PERM / Shift halfword. -def Hexagon_A2_aslh: +def HEXAGON_A2_aslh: si_ALU32_si <"aslh", int_hexagon_A2_aslh>; -def Hexagon_A2_asrh: +def HEXAGON_A2_asrh: si_ALU32_si <"asrh", int_hexagon_A2_asrh>; def SI_to_SXTHI_asrh: si_ALU32_si <"asrh", int_hexagon_SI_to_SXTHI_asrh>; // ALU32 / PERM / Sign/zero extend. -def Hexagon_A2_sxth: +def HEXAGON_A2_sxth: si_ALU32_si <"sxth", int_hexagon_A2_sxth>; -def Hexagon_A2_sxtb: +def HEXAGON_A2_sxtb: si_ALU32_si <"sxtb", int_hexagon_A2_sxtb>; -def Hexagon_A2_zxth: +def HEXAGON_A2_zxth: si_ALU32_si <"zxth", int_hexagon_A2_zxth>; -def Hexagon_A2_zxtb: +def HEXAGON_A2_zxtb: si_ALU32_si <"zxtb", int_hexagon_A2_zxtb>; /******************************************************************** @@ -1922,25 +1948,25 @@ def Hexagon_A2_zxtb: *********************************************************************/ // ALU32 / PRED / Compare. -def Hexagon_C2_cmpeq: +def HEXAGON_C2_cmpeq: qi_ALU32_sisi <"cmp.eq", int_hexagon_C2_cmpeq>; -def Hexagon_C2_cmpeqi: +def HEXAGON_C2_cmpeqi: qi_ALU32_sis10 <"cmp.eq", int_hexagon_C2_cmpeqi>; -def Hexagon_C2_cmpgei: +def HEXAGON_C2_cmpgei: qi_ALU32_sis8 <"cmp.ge", int_hexagon_C2_cmpgei>; -def Hexagon_C2_cmpgeui: +def HEXAGON_C2_cmpgeui: qi_ALU32_siu8 <"cmp.geu", int_hexagon_C2_cmpgeui>; -def Hexagon_C2_cmpgt: +def HEXAGON_C2_cmpgt: qi_ALU32_sisi <"cmp.gt", int_hexagon_C2_cmpgt>; -def Hexagon_C2_cmpgti: +def HEXAGON_C2_cmpgti: qi_ALU32_sis10 <"cmp.gt", int_hexagon_C2_cmpgti>; -def Hexagon_C2_cmpgtu: +def HEXAGON_C2_cmpgtu: qi_ALU32_sisi <"cmp.gtu", int_hexagon_C2_cmpgtu>; -def Hexagon_C2_cmpgtui: +def HEXAGON_C2_cmpgtui: qi_ALU32_siu9 <"cmp.gtu", int_hexagon_C2_cmpgtui>; -def Hexagon_C2_cmplt: +def HEXAGON_C2_cmplt: qi_ALU32_sisi <"cmp.lt", int_hexagon_C2_cmplt>; -def Hexagon_C2_cmpltu: +def HEXAGON_C2_cmpltu: qi_ALU32_sisi <"cmp.ltu", int_hexagon_C2_cmpltu>; /******************************************************************** @@ -1949,27 +1975,27 @@ def Hexagon_C2_cmpltu: // ALU32 / VH / Vector add halfwords. // Rd32=vadd[u]h(Rs32,Rt32:sat] -def Hexagon_A2_svaddh: +def HEXAGON_A2_svaddh: si_ALU32_sisi <"vaddh", int_hexagon_A2_svaddh>; -def Hexagon_A2_svaddhs: +def HEXAGON_A2_svaddhs: si_ALU32_sisi_sat <"vaddh", int_hexagon_A2_svaddhs>; -def Hexagon_A2_svadduhs: +def HEXAGON_A2_svadduhs: si_ALU32_sisi_sat <"vadduh", int_hexagon_A2_svadduhs>; // ALU32 / VH / Vector average halfwords. -def Hexagon_A2_svavgh: +def HEXAGON_A2_svavgh: si_ALU32_sisi <"vavgh", int_hexagon_A2_svavgh>; -def Hexagon_A2_svavghs: +def HEXAGON_A2_svavghs: si_ALU32_sisi_rnd <"vavgh", int_hexagon_A2_svavghs>; -def Hexagon_A2_svnavgh: +def HEXAGON_A2_svnavgh: si_ALU32_sisi <"vnavgh", int_hexagon_A2_svnavgh>; // ALU32 / VH / Vector subtract halfwords. 
-def Hexagon_A2_svsubh: +def HEXAGON_A2_svsubh: si_ALU32_sisi <"vsubh", int_hexagon_A2_svsubh>; -def Hexagon_A2_svsubhs: +def HEXAGON_A2_svsubhs: si_ALU32_sisi_sat <"vsubh", int_hexagon_A2_svsubhs>; -def Hexagon_A2_svsubuhs: +def HEXAGON_A2_svsubuhs: si_ALU32_sisi_sat <"vsubuh", int_hexagon_A2_svsubuhs>; /******************************************************************** @@ -1977,109 +2003,109 @@ def Hexagon_A2_svsubuhs: *********************************************************************/ // ALU64 / ALU / Add. -def Hexagon_A2_addp: +def HEXAGON_A2_addp: di_ALU64_didi <"add", int_hexagon_A2_addp>; -def Hexagon_A2_addsat: +def HEXAGON_A2_addsat: si_ALU64_sisi_sat <"add", int_hexagon_A2_addsat>; // ALU64 / ALU / Add halfword. // Even though the definition says hl, it should be lh - //so DON'T change the class " si_ALU64_sisi_l16_lh " it inherits. -def Hexagon_A2_addh_l16_hl: +def HEXAGON_A2_addh_l16_hl: si_ALU64_sisi_l16_lh <"add", int_hexagon_A2_addh_l16_hl>; -def Hexagon_A2_addh_l16_ll: +def HEXAGON_A2_addh_l16_ll: si_ALU64_sisi_l16_ll <"add", int_hexagon_A2_addh_l16_ll>; -def Hexagon_A2_addh_l16_sat_hl: +def HEXAGON_A2_addh_l16_sat_hl: si_ALU64_sisi_l16_sat_lh <"add", int_hexagon_A2_addh_l16_sat_hl>; -def Hexagon_A2_addh_l16_sat_ll: +def HEXAGON_A2_addh_l16_sat_ll: si_ALU64_sisi_l16_sat_ll <"add", int_hexagon_A2_addh_l16_sat_ll>; -def Hexagon_A2_addh_h16_hh: +def HEXAGON_A2_addh_h16_hh: si_ALU64_sisi_h16_hh <"add", int_hexagon_A2_addh_h16_hh>; -def Hexagon_A2_addh_h16_hl: +def HEXAGON_A2_addh_h16_hl: si_ALU64_sisi_h16_hl <"add", int_hexagon_A2_addh_h16_hl>; -def Hexagon_A2_addh_h16_lh: +def HEXAGON_A2_addh_h16_lh: si_ALU64_sisi_h16_lh <"add", int_hexagon_A2_addh_h16_lh>; -def Hexagon_A2_addh_h16_ll: +def HEXAGON_A2_addh_h16_ll: si_ALU64_sisi_h16_ll <"add", int_hexagon_A2_addh_h16_ll>; -def Hexagon_A2_addh_h16_sat_hh: +def HEXAGON_A2_addh_h16_sat_hh: si_ALU64_sisi_h16_sat_hh <"add", int_hexagon_A2_addh_h16_sat_hh>; -def Hexagon_A2_addh_h16_sat_hl: +def HEXAGON_A2_addh_h16_sat_hl: si_ALU64_sisi_h16_sat_hl <"add", int_hexagon_A2_addh_h16_sat_hl>; -def Hexagon_A2_addh_h16_sat_lh: +def HEXAGON_A2_addh_h16_sat_lh: si_ALU64_sisi_h16_sat_lh <"add", int_hexagon_A2_addh_h16_sat_lh>; -def Hexagon_A2_addh_h16_sat_ll: +def HEXAGON_A2_addh_h16_sat_ll: si_ALU64_sisi_h16_sat_ll <"add", int_hexagon_A2_addh_h16_sat_ll>; // ALU64 / ALU / Compare. -def Hexagon_C2_cmpeqp: +def HEXAGON_C2_cmpeqp: qi_ALU64_didi <"cmp.eq", int_hexagon_C2_cmpeqp>; -def Hexagon_C2_cmpgtp: +def HEXAGON_C2_cmpgtp: qi_ALU64_didi <"cmp.gt", int_hexagon_C2_cmpgtp>; -def Hexagon_C2_cmpgtup: +def HEXAGON_C2_cmpgtup: qi_ALU64_didi <"cmp.gtu", int_hexagon_C2_cmpgtup>; // ALU64 / ALU / Logical operations. -def Hexagon_A2_andp: +def HEXAGON_A2_andp: di_ALU64_didi <"and", int_hexagon_A2_andp>; -def Hexagon_A2_orp: +def HEXAGON_A2_orp: di_ALU64_didi <"or", int_hexagon_A2_orp>; -def Hexagon_A2_xorp: +def HEXAGON_A2_xorp: di_ALU64_didi <"xor", int_hexagon_A2_xorp>; // ALU64 / ALU / Maximum. -def Hexagon_A2_max: +def HEXAGON_A2_max: si_ALU64_sisi <"max", int_hexagon_A2_max>; -def Hexagon_A2_maxu: +def HEXAGON_A2_maxu: si_ALU64_sisi <"maxu", int_hexagon_A2_maxu>; // ALU64 / ALU / Minimum. -def Hexagon_A2_min: +def HEXAGON_A2_min: si_ALU64_sisi <"min", int_hexagon_A2_min>; -def Hexagon_A2_minu: +def HEXAGON_A2_minu: si_ALU64_sisi <"minu", int_hexagon_A2_minu>; // ALU64 / ALU / Subtract. 
-def Hexagon_A2_subp: +def HEXAGON_A2_subp: di_ALU64_didi <"sub", int_hexagon_A2_subp>; -def Hexagon_A2_subsat: +def HEXAGON_A2_subsat: si_ALU64_sisi_sat <"sub", int_hexagon_A2_subsat>; // ALU64 / ALU / Subtract halfword. // Even though the definition says hl, it should be lh - //so DON'T change the class " si_ALU64_sisi_l16_lh " it inherits. -def Hexagon_A2_subh_l16_hl: +def HEXAGON_A2_subh_l16_hl: si_ALU64_sisi_l16_lh <"sub", int_hexagon_A2_subh_l16_hl>; -def Hexagon_A2_subh_l16_ll: +def HEXAGON_A2_subh_l16_ll: si_ALU64_sisi_l16_ll <"sub", int_hexagon_A2_subh_l16_ll>; -def Hexagon_A2_subh_l16_sat_hl: +def HEXAGON_A2_subh_l16_sat_hl: si_ALU64_sisi_l16_sat_lh <"sub", int_hexagon_A2_subh_l16_sat_hl>; -def Hexagon_A2_subh_l16_sat_ll: +def HEXAGON_A2_subh_l16_sat_ll: si_ALU64_sisi_l16_sat_ll <"sub", int_hexagon_A2_subh_l16_sat_ll>; -def Hexagon_A2_subh_h16_hh: +def HEXAGON_A2_subh_h16_hh: si_ALU64_sisi_h16_hh <"sub", int_hexagon_A2_subh_h16_hh>; -def Hexagon_A2_subh_h16_hl: +def HEXAGON_A2_subh_h16_hl: si_ALU64_sisi_h16_hl <"sub", int_hexagon_A2_subh_h16_hl>; -def Hexagon_A2_subh_h16_lh: +def HEXAGON_A2_subh_h16_lh: si_ALU64_sisi_h16_lh <"sub", int_hexagon_A2_subh_h16_lh>; -def Hexagon_A2_subh_h16_ll: +def HEXAGON_A2_subh_h16_ll: si_ALU64_sisi_h16_ll <"sub", int_hexagon_A2_subh_h16_ll>; -def Hexagon_A2_subh_h16_sat_hh: +def HEXAGON_A2_subh_h16_sat_hh: si_ALU64_sisi_h16_sat_hh <"sub", int_hexagon_A2_subh_h16_sat_hh>; -def Hexagon_A2_subh_h16_sat_hl: +def HEXAGON_A2_subh_h16_sat_hl: si_ALU64_sisi_h16_sat_hl <"sub", int_hexagon_A2_subh_h16_sat_hl>; -def Hexagon_A2_subh_h16_sat_lh: +def HEXAGON_A2_subh_h16_sat_lh: si_ALU64_sisi_h16_sat_lh <"sub", int_hexagon_A2_subh_h16_sat_lh>; -def Hexagon_A2_subh_h16_sat_ll: +def HEXAGON_A2_subh_h16_sat_ll: si_ALU64_sisi_h16_sat_ll <"sub", int_hexagon_A2_subh_h16_sat_ll>; // ALU64 / ALU / Transfer register. -def Hexagon_A2_tfrp: +def HEXAGON_A2_tfrp: di_ALU64_di <"", int_hexagon_A2_tfrp>; /******************************************************************** @@ -2087,7 +2113,7 @@ def Hexagon_A2_tfrp: *********************************************************************/ // ALU64 / BIT / Masked parity. -def Hexagon_S2_parityp: +def HEXAGON_S2_parityp: si_ALU64_didi <"parity", int_hexagon_S2_parityp>; /******************************************************************** @@ -2095,7 +2121,7 @@ def Hexagon_S2_parityp: *********************************************************************/ // ALU64 / PERM / Vector pack high and low halfwords. -def Hexagon_S2_packhl: +def HEXAGON_S2_packhl: di_ALU64_sisi <"packhl", int_hexagon_S2_packhl>; /******************************************************************** @@ -2103,37 +2129,37 @@ def Hexagon_S2_packhl: *********************************************************************/ // ALU64 / VB / Vector add unsigned bytes. -def Hexagon_A2_vaddub: +def HEXAGON_A2_vaddub: di_ALU64_didi <"vaddub", int_hexagon_A2_vaddub>; -def Hexagon_A2_vaddubs: +def HEXAGON_A2_vaddubs: di_ALU64_didi_sat <"vaddub", int_hexagon_A2_vaddubs>; // ALU64 / VB / Vector average unsigned bytes. -def Hexagon_A2_vavgub: +def HEXAGON_A2_vavgub: di_ALU64_didi <"vavgub", int_hexagon_A2_vavgub>; -def Hexagon_A2_vavgubr: +def HEXAGON_A2_vavgubr: di_ALU64_didi_rnd <"vavgub", int_hexagon_A2_vavgubr>; // ALU64 / VB / Vector compare unsigned bytes. 
-def Hexagon_A2_vcmpbeq: +def HEXAGON_A2_vcmpbeq: qi_ALU64_didi <"vcmpb.eq", int_hexagon_A2_vcmpbeq>; -def Hexagon_A2_vcmpbgtu: +def HEXAGON_A2_vcmpbgtu: qi_ALU64_didi <"vcmpb.gtu",int_hexagon_A2_vcmpbgtu>; // ALU64 / VB / Vector maximum/minimum unsigned bytes. -def Hexagon_A2_vmaxub: +def HEXAGON_A2_vmaxub: di_ALU64_didi <"vmaxub", int_hexagon_A2_vmaxub>; -def Hexagon_A2_vminub: +def HEXAGON_A2_vminub: di_ALU64_didi <"vminub", int_hexagon_A2_vminub>; // ALU64 / VB / Vector subtract unsigned bytes. -def Hexagon_A2_vsubub: +def HEXAGON_A2_vsubub: di_ALU64_didi <"vsubub", int_hexagon_A2_vsubub>; -def Hexagon_A2_vsububs: +def HEXAGON_A2_vsububs: di_ALU64_didi_sat <"vsubub", int_hexagon_A2_vsububs>; // ALU64 / VB / Vector mux. -def Hexagon_C2_vmux: +def HEXAGON_C2_vmux: di_ALU64_qididi <"vmux", int_hexagon_C2_vmux>; @@ -2143,58 +2169,58 @@ def Hexagon_C2_vmux: // ALU64 / VH / Vector add halfwords. // Rdd64=vadd[u]h(Rss64,Rtt64:sat] -def Hexagon_A2_vaddh: +def HEXAGON_A2_vaddh: di_ALU64_didi <"vaddh", int_hexagon_A2_vaddh>; -def Hexagon_A2_vaddhs: +def HEXAGON_A2_vaddhs: di_ALU64_didi_sat <"vaddh", int_hexagon_A2_vaddhs>; -def Hexagon_A2_vadduhs: +def HEXAGON_A2_vadduhs: di_ALU64_didi_sat <"vadduh", int_hexagon_A2_vadduhs>; // ALU64 / VH / Vector average halfwords. // Rdd64=v[n]avg[u]h(Rss64,Rtt64:rnd/:crnd][:sat] -def Hexagon_A2_vavgh: +def HEXAGON_A2_vavgh: di_ALU64_didi <"vavgh", int_hexagon_A2_vavgh>; -def Hexagon_A2_vavghcr: +def HEXAGON_A2_vavghcr: di_ALU64_didi_crnd <"vavgh", int_hexagon_A2_vavghcr>; -def Hexagon_A2_vavghr: +def HEXAGON_A2_vavghr: di_ALU64_didi_rnd <"vavgh", int_hexagon_A2_vavghr>; -def Hexagon_A2_vavguh: +def HEXAGON_A2_vavguh: di_ALU64_didi <"vavguh", int_hexagon_A2_vavguh>; -def Hexagon_A2_vavguhr: +def HEXAGON_A2_vavguhr: di_ALU64_didi_rnd <"vavguh", int_hexagon_A2_vavguhr>; -def Hexagon_A2_vnavgh: +def HEXAGON_A2_vnavgh: di_ALU64_didi <"vnavgh", int_hexagon_A2_vnavgh>; -def Hexagon_A2_vnavghcr: +def HEXAGON_A2_vnavghcr: di_ALU64_didi_crnd_sat <"vnavgh", int_hexagon_A2_vnavghcr>; -def Hexagon_A2_vnavghr: +def HEXAGON_A2_vnavghr: di_ALU64_didi_rnd_sat <"vnavgh", int_hexagon_A2_vnavghr>; // ALU64 / VH / Vector compare halfwords. -def Hexagon_A2_vcmpheq: +def HEXAGON_A2_vcmpheq: qi_ALU64_didi <"vcmph.eq", int_hexagon_A2_vcmpheq>; -def Hexagon_A2_vcmphgt: +def HEXAGON_A2_vcmphgt: qi_ALU64_didi <"vcmph.gt", int_hexagon_A2_vcmphgt>; -def Hexagon_A2_vcmphgtu: +def HEXAGON_A2_vcmphgtu: qi_ALU64_didi <"vcmph.gtu",int_hexagon_A2_vcmphgtu>; // ALU64 / VH / Vector maximum halfwords. -def Hexagon_A2_vmaxh: +def HEXAGON_A2_vmaxh: di_ALU64_didi <"vmaxh", int_hexagon_A2_vmaxh>; -def Hexagon_A2_vmaxuh: +def HEXAGON_A2_vmaxuh: di_ALU64_didi <"vmaxuh", int_hexagon_A2_vmaxuh>; // ALU64 / VH / Vector minimum halfwords. -def Hexagon_A2_vminh: +def HEXAGON_A2_vminh: di_ALU64_didi <"vminh", int_hexagon_A2_vminh>; -def Hexagon_A2_vminuh: +def HEXAGON_A2_vminuh: di_ALU64_didi <"vminuh", int_hexagon_A2_vminuh>; // ALU64 / VH / Vector subtract halfwords. -def Hexagon_A2_vsubh: +def HEXAGON_A2_vsubh: di_ALU64_didi <"vsubh", int_hexagon_A2_vsubh>; -def Hexagon_A2_vsubhs: +def HEXAGON_A2_vsubhs: di_ALU64_didi_sat <"vsubh", int_hexagon_A2_vsubhs>; -def Hexagon_A2_vsubuhs: +def HEXAGON_A2_vsubuhs: di_ALU64_didi_sat <"vsubuh", int_hexagon_A2_vsubuhs>; @@ -2204,53 +2230,53 @@ def Hexagon_A2_vsubuhs: // ALU64 / VW / Vector add words. 
// Rdd32=vaddw(Rss32,Rtt32)[:sat] -def Hexagon_A2_vaddw: +def HEXAGON_A2_vaddw: di_ALU64_didi <"vaddw", int_hexagon_A2_vaddw>; -def Hexagon_A2_vaddws: +def HEXAGON_A2_vaddws: di_ALU64_didi_sat <"vaddw", int_hexagon_A2_vaddws>; // ALU64 / VW / Vector average words. -def Hexagon_A2_vavguw: +def HEXAGON_A2_vavguw: di_ALU64_didi <"vavguw", int_hexagon_A2_vavguw>; -def Hexagon_A2_vavguwr: +def HEXAGON_A2_vavguwr: di_ALU64_didi_rnd <"vavguw", int_hexagon_A2_vavguwr>; -def Hexagon_A2_vavgw: +def HEXAGON_A2_vavgw: di_ALU64_didi <"vavgw", int_hexagon_A2_vavgw>; -def Hexagon_A2_vavgwcr: +def HEXAGON_A2_vavgwcr: di_ALU64_didi_crnd <"vavgw", int_hexagon_A2_vavgwcr>; -def Hexagon_A2_vavgwr: +def HEXAGON_A2_vavgwr: di_ALU64_didi_rnd <"vavgw", int_hexagon_A2_vavgwr>; -def Hexagon_A2_vnavgw: +def HEXAGON_A2_vnavgw: di_ALU64_didi <"vnavgw", int_hexagon_A2_vnavgw>; -def Hexagon_A2_vnavgwcr: +def HEXAGON_A2_vnavgwcr: di_ALU64_didi_crnd_sat <"vnavgw", int_hexagon_A2_vnavgwcr>; -def Hexagon_A2_vnavgwr: +def HEXAGON_A2_vnavgwr: di_ALU64_didi_rnd_sat <"vnavgw", int_hexagon_A2_vnavgwr>; // ALU64 / VW / Vector compare words. -def Hexagon_A2_vcmpweq: +def HEXAGON_A2_vcmpweq: qi_ALU64_didi <"vcmpw.eq", int_hexagon_A2_vcmpweq>; -def Hexagon_A2_vcmpwgt: +def HEXAGON_A2_vcmpwgt: qi_ALU64_didi <"vcmpw.gt", int_hexagon_A2_vcmpwgt>; -def Hexagon_A2_vcmpwgtu: +def HEXAGON_A2_vcmpwgtu: qi_ALU64_didi <"vcmpw.gtu",int_hexagon_A2_vcmpwgtu>; // ALU64 / VW / Vector maximum words. -def Hexagon_A2_vmaxw: +def HEXAGON_A2_vmaxw: di_ALU64_didi <"vmaxw", int_hexagon_A2_vmaxw>; -def Hexagon_A2_vmaxuw: +def HEXAGON_A2_vmaxuw: di_ALU64_didi <"vmaxuw", int_hexagon_A2_vmaxuw>; // ALU64 / VW / Vector minimum words. -def Hexagon_A2_vminw: +def HEXAGON_A2_vminw: di_ALU64_didi <"vminw", int_hexagon_A2_vminw>; -def Hexagon_A2_vminuw: +def HEXAGON_A2_vminuw: di_ALU64_didi <"vminuw", int_hexagon_A2_vminuw>; // ALU64 / VW / Vector subtract words. -def Hexagon_A2_vsubw: +def HEXAGON_A2_vsubw: di_ALU64_didi <"vsubw", int_hexagon_A2_vsubw>; -def Hexagon_A2_vsubws: +def HEXAGON_A2_vsubws: di_ALU64_didi_sat <"vsubw", int_hexagon_A2_vsubws>; @@ -2259,25 +2285,25 @@ def Hexagon_A2_vsubws: *********************************************************************/ // CR / Logical reductions on predicates. -def Hexagon_C2_all8: +def HEXAGON_C2_all8: qi_SInst_qi <"all8", int_hexagon_C2_all8>; -def Hexagon_C2_any8: +def HEXAGON_C2_any8: qi_SInst_qi <"any8", int_hexagon_C2_any8>; // CR / Logical operations on predicates. -def Hexagon_C2_pxfer_map: +def HEXAGON_C2_pxfer_map: qi_SInst_qi_pxfer <"", int_hexagon_C2_pxfer_map>; -def Hexagon_C2_and: +def HEXAGON_C2_and: qi_SInst_qiqi <"and", int_hexagon_C2_and>; -def Hexagon_C2_andn: +def HEXAGON_C2_andn: qi_SInst_qiqi_neg <"and", int_hexagon_C2_andn>; -def Hexagon_C2_not: +def HEXAGON_C2_not: qi_SInst_qi <"not", int_hexagon_C2_not>; -def Hexagon_C2_or: +def HEXAGON_C2_or: qi_SInst_qiqi <"or", int_hexagon_C2_or>; -def Hexagon_C2_orn: +def HEXAGON_C2_orn: qi_SInst_qiqi_neg <"or", int_hexagon_C2_orn>; -def Hexagon_C2_xor: +def HEXAGON_C2_xor: qi_SInst_qiqi <"xor", int_hexagon_C2_xor>; @@ -2286,27 +2312,27 @@ def Hexagon_C2_xor: *********************************************************************/ // MTYPE / ALU / Add and accumulate. 
-def Hexagon_M2_acci: +def HEXAGON_M2_acci: si_MInst_sisisi_acc <"add", int_hexagon_M2_acci>; -def Hexagon_M2_accii: +def HEXAGON_M2_accii: si_MInst_sisis8_acc <"add", int_hexagon_M2_accii>; -def Hexagon_M2_nacci: +def HEXAGON_M2_nacci: si_MInst_sisisi_nac <"add", int_hexagon_M2_nacci>; -def Hexagon_M2_naccii: +def HEXAGON_M2_naccii: si_MInst_sisis8_nac <"add", int_hexagon_M2_naccii>; // MTYPE / ALU / Subtract and accumulate. -def Hexagon_M2_subacc: +def HEXAGON_M2_subacc: si_MInst_sisisi_acc <"sub", int_hexagon_M2_subacc>; // MTYPE / ALU / Vector absolute difference. -def Hexagon_M2_vabsdiffh: +def HEXAGON_M2_vabsdiffh: di_MInst_didi <"vabsdiffh",int_hexagon_M2_vabsdiffh>; -def Hexagon_M2_vabsdiffw: +def HEXAGON_M2_vabsdiffw: di_MInst_didi <"vabsdiffw",int_hexagon_M2_vabsdiffw>; // MTYPE / ALU / XOR and xor with destination. -def Hexagon_M2_xor_xacc: +def HEXAGON_M2_xor_xacc: si_MInst_sisisi_xacc <"xor", int_hexagon_M2_xor_xacc>; @@ -2316,91 +2342,91 @@ def Hexagon_M2_xor_xacc: // MTYPE / COMPLEX / Complex multiply. // Rdd[-+]=cmpy(Rs, Rt:<<1]:sat -def Hexagon_M2_cmpys_s1: +def HEXAGON_M2_cmpys_s1: di_MInst_sisi_s1_sat <"cmpy", int_hexagon_M2_cmpys_s1>; -def Hexagon_M2_cmpys_s0: +def HEXAGON_M2_cmpys_s0: di_MInst_sisi_sat <"cmpy", int_hexagon_M2_cmpys_s0>; -def Hexagon_M2_cmpysc_s1: +def HEXAGON_M2_cmpysc_s1: di_MInst_sisi_s1_sat_conj <"cmpy", int_hexagon_M2_cmpysc_s1>; -def Hexagon_M2_cmpysc_s0: +def HEXAGON_M2_cmpysc_s0: di_MInst_sisi_sat_conj <"cmpy", int_hexagon_M2_cmpysc_s0>; -def Hexagon_M2_cmacs_s1: +def HEXAGON_M2_cmacs_s1: di_MInst_disisi_acc_s1_sat <"cmpy", int_hexagon_M2_cmacs_s1>; -def Hexagon_M2_cmacs_s0: +def HEXAGON_M2_cmacs_s0: di_MInst_disisi_acc_sat <"cmpy", int_hexagon_M2_cmacs_s0>; -def Hexagon_M2_cmacsc_s1: +def HEXAGON_M2_cmacsc_s1: di_MInst_disisi_acc_s1_sat_conj <"cmpy", int_hexagon_M2_cmacsc_s1>; -def Hexagon_M2_cmacsc_s0: +def HEXAGON_M2_cmacsc_s0: di_MInst_disisi_acc_sat_conj <"cmpy", int_hexagon_M2_cmacsc_s0>; -def Hexagon_M2_cnacs_s1: +def HEXAGON_M2_cnacs_s1: di_MInst_disisi_nac_s1_sat <"cmpy", int_hexagon_M2_cnacs_s1>; -def Hexagon_M2_cnacs_s0: +def HEXAGON_M2_cnacs_s0: di_MInst_disisi_nac_sat <"cmpy", int_hexagon_M2_cnacs_s0>; -def Hexagon_M2_cnacsc_s1: +def HEXAGON_M2_cnacsc_s1: di_MInst_disisi_nac_s1_sat_conj <"cmpy", int_hexagon_M2_cnacsc_s1>; -def Hexagon_M2_cnacsc_s0: +def HEXAGON_M2_cnacsc_s0: di_MInst_disisi_nac_sat_conj <"cmpy", int_hexagon_M2_cnacsc_s0>; // MTYPE / COMPLEX / Complex multiply real or imaginary. -def Hexagon_M2_cmpyr_s0: +def HEXAGON_M2_cmpyr_s0: di_MInst_sisi <"cmpyr", int_hexagon_M2_cmpyr_s0>; -def Hexagon_M2_cmacr_s0: +def HEXAGON_M2_cmacr_s0: di_MInst_disisi_acc <"cmpyr", int_hexagon_M2_cmacr_s0>; -def Hexagon_M2_cmpyi_s0: +def HEXAGON_M2_cmpyi_s0: di_MInst_sisi <"cmpyi", int_hexagon_M2_cmpyi_s0>; -def Hexagon_M2_cmaci_s0: +def HEXAGON_M2_cmaci_s0: di_MInst_disisi_acc <"cmpyi", int_hexagon_M2_cmaci_s0>; // MTYPE / COMPLEX / Complex multiply with round and pack. // Rxx32+=cmpy(Rs32,[*]Rt32:<<1]:rnd:sat -def Hexagon_M2_cmpyrs_s0: +def HEXAGON_M2_cmpyrs_s0: si_MInst_sisi_rnd_sat <"cmpy", int_hexagon_M2_cmpyrs_s0>; -def Hexagon_M2_cmpyrs_s1: +def HEXAGON_M2_cmpyrs_s1: si_MInst_sisi_s1_rnd_sat <"cmpy", int_hexagon_M2_cmpyrs_s1>; -def Hexagon_M2_cmpyrsc_s0: +def HEXAGON_M2_cmpyrsc_s0: si_MInst_sisi_rnd_sat_conj <"cmpy", int_hexagon_M2_cmpyrsc_s0>; -def Hexagon_M2_cmpyrsc_s1: +def HEXAGON_M2_cmpyrsc_s1: si_MInst_sisi_s1_rnd_sat_conj <"cmpy", int_hexagon_M2_cmpyrsc_s1>; //MTYPE / COMPLEX / Vector complex multiply real or imaginary. 
-def Hexagon_M2_vcmpy_s0_sat_i: +def HEXAGON_M2_vcmpy_s0_sat_i: di_MInst_didi_sat <"vcmpyi", int_hexagon_M2_vcmpy_s0_sat_i>; -def Hexagon_M2_vcmpy_s1_sat_i: +def HEXAGON_M2_vcmpy_s1_sat_i: di_MInst_didi_s1_sat <"vcmpyi", int_hexagon_M2_vcmpy_s1_sat_i>; -def Hexagon_M2_vcmpy_s0_sat_r: +def HEXAGON_M2_vcmpy_s0_sat_r: di_MInst_didi_sat <"vcmpyr", int_hexagon_M2_vcmpy_s0_sat_r>; -def Hexagon_M2_vcmpy_s1_sat_r: +def HEXAGON_M2_vcmpy_s1_sat_r: di_MInst_didi_s1_sat <"vcmpyr", int_hexagon_M2_vcmpy_s1_sat_r>; -def Hexagon_M2_vcmac_s0_sat_i: +def HEXAGON_M2_vcmac_s0_sat_i: di_MInst_dididi_acc_sat <"vcmpyi", int_hexagon_M2_vcmac_s0_sat_i>; -def Hexagon_M2_vcmac_s0_sat_r: +def HEXAGON_M2_vcmac_s0_sat_r: di_MInst_dididi_acc_sat <"vcmpyr", int_hexagon_M2_vcmac_s0_sat_r>; //MTYPE / COMPLEX / Vector reduce complex multiply real or imaginary. -def Hexagon_M2_vrcmpyi_s0: +def HEXAGON_M2_vrcmpyi_s0: di_MInst_didi <"vrcmpyi", int_hexagon_M2_vrcmpyi_s0>; -def Hexagon_M2_vrcmpyr_s0: +def HEXAGON_M2_vrcmpyr_s0: di_MInst_didi <"vrcmpyr", int_hexagon_M2_vrcmpyr_s0>; -def Hexagon_M2_vrcmpyi_s0c: +def HEXAGON_M2_vrcmpyi_s0c: di_MInst_didi_conj <"vrcmpyi", int_hexagon_M2_vrcmpyi_s0c>; -def Hexagon_M2_vrcmpyr_s0c: +def HEXAGON_M2_vrcmpyr_s0c: di_MInst_didi_conj <"vrcmpyr", int_hexagon_M2_vrcmpyr_s0c>; -def Hexagon_M2_vrcmaci_s0: +def HEXAGON_M2_vrcmaci_s0: di_MInst_dididi_acc <"vrcmpyi", int_hexagon_M2_vrcmaci_s0>; -def Hexagon_M2_vrcmacr_s0: +def HEXAGON_M2_vrcmacr_s0: di_MInst_dididi_acc <"vrcmpyr", int_hexagon_M2_vrcmacr_s0>; -def Hexagon_M2_vrcmaci_s0c: +def HEXAGON_M2_vrcmaci_s0c: di_MInst_dididi_acc_conj <"vrcmpyi", int_hexagon_M2_vrcmaci_s0c>; -def Hexagon_M2_vrcmacr_s0c: +def HEXAGON_M2_vrcmacr_s0c: di_MInst_dididi_acc_conj <"vrcmpyr", int_hexagon_M2_vrcmacr_s0c>; @@ -2409,115 +2435,115 @@ def Hexagon_M2_vrcmacr_s0c: *********************************************************************/ // MTYPE / MPYH / Multiply and use lower result. -//def Hexagon_M2_mpysmi: +//def HEXAGON_M2_mpysmi: // si_MInst_sim9 <"mpyi", int_hexagon_M2_mpysmi>; -def Hexagon_M2_mpyi: +def HEXAGON_M2_mpyi: si_MInst_sisi <"mpyi", int_hexagon_M2_mpyi>; -def Hexagon_M2_mpyui: +def HEXAGON_M2_mpyui: si_MInst_sisi <"mpyui", int_hexagon_M2_mpyui>; -def Hexagon_M2_macsip: +def HEXAGON_M2_macsip: si_MInst_sisiu8_acc <"mpyi", int_hexagon_M2_macsip>; -def Hexagon_M2_maci: +def HEXAGON_M2_maci: si_MInst_sisisi_acc <"mpyi", int_hexagon_M2_maci>; -def Hexagon_M2_macsin: +def HEXAGON_M2_macsin: si_MInst_sisiu8_nac <"mpyi", int_hexagon_M2_macsin>; // MTYPE / MPYH / Multiply word by half (32x16). 
//Rdd[+]=vmpywoh(Rss,Rtt)[:<<1][:rnd][:sat] //Rdd[+]=vmpyweh(Rss,Rtt)[:<<1][:rnd][:sat] -def Hexagon_M2_mmpyl_rs1: +def HEXAGON_M2_mmpyl_rs1: di_MInst_didi_s1_rnd_sat <"vmpyweh", int_hexagon_M2_mmpyl_rs1>; -def Hexagon_M2_mmpyl_s1: +def HEXAGON_M2_mmpyl_s1: di_MInst_didi_s1_sat <"vmpyweh", int_hexagon_M2_mmpyl_s1>; -def Hexagon_M2_mmpyl_rs0: +def HEXAGON_M2_mmpyl_rs0: di_MInst_didi_rnd_sat <"vmpyweh", int_hexagon_M2_mmpyl_rs0>; -def Hexagon_M2_mmpyl_s0: +def HEXAGON_M2_mmpyl_s0: di_MInst_didi_sat <"vmpyweh", int_hexagon_M2_mmpyl_s0>; -def Hexagon_M2_mmpyh_rs1: +def HEXAGON_M2_mmpyh_rs1: di_MInst_didi_s1_rnd_sat <"vmpywoh", int_hexagon_M2_mmpyh_rs1>; -def Hexagon_M2_mmpyh_s1: +def HEXAGON_M2_mmpyh_s1: di_MInst_didi_s1_sat <"vmpywoh", int_hexagon_M2_mmpyh_s1>; -def Hexagon_M2_mmpyh_rs0: +def HEXAGON_M2_mmpyh_rs0: di_MInst_didi_rnd_sat <"vmpywoh", int_hexagon_M2_mmpyh_rs0>; -def Hexagon_M2_mmpyh_s0: +def HEXAGON_M2_mmpyh_s0: di_MInst_didi_sat <"vmpywoh", int_hexagon_M2_mmpyh_s0>; -def Hexagon_M2_mmacls_rs1: +def HEXAGON_M2_mmacls_rs1: di_MInst_dididi_acc_s1_rnd_sat <"vmpyweh", int_hexagon_M2_mmacls_rs1>; -def Hexagon_M2_mmacls_s1: +def HEXAGON_M2_mmacls_s1: di_MInst_dididi_acc_s1_sat <"vmpyweh", int_hexagon_M2_mmacls_s1>; -def Hexagon_M2_mmacls_rs0: +def HEXAGON_M2_mmacls_rs0: di_MInst_dididi_acc_rnd_sat <"vmpyweh", int_hexagon_M2_mmacls_rs0>; -def Hexagon_M2_mmacls_s0: +def HEXAGON_M2_mmacls_s0: di_MInst_dididi_acc_sat <"vmpyweh", int_hexagon_M2_mmacls_s0>; -def Hexagon_M2_mmachs_rs1: +def HEXAGON_M2_mmachs_rs1: di_MInst_dididi_acc_s1_rnd_sat <"vmpywoh", int_hexagon_M2_mmachs_rs1>; -def Hexagon_M2_mmachs_s1: +def HEXAGON_M2_mmachs_s1: di_MInst_dididi_acc_s1_sat <"vmpywoh", int_hexagon_M2_mmachs_s1>; -def Hexagon_M2_mmachs_rs0: +def HEXAGON_M2_mmachs_rs0: di_MInst_dididi_acc_rnd_sat <"vmpywoh", int_hexagon_M2_mmachs_rs0>; -def Hexagon_M2_mmachs_s0: +def HEXAGON_M2_mmachs_s0: di_MInst_dididi_acc_sat <"vmpywoh", int_hexagon_M2_mmachs_s0>; // MTYPE / MPYH / Multiply word by unsigned half (32x16). 
//Rdd[+]=vmpywouh(Rss,Rtt)[:<<1][:rnd][:sat] //Rdd[+]=vmpyweuh(Rss,Rtt)[:<<1][:rnd][:sat] -def Hexagon_M2_mmpyul_rs1: +def HEXAGON_M2_mmpyul_rs1: di_MInst_didi_s1_rnd_sat <"vmpyweuh", int_hexagon_M2_mmpyul_rs1>; -def Hexagon_M2_mmpyul_s1: +def HEXAGON_M2_mmpyul_s1: di_MInst_didi_s1_sat <"vmpyweuh", int_hexagon_M2_mmpyul_s1>; -def Hexagon_M2_mmpyul_rs0: +def HEXAGON_M2_mmpyul_rs0: di_MInst_didi_rnd_sat <"vmpyweuh", int_hexagon_M2_mmpyul_rs0>; -def Hexagon_M2_mmpyul_s0: +def HEXAGON_M2_mmpyul_s0: di_MInst_didi_sat <"vmpyweuh", int_hexagon_M2_mmpyul_s0>; -def Hexagon_M2_mmpyuh_rs1: +def HEXAGON_M2_mmpyuh_rs1: di_MInst_didi_s1_rnd_sat <"vmpywouh", int_hexagon_M2_mmpyuh_rs1>; -def Hexagon_M2_mmpyuh_s1: +def HEXAGON_M2_mmpyuh_s1: di_MInst_didi_s1_sat <"vmpywouh", int_hexagon_M2_mmpyuh_s1>; -def Hexagon_M2_mmpyuh_rs0: +def HEXAGON_M2_mmpyuh_rs0: di_MInst_didi_rnd_sat <"vmpywouh", int_hexagon_M2_mmpyuh_rs0>; -def Hexagon_M2_mmpyuh_s0: +def HEXAGON_M2_mmpyuh_s0: di_MInst_didi_sat <"vmpywouh", int_hexagon_M2_mmpyuh_s0>; -def Hexagon_M2_mmaculs_rs1: +def HEXAGON_M2_mmaculs_rs1: di_MInst_dididi_acc_s1_rnd_sat <"vmpyweuh", int_hexagon_M2_mmaculs_rs1>; -def Hexagon_M2_mmaculs_s1: +def HEXAGON_M2_mmaculs_s1: di_MInst_dididi_acc_s1_sat <"vmpyweuh", int_hexagon_M2_mmaculs_s1>; -def Hexagon_M2_mmaculs_rs0: +def HEXAGON_M2_mmaculs_rs0: di_MInst_dididi_acc_rnd_sat <"vmpyweuh", int_hexagon_M2_mmaculs_rs0>; -def Hexagon_M2_mmaculs_s0: +def HEXAGON_M2_mmaculs_s0: di_MInst_dididi_acc_sat <"vmpyweuh", int_hexagon_M2_mmaculs_s0>; -def Hexagon_M2_mmacuhs_rs1: +def HEXAGON_M2_mmacuhs_rs1: di_MInst_dididi_acc_s1_rnd_sat <"vmpywouh", int_hexagon_M2_mmacuhs_rs1>; -def Hexagon_M2_mmacuhs_s1: +def HEXAGON_M2_mmacuhs_s1: di_MInst_dididi_acc_s1_sat <"vmpywouh", int_hexagon_M2_mmacuhs_s1>; -def Hexagon_M2_mmacuhs_rs0: +def HEXAGON_M2_mmacuhs_rs0: di_MInst_dididi_acc_rnd_sat <"vmpywouh", int_hexagon_M2_mmacuhs_rs0>; -def Hexagon_M2_mmacuhs_s0: +def HEXAGON_M2_mmacuhs_s0: di_MInst_dididi_acc_sat <"vmpywouh", int_hexagon_M2_mmacuhs_s0>; // MTYPE / MPYH / Multiply and use upper result. -def Hexagon_M2_hmmpyh_rs1: +def HEXAGON_M2_hmmpyh_rs1: si_MInst_sisi_h_s1_rnd_sat <"mpy", int_hexagon_M2_hmmpyh_rs1>; -def Hexagon_M2_hmmpyl_rs1: +def HEXAGON_M2_hmmpyl_rs1: si_MInst_sisi_l_s1_rnd_sat <"mpy", int_hexagon_M2_hmmpyl_rs1>; -def Hexagon_M2_mpy_up: +def HEXAGON_M2_mpy_up: si_MInst_sisi <"mpy", int_hexagon_M2_mpy_up>; -def Hexagon_M2_dpmpyss_rnd_s0: +def HEXAGON_M2_dpmpyss_rnd_s0: si_MInst_sisi_rnd <"mpy", int_hexagon_M2_dpmpyss_rnd_s0>; -def Hexagon_M2_mpyu_up: +def HEXAGON_M2_mpyu_up: si_MInst_sisi <"mpyu", int_hexagon_M2_mpyu_up>; // MTYPE / MPYH / Multiply and use full result. -def Hexagon_M2_dpmpyuu_s0: +def HEXAGON_M2_dpmpyuu_s0: di_MInst_sisi <"mpyu", int_hexagon_M2_dpmpyuu_s0>; -def Hexagon_M2_dpmpyuu_acc_s0: +def HEXAGON_M2_dpmpyuu_acc_s0: di_MInst_disisi_acc <"mpyu", int_hexagon_M2_dpmpyuu_acc_s0>; -def Hexagon_M2_dpmpyuu_nac_s0: +def HEXAGON_M2_dpmpyuu_nac_s0: di_MInst_disisi_nac <"mpyu", int_hexagon_M2_dpmpyuu_nac_s0>; -def Hexagon_M2_dpmpyss_s0: +def HEXAGON_M2_dpmpyss_s0: di_MInst_sisi <"mpy", int_hexagon_M2_dpmpyss_s0>; -def Hexagon_M2_dpmpyss_acc_s0: +def HEXAGON_M2_dpmpyss_acc_s0: di_MInst_disisi_acc <"mpy", int_hexagon_M2_dpmpyss_acc_s0>; -def Hexagon_M2_dpmpyss_nac_s0: +def HEXAGON_M2_dpmpyss_nac_s0: di_MInst_disisi_nac <"mpy", int_hexagon_M2_dpmpyss_nac_s0>; @@ -2528,334 +2554,334 @@ def Hexagon_M2_dpmpyss_nac_s0: // MTYPE / MPYS / Scalar 16x16 multiply signed. 
//Rd=mpy(Rs.[H|L],Rt.[H|L:<<0|:<<1]| // [:<<0[:rnd|:sat|:rnd:sat]|:<<1[:rnd|:sat|:rnd:sat]]] -def Hexagon_M2_mpy_hh_s0: +def HEXAGON_M2_mpy_hh_s0: si_MInst_sisi_hh <"mpy", int_hexagon_M2_mpy_hh_s0>; -def Hexagon_M2_mpy_hh_s1: +def HEXAGON_M2_mpy_hh_s1: si_MInst_sisi_hh_s1 <"mpy", int_hexagon_M2_mpy_hh_s1>; -def Hexagon_M2_mpy_rnd_hh_s1: +def HEXAGON_M2_mpy_rnd_hh_s1: si_MInst_sisi_rnd_hh_s1 <"mpy", int_hexagon_M2_mpy_rnd_hh_s1>; -def Hexagon_M2_mpy_sat_rnd_hh_s1: +def HEXAGON_M2_mpy_sat_rnd_hh_s1: si_MInst_sisi_sat_rnd_hh_s1 <"mpy", int_hexagon_M2_mpy_sat_rnd_hh_s1>; -def Hexagon_M2_mpy_sat_hh_s1: +def HEXAGON_M2_mpy_sat_hh_s1: si_MInst_sisi_sat_hh_s1 <"mpy", int_hexagon_M2_mpy_sat_hh_s1>; -def Hexagon_M2_mpy_rnd_hh_s0: +def HEXAGON_M2_mpy_rnd_hh_s0: si_MInst_sisi_rnd_hh <"mpy", int_hexagon_M2_mpy_rnd_hh_s0>; -def Hexagon_M2_mpy_sat_rnd_hh_s0: +def HEXAGON_M2_mpy_sat_rnd_hh_s0: si_MInst_sisi_sat_rnd_hh <"mpy", int_hexagon_M2_mpy_sat_rnd_hh_s0>; -def Hexagon_M2_mpy_sat_hh_s0: +def HEXAGON_M2_mpy_sat_hh_s0: si_MInst_sisi_sat_hh <"mpy", int_hexagon_M2_mpy_sat_hh_s0>; -def Hexagon_M2_mpy_hl_s0: +def HEXAGON_M2_mpy_hl_s0: si_MInst_sisi_hl <"mpy", int_hexagon_M2_mpy_hl_s0>; -def Hexagon_M2_mpy_hl_s1: +def HEXAGON_M2_mpy_hl_s1: si_MInst_sisi_hl_s1 <"mpy", int_hexagon_M2_mpy_hl_s1>; -def Hexagon_M2_mpy_rnd_hl_s1: +def HEXAGON_M2_mpy_rnd_hl_s1: si_MInst_sisi_rnd_hl_s1 <"mpy", int_hexagon_M2_mpy_rnd_hl_s1>; -def Hexagon_M2_mpy_sat_rnd_hl_s1: +def HEXAGON_M2_mpy_sat_rnd_hl_s1: si_MInst_sisi_sat_rnd_hl_s1 <"mpy", int_hexagon_M2_mpy_sat_rnd_hl_s1>; -def Hexagon_M2_mpy_sat_hl_s1: +def HEXAGON_M2_mpy_sat_hl_s1: si_MInst_sisi_sat_hl_s1 <"mpy", int_hexagon_M2_mpy_sat_hl_s1>; -def Hexagon_M2_mpy_rnd_hl_s0: +def HEXAGON_M2_mpy_rnd_hl_s0: si_MInst_sisi_rnd_hl <"mpy", int_hexagon_M2_mpy_rnd_hl_s0>; -def Hexagon_M2_mpy_sat_rnd_hl_s0: +def HEXAGON_M2_mpy_sat_rnd_hl_s0: si_MInst_sisi_sat_rnd_hl <"mpy", int_hexagon_M2_mpy_sat_rnd_hl_s0>; -def Hexagon_M2_mpy_sat_hl_s0: +def HEXAGON_M2_mpy_sat_hl_s0: si_MInst_sisi_sat_hl <"mpy", int_hexagon_M2_mpy_sat_hl_s0>; -def Hexagon_M2_mpy_lh_s0: +def HEXAGON_M2_mpy_lh_s0: si_MInst_sisi_lh <"mpy", int_hexagon_M2_mpy_lh_s0>; -def Hexagon_M2_mpy_lh_s1: +def HEXAGON_M2_mpy_lh_s1: si_MInst_sisi_lh_s1 <"mpy", int_hexagon_M2_mpy_lh_s1>; -def Hexagon_M2_mpy_rnd_lh_s1: +def HEXAGON_M2_mpy_rnd_lh_s1: si_MInst_sisi_rnd_lh_s1 <"mpy", int_hexagon_M2_mpy_rnd_lh_s1>; -def Hexagon_M2_mpy_sat_rnd_lh_s1: +def HEXAGON_M2_mpy_sat_rnd_lh_s1: si_MInst_sisi_sat_rnd_lh_s1 <"mpy", int_hexagon_M2_mpy_sat_rnd_lh_s1>; -def Hexagon_M2_mpy_sat_lh_s1: +def HEXAGON_M2_mpy_sat_lh_s1: si_MInst_sisi_sat_lh_s1 <"mpy", int_hexagon_M2_mpy_sat_lh_s1>; -def Hexagon_M2_mpy_rnd_lh_s0: +def HEXAGON_M2_mpy_rnd_lh_s0: si_MInst_sisi_rnd_lh <"mpy", int_hexagon_M2_mpy_rnd_lh_s0>; -def Hexagon_M2_mpy_sat_rnd_lh_s0: +def HEXAGON_M2_mpy_sat_rnd_lh_s0: si_MInst_sisi_sat_rnd_lh <"mpy", int_hexagon_M2_mpy_sat_rnd_lh_s0>; -def Hexagon_M2_mpy_sat_lh_s0: +def HEXAGON_M2_mpy_sat_lh_s0: si_MInst_sisi_sat_lh <"mpy", int_hexagon_M2_mpy_sat_lh_s0>; -def Hexagon_M2_mpy_ll_s0: +def HEXAGON_M2_mpy_ll_s0: si_MInst_sisi_ll <"mpy", int_hexagon_M2_mpy_ll_s0>; -def Hexagon_M2_mpy_ll_s1: +def HEXAGON_M2_mpy_ll_s1: si_MInst_sisi_ll_s1 <"mpy", int_hexagon_M2_mpy_ll_s1>; -def Hexagon_M2_mpy_rnd_ll_s1: +def HEXAGON_M2_mpy_rnd_ll_s1: si_MInst_sisi_rnd_ll_s1 <"mpy", int_hexagon_M2_mpy_rnd_ll_s1>; -def Hexagon_M2_mpy_sat_rnd_ll_s1: +def HEXAGON_M2_mpy_sat_rnd_ll_s1: si_MInst_sisi_sat_rnd_ll_s1 <"mpy", int_hexagon_M2_mpy_sat_rnd_ll_s1>; -def 
Hexagon_M2_mpy_sat_ll_s1: +def HEXAGON_M2_mpy_sat_ll_s1: si_MInst_sisi_sat_ll_s1 <"mpy", int_hexagon_M2_mpy_sat_ll_s1>; -def Hexagon_M2_mpy_rnd_ll_s0: +def HEXAGON_M2_mpy_rnd_ll_s0: si_MInst_sisi_rnd_ll <"mpy", int_hexagon_M2_mpy_rnd_ll_s0>; -def Hexagon_M2_mpy_sat_rnd_ll_s0: +def HEXAGON_M2_mpy_sat_rnd_ll_s0: si_MInst_sisi_sat_rnd_ll <"mpy", int_hexagon_M2_mpy_sat_rnd_ll_s0>; -def Hexagon_M2_mpy_sat_ll_s0: +def HEXAGON_M2_mpy_sat_ll_s0: si_MInst_sisi_sat_ll <"mpy", int_hexagon_M2_mpy_sat_ll_s0>; //Rdd=mpy(Rs.[H|L],Rt.[H|L])[[:<<0|:<<1]|[:<<0:rnd|:<<1:rnd]] -def Hexagon_M2_mpyd_hh_s0: +def HEXAGON_M2_mpyd_hh_s0: di_MInst_sisi_hh <"mpy", int_hexagon_M2_mpyd_hh_s0>; -def Hexagon_M2_mpyd_hh_s1: +def HEXAGON_M2_mpyd_hh_s1: di_MInst_sisi_hh_s1 <"mpy", int_hexagon_M2_mpyd_hh_s1>; -def Hexagon_M2_mpyd_rnd_hh_s1: +def HEXAGON_M2_mpyd_rnd_hh_s1: di_MInst_sisi_rnd_hh_s1 <"mpy", int_hexagon_M2_mpyd_rnd_hh_s1>; -def Hexagon_M2_mpyd_rnd_hh_s0: +def HEXAGON_M2_mpyd_rnd_hh_s0: di_MInst_sisi_rnd_hh <"mpy", int_hexagon_M2_mpyd_rnd_hh_s0>; -def Hexagon_M2_mpyd_hl_s0: +def HEXAGON_M2_mpyd_hl_s0: di_MInst_sisi_hl <"mpy", int_hexagon_M2_mpyd_hl_s0>; -def Hexagon_M2_mpyd_hl_s1: +def HEXAGON_M2_mpyd_hl_s1: di_MInst_sisi_hl_s1 <"mpy", int_hexagon_M2_mpyd_hl_s1>; -def Hexagon_M2_mpyd_rnd_hl_s1: +def HEXAGON_M2_mpyd_rnd_hl_s1: di_MInst_sisi_rnd_hl_s1 <"mpy", int_hexagon_M2_mpyd_rnd_hl_s1>; -def Hexagon_M2_mpyd_rnd_hl_s0: +def HEXAGON_M2_mpyd_rnd_hl_s0: di_MInst_sisi_rnd_hl <"mpy", int_hexagon_M2_mpyd_rnd_hl_s0>; -def Hexagon_M2_mpyd_lh_s0: +def HEXAGON_M2_mpyd_lh_s0: di_MInst_sisi_lh <"mpy", int_hexagon_M2_mpyd_lh_s0>; -def Hexagon_M2_mpyd_lh_s1: +def HEXAGON_M2_mpyd_lh_s1: di_MInst_sisi_lh_s1 <"mpy", int_hexagon_M2_mpyd_lh_s1>; -def Hexagon_M2_mpyd_rnd_lh_s1: +def HEXAGON_M2_mpyd_rnd_lh_s1: di_MInst_sisi_rnd_lh_s1 <"mpy", int_hexagon_M2_mpyd_rnd_lh_s1>; -def Hexagon_M2_mpyd_rnd_lh_s0: +def HEXAGON_M2_mpyd_rnd_lh_s0: di_MInst_sisi_rnd_lh <"mpy", int_hexagon_M2_mpyd_rnd_lh_s0>; -def Hexagon_M2_mpyd_ll_s0: +def HEXAGON_M2_mpyd_ll_s0: di_MInst_sisi_ll <"mpy", int_hexagon_M2_mpyd_ll_s0>; -def Hexagon_M2_mpyd_ll_s1: +def HEXAGON_M2_mpyd_ll_s1: di_MInst_sisi_ll_s1 <"mpy", int_hexagon_M2_mpyd_ll_s1>; -def Hexagon_M2_mpyd_rnd_ll_s1: +def HEXAGON_M2_mpyd_rnd_ll_s1: di_MInst_sisi_rnd_ll_s1 <"mpy", int_hexagon_M2_mpyd_rnd_ll_s1>; -def Hexagon_M2_mpyd_rnd_ll_s0: +def HEXAGON_M2_mpyd_rnd_ll_s0: di_MInst_sisi_rnd_ll <"mpy", int_hexagon_M2_mpyd_rnd_ll_s0>; //Rx+=mpy(Rs.[H|L],Rt.[H|L])[[[:<<0|:<<1]|[:<<0:sat|:<<1:sat]] -def Hexagon_M2_mpy_acc_hh_s0: +def HEXAGON_M2_mpy_acc_hh_s0: si_MInst_sisisi_acc_hh <"mpy", int_hexagon_M2_mpy_acc_hh_s0>; -def Hexagon_M2_mpy_acc_hh_s1: +def HEXAGON_M2_mpy_acc_hh_s1: si_MInst_sisisi_acc_hh_s1 <"mpy", int_hexagon_M2_mpy_acc_hh_s1>; -def Hexagon_M2_mpy_acc_sat_hh_s1: +def HEXAGON_M2_mpy_acc_sat_hh_s1: si_MInst_sisisi_acc_sat_hh_s1 <"mpy", int_hexagon_M2_mpy_acc_sat_hh_s1>; -def Hexagon_M2_mpy_acc_sat_hh_s0: +def HEXAGON_M2_mpy_acc_sat_hh_s0: si_MInst_sisisi_acc_sat_hh <"mpy", int_hexagon_M2_mpy_acc_sat_hh_s0>; -def Hexagon_M2_mpy_acc_hl_s0: +def HEXAGON_M2_mpy_acc_hl_s0: si_MInst_sisisi_acc_hl <"mpy", int_hexagon_M2_mpy_acc_hl_s0>; -def Hexagon_M2_mpy_acc_hl_s1: +def HEXAGON_M2_mpy_acc_hl_s1: si_MInst_sisisi_acc_hl_s1 <"mpy", int_hexagon_M2_mpy_acc_hl_s1>; -def Hexagon_M2_mpy_acc_sat_hl_s1: +def HEXAGON_M2_mpy_acc_sat_hl_s1: si_MInst_sisisi_acc_sat_hl_s1 <"mpy", int_hexagon_M2_mpy_acc_sat_hl_s1>; -def Hexagon_M2_mpy_acc_sat_hl_s0: +def HEXAGON_M2_mpy_acc_sat_hl_s0: si_MInst_sisisi_acc_sat_hl 
<"mpy", int_hexagon_M2_mpy_acc_sat_hl_s0>; -def Hexagon_M2_mpy_acc_lh_s0: +def HEXAGON_M2_mpy_acc_lh_s0: si_MInst_sisisi_acc_lh <"mpy", int_hexagon_M2_mpy_acc_lh_s0>; -def Hexagon_M2_mpy_acc_lh_s1: +def HEXAGON_M2_mpy_acc_lh_s1: si_MInst_sisisi_acc_lh_s1 <"mpy", int_hexagon_M2_mpy_acc_lh_s1>; -def Hexagon_M2_mpy_acc_sat_lh_s1: +def HEXAGON_M2_mpy_acc_sat_lh_s1: si_MInst_sisisi_acc_sat_lh_s1 <"mpy", int_hexagon_M2_mpy_acc_sat_lh_s1>; -def Hexagon_M2_mpy_acc_sat_lh_s0: +def HEXAGON_M2_mpy_acc_sat_lh_s0: si_MInst_sisisi_acc_sat_lh <"mpy", int_hexagon_M2_mpy_acc_sat_lh_s0>; -def Hexagon_M2_mpy_acc_ll_s0: +def HEXAGON_M2_mpy_acc_ll_s0: si_MInst_sisisi_acc_ll <"mpy", int_hexagon_M2_mpy_acc_ll_s0>; -def Hexagon_M2_mpy_acc_ll_s1: +def HEXAGON_M2_mpy_acc_ll_s1: si_MInst_sisisi_acc_ll_s1 <"mpy", int_hexagon_M2_mpy_acc_ll_s1>; -def Hexagon_M2_mpy_acc_sat_ll_s1: +def HEXAGON_M2_mpy_acc_sat_ll_s1: si_MInst_sisisi_acc_sat_ll_s1 <"mpy", int_hexagon_M2_mpy_acc_sat_ll_s1>; -def Hexagon_M2_mpy_acc_sat_ll_s0: +def HEXAGON_M2_mpy_acc_sat_ll_s0: si_MInst_sisisi_acc_sat_ll <"mpy", int_hexagon_M2_mpy_acc_sat_ll_s0>; //Rx-=mpy(Rs.[H|L],Rt.[H|L])[[[:<<0|:<<1]|[:<<0:sat|:<<1:sat]] -def Hexagon_M2_mpy_nac_hh_s0: +def HEXAGON_M2_mpy_nac_hh_s0: si_MInst_sisisi_nac_hh <"mpy", int_hexagon_M2_mpy_nac_hh_s0>; -def Hexagon_M2_mpy_nac_hh_s1: +def HEXAGON_M2_mpy_nac_hh_s1: si_MInst_sisisi_nac_hh_s1 <"mpy", int_hexagon_M2_mpy_nac_hh_s1>; -def Hexagon_M2_mpy_nac_sat_hh_s1: +def HEXAGON_M2_mpy_nac_sat_hh_s1: si_MInst_sisisi_nac_sat_hh_s1 <"mpy", int_hexagon_M2_mpy_nac_sat_hh_s1>; -def Hexagon_M2_mpy_nac_sat_hh_s0: +def HEXAGON_M2_mpy_nac_sat_hh_s0: si_MInst_sisisi_nac_sat_hh <"mpy", int_hexagon_M2_mpy_nac_sat_hh_s0>; -def Hexagon_M2_mpy_nac_hl_s0: +def HEXAGON_M2_mpy_nac_hl_s0: si_MInst_sisisi_nac_hl <"mpy", int_hexagon_M2_mpy_nac_hl_s0>; -def Hexagon_M2_mpy_nac_hl_s1: +def HEXAGON_M2_mpy_nac_hl_s1: si_MInst_sisisi_nac_hl_s1 <"mpy", int_hexagon_M2_mpy_nac_hl_s1>; -def Hexagon_M2_mpy_nac_sat_hl_s1: +def HEXAGON_M2_mpy_nac_sat_hl_s1: si_MInst_sisisi_nac_sat_hl_s1 <"mpy", int_hexagon_M2_mpy_nac_sat_hl_s1>; -def Hexagon_M2_mpy_nac_sat_hl_s0: +def HEXAGON_M2_mpy_nac_sat_hl_s0: si_MInst_sisisi_nac_sat_hl <"mpy", int_hexagon_M2_mpy_nac_sat_hl_s0>; -def Hexagon_M2_mpy_nac_lh_s0: +def HEXAGON_M2_mpy_nac_lh_s0: si_MInst_sisisi_nac_lh <"mpy", int_hexagon_M2_mpy_nac_lh_s0>; -def Hexagon_M2_mpy_nac_lh_s1: +def HEXAGON_M2_mpy_nac_lh_s1: si_MInst_sisisi_nac_lh_s1 <"mpy", int_hexagon_M2_mpy_nac_lh_s1>; -def Hexagon_M2_mpy_nac_sat_lh_s1: +def HEXAGON_M2_mpy_nac_sat_lh_s1: si_MInst_sisisi_nac_sat_lh_s1 <"mpy", int_hexagon_M2_mpy_nac_sat_lh_s1>; -def Hexagon_M2_mpy_nac_sat_lh_s0: +def HEXAGON_M2_mpy_nac_sat_lh_s0: si_MInst_sisisi_nac_sat_lh <"mpy", int_hexagon_M2_mpy_nac_sat_lh_s0>; -def Hexagon_M2_mpy_nac_ll_s0: +def HEXAGON_M2_mpy_nac_ll_s0: si_MInst_sisisi_nac_ll <"mpy", int_hexagon_M2_mpy_nac_ll_s0>; -def Hexagon_M2_mpy_nac_ll_s1: +def HEXAGON_M2_mpy_nac_ll_s1: si_MInst_sisisi_nac_ll_s1 <"mpy", int_hexagon_M2_mpy_nac_ll_s1>; -def Hexagon_M2_mpy_nac_sat_ll_s1: +def HEXAGON_M2_mpy_nac_sat_ll_s1: si_MInst_sisisi_nac_sat_ll_s1 <"mpy", int_hexagon_M2_mpy_nac_sat_ll_s1>; -def Hexagon_M2_mpy_nac_sat_ll_s0: +def HEXAGON_M2_mpy_nac_sat_ll_s0: si_MInst_sisisi_nac_sat_ll <"mpy", int_hexagon_M2_mpy_nac_sat_ll_s0>; //Rx+=mpy(Rs.[H|L],Rt.[H|L:<<0|:<<1] -def Hexagon_M2_mpyd_acc_hh_s0: +def HEXAGON_M2_mpyd_acc_hh_s0: di_MInst_disisi_acc_hh <"mpy", int_hexagon_M2_mpyd_acc_hh_s0>; -def Hexagon_M2_mpyd_acc_hh_s1: +def HEXAGON_M2_mpyd_acc_hh_s1: 
di_MInst_disisi_acc_hh_s1 <"mpy", int_hexagon_M2_mpyd_acc_hh_s1>; -def Hexagon_M2_mpyd_acc_hl_s0: +def HEXAGON_M2_mpyd_acc_hl_s0: di_MInst_disisi_acc_hl <"mpy", int_hexagon_M2_mpyd_acc_hl_s0>; -def Hexagon_M2_mpyd_acc_hl_s1: +def HEXAGON_M2_mpyd_acc_hl_s1: di_MInst_disisi_acc_hl_s1 <"mpy", int_hexagon_M2_mpyd_acc_hl_s1>; -def Hexagon_M2_mpyd_acc_lh_s0: +def HEXAGON_M2_mpyd_acc_lh_s0: di_MInst_disisi_acc_lh <"mpy", int_hexagon_M2_mpyd_acc_lh_s0>; -def Hexagon_M2_mpyd_acc_lh_s1: +def HEXAGON_M2_mpyd_acc_lh_s1: di_MInst_disisi_acc_lh_s1 <"mpy", int_hexagon_M2_mpyd_acc_lh_s1>; -def Hexagon_M2_mpyd_acc_ll_s0: +def HEXAGON_M2_mpyd_acc_ll_s0: di_MInst_disisi_acc_ll <"mpy", int_hexagon_M2_mpyd_acc_ll_s0>; -def Hexagon_M2_mpyd_acc_ll_s1: +def HEXAGON_M2_mpyd_acc_ll_s1: di_MInst_disisi_acc_ll_s1 <"mpy", int_hexagon_M2_mpyd_acc_ll_s1>; //Rx-=mpy(Rs.[H|L],Rt.[H|L:<<0|:<<1] -def Hexagon_M2_mpyd_nac_hh_s0: +def HEXAGON_M2_mpyd_nac_hh_s0: di_MInst_disisi_nac_hh <"mpy", int_hexagon_M2_mpyd_nac_hh_s0>; -def Hexagon_M2_mpyd_nac_hh_s1: +def HEXAGON_M2_mpyd_nac_hh_s1: di_MInst_disisi_nac_hh_s1 <"mpy", int_hexagon_M2_mpyd_nac_hh_s1>; -def Hexagon_M2_mpyd_nac_hl_s0: +def HEXAGON_M2_mpyd_nac_hl_s0: di_MInst_disisi_nac_hl <"mpy", int_hexagon_M2_mpyd_nac_hl_s0>; -def Hexagon_M2_mpyd_nac_hl_s1: +def HEXAGON_M2_mpyd_nac_hl_s1: di_MInst_disisi_nac_hl_s1 <"mpy", int_hexagon_M2_mpyd_nac_hl_s1>; -def Hexagon_M2_mpyd_nac_lh_s0: +def HEXAGON_M2_mpyd_nac_lh_s0: di_MInst_disisi_nac_lh <"mpy", int_hexagon_M2_mpyd_nac_lh_s0>; -def Hexagon_M2_mpyd_nac_lh_s1: +def HEXAGON_M2_mpyd_nac_lh_s1: di_MInst_disisi_nac_lh_s1 <"mpy", int_hexagon_M2_mpyd_nac_lh_s1>; -def Hexagon_M2_mpyd_nac_ll_s0: +def HEXAGON_M2_mpyd_nac_ll_s0: di_MInst_disisi_nac_ll <"mpy", int_hexagon_M2_mpyd_nac_ll_s0>; -def Hexagon_M2_mpyd_nac_ll_s1: +def HEXAGON_M2_mpyd_nac_ll_s1: di_MInst_disisi_nac_ll_s1 <"mpy", int_hexagon_M2_mpyd_nac_ll_s1>; // MTYPE / MPYS / Scalar 16x16 multiply unsigned. 
//Rd=mpyu(Rs.[H|L],Rt.[H|L])[:<<0|:<<1] -def Hexagon_M2_mpyu_hh_s0: +def HEXAGON_M2_mpyu_hh_s0: si_MInst_sisi_hh <"mpyu", int_hexagon_M2_mpyu_hh_s0>; -def Hexagon_M2_mpyu_hh_s1: +def HEXAGON_M2_mpyu_hh_s1: si_MInst_sisi_hh_s1 <"mpyu", int_hexagon_M2_mpyu_hh_s1>; -def Hexagon_M2_mpyu_hl_s0: +def HEXAGON_M2_mpyu_hl_s0: si_MInst_sisi_hl <"mpyu", int_hexagon_M2_mpyu_hl_s0>; -def Hexagon_M2_mpyu_hl_s1: +def HEXAGON_M2_mpyu_hl_s1: si_MInst_sisi_hl_s1 <"mpyu", int_hexagon_M2_mpyu_hl_s1>; -def Hexagon_M2_mpyu_lh_s0: +def HEXAGON_M2_mpyu_lh_s0: si_MInst_sisi_lh <"mpyu", int_hexagon_M2_mpyu_lh_s0>; -def Hexagon_M2_mpyu_lh_s1: +def HEXAGON_M2_mpyu_lh_s1: si_MInst_sisi_lh_s1 <"mpyu", int_hexagon_M2_mpyu_lh_s1>; -def Hexagon_M2_mpyu_ll_s0: +def HEXAGON_M2_mpyu_ll_s0: si_MInst_sisi_ll <"mpyu", int_hexagon_M2_mpyu_ll_s0>; -def Hexagon_M2_mpyu_ll_s1: +def HEXAGON_M2_mpyu_ll_s1: si_MInst_sisi_ll_s1 <"mpyu", int_hexagon_M2_mpyu_ll_s1>; //Rdd=mpyu(Rs.[H|L],Rt.[H|L])[:<<0|:<<1] -def Hexagon_M2_mpyud_hh_s0: +def HEXAGON_M2_mpyud_hh_s0: di_MInst_sisi_hh <"mpyu", int_hexagon_M2_mpyud_hh_s0>; -def Hexagon_M2_mpyud_hh_s1: +def HEXAGON_M2_mpyud_hh_s1: di_MInst_sisi_hh_s1 <"mpyu", int_hexagon_M2_mpyud_hh_s1>; -def Hexagon_M2_mpyud_hl_s0: +def HEXAGON_M2_mpyud_hl_s0: di_MInst_sisi_hl <"mpyu", int_hexagon_M2_mpyud_hl_s0>; -def Hexagon_M2_mpyud_hl_s1: +def HEXAGON_M2_mpyud_hl_s1: di_MInst_sisi_hl_s1 <"mpyu", int_hexagon_M2_mpyud_hl_s1>; -def Hexagon_M2_mpyud_lh_s0: +def HEXAGON_M2_mpyud_lh_s0: di_MInst_sisi_lh <"mpyu", int_hexagon_M2_mpyud_lh_s0>; -def Hexagon_M2_mpyud_lh_s1: +def HEXAGON_M2_mpyud_lh_s1: di_MInst_sisi_lh_s1 <"mpyu", int_hexagon_M2_mpyud_lh_s1>; -def Hexagon_M2_mpyud_ll_s0: +def HEXAGON_M2_mpyud_ll_s0: di_MInst_sisi_ll <"mpyu", int_hexagon_M2_mpyud_ll_s0>; -def Hexagon_M2_mpyud_ll_s1: +def HEXAGON_M2_mpyud_ll_s1: di_MInst_sisi_ll_s1 <"mpyu", int_hexagon_M2_mpyud_ll_s1>; //Rd+=mpyu(Rs.[H|L],Rt.[H|L])[:<<0|:<<1] -def Hexagon_M2_mpyu_acc_hh_s0: +def HEXAGON_M2_mpyu_acc_hh_s0: si_MInst_sisisi_acc_hh <"mpyu", int_hexagon_M2_mpyu_acc_hh_s0>; -def Hexagon_M2_mpyu_acc_hh_s1: +def HEXAGON_M2_mpyu_acc_hh_s1: si_MInst_sisisi_acc_hh_s1 <"mpyu", int_hexagon_M2_mpyu_acc_hh_s1>; -def Hexagon_M2_mpyu_acc_hl_s0: +def HEXAGON_M2_mpyu_acc_hl_s0: si_MInst_sisisi_acc_hl <"mpyu", int_hexagon_M2_mpyu_acc_hl_s0>; -def Hexagon_M2_mpyu_acc_hl_s1: +def HEXAGON_M2_mpyu_acc_hl_s1: si_MInst_sisisi_acc_hl_s1 <"mpyu", int_hexagon_M2_mpyu_acc_hl_s1>; -def Hexagon_M2_mpyu_acc_lh_s0: +def HEXAGON_M2_mpyu_acc_lh_s0: si_MInst_sisisi_acc_lh <"mpyu", int_hexagon_M2_mpyu_acc_lh_s0>; -def Hexagon_M2_mpyu_acc_lh_s1: +def HEXAGON_M2_mpyu_acc_lh_s1: si_MInst_sisisi_acc_lh_s1 <"mpyu", int_hexagon_M2_mpyu_acc_lh_s1>; -def Hexagon_M2_mpyu_acc_ll_s0: +def HEXAGON_M2_mpyu_acc_ll_s0: si_MInst_sisisi_acc_ll <"mpyu", int_hexagon_M2_mpyu_acc_ll_s0>; -def Hexagon_M2_mpyu_acc_ll_s1: +def HEXAGON_M2_mpyu_acc_ll_s1: si_MInst_sisisi_acc_ll_s1 <"mpyu", int_hexagon_M2_mpyu_acc_ll_s1>; //Rd+=mpyu(Rs.[H|L],Rt.[H|L])[:<<0|:<<1] -def Hexagon_M2_mpyu_nac_hh_s0: +def HEXAGON_M2_mpyu_nac_hh_s0: si_MInst_sisisi_nac_hh <"mpyu", int_hexagon_M2_mpyu_nac_hh_s0>; -def Hexagon_M2_mpyu_nac_hh_s1: +def HEXAGON_M2_mpyu_nac_hh_s1: si_MInst_sisisi_nac_hh_s1 <"mpyu", int_hexagon_M2_mpyu_nac_hh_s1>; -def Hexagon_M2_mpyu_nac_hl_s0: +def HEXAGON_M2_mpyu_nac_hl_s0: si_MInst_sisisi_nac_hl <"mpyu", int_hexagon_M2_mpyu_nac_hl_s0>; -def Hexagon_M2_mpyu_nac_hl_s1: +def HEXAGON_M2_mpyu_nac_hl_s1: si_MInst_sisisi_nac_hl_s1 <"mpyu", int_hexagon_M2_mpyu_nac_hl_s1>; -def 
Hexagon_M2_mpyu_nac_lh_s0: +def HEXAGON_M2_mpyu_nac_lh_s0: si_MInst_sisisi_nac_lh <"mpyu", int_hexagon_M2_mpyu_nac_lh_s0>; -def Hexagon_M2_mpyu_nac_lh_s1: +def HEXAGON_M2_mpyu_nac_lh_s1: si_MInst_sisisi_nac_lh_s1 <"mpyu", int_hexagon_M2_mpyu_nac_lh_s1>; -def Hexagon_M2_mpyu_nac_ll_s0: +def HEXAGON_M2_mpyu_nac_ll_s0: si_MInst_sisisi_nac_ll <"mpyu", int_hexagon_M2_mpyu_nac_ll_s0>; -def Hexagon_M2_mpyu_nac_ll_s1: +def HEXAGON_M2_mpyu_nac_ll_s1: si_MInst_sisisi_nac_ll_s1 <"mpyu", int_hexagon_M2_mpyu_nac_ll_s1>; //Rdd+=mpyu(Rs.[H|L],Rt.[H|L])[:<<0|:<<1] -def Hexagon_M2_mpyud_acc_hh_s0: +def HEXAGON_M2_mpyud_acc_hh_s0: di_MInst_disisi_acc_hh <"mpyu", int_hexagon_M2_mpyud_acc_hh_s0>; -def Hexagon_M2_mpyud_acc_hh_s1: +def HEXAGON_M2_mpyud_acc_hh_s1: di_MInst_disisi_acc_hh_s1 <"mpyu", int_hexagon_M2_mpyud_acc_hh_s1>; -def Hexagon_M2_mpyud_acc_hl_s0: +def HEXAGON_M2_mpyud_acc_hl_s0: di_MInst_disisi_acc_hl <"mpyu", int_hexagon_M2_mpyud_acc_hl_s0>; -def Hexagon_M2_mpyud_acc_hl_s1: +def HEXAGON_M2_mpyud_acc_hl_s1: di_MInst_disisi_acc_hl_s1 <"mpyu", int_hexagon_M2_mpyud_acc_hl_s1>; -def Hexagon_M2_mpyud_acc_lh_s0: +def HEXAGON_M2_mpyud_acc_lh_s0: di_MInst_disisi_acc_lh <"mpyu", int_hexagon_M2_mpyud_acc_lh_s0>; -def Hexagon_M2_mpyud_acc_lh_s1: +def HEXAGON_M2_mpyud_acc_lh_s1: di_MInst_disisi_acc_lh_s1 <"mpyu", int_hexagon_M2_mpyud_acc_lh_s1>; -def Hexagon_M2_mpyud_acc_ll_s0: +def HEXAGON_M2_mpyud_acc_ll_s0: di_MInst_disisi_acc_ll <"mpyu", int_hexagon_M2_mpyud_acc_ll_s0>; -def Hexagon_M2_mpyud_acc_ll_s1: +def HEXAGON_M2_mpyud_acc_ll_s1: di_MInst_disisi_acc_ll_s1 <"mpyu", int_hexagon_M2_mpyud_acc_ll_s1>; //Rdd-=mpyu(Rs.[H|L],Rt.[H|L])[:<<0|:<<1] -def Hexagon_M2_mpyud_nac_hh_s0: +def HEXAGON_M2_mpyud_nac_hh_s0: di_MInst_disisi_nac_hh <"mpyu", int_hexagon_M2_mpyud_nac_hh_s0>; -def Hexagon_M2_mpyud_nac_hh_s1: +def HEXAGON_M2_mpyud_nac_hh_s1: di_MInst_disisi_nac_hh_s1 <"mpyu", int_hexagon_M2_mpyud_nac_hh_s1>; -def Hexagon_M2_mpyud_nac_hl_s0: +def HEXAGON_M2_mpyud_nac_hl_s0: di_MInst_disisi_nac_hl <"mpyu", int_hexagon_M2_mpyud_nac_hl_s0>; -def Hexagon_M2_mpyud_nac_hl_s1: +def HEXAGON_M2_mpyud_nac_hl_s1: di_MInst_disisi_nac_hl_s1 <"mpyu", int_hexagon_M2_mpyud_nac_hl_s1>; -def Hexagon_M2_mpyud_nac_lh_s0: +def HEXAGON_M2_mpyud_nac_lh_s0: di_MInst_disisi_nac_lh <"mpyu", int_hexagon_M2_mpyud_nac_lh_s0>; -def Hexagon_M2_mpyud_nac_lh_s1: +def HEXAGON_M2_mpyud_nac_lh_s1: di_MInst_disisi_nac_lh_s1 <"mpyu", int_hexagon_M2_mpyud_nac_lh_s1>; -def Hexagon_M2_mpyud_nac_ll_s0: +def HEXAGON_M2_mpyud_nac_ll_s0: di_MInst_disisi_nac_ll <"mpyu", int_hexagon_M2_mpyud_nac_ll_s0>; -def Hexagon_M2_mpyud_nac_ll_s1: +def HEXAGON_M2_mpyud_nac_ll_s1: di_MInst_disisi_nac_ll_s1 <"mpyu", int_hexagon_M2_mpyud_nac_ll_s1>; @@ -2864,15 +2890,15 @@ def Hexagon_M2_mpyud_nac_ll_s1: *********************************************************************/ // MTYPE / VB / Vector reduce add unsigned bytes. -def Hexagon_A2_vraddub: +def HEXAGON_A2_vraddub: di_MInst_didi <"vraddub", int_hexagon_A2_vraddub>; -def Hexagon_A2_vraddub_acc: +def HEXAGON_A2_vraddub_acc: di_MInst_dididi_acc <"vraddub", int_hexagon_A2_vraddub_acc>; // MTYPE / VB / Vector sum of absolute differences unsigned bytes. 
-def Hexagon_A2_vrsadub: +def HEXAGON_A2_vrsadub: di_MInst_didi <"vrsadub", int_hexagon_A2_vrsadub>; -def Hexagon_A2_vrsadub_acc: +def HEXAGON_A2_vrsadub_acc: di_MInst_dididi_acc <"vrsadub", int_hexagon_A2_vrsadub_acc>; /******************************************************************** @@ -2880,56 +2906,56 @@ def Hexagon_A2_vrsadub_acc: *********************************************************************/ // MTYPE / VH / Vector dual multiply. -def Hexagon_M2_vdmpys_s1: +def HEXAGON_M2_vdmpys_s1: di_MInst_didi_s1_sat <"vdmpy", int_hexagon_M2_vdmpys_s1>; -def Hexagon_M2_vdmpys_s0: +def HEXAGON_M2_vdmpys_s0: di_MInst_didi_sat <"vdmpy", int_hexagon_M2_vdmpys_s0>; -def Hexagon_M2_vdmacs_s1: +def HEXAGON_M2_vdmacs_s1: di_MInst_dididi_acc_s1_sat <"vdmpy", int_hexagon_M2_vdmacs_s1>; -def Hexagon_M2_vdmacs_s0: +def HEXAGON_M2_vdmacs_s0: di_MInst_dididi_acc_sat <"vdmpy", int_hexagon_M2_vdmacs_s0>; // MTYPE / VH / Vector dual multiply with round and pack. -def Hexagon_M2_vdmpyrs_s0: +def HEXAGON_M2_vdmpyrs_s0: si_MInst_didi_rnd_sat <"vdmpy", int_hexagon_M2_vdmpyrs_s0>; -def Hexagon_M2_vdmpyrs_s1: +def HEXAGON_M2_vdmpyrs_s1: si_MInst_didi_s1_rnd_sat <"vdmpy", int_hexagon_M2_vdmpyrs_s1>; // MTYPE / VH / Vector multiply even halfwords. -def Hexagon_M2_vmpy2es_s1: +def HEXAGON_M2_vmpy2es_s1: di_MInst_didi_s1_sat <"vmpyeh", int_hexagon_M2_vmpy2es_s1>; -def Hexagon_M2_vmpy2es_s0: +def HEXAGON_M2_vmpy2es_s0: di_MInst_didi_sat <"vmpyeh", int_hexagon_M2_vmpy2es_s0>; -def Hexagon_M2_vmac2es: +def HEXAGON_M2_vmac2es: di_MInst_dididi_acc <"vmpyeh", int_hexagon_M2_vmac2es>; -def Hexagon_M2_vmac2es_s1: +def HEXAGON_M2_vmac2es_s1: di_MInst_dididi_acc_s1_sat <"vmpyeh", int_hexagon_M2_vmac2es_s1>; -def Hexagon_M2_vmac2es_s0: +def HEXAGON_M2_vmac2es_s0: di_MInst_dididi_acc_sat <"vmpyeh", int_hexagon_M2_vmac2es_s0>; // MTYPE / VH / Vector multiply halfwords. -def Hexagon_M2_vmpy2s_s0: +def HEXAGON_M2_vmpy2s_s0: di_MInst_sisi_sat <"vmpyh", int_hexagon_M2_vmpy2s_s0>; -def Hexagon_M2_vmpy2s_s1: +def HEXAGON_M2_vmpy2s_s1: di_MInst_sisi_s1_sat <"vmpyh", int_hexagon_M2_vmpy2s_s1>; -def Hexagon_M2_vmac2: +def HEXAGON_M2_vmac2: di_MInst_disisi_acc <"vmpyh", int_hexagon_M2_vmac2>; -def Hexagon_M2_vmac2s_s0: +def HEXAGON_M2_vmac2s_s0: di_MInst_disisi_acc_sat <"vmpyh", int_hexagon_M2_vmac2s_s0>; -def Hexagon_M2_vmac2s_s1: +def HEXAGON_M2_vmac2s_s1: di_MInst_disisi_acc_s1_sat <"vmpyh", int_hexagon_M2_vmac2s_s1>; // MTYPE / VH / Vector multiply halfwords with round and pack. -def Hexagon_M2_vmpy2s_s0pack: +def HEXAGON_M2_vmpy2s_s0pack: si_MInst_sisi_rnd_sat <"vmpyh", int_hexagon_M2_vmpy2s_s0pack>; -def Hexagon_M2_vmpy2s_s1pack: +def HEXAGON_M2_vmpy2s_s1pack: si_MInst_sisi_s1_rnd_sat <"vmpyh", int_hexagon_M2_vmpy2s_s1pack>; // MTYPE / VH / Vector reduce multiply halfwords. // Rxx32+=vrmpyh(Rss32,Rtt32) -def Hexagon_M2_vrmpy_s0: +def HEXAGON_M2_vrmpy_s0: di_MInst_didi <"vrmpyh", int_hexagon_M2_vrmpy_s0>; -def Hexagon_M2_vrmac_s0: +def HEXAGON_M2_vrmac_s0: di_MInst_dididi_acc <"vrmpyh", int_hexagon_M2_vrmac_s0>; @@ -2938,25 +2964,25 @@ def Hexagon_M2_vrmac_s0: *********************************************************************/ // STYPE / ALU / Absolute value. -def Hexagon_A2_abs: +def HEXAGON_A2_abs: si_SInst_si <"abs", int_hexagon_A2_abs>; -def Hexagon_A2_absp: +def HEXAGON_A2_absp: di_SInst_di <"abs", int_hexagon_A2_absp>; -def Hexagon_A2_abssat: +def HEXAGON_A2_abssat: si_SInst_si_sat <"abs", int_hexagon_A2_abssat>; // STYPE / ALU / Negate. 
-def Hexagon_A2_negp: +def HEXAGON_A2_negp: di_SInst_di <"neg", int_hexagon_A2_negp>; -def Hexagon_A2_negsat: +def HEXAGON_A2_negsat: si_SInst_si_sat <"neg", int_hexagon_A2_negsat>; // STYPE / ALU / Logical Not. -def Hexagon_A2_notp: +def HEXAGON_A2_notp: di_SInst_di <"not", int_hexagon_A2_notp>; // STYPE / ALU / Sign extend word to doubleword. -def Hexagon_A2_sxtw: +def HEXAGON_A2_sxtw: di_SInst_si <"sxtw", int_hexagon_A2_sxtw>; @@ -2965,88 +2991,88 @@ def Hexagon_A2_sxtw: *********************************************************************/ // STYPE / BIT / Count leading. -def Hexagon_S2_cl0: +def HEXAGON_S2_cl0: si_SInst_si <"cl0", int_hexagon_S2_cl0>; -def Hexagon_S2_cl0p: +def HEXAGON_S2_cl0p: si_SInst_di <"cl0", int_hexagon_S2_cl0p>; -def Hexagon_S2_cl1: +def HEXAGON_S2_cl1: si_SInst_si <"cl1", int_hexagon_S2_cl1>; -def Hexagon_S2_cl1p: +def HEXAGON_S2_cl1p: si_SInst_di <"cl1", int_hexagon_S2_cl1p>; -def Hexagon_S2_clb: +def HEXAGON_S2_clb: si_SInst_si <"clb", int_hexagon_S2_clb>; -def Hexagon_S2_clbp: +def HEXAGON_S2_clbp: si_SInst_di <"clb", int_hexagon_S2_clbp>; -def Hexagon_S2_clbnorm: +def HEXAGON_S2_clbnorm: si_SInst_si <"normamt", int_hexagon_S2_clbnorm>; // STYPE / BIT / Count trailing. -def Hexagon_S2_ct0: +def HEXAGON_S2_ct0: si_SInst_si <"ct0", int_hexagon_S2_ct0>; -def Hexagon_S2_ct1: +def HEXAGON_S2_ct1: si_SInst_si <"ct1", int_hexagon_S2_ct1>; // STYPE / BIT / Compare bit mask. -def HEXAGON_C2_bitsclr: +def Hexagon_C2_bitsclr: qi_SInst_sisi <"bitsclr", int_hexagon_C2_bitsclr>; -def HEXAGON_C2_bitsclri: +def Hexagon_C2_bitsclri: qi_SInst_siu6 <"bitsclr", int_hexagon_C2_bitsclri>; -def HEXAGON_C2_bitsset: +def Hexagon_C2_bitsset: qi_SInst_sisi <"bitsset", int_hexagon_C2_bitsset>; // STYPE / BIT / Extract unsigned. // Rd[d][32/64]=extractu(Rs[s],Rt[t],[imm]) -def Hexagon_S2_extractu: +def HEXAGON_S2_extractu: si_SInst_siu5u5 <"extractu",int_hexagon_S2_extractu>; -def Hexagon_S2_extractu_rp: +def HEXAGON_S2_extractu_rp: si_SInst_sidi <"extractu",int_hexagon_S2_extractu_rp>; -def Hexagon_S2_extractup: +def HEXAGON_S2_extractup: di_SInst_diu6u6 <"extractu",int_hexagon_S2_extractup>; -def Hexagon_S2_extractup_rp: +def HEXAGON_S2_extractup_rp: di_SInst_didi <"extractu",int_hexagon_S2_extractup_rp>; // STYPE / BIT / Insert bitfield. -def HEXAGON_S2_insert: +def Hexagon_S2_insert: si_SInst_sisiu5u5 <"insert", int_hexagon_S2_insert>; -def HEXAGON_S2_insert_rp: +def Hexagon_S2_insert_rp: si_SInst_sisidi <"insert", int_hexagon_S2_insert_rp>; -def HEXAGON_S2_insertp: +def Hexagon_S2_insertp: di_SInst_didiu6u6 <"insert", int_hexagon_S2_insertp>; -def HEXAGON_S2_insertp_rp: +def Hexagon_S2_insertp_rp: di_SInst_dididi <"insert", int_hexagon_S2_insertp_rp>; // STYPE / BIT / Innterleave/deinterleave. -def HEXAGON_S2_interleave: +def Hexagon_S2_interleave: di_SInst_di <"interleave", int_hexagon_S2_interleave>; -def HEXAGON_S2_deinterleave: +def Hexagon_S2_deinterleave: di_SInst_di <"deinterleave", int_hexagon_S2_deinterleave>; // STYPE / BIT / Linear feedback-shift Iteration. -def HEXAGON_S2_lfsp: +def Hexagon_S2_lfsp: di_SInst_didi <"lfs", int_hexagon_S2_lfsp>; // STYPE / BIT / Bit reverse. -def HEXAGON_S2_brev: +def Hexagon_S2_brev: si_SInst_si <"brev", int_hexagon_S2_brev>; // STYPE / BIT / Set/Clear/Toggle Bit. 
-def Hexagon_S2_setbit_i: +def HEXAGON_S2_setbit_i: si_SInst_siu5 <"setbit", int_hexagon_S2_setbit_i>; -def Hexagon_S2_togglebit_i: +def HEXAGON_S2_togglebit_i: si_SInst_siu5 <"togglebit", int_hexagon_S2_togglebit_i>; -def Hexagon_S2_clrbit_i: +def HEXAGON_S2_clrbit_i: si_SInst_siu5 <"clrbit", int_hexagon_S2_clrbit_i>; -def Hexagon_S2_setbit_r: +def HEXAGON_S2_setbit_r: si_SInst_sisi <"setbit", int_hexagon_S2_setbit_r>; -def Hexagon_S2_togglebit_r: +def HEXAGON_S2_togglebit_r: si_SInst_sisi <"togglebit", int_hexagon_S2_togglebit_r>; -def Hexagon_S2_clrbit_r: +def HEXAGON_S2_clrbit_r: si_SInst_sisi <"clrbit", int_hexagon_S2_clrbit_r>; // STYPE / BIT / Test Bit. -def Hexagon_S2_tstbit_i: +def HEXAGON_S2_tstbit_i: qi_SInst_siu5 <"tstbit", int_hexagon_S2_tstbit_i>; -def Hexagon_S2_tstbit_r: +def HEXAGON_S2_tstbit_r: qi_SInst_sisi <"tstbit", int_hexagon_S2_tstbit_r>; @@ -3055,11 +3081,11 @@ def Hexagon_S2_tstbit_r: *********************************************************************/ // STYPE / COMPLEX / Vector Complex conjugate. -def Hexagon_A2_vconj: +def HEXAGON_A2_vconj: di_SInst_di_sat <"vconj", int_hexagon_A2_vconj>; // STYPE / COMPLEX / Vector Complex rotate. -def Hexagon_S2_vcrotate: +def HEXAGON_S2_vcrotate: di_SInst_disi <"vcrotate",int_hexagon_S2_vcrotate>; @@ -3068,102 +3094,102 @@ def Hexagon_S2_vcrotate: *********************************************************************/ // STYPE / PERM / Saturate. -def Hexagon_A2_sat: +def HEXAGON_A2_sat: si_SInst_di <"sat", int_hexagon_A2_sat>; -def Hexagon_A2_satb: +def HEXAGON_A2_satb: si_SInst_si <"satb", int_hexagon_A2_satb>; -def Hexagon_A2_sath: +def HEXAGON_A2_sath: si_SInst_si <"sath", int_hexagon_A2_sath>; -def Hexagon_A2_satub: +def HEXAGON_A2_satub: si_SInst_si <"satub", int_hexagon_A2_satub>; -def Hexagon_A2_satuh: +def HEXAGON_A2_satuh: si_SInst_si <"satuh", int_hexagon_A2_satuh>; // STYPE / PERM / Swizzle bytes. -def Hexagon_A2_swiz: +def HEXAGON_A2_swiz: si_SInst_si <"swiz", int_hexagon_A2_swiz>; // STYPE / PERM / Vector align. // Need custom lowering -def Hexagon_S2_valignib: +def HEXAGON_S2_valignib: di_SInst_didiu3 <"valignb", int_hexagon_S2_valignib>; -def Hexagon_S2_valignrb: +def HEXAGON_S2_valignrb: di_SInst_didiqi <"valignb", int_hexagon_S2_valignrb>; // STYPE / PERM / Vector round and pack. -def Hexagon_S2_vrndpackwh: +def HEXAGON_S2_vrndpackwh: si_SInst_di <"vrndwh", int_hexagon_S2_vrndpackwh>; -def Hexagon_S2_vrndpackwhs: +def HEXAGON_S2_vrndpackwhs: si_SInst_di_sat <"vrndwh", int_hexagon_S2_vrndpackwhs>; // STYPE / PERM / Vector saturate and pack. -def Hexagon_S2_svsathb: +def HEXAGON_S2_svsathb: si_SInst_si <"vsathb", int_hexagon_S2_svsathb>; -def Hexagon_S2_vsathb: +def HEXAGON_S2_vsathb: si_SInst_di <"vsathb", int_hexagon_S2_vsathb>; -def Hexagon_S2_svsathub: +def HEXAGON_S2_svsathub: si_SInst_si <"vsathub", int_hexagon_S2_svsathub>; -def Hexagon_S2_vsathub: +def HEXAGON_S2_vsathub: si_SInst_di <"vsathub", int_hexagon_S2_vsathub>; -def Hexagon_S2_vsatwh: +def HEXAGON_S2_vsatwh: si_SInst_di <"vsatwh", int_hexagon_S2_vsatwh>; -def Hexagon_S2_vsatwuh: +def HEXAGON_S2_vsatwuh: si_SInst_di <"vsatwuh", int_hexagon_S2_vsatwuh>; // STYPE / PERM / Vector saturate without pack. 
-def Hexagon_S2_vsathb_nopack: +def HEXAGON_S2_vsathb_nopack: di_SInst_di <"vsathb", int_hexagon_S2_vsathb_nopack>; -def Hexagon_S2_vsathub_nopack: +def HEXAGON_S2_vsathub_nopack: di_SInst_di <"vsathub", int_hexagon_S2_vsathub_nopack>; -def Hexagon_S2_vsatwh_nopack: +def HEXAGON_S2_vsatwh_nopack: di_SInst_di <"vsatwh", int_hexagon_S2_vsatwh_nopack>; -def Hexagon_S2_vsatwuh_nopack: +def HEXAGON_S2_vsatwuh_nopack: di_SInst_di <"vsatwuh", int_hexagon_S2_vsatwuh_nopack>; // STYPE / PERM / Vector shuffle. -def Hexagon_S2_shuffeb: +def HEXAGON_S2_shuffeb: di_SInst_didi <"shuffeb", int_hexagon_S2_shuffeb>; -def Hexagon_S2_shuffeh: +def HEXAGON_S2_shuffeh: di_SInst_didi <"shuffeh", int_hexagon_S2_shuffeh>; -def Hexagon_S2_shuffob: +def HEXAGON_S2_shuffob: di_SInst_didi <"shuffob", int_hexagon_S2_shuffob>; -def Hexagon_S2_shuffoh: +def HEXAGON_S2_shuffoh: di_SInst_didi <"shuffoh", int_hexagon_S2_shuffoh>; // STYPE / PERM / Vector splat bytes. -def Hexagon_S2_vsplatrb: +def HEXAGON_S2_vsplatrb: si_SInst_si <"vsplatb", int_hexagon_S2_vsplatrb>; // STYPE / PERM / Vector splat halfwords. -def Hexagon_S2_vsplatrh: +def HEXAGON_S2_vsplatrh: di_SInst_si <"vsplath", int_hexagon_S2_vsplatrh>; // STYPE / PERM / Vector splice. -def HEXAGON_S2_vsplicerb: +def Hexagon_S2_vsplicerb: di_SInst_didiqi <"vspliceb",int_hexagon_S2_vsplicerb>; -def HEXAGON_S2_vspliceib: +def Hexagon_S2_vspliceib: di_SInst_didiu3 <"vspliceb",int_hexagon_S2_vspliceib>; // STYPE / PERM / Sign extend. -def Hexagon_S2_vsxtbh: +def HEXAGON_S2_vsxtbh: di_SInst_si <"vsxtbh", int_hexagon_S2_vsxtbh>; -def Hexagon_S2_vsxthw: +def HEXAGON_S2_vsxthw: di_SInst_si <"vsxthw", int_hexagon_S2_vsxthw>; // STYPE / PERM / Truncate. -def Hexagon_S2_vtrunehb: +def HEXAGON_S2_vtrunehb: si_SInst_di <"vtrunehb",int_hexagon_S2_vtrunehb>; -def Hexagon_S2_vtrunohb: +def HEXAGON_S2_vtrunohb: si_SInst_di <"vtrunohb",int_hexagon_S2_vtrunohb>; -def Hexagon_S2_vtrunewh: +def HEXAGON_S2_vtrunewh: di_SInst_didi <"vtrunewh",int_hexagon_S2_vtrunewh>; -def Hexagon_S2_vtrunowh: +def HEXAGON_S2_vtrunowh: di_SInst_didi <"vtrunowh",int_hexagon_S2_vtrunowh>; // STYPE / PERM / Zero extend. -def Hexagon_S2_vzxtbh: +def HEXAGON_S2_vzxtbh: di_SInst_si <"vzxtbh", int_hexagon_S2_vzxtbh>; -def Hexagon_S2_vzxthw: +def HEXAGON_S2_vzxthw: di_SInst_si <"vzxthw", int_hexagon_S2_vzxthw>; @@ -3172,17 +3198,17 @@ def Hexagon_S2_vzxthw: *********************************************************************/ // STYPE / PRED / Mask generate from predicate. -def Hexagon_C2_mask: +def HEXAGON_C2_mask: di_SInst_qi <"mask", int_hexagon_C2_mask>; // STYPE / PRED / Predicate transfer. -def Hexagon_C2_tfrpr: +def HEXAGON_C2_tfrpr: si_SInst_qi <"", int_hexagon_C2_tfrpr>; -def Hexagon_C2_tfrrp: +def HEXAGON_C2_tfrrp: qi_SInst_si <"", int_hexagon_C2_tfrrp>; // STYPE / PRED / Viterbi pack even and odd predicate bits. -def Hexagon_C2_vitpack: +def HEXAGON_C2_vitpack: si_SInst_qiqi <"vitpack",int_hexagon_C2_vitpack>; @@ -3191,202 +3217,202 @@ def Hexagon_C2_vitpack: *********************************************************************/ // STYPE / SHIFT / Shift by immediate. 
-def Hexagon_S2_asl_i_r: +def HEXAGON_S2_asl_i_r: si_SInst_siu5 <"asl", int_hexagon_S2_asl_i_r>; -def Hexagon_S2_asr_i_r: +def HEXAGON_S2_asr_i_r: si_SInst_siu5 <"asr", int_hexagon_S2_asr_i_r>; -def Hexagon_S2_lsr_i_r: +def HEXAGON_S2_lsr_i_r: si_SInst_siu5 <"lsr", int_hexagon_S2_lsr_i_r>; -def Hexagon_S2_asl_i_p: +def HEXAGON_S2_asl_i_p: di_SInst_diu6 <"asl", int_hexagon_S2_asl_i_p>; -def Hexagon_S2_asr_i_p: +def HEXAGON_S2_asr_i_p: di_SInst_diu6 <"asr", int_hexagon_S2_asr_i_p>; -def Hexagon_S2_lsr_i_p: +def HEXAGON_S2_lsr_i_p: di_SInst_diu6 <"lsr", int_hexagon_S2_lsr_i_p>; // STYPE / SHIFT / Shift by immediate and accumulate. -def Hexagon_S2_asl_i_r_acc: +def HEXAGON_S2_asl_i_r_acc: si_SInst_sisiu5_acc <"asl", int_hexagon_S2_asl_i_r_acc>; -def Hexagon_S2_asr_i_r_acc: +def HEXAGON_S2_asr_i_r_acc: si_SInst_sisiu5_acc <"asr", int_hexagon_S2_asr_i_r_acc>; -def Hexagon_S2_lsr_i_r_acc: +def HEXAGON_S2_lsr_i_r_acc: si_SInst_sisiu5_acc <"lsr", int_hexagon_S2_lsr_i_r_acc>; -def Hexagon_S2_asl_i_r_nac: +def HEXAGON_S2_asl_i_r_nac: si_SInst_sisiu5_nac <"asl", int_hexagon_S2_asl_i_r_nac>; -def Hexagon_S2_asr_i_r_nac: +def HEXAGON_S2_asr_i_r_nac: si_SInst_sisiu5_nac <"asr", int_hexagon_S2_asr_i_r_nac>; -def Hexagon_S2_lsr_i_r_nac: +def HEXAGON_S2_lsr_i_r_nac: si_SInst_sisiu5_nac <"lsr", int_hexagon_S2_lsr_i_r_nac>; -def Hexagon_S2_asl_i_p_acc: +def HEXAGON_S2_asl_i_p_acc: di_SInst_didiu6_acc <"asl", int_hexagon_S2_asl_i_p_acc>; -def Hexagon_S2_asr_i_p_acc: +def HEXAGON_S2_asr_i_p_acc: di_SInst_didiu6_acc <"asr", int_hexagon_S2_asr_i_p_acc>; -def Hexagon_S2_lsr_i_p_acc: +def HEXAGON_S2_lsr_i_p_acc: di_SInst_didiu6_acc <"lsr", int_hexagon_S2_lsr_i_p_acc>; -def Hexagon_S2_asl_i_p_nac: +def HEXAGON_S2_asl_i_p_nac: di_SInst_didiu6_nac <"asl", int_hexagon_S2_asl_i_p_nac>; -def Hexagon_S2_asr_i_p_nac: +def HEXAGON_S2_asr_i_p_nac: di_SInst_didiu6_nac <"asr", int_hexagon_S2_asr_i_p_nac>; -def Hexagon_S2_lsr_i_p_nac: +def HEXAGON_S2_lsr_i_p_nac: di_SInst_didiu6_nac <"lsr", int_hexagon_S2_lsr_i_p_nac>; // STYPE / SHIFT / Shift by immediate and add. -def Hexagon_S2_addasl_rrri: +def HEXAGON_S2_addasl_rrri: si_SInst_sisiu3 <"addasl", int_hexagon_S2_addasl_rrri>; // STYPE / SHIFT / Shift by immediate and logical. 
-def Hexagon_S2_asl_i_r_and: +def HEXAGON_S2_asl_i_r_and: si_SInst_sisiu5_and <"asl", int_hexagon_S2_asl_i_r_and>; -def Hexagon_S2_asr_i_r_and: +def HEXAGON_S2_asr_i_r_and: si_SInst_sisiu5_and <"asr", int_hexagon_S2_asr_i_r_and>; -def Hexagon_S2_lsr_i_r_and: +def HEXAGON_S2_lsr_i_r_and: si_SInst_sisiu5_and <"lsr", int_hexagon_S2_lsr_i_r_and>; -def Hexagon_S2_asl_i_r_xacc: +def HEXAGON_S2_asl_i_r_xacc: si_SInst_sisiu5_xor <"asl", int_hexagon_S2_asl_i_r_xacc>; -def Hexagon_S2_lsr_i_r_xacc: +def HEXAGON_S2_lsr_i_r_xacc: si_SInst_sisiu5_xor <"lsr", int_hexagon_S2_lsr_i_r_xacc>; -def Hexagon_S2_asl_i_r_or: +def HEXAGON_S2_asl_i_r_or: si_SInst_sisiu5_or <"asl", int_hexagon_S2_asl_i_r_or>; -def Hexagon_S2_asr_i_r_or: +def HEXAGON_S2_asr_i_r_or: si_SInst_sisiu5_or <"asr", int_hexagon_S2_asr_i_r_or>; -def Hexagon_S2_lsr_i_r_or: +def HEXAGON_S2_lsr_i_r_or: si_SInst_sisiu5_or <"lsr", int_hexagon_S2_lsr_i_r_or>; -def Hexagon_S2_asl_i_p_and: +def HEXAGON_S2_asl_i_p_and: di_SInst_didiu6_and <"asl", int_hexagon_S2_asl_i_p_and>; -def Hexagon_S2_asr_i_p_and: +def HEXAGON_S2_asr_i_p_and: di_SInst_didiu6_and <"asr", int_hexagon_S2_asr_i_p_and>; -def Hexagon_S2_lsr_i_p_and: +def HEXAGON_S2_lsr_i_p_and: di_SInst_didiu6_and <"lsr", int_hexagon_S2_lsr_i_p_and>; -def Hexagon_S2_asl_i_p_xacc: +def HEXAGON_S2_asl_i_p_xacc: di_SInst_didiu6_xor <"asl", int_hexagon_S2_asl_i_p_xacc>; -def Hexagon_S2_lsr_i_p_xacc: +def HEXAGON_S2_lsr_i_p_xacc: di_SInst_didiu6_xor <"lsr", int_hexagon_S2_lsr_i_p_xacc>; -def Hexagon_S2_asl_i_p_or: +def HEXAGON_S2_asl_i_p_or: di_SInst_didiu6_or <"asl", int_hexagon_S2_asl_i_p_or>; -def Hexagon_S2_asr_i_p_or: +def HEXAGON_S2_asr_i_p_or: di_SInst_didiu6_or <"asr", int_hexagon_S2_asr_i_p_or>; -def Hexagon_S2_lsr_i_p_or: +def HEXAGON_S2_lsr_i_p_or: di_SInst_didiu6_or <"lsr", int_hexagon_S2_lsr_i_p_or>; // STYPE / SHIFT / Shift right by immediate with rounding. -def Hexagon_S2_asr_i_r_rnd: +def HEXAGON_S2_asr_i_r_rnd: si_SInst_siu5_rnd <"asr", int_hexagon_S2_asr_i_r_rnd>; -def Hexagon_S2_asr_i_r_rnd_goodsyntax: +def HEXAGON_S2_asr_i_r_rnd_goodsyntax: si_SInst_siu5 <"asrrnd", int_hexagon_S2_asr_i_r_rnd_goodsyntax>; // STYPE / SHIFT / Shift left by immediate with saturation. -def Hexagon_S2_asl_i_r_sat: +def HEXAGON_S2_asl_i_r_sat: si_SInst_sisi_sat <"asl", int_hexagon_S2_asl_i_r_sat>; // STYPE / SHIFT / Shift by register. -def Hexagon_S2_asl_r_r: +def HEXAGON_S2_asl_r_r: si_SInst_sisi <"asl", int_hexagon_S2_asl_r_r>; -def Hexagon_S2_asr_r_r: +def HEXAGON_S2_asr_r_r: si_SInst_sisi <"asr", int_hexagon_S2_asr_r_r>; -def Hexagon_S2_lsl_r_r: +def HEXAGON_S2_lsl_r_r: si_SInst_sisi <"lsl", int_hexagon_S2_lsl_r_r>; -def Hexagon_S2_lsr_r_r: +def HEXAGON_S2_lsr_r_r: si_SInst_sisi <"lsr", int_hexagon_S2_lsr_r_r>; -def Hexagon_S2_asl_r_p: +def HEXAGON_S2_asl_r_p: di_SInst_disi <"asl", int_hexagon_S2_asl_r_p>; -def Hexagon_S2_asr_r_p: +def HEXAGON_S2_asr_r_p: di_SInst_disi <"asr", int_hexagon_S2_asr_r_p>; -def Hexagon_S2_lsl_r_p: +def HEXAGON_S2_lsl_r_p: di_SInst_disi <"lsl", int_hexagon_S2_lsl_r_p>; -def Hexagon_S2_lsr_r_p: +def HEXAGON_S2_lsr_r_p: di_SInst_disi <"lsr", int_hexagon_S2_lsr_r_p>; // STYPE / SHIFT / Shift by register and accumulate. 
-def Hexagon_S2_asl_r_r_acc: +def HEXAGON_S2_asl_r_r_acc: si_SInst_sisisi_acc <"asl", int_hexagon_S2_asl_r_r_acc>; -def Hexagon_S2_asr_r_r_acc: +def HEXAGON_S2_asr_r_r_acc: si_SInst_sisisi_acc <"asr", int_hexagon_S2_asr_r_r_acc>; -def Hexagon_S2_lsl_r_r_acc: +def HEXAGON_S2_lsl_r_r_acc: si_SInst_sisisi_acc <"lsl", int_hexagon_S2_lsl_r_r_acc>; -def Hexagon_S2_lsr_r_r_acc: +def HEXAGON_S2_lsr_r_r_acc: si_SInst_sisisi_acc <"lsr", int_hexagon_S2_lsr_r_r_acc>; -def Hexagon_S2_asl_r_p_acc: +def HEXAGON_S2_asl_r_p_acc: di_SInst_didisi_acc <"asl", int_hexagon_S2_asl_r_p_acc>; -def Hexagon_S2_asr_r_p_acc: +def HEXAGON_S2_asr_r_p_acc: di_SInst_didisi_acc <"asr", int_hexagon_S2_asr_r_p_acc>; -def Hexagon_S2_lsl_r_p_acc: +def HEXAGON_S2_lsl_r_p_acc: di_SInst_didisi_acc <"lsl", int_hexagon_S2_lsl_r_p_acc>; -def Hexagon_S2_lsr_r_p_acc: +def HEXAGON_S2_lsr_r_p_acc: di_SInst_didisi_acc <"lsr", int_hexagon_S2_lsr_r_p_acc>; -def Hexagon_S2_asl_r_r_nac: +def HEXAGON_S2_asl_r_r_nac: si_SInst_sisisi_nac <"asl", int_hexagon_S2_asl_r_r_nac>; -def Hexagon_S2_asr_r_r_nac: +def HEXAGON_S2_asr_r_r_nac: si_SInst_sisisi_nac <"asr", int_hexagon_S2_asr_r_r_nac>; -def Hexagon_S2_lsl_r_r_nac: +def HEXAGON_S2_lsl_r_r_nac: si_SInst_sisisi_nac <"lsl", int_hexagon_S2_lsl_r_r_nac>; -def Hexagon_S2_lsr_r_r_nac: +def HEXAGON_S2_lsr_r_r_nac: si_SInst_sisisi_nac <"lsr", int_hexagon_S2_lsr_r_r_nac>; -def Hexagon_S2_asl_r_p_nac: +def HEXAGON_S2_asl_r_p_nac: di_SInst_didisi_nac <"asl", int_hexagon_S2_asl_r_p_nac>; -def Hexagon_S2_asr_r_p_nac: +def HEXAGON_S2_asr_r_p_nac: di_SInst_didisi_nac <"asr", int_hexagon_S2_asr_r_p_nac>; -def Hexagon_S2_lsl_r_p_nac: +def HEXAGON_S2_lsl_r_p_nac: di_SInst_didisi_nac <"lsl", int_hexagon_S2_lsl_r_p_nac>; -def Hexagon_S2_lsr_r_p_nac: +def HEXAGON_S2_lsr_r_p_nac: di_SInst_didisi_nac <"lsr", int_hexagon_S2_lsr_r_p_nac>; // STYPE / SHIFT / Shift by register and logical. 
-def Hexagon_S2_asl_r_r_and: +def HEXAGON_S2_asl_r_r_and: si_SInst_sisisi_and <"asl", int_hexagon_S2_asl_r_r_and>; -def Hexagon_S2_asr_r_r_and: +def HEXAGON_S2_asr_r_r_and: si_SInst_sisisi_and <"asr", int_hexagon_S2_asr_r_r_and>; -def Hexagon_S2_lsl_r_r_and: +def HEXAGON_S2_lsl_r_r_and: si_SInst_sisisi_and <"lsl", int_hexagon_S2_lsl_r_r_and>; -def Hexagon_S2_lsr_r_r_and: +def HEXAGON_S2_lsr_r_r_and: si_SInst_sisisi_and <"lsr", int_hexagon_S2_lsr_r_r_and>; -def Hexagon_S2_asl_r_r_or: +def HEXAGON_S2_asl_r_r_or: si_SInst_sisisi_or <"asl", int_hexagon_S2_asl_r_r_or>; -def Hexagon_S2_asr_r_r_or: +def HEXAGON_S2_asr_r_r_or: si_SInst_sisisi_or <"asr", int_hexagon_S2_asr_r_r_or>; -def Hexagon_S2_lsl_r_r_or: +def HEXAGON_S2_lsl_r_r_or: si_SInst_sisisi_or <"lsl", int_hexagon_S2_lsl_r_r_or>; -def Hexagon_S2_lsr_r_r_or: +def HEXAGON_S2_lsr_r_r_or: si_SInst_sisisi_or <"lsr", int_hexagon_S2_lsr_r_r_or>; -def Hexagon_S2_asl_r_p_and: +def HEXAGON_S2_asl_r_p_and: di_SInst_didisi_and <"asl", int_hexagon_S2_asl_r_p_and>; -def Hexagon_S2_asr_r_p_and: +def HEXAGON_S2_asr_r_p_and: di_SInst_didisi_and <"asr", int_hexagon_S2_asr_r_p_and>; -def Hexagon_S2_lsl_r_p_and: +def HEXAGON_S2_lsl_r_p_and: di_SInst_didisi_and <"lsl", int_hexagon_S2_lsl_r_p_and>; -def Hexagon_S2_lsr_r_p_and: +def HEXAGON_S2_lsr_r_p_and: di_SInst_didisi_and <"lsr", int_hexagon_S2_lsr_r_p_and>; -def Hexagon_S2_asl_r_p_or: +def HEXAGON_S2_asl_r_p_or: di_SInst_didisi_or <"asl", int_hexagon_S2_asl_r_p_or>; -def Hexagon_S2_asr_r_p_or: +def HEXAGON_S2_asr_r_p_or: di_SInst_didisi_or <"asr", int_hexagon_S2_asr_r_p_or>; -def Hexagon_S2_lsl_r_p_or: +def HEXAGON_S2_lsl_r_p_or: di_SInst_didisi_or <"lsl", int_hexagon_S2_lsl_r_p_or>; -def Hexagon_S2_lsr_r_p_or: +def HEXAGON_S2_lsr_r_p_or: di_SInst_didisi_or <"lsr", int_hexagon_S2_lsr_r_p_or>; // STYPE / SHIFT / Shift by register with saturation. -def Hexagon_S2_asl_r_r_sat: +def HEXAGON_S2_asl_r_r_sat: si_SInst_sisi_sat <"asl", int_hexagon_S2_asl_r_r_sat>; -def Hexagon_S2_asr_r_r_sat: +def HEXAGON_S2_asr_r_r_sat: si_SInst_sisi_sat <"asr", int_hexagon_S2_asr_r_r_sat>; // STYPE / SHIFT / Table Index. -def HEXAGON_S2_tableidxb_goodsyntax: +def Hexagon_S2_tableidxb_goodsyntax: si_MInst_sisiu4u5 <"tableidxb",int_hexagon_S2_tableidxb_goodsyntax>; -def HEXAGON_S2_tableidxd_goodsyntax: +def Hexagon_S2_tableidxd_goodsyntax: si_MInst_sisiu4u5 <"tableidxd",int_hexagon_S2_tableidxd_goodsyntax>; -def HEXAGON_S2_tableidxh_goodsyntax: +def Hexagon_S2_tableidxh_goodsyntax: si_MInst_sisiu4u5 <"tableidxh",int_hexagon_S2_tableidxh_goodsyntax>; -def HEXAGON_S2_tableidxw_goodsyntax: +def Hexagon_S2_tableidxw_goodsyntax: si_MInst_sisiu4u5 <"tableidxw",int_hexagon_S2_tableidxw_goodsyntax>; @@ -3396,29 +3422,29 @@ def HEXAGON_S2_tableidxw_goodsyntax: // STYPE / VH / Vector absolute value halfwords. // Rdd64=vabsh(Rss64) -def Hexagon_A2_vabsh: +def HEXAGON_A2_vabsh: di_SInst_di <"vabsh", int_hexagon_A2_vabsh>; -def Hexagon_A2_vabshsat: +def HEXAGON_A2_vabshsat: di_SInst_di_sat <"vabsh", int_hexagon_A2_vabshsat>; // STYPE / VH / Vector shift halfwords by immediate. // Rdd64=v[asl/asr/lsr]h(Rss64,Rt32) -def Hexagon_S2_asl_i_vh: +def HEXAGON_S2_asl_i_vh: di_SInst_disi <"vaslh", int_hexagon_S2_asl_i_vh>; -def Hexagon_S2_asr_i_vh: +def HEXAGON_S2_asr_i_vh: di_SInst_disi <"vasrh", int_hexagon_S2_asr_i_vh>; -def Hexagon_S2_lsr_i_vh: +def HEXAGON_S2_lsr_i_vh: di_SInst_disi <"vlsrh", int_hexagon_S2_lsr_i_vh>; // STYPE / VH / Vector shift halfwords by register. 
// Rdd64=v[asl/asr/lsl/lsr]w(Rss64,Rt32) -def Hexagon_S2_asl_r_vh: +def HEXAGON_S2_asl_r_vh: di_SInst_disi <"vaslh", int_hexagon_S2_asl_r_vh>; -def Hexagon_S2_asr_r_vh: +def HEXAGON_S2_asr_r_vh: di_SInst_disi <"vasrh", int_hexagon_S2_asr_r_vh>; -def Hexagon_S2_lsl_r_vh: +def HEXAGON_S2_lsl_r_vh: di_SInst_disi <"vlslh", int_hexagon_S2_lsl_r_vh>; -def Hexagon_S2_lsr_r_vh: +def HEXAGON_S2_lsr_r_vh: di_SInst_disi <"vlsrh", int_hexagon_S2_lsr_r_vh>; @@ -3427,36 +3453,41 @@ def Hexagon_S2_lsr_r_vh: *********************************************************************/ // STYPE / VW / Vector absolute value words. -def Hexagon_A2_vabsw: +def HEXAGON_A2_vabsw: di_SInst_di <"vabsw", int_hexagon_A2_vabsw>; -def Hexagon_A2_vabswsat: +def HEXAGON_A2_vabswsat: di_SInst_di_sat <"vabsw", int_hexagon_A2_vabswsat>; // STYPE / VW / Vector shift words by immediate. // Rdd64=v[asl/vsl]w(Rss64,Rt32) -def Hexagon_S2_asl_i_vw: +def HEXAGON_S2_asl_i_vw: di_SInst_disi <"vaslw", int_hexagon_S2_asl_i_vw>; -def Hexagon_S2_asr_i_vw: +def HEXAGON_S2_asr_i_vw: di_SInst_disi <"vasrw", int_hexagon_S2_asr_i_vw>; -def Hexagon_S2_lsr_i_vw: +def HEXAGON_S2_lsr_i_vw: di_SInst_disi <"vlsrw", int_hexagon_S2_lsr_i_vw>; // STYPE / VW / Vector shift words by register. // Rdd64=v[asl/vsl]w(Rss64,Rt32) -def Hexagon_S2_asl_r_vw: +def HEXAGON_S2_asl_r_vw: di_SInst_disi <"vaslw", int_hexagon_S2_asl_r_vw>; -def Hexagon_S2_asr_r_vw: +def HEXAGON_S2_asr_r_vw: di_SInst_disi <"vasrw", int_hexagon_S2_asr_r_vw>; -def Hexagon_S2_lsl_r_vw: +def HEXAGON_S2_lsl_r_vw: di_SInst_disi <"vlslw", int_hexagon_S2_lsl_r_vw>; -def Hexagon_S2_lsr_r_vw: +def HEXAGON_S2_lsr_r_vw: di_SInst_disi <"vlsrw", int_hexagon_S2_lsr_r_vw>; // STYPE / VW / Vector shift words with truncate and pack. -def Hexagon_S2_asr_r_svw_trun: +def HEXAGON_S2_asr_r_svw_trun: si_SInst_disi <"vasrw", int_hexagon_S2_asr_r_svw_trun>; -def Hexagon_S2_asr_i_svw_trun: +def HEXAGON_S2_asr_i_svw_trun: si_SInst_diu5 <"vasrw", int_hexagon_S2_asr_i_svw_trun>; +// LD / Circular loads. 
+def HEXAGON_circ_ldd: + di_LDInstPI_diu4 <"circ_ldd", int_hexagon_circ_ldd>; + include "HexagonIntrinsicsV3.td" include "HexagonIntrinsicsV4.td" +include "HexagonIntrinsicsV5.td" diff --git a/lib/Target/Hexagon/HexagonIntrinsicsDerived.td b/lib/Target/Hexagon/HexagonIntrinsicsDerived.td index 68eaf68480..2788101d5a 100644 --- a/lib/Target/Hexagon/HexagonIntrinsicsDerived.td +++ b/lib/Target/Hexagon/HexagonIntrinsicsDerived.td @@ -12,18 +12,28 @@ // Optimized with intrinisics accumulates // def : Pat <(mul DoubleRegs:$src1, DoubleRegs:$src2), - (COMBINE_rr - (Hexagon_M2_maci - (Hexagon_M2_maci (EXTRACT_SUBREG (MPYU64 (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg), - (EXTRACT_SUBREG DoubleRegs:$src2, subreg_loreg)), - subreg_hireg), - (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg), - (EXTRACT_SUBREG DoubleRegs:$src2, subreg_hireg)), - (EXTRACT_SUBREG DoubleRegs:$src2, subreg_loreg), - (EXTRACT_SUBREG DoubleRegs:$src1, subreg_hireg)), - (EXTRACT_SUBREG (MPYU64 (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg), - (EXTRACT_SUBREG DoubleRegs:$src2, subreg_loreg)), - subreg_loreg))>; + (i64 + (COMBINE_rr + (HEXAGON_M2_maci + (HEXAGON_M2_maci + (i32 + (EXTRACT_SUBREG + (i64 + (MPYU64 (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), + subreg_loreg)), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), + subreg_loreg)))), + subreg_hireg)), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_loreg)), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_hireg))), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_loreg)), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_hireg))), + (i32 + (EXTRACT_SUBREG + (i64 + (MPYU64 (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_loreg)), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), + subreg_loreg)))), subreg_loreg))))>; diff --git a/lib/Target/Hexagon/HexagonIntrinsicsV5.td b/lib/Target/Hexagon/HexagonIntrinsicsV5.td new file mode 100644 index 0000000000..4746b4c2b6 --- /dev/null +++ b/lib/Target/Hexagon/HexagonIntrinsicsV5.td @@ -0,0 +1,395 @@ +class sf_SInst_sf<string opc, Intrinsic IntID> + : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1), + !strconcat("$dst = ", !strconcat(opc , "($src1)")), + [(set IntRegs:$dst, (IntID IntRegs:$src1))]>; + +class si_SInst_sf<string opc, Intrinsic IntID> + : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1), + !strconcat("$dst = ", !strconcat(opc , "($src1)")), + [(set IntRegs:$dst, (IntID IntRegs:$src1))]>; + +class sf_SInst_si<string opc, Intrinsic IntID> + : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1), + !strconcat("$dst = ", !strconcat(opc , "($src1)")), + [(set IntRegs:$dst, (IntID IntRegs:$src1))]>; + +class sf_SInst_di<string opc, Intrinsic IntID> + : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1), + !strconcat("$dst = ", !strconcat(opc , "($src1)")), + [(set IntRegs:$dst, (IntID DoubleRegs:$src1))]>; + +class sf_SInst_df<string opc, Intrinsic IntID> + : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1), + !strconcat("$dst = ", !strconcat(opc , "($src1)")), + [(set IntRegs:$dst, (IntID DoubleRegs:$src1))]>; + +class si_SInst_df<string opc, Intrinsic IntID> + : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1), + !strconcat("$dst = ", !strconcat(opc , "($src1)")), + [(set IntRegs:$dst, (IntID DoubleRegs:$src1))]>; + +class df_SInst_sf<string opc, Intrinsic IntID> + : SInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1), + !strconcat("$dst = ", !strconcat(opc , "($src1)")), + [(set DoubleRegs:$dst, (IntID IntRegs:$src1))]>; + +class di_SInst_sf<string opc, Intrinsic IntID> + : SInst<(outs DoubleRegs:$dst), (ins 
IntRegs:$src1), + !strconcat("$dst = ", !strconcat(opc , "($src1)")), + [(set DoubleRegs:$dst, (IntID IntRegs:$src1))]>; + +class df_SInst_si<string opc, Intrinsic IntID> + : SInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1), + !strconcat("$dst = ", !strconcat(opc , "($src1)")), + [(set DoubleRegs:$dst, (IntID IntRegs:$src1))]>; + +class df_SInst_df<string opc, Intrinsic IntID> + : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1), + !strconcat("$dst = ", !strconcat(opc , "($src1)")), + [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1))]>; + +class di_SInst_df<string opc, Intrinsic IntID> + : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1), + !strconcat("$dst = ", !strconcat(opc , "($src1)")), + [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1))]>; + + +class df_SInst_di<string opc, Intrinsic IntID> + : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1), + !strconcat("$dst = ", !strconcat(opc , "($src1)")), + [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1))]>; + +class sf_MInst_sfsf<string opc, Intrinsic IntID> + : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), + !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")), + [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>; + +class df_MInst_dfdf<string opc, Intrinsic IntID> + : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2), + !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")), + [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2))]>; + +class qi_ALU64_dfdf<string opc, Intrinsic IntID> + : ALU64_rr<(outs PredRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2), + !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")), + [(set PredRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2))]>; + +class qi_ALU64_dfu5<string opc, Intrinsic IntID> + : ALU64_ri<(outs PredRegs:$dst), (ins DoubleRegs:$src1, u5Imm:$src2), + !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")), + [(set PredRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2))]>; + + +class sf_MInst_sfsfsf_acc<string opc, Intrinsic IntID> + : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, + IntRegs:$dst2), + !strconcat("$dst += ", !strconcat(opc , + "($src1, $src2)")), + [(set IntRegs:$dst, (IntID IntRegs:$src1, + IntRegs:$src2, IntRegs:$dst2))], + "$dst2 = $dst">; + +class sf_MInst_sfsfsf_nac<string opc, Intrinsic IntID> + : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, + IntRegs:$dst2), + !strconcat("$dst -= ", !strconcat(opc , + "($src1, $src2)")), + [(set IntRegs:$dst, (IntID IntRegs:$src1, + IntRegs:$src2, IntRegs:$dst2))], + "$dst2 = $dst">; + + +class sf_MInst_sfsfsfsi_sc<string opc, Intrinsic IntID> + : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1, + IntRegs:$src2, IntRegs:$src3), + !strconcat("$dst += ", !strconcat(opc , + "($src1, $src2, $src3):scale")), + [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1, + IntRegs:$src2, IntRegs:$src3))], + "$dst2 = $dst">; + +class sf_MInst_sfsfsf_acc_lib<string opc, Intrinsic IntID> + : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, + IntRegs:$dst2), + !strconcat("$dst += ", !strconcat(opc , + "($src1, $src2):lib")), + [(set IntRegs:$dst, (IntID IntRegs:$src1, + IntRegs:$src2, IntRegs:$dst2))], + "$dst2 = $dst">; + +class sf_MInst_sfsfsf_nac_lib<string opc, Intrinsic IntID> + : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, + IntRegs:$dst2), + !strconcat("$dst -= ", !strconcat(opc , + "($src1, $src2):lib")), + [(set IntRegs:$dst, (IntID IntRegs:$src1, + 
IntRegs:$src2, IntRegs:$dst2))], + "$dst2 = $dst">; + +class df_MInst_dfdfdf_acc<string opc, Intrinsic IntID> + : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2, + DoubleRegs:$dst2), + !strconcat("$dst += ", !strconcat(opc , + "($src1, $src2)")), + [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, + DoubleRegs:$src2, DoubleRegs:$dst2))], + "$dst2 = $dst">; + +class df_MInst_dfdfdf_nac<string opc, Intrinsic IntID> + : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2, + DoubleRegs:$dst2), + !strconcat("$dst -= ", !strconcat(opc , + "($src1, $src2)")), + [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, + DoubleRegs:$src2, DoubleRegs:$dst2))], + "$dst2 = $dst">; + + +class df_MInst_dfdfdfsi_sc<string opc, Intrinsic IntID> + : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1, + DoubleRegs:$src2, IntRegs:$src3), + !strconcat("$dst += ", !strconcat(opc , + "($src1, $src2, $src3):scale")), + [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, DoubleRegs:$src1, + DoubleRegs:$src2, IntRegs:$src3))], + "$dst2 = $dst">; + +class df_MInst_dfdfdf_acc_lib<string opc, Intrinsic IntID> + : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2, + DoubleRegs:$dst2), + !strconcat("$dst += ", !strconcat(opc , + "($src1, $src2):lib")), + [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, + DoubleRegs:$src2, DoubleRegs:$dst2))], + "$dst2 = $dst">; + +class df_MInst_dfdfdf_nac_lib<string opc, Intrinsic IntID> + : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2, + DoubleRegs:$dst2), + !strconcat("$dst -= ", !strconcat(opc , + "($src1, $src2):lib")), + [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, + DoubleRegs:$src2, DoubleRegs:$dst2))], + "$dst2 = $dst">; + +class qi_SInst_sfsf<string opc, Intrinsic IntID> + : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), + !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")), + [(set PredRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>; + +class qi_SInst_sfu5<string opc, Intrinsic IntID> + : MInst<(outs PredRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2), + !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")), + [(set PredRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>; + +class sf_ALU64_u10_pos<string opc, Intrinsic IntID> + : ALU64_ri<(outs IntRegs:$dst), (ins u10Imm:$src1), + !strconcat("$dst = ", !strconcat(opc , "#$src1):pos")), + [(set IntRegs:$dst, (IntID imm:$src1))]>; + +class sf_ALU64_u10_neg<string opc, Intrinsic IntID> + : ALU64_ri<(outs IntRegs:$dst), (ins u10Imm:$src1), + !strconcat("$dst = ", !strconcat(opc , "#$src1):neg")), + [(set IntRegs:$dst, (IntID imm:$src1))]>; + +class df_ALU64_u10_pos<string opc, Intrinsic IntID> + : ALU64_ri<(outs DoubleRegs:$dst), (ins u10Imm:$src1), + !strconcat("$dst = ", !strconcat(opc , "#$src1):pos")), + [(set DoubleRegs:$dst, (IntID imm:$src1))]>; + +class df_ALU64_u10_neg<string opc, Intrinsic IntID> + : ALU64_ri<(outs DoubleRegs:$dst), (ins u10Imm:$src1), + !strconcat("$dst = ", !strconcat(opc , "#$src1):neg")), + [(set DoubleRegs:$dst, (IntID imm:$src1))]>; + +class di_MInst_diu6<string opc, Intrinsic IntID> + : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2), + !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")), + [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2))]>; + +class di_MInst_diu4_rnd<string opc, Intrinsic IntID> + : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u4Imm:$src2), + !strconcat("$dst = ", !strconcat(opc , "($src1, 
#$src2):rnd")), + [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2))]>; + +class si_MInst_diu4_rnd_sat<string opc, Intrinsic IntID> + : MInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1, u4Imm:$src2), + !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2):rnd:sat")), + [(set IntRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2))]>; + +class si_SInst_diu4_sat<string opc, Intrinsic IntID> + : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1, u4Imm:$src2), + !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2):sat")), + [(set IntRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2))]>; + + +def HEXAGON_C4_fastcorner9: + qi_SInst_qiqi <"fastcorner9", int_hexagon_C4_fastcorner9>; +def HEXAGON_C4_fastcorner9_not: + qi_SInst_qiqi <"!fastcorner9", int_hexagon_C4_fastcorner9_not>; +def HEXAGON_M5_vrmpybuu: + di_MInst_didi <"vrmpybu", int_hexagon_M5_vrmpybuu>; +def HEXAGON_M5_vrmacbuu: + di_MInst_dididi_acc <"vrmpybu", int_hexagon_M5_vrmacbuu>; +def HEXAGON_M5_vrmpybsu: + di_MInst_didi <"vrmpybsu", int_hexagon_M5_vrmpybsu>; +def HEXAGON_M5_vrmacbsu: + di_MInst_dididi_acc <"vrmpybsu", int_hexagon_M5_vrmacbsu>; +def HEXAGON_M5_vmpybuu: + di_MInst_sisi <"vmpybu", int_hexagon_M5_vmpybuu>; +def HEXAGON_M5_vmpybsu: + di_MInst_sisi <"vmpybsu", int_hexagon_M5_vmpybsu>; +def HEXAGON_M5_vmacbuu: + di_MInst_disisi_acc <"vmpybu", int_hexagon_M5_vmacbuu>; +def HEXAGON_M5_vmacbsu: + di_MInst_disisi_acc <"vmpybsu", int_hexagon_M5_vmacbsu>; +def HEXAGON_M5_vdmpybsu: + di_MInst_didi_sat <"vdmpybsu", int_hexagon_M5_vdmpybsu>; +def HEXAGON_M5_vdmacbsu: + di_MInst_dididi_acc_sat <"vdmpybsu", int_hexagon_M5_vdmacbsu>; +def HEXAGON_A5_vaddhubs: + si_SInst_didi_sat <"vaddhub", int_hexagon_A5_vaddhubs>; +def HEXAGON_S5_popcountp: + si_SInst_di <"popcount", int_hexagon_S5_popcountp>; +def HEXAGON_S5_asrhub_rnd_sat_goodsyntax: + si_MInst_diu4_rnd_sat <"vasrhub", int_hexagon_S5_asrhub_rnd_sat_goodsyntax>; +def HEXAGON_S5_asrhub_sat: + si_SInst_diu4_sat <"vasrhub", int_hexagon_S5_asrhub_sat>; +def HEXAGON_S5_vasrhrnd_goodsyntax: + di_MInst_diu4_rnd <"vasrh", int_hexagon_S5_vasrhrnd_goodsyntax>; +def HEXAGON_S2_asr_i_p_rnd: + di_SInst_diu6 <"asr", int_hexagon_S2_asr_i_p_rnd>; +def HEXAGON_S2_asr_i_p_rnd_goodsyntax: + di_MInst_diu6 <"asrrnd", int_hexagon_S2_asr_i_p_rnd_goodsyntax>; +def HEXAGON_F2_sfadd: + sf_MInst_sfsf <"sfadd", int_hexagon_F2_sfadd>; +def HEXAGON_F2_sfsub: + sf_MInst_sfsf <"sfsub", int_hexagon_F2_sfsub>; +def HEXAGON_F2_sfmpy: + sf_MInst_sfsf <"sfmpy", int_hexagon_F2_sfmpy>; +def HEXAGON_F2_sffma: + sf_MInst_sfsfsf_acc <"sfmpy", int_hexagon_F2_sffma>; +def HEXAGON_F2_sffma_sc: + sf_MInst_sfsfsfsi_sc <"sfmpy", int_hexagon_F2_sffma_sc>; +def HEXAGON_F2_sffms: + sf_MInst_sfsfsf_nac <"sfmpy", int_hexagon_F2_sffms>; +def HEXAGON_F2_sffma_lib: + sf_MInst_sfsfsf_acc_lib <"sfmpy", int_hexagon_F2_sffma_lib>; +def HEXAGON_F2_sffms_lib: + sf_MInst_sfsfsf_nac_lib <"sfmpy", int_hexagon_F2_sffms_lib>; +def HEXAGON_F2_sfcmpeq: + qi_SInst_sfsf <"sfcmp.eq", int_hexagon_F2_sfcmpeq>; +def HEXAGON_F2_sfcmpgt: + qi_SInst_sfsf <"sfcmp.gt", int_hexagon_F2_sfcmpgt>; +def HEXAGON_F2_sfcmpge: + qi_SInst_sfsf <"sfcmp.ge", int_hexagon_F2_sfcmpge>; +def HEXAGON_F2_sfcmpuo: + qi_SInst_sfsf <"sfcmp.uo", int_hexagon_F2_sfcmpuo>; +def HEXAGON_F2_sfmax: + sf_MInst_sfsf <"sfmax", int_hexagon_F2_sfmax>; +def HEXAGON_F2_sfmin: + sf_MInst_sfsf <"sfmin", int_hexagon_F2_sfmin>; +def HEXAGON_F2_sfclass: + qi_SInst_sfu5 <"sfclass", int_hexagon_F2_sfclass>; +def HEXAGON_F2_sfimm_p: + sf_ALU64_u10_pos <"sfmake", int_hexagon_F2_sfimm_p>; +def 
HEXAGON_F2_sfimm_n: + sf_ALU64_u10_neg <"sfmake", int_hexagon_F2_sfimm_n>; +def HEXAGON_F2_sffixupn: + sf_MInst_sfsf <"sffixupn", int_hexagon_F2_sffixupn>; +def HEXAGON_F2_sffixupd: + sf_MInst_sfsf <"sffixupd", int_hexagon_F2_sffixupd>; +def HEXAGON_F2_sffixupr: + sf_SInst_sf <"sffixupr", int_hexagon_F2_sffixupr>; +def HEXAGON_F2_dfadd: + df_MInst_dfdf <"dfadd", int_hexagon_F2_dfadd>; +def HEXAGON_F2_dfsub: + df_MInst_dfdf <"dfsub", int_hexagon_F2_dfsub>; +def HEXAGON_F2_dfmpy: + df_MInst_dfdf <"dfmpy", int_hexagon_F2_dfmpy>; +def HEXAGON_F2_dffma: + df_MInst_dfdfdf_acc <"dfmpy", int_hexagon_F2_dffma>; +def HEXAGON_F2_dffms: + df_MInst_dfdfdf_nac <"dfmpy", int_hexagon_F2_dffms>; +def HEXAGON_F2_dffma_lib: + df_MInst_dfdfdf_acc_lib <"dfmpy", int_hexagon_F2_dffma_lib>; +def HEXAGON_F2_dffms_lib: + df_MInst_dfdfdf_nac_lib <"dfmpy", int_hexagon_F2_dffms_lib>; +def HEXAGON_F2_dffma_sc: + df_MInst_dfdfdfsi_sc <"dfmpy", int_hexagon_F2_dffma_sc>; +def HEXAGON_F2_dfmax: + df_MInst_dfdf <"dfmax", int_hexagon_F2_dfmax>; +def HEXAGON_F2_dfmin: + df_MInst_dfdf <"dfmin", int_hexagon_F2_dfmin>; +def HEXAGON_F2_dfcmpeq: + qi_ALU64_dfdf <"dfcmp.eq", int_hexagon_F2_dfcmpeq>; +def HEXAGON_F2_dfcmpgt: + qi_ALU64_dfdf <"dfcmp.gt", int_hexagon_F2_dfcmpgt>; +def HEXAGON_F2_dfcmpge: + qi_ALU64_dfdf <"dfcmp.ge", int_hexagon_F2_dfcmpge>; +def HEXAGON_F2_dfcmpuo: + qi_ALU64_dfdf <"dfcmp.uo", int_hexagon_F2_dfcmpuo>; +def HEXAGON_F2_dfclass: + qi_ALU64_dfu5 <"dfclass", int_hexagon_F2_dfclass>; +def HEXAGON_F2_dfimm_p: + df_ALU64_u10_pos <"dfmake", int_hexagon_F2_dfimm_p>; +def HEXAGON_F2_dfimm_n: + df_ALU64_u10_neg <"dfmake", int_hexagon_F2_dfimm_n>; +def HEXAGON_F2_dffixupn: + df_MInst_dfdf <"dffixupn", int_hexagon_F2_dffixupn>; +def HEXAGON_F2_dffixupd: + df_MInst_dfdf <"dffixupd", int_hexagon_F2_dffixupd>; +def HEXAGON_F2_dffixupr: + df_SInst_df <"dffixupr", int_hexagon_F2_dffixupr>; +def HEXAGON_F2_conv_sf2df: + df_SInst_sf <"convert_sf2df", int_hexagon_F2_conv_sf2df>; +def HEXAGON_F2_conv_df2sf: + sf_SInst_df <"convert_df2sf", int_hexagon_F2_conv_df2sf>; +def HEXAGON_F2_conv_uw2sf: + sf_SInst_si <"convert_uw2sf", int_hexagon_F2_conv_uw2sf>; +def HEXAGON_F2_conv_uw2df: + df_SInst_si <"convert_uw2df", int_hexagon_F2_conv_uw2df>; +def HEXAGON_F2_conv_w2sf: + sf_SInst_si <"convert_w2sf", int_hexagon_F2_conv_w2sf>; +def HEXAGON_F2_conv_w2df: + df_SInst_si <"convert_w2df", int_hexagon_F2_conv_w2df>; +def HEXAGON_F2_conv_ud2sf: + sf_SInst_di <"convert_ud2sf", int_hexagon_F2_conv_ud2sf>; +def HEXAGON_F2_conv_ud2df: + df_SInst_di <"convert_ud2df", int_hexagon_F2_conv_ud2df>; +def HEXAGON_F2_conv_d2sf: + sf_SInst_di <"convert_d2sf", int_hexagon_F2_conv_d2sf>; +def HEXAGON_F2_conv_d2df: + df_SInst_di <"convert_d2df", int_hexagon_F2_conv_d2df>; +def HEXAGON_F2_conv_sf2uw: + si_SInst_sf <"convert_sf2uw", int_hexagon_F2_conv_sf2uw>; +def HEXAGON_F2_conv_sf2w: + si_SInst_sf <"convert_sf2w", int_hexagon_F2_conv_sf2w>; +def HEXAGON_F2_conv_sf2ud: + di_SInst_sf <"convert_sf2ud", int_hexagon_F2_conv_sf2ud>; +def HEXAGON_F2_conv_sf2d: + di_SInst_sf <"convert_sf2d", int_hexagon_F2_conv_sf2d>; +def HEXAGON_F2_conv_df2uw: + si_SInst_df <"convert_df2uw", int_hexagon_F2_conv_df2uw>; +def HEXAGON_F2_conv_df2w: + si_SInst_df <"convert_df2w", int_hexagon_F2_conv_df2w>; +def HEXAGON_F2_conv_df2ud: + di_SInst_df <"convert_df2ud", int_hexagon_F2_conv_df2ud>; +def HEXAGON_F2_conv_df2d: + di_SInst_df <"convert_df2d", int_hexagon_F2_conv_df2d>; +def HEXAGON_F2_conv_sf2uw_chop: + si_SInst_sf <"convert_sf2uw", int_hexagon_F2_conv_sf2uw_chop>; +def 
HEXAGON_F2_conv_sf2w_chop: + si_SInst_sf <"convert_sf2w", int_hexagon_F2_conv_sf2w_chop>; +def HEXAGON_F2_conv_sf2ud_chop: + di_SInst_sf <"convert_sf2ud", int_hexagon_F2_conv_sf2ud_chop>; +def HEXAGON_F2_conv_sf2d_chop: + di_SInst_sf <"convert_sf2d", int_hexagon_F2_conv_sf2d_chop>; +def HEXAGON_F2_conv_df2uw_chop: + si_SInst_df <"convert_df2uw", int_hexagon_F2_conv_df2uw_chop>; +def HEXAGON_F2_conv_df2w_chop: + si_SInst_df <"convert_df2w", int_hexagon_F2_conv_df2w_chop>; +def HEXAGON_F2_conv_df2ud_chop: + di_SInst_df <"convert_df2ud", int_hexagon_F2_conv_df2ud_chop>; +def HEXAGON_F2_conv_df2d_chop: + di_SInst_df <"convert_df2d", int_hexagon_F2_conv_df2d_chop>; diff --git a/lib/Target/Hexagon/HexagonRegisterInfo.cpp b/lib/Target/Hexagon/HexagonRegisterInfo.cpp index 2a9de92329..f8ffdc44ac 100644 --- a/lib/Target/Hexagon/HexagonRegisterInfo.cpp +++ b/lib/Target/Hexagon/HexagonRegisterInfo.cpp @@ -63,6 +63,7 @@ const uint16_t* HexagonRegisterInfo::getCalleeSavedRegs(const MachineFunction return CalleeSavedRegsV2; case HexagonSubtarget::V3: case HexagonSubtarget::V4: + case HexagonSubtarget::V5: return CalleeSavedRegsV3; } llvm_unreachable("Callee saved registers requested for unknown architecture " @@ -109,6 +110,7 @@ HexagonRegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const { return CalleeSavedRegClassesV2; case HexagonSubtarget::V3: case HexagonSubtarget::V4: + case HexagonSubtarget::V5: return CalleeSavedRegClassesV3; } llvm_unreachable("Callee saved register classes requested for unknown " @@ -179,11 +181,13 @@ void HexagonRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, // r0 = add(r30, #10000) // r0 = memw(r0) if ( (MI.getOpcode() == Hexagon::LDriw) || - (MI.getOpcode() == Hexagon::LDrid) || - (MI.getOpcode() == Hexagon::LDrih) || - (MI.getOpcode() == Hexagon::LDriuh) || - (MI.getOpcode() == Hexagon::LDrib) || - (MI.getOpcode() == Hexagon::LDriub) ) { + (MI.getOpcode() == Hexagon::LDrid) || + (MI.getOpcode() == Hexagon::LDrih) || + (MI.getOpcode() == Hexagon::LDriuh) || + (MI.getOpcode() == Hexagon::LDrib) || + (MI.getOpcode() == Hexagon::LDriub) || + (MI.getOpcode() == Hexagon::LDriw_f) || + (MI.getOpcode() == Hexagon::LDrid_f)) { unsigned dstReg = (MI.getOpcode() == Hexagon::LDrid) ? *getSubRegisters(MI.getOperand(0).getReg()) : MI.getOperand(0).getReg(); @@ -203,10 +207,13 @@ void HexagonRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, MI.getOperand(i).ChangeToRegister(dstReg, false, false, true); MI.getOperand(i+1).ChangeToImmediate(0); - } else if ((MI.getOpcode() == Hexagon::STriw) || + } else if ((MI.getOpcode() == Hexagon::STriw_indexed) || + (MI.getOpcode() == Hexagon::STriw) || (MI.getOpcode() == Hexagon::STrid) || (MI.getOpcode() == Hexagon::STrih) || - (MI.getOpcode() == Hexagon::STrib)) { + (MI.getOpcode() == Hexagon::STrib) || + (MI.getOpcode() == Hexagon::STrid_f) || + (MI.getOpcode() == Hexagon::STriw_f)) { // For stores, we need a reserved register. Change // memw(r30 + #10000) = r0 to: // diff --git a/lib/Target/Hexagon/HexagonRegisterInfo.td b/lib/Target/Hexagon/HexagonRegisterInfo.td index d44eae3602..a6b9bdf4f1 100644 --- a/lib/Target/Hexagon/HexagonRegisterInfo.td +++ b/lib/Target/Hexagon/HexagonRegisterInfo.td @@ -131,6 +131,9 @@ let Namespace = "Hexagon" in { def SA1 : Rc<2, "sa1">, DwarfRegNum<[69]>; def LC1 : Rc<3, "lc1">, DwarfRegNum<[70]>; + def M0 : Rc<6, "m0">, DwarfRegNum<[71]>; + def M1 : Rc<7, "m1">, DwarfRegNum<[72]>; + def PC : Rc<9, "pc">, DwarfRegNum<[32]>; // is the Dwarf number correct? 
def GP : Rc<11, "gp">, DwarfRegNum<[33]>; // is the Dwarf number correct? } @@ -140,15 +143,13 @@ let Namespace = "Hexagon" in { // FIXME: the register order should be defined in terms of the preferred // allocation order... // -def IntRegs : RegisterClass<"Hexagon", [i32], 32, +def IntRegs : RegisterClass<"Hexagon", [i32,f32], 32, (add (sequence "R%u", 0, 9), (sequence "R%u", 12, 28), R10, R11, R29, R30, R31)> { } - - -def DoubleRegs : RegisterClass<"Hexagon", [i64], 64, +def DoubleRegs : RegisterClass<"Hexagon", [i64,f64], 64, (add (sequence "D%u", 0, 4), (sequence "D%u", 6, 13), D5, D14, D15)> { let SubRegClasses = [(IntRegs subreg_loreg, subreg_hireg)]; @@ -162,6 +163,7 @@ def PredRegs : RegisterClass<"Hexagon", [i1], 32, (add (sequence "P%u", 0, 3))> def CRRegs : RegisterClass<"Hexagon", [i32], 32, (add (sequence "LC%u", 0, 1), - (sequence "SA%u", 0, 1), PC, GP)> { + (sequence "SA%u", 0, 1), + (sequence "M%u", 0, 1), PC, GP)> { let Size = 32; } diff --git a/lib/Target/Hexagon/HexagonSplitTFRCondSets.cpp b/lib/Target/Hexagon/HexagonSplitTFRCondSets.cpp index d10c9f2d52..d574c182bd 100644 --- a/lib/Target/Hexagon/HexagonSplitTFRCondSets.cpp +++ b/lib/Target/Hexagon/HexagonSplitTFRCondSets.cpp @@ -14,7 +14,7 @@ // {p0 = cmp.eq(r0,r1)} // {r3 = mux(p0,#1,#3)} // -// This requires two packets. If we use .new predicated immediate transfers, +// This requires two packets. If we use .new predicated immediate transfers, // then we can do this in a single packet, e.g.: // // {p0 = cmp.eq(r0,r1) @@ -81,40 +81,124 @@ bool HexagonSplitTFRCondSets::runOnMachineFunction(MachineFunction &Fn) { for (MachineBasicBlock::iterator MII = MBB->begin(); MII != MBB->end(); ++MII) { MachineInstr *MI = MII; - int Opc = MI->getOpcode(); - if (Opc == Hexagon::TFR_condset_rr) { - - int DestReg = MI->getOperand(0).getReg(); - int SrcReg1 = MI->getOperand(2).getReg(); - int SrcReg2 = MI->getOperand(3).getReg(); - - // Minor optimization: do not emit the predicated copy if the source and - // the destination is the same register - if (DestReg != SrcReg1) { - BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_cPt), - DestReg).addReg(MI->getOperand(1).getReg()).addReg(SrcReg1); + int Opc1, Opc2; + switch(MI->getOpcode()) { + case Hexagon::TFR_condset_rr: + case Hexagon::TFR_condset_rr_f: + case Hexagon::TFR_condset_rr64_f: { + int DestReg = MI->getOperand(0).getReg(); + int SrcReg1 = MI->getOperand(2).getReg(); + int SrcReg2 = MI->getOperand(3).getReg(); + + if (MI->getOpcode() == Hexagon::TFR_condset_rr || + MI->getOpcode() == Hexagon::TFR_condset_rr_f) { + Opc1 = Hexagon::TFR_cPt; + Opc2 = Hexagon::TFR_cNotPt; + } + else if (MI->getOpcode() == Hexagon::TFR_condset_rr64_f) { + Opc1 = Hexagon::TFR64_cPt; + Opc2 = Hexagon::TFR64_cNotPt; + } + + // Minor optimization: do not emit the predicated copy if the source + // and the destination is the same register. 
+ if (DestReg != SrcReg1) { + BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Opc1), + DestReg).addReg(MI->getOperand(1).getReg()).addReg(SrcReg1); + } + if (DestReg != SrcReg2) { + BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Opc2), + DestReg).addReg(MI->getOperand(1).getReg()).addReg(SrcReg2); + } + MII = MBB->erase(MI); + --MII; + break; } - if (DestReg != SrcReg2) { - BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_cNotPt), - DestReg).addReg(MI->getOperand(1).getReg()).addReg(SrcReg2); + case Hexagon::TFR_condset_ri: + case Hexagon::TFR_condset_ri_f: { + int DestReg = MI->getOperand(0).getReg(); + int SrcReg1 = MI->getOperand(2).getReg(); + + // Do not emit the predicated copy if the source and the destination + // is the same register. + if (DestReg != SrcReg1) { + BuildMI(*MBB, MII, MI->getDebugLoc(), + TII->get(Hexagon::TFR_cPt), DestReg). + addReg(MI->getOperand(1).getReg()).addReg(SrcReg1); + } + if (MI->getOpcode() == Hexagon::TFR_condset_ri ) { + BuildMI(*MBB, MII, MI->getDebugLoc(), + TII->get(Hexagon::TFRI_cNotPt), DestReg). + addReg(MI->getOperand(1).getReg()). + addImm(MI->getOperand(3).getImm()); + } else if (MI->getOpcode() == Hexagon::TFR_condset_ri_f ) { + BuildMI(*MBB, MII, MI->getDebugLoc(), + TII->get(Hexagon::TFRI_cNotPt_f), DestReg). + addReg(MI->getOperand(1).getReg()). + addFPImm(MI->getOperand(3).getFPImm()); + } + + MII = MBB->erase(MI); + --MII; + break; + } + case Hexagon::TFR_condset_ir: + case Hexagon::TFR_condset_ir_f: { + int DestReg = MI->getOperand(0).getReg(); + int SrcReg2 = MI->getOperand(3).getReg(); + + if (MI->getOpcode() == Hexagon::TFR_condset_ir ) { + BuildMI(*MBB, MII, MI->getDebugLoc(), + TII->get(Hexagon::TFRI_cPt), DestReg). + addReg(MI->getOperand(1).getReg()). + addImm(MI->getOperand(2).getImm()); + } else if (MI->getOpcode() == Hexagon::TFR_condset_ir_f ) { + BuildMI(*MBB, MII, MI->getDebugLoc(), + TII->get(Hexagon::TFRI_cPt_f), DestReg). + addReg(MI->getOperand(1).getReg()). + addFPImm(MI->getOperand(2).getFPImm()); + } + + // Do not emit the predicated copy if the source and + // the destination is the same register. + if (DestReg != SrcReg2) { + BuildMI(*MBB, MII, MI->getDebugLoc(), + TII->get(Hexagon::TFR_cNotPt), DestReg). + addReg(MI->getOperand(1).getReg()).addReg(SrcReg2); + } + MII = MBB->erase(MI); + --MII; + break; + } + case Hexagon::TFR_condset_ii: + case Hexagon::TFR_condset_ii_f: { + int DestReg = MI->getOperand(0).getReg(); + int SrcReg1 = MI->getOperand(1).getReg(); + + if (MI->getOpcode() == Hexagon::TFR_condset_ii ) { + int Immed1 = MI->getOperand(2).getImm(); + int Immed2 = MI->getOperand(3).getImm(); + BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFRI_cPt), + DestReg).addReg(SrcReg1).addImm(Immed1); + BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFRI_cNotPt), + DestReg).addReg(SrcReg1).addImm(Immed2); + } else if (MI->getOpcode() == Hexagon::TFR_condset_ii_f ) { + BuildMI(*MBB, MII, MI->getDebugLoc(), + TII->get(Hexagon::TFRI_cPt_f), DestReg). + addReg(SrcReg1). + addFPImm(MI->getOperand(2).getFPImm()); + BuildMI(*MBB, MII, MI->getDebugLoc(), + TII->get(Hexagon::TFRI_cNotPt_f), DestReg). + addReg(SrcReg1). 
+ addFPImm(MI->getOperand(3).getFPImm()); + } + MII = MBB->erase(MI); + --MII; + break; } - MII = MBB->erase(MI); - --MII; - } else if (Opc == Hexagon::TFR_condset_ii) { - int DestReg = MI->getOperand(0).getReg(); - int SrcReg1 = MI->getOperand(1).getReg(); - int Immed1 = MI->getOperand(2).getImm(); - int Immed2 = MI->getOperand(3).getImm(); - BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFRI_cPt), - DestReg).addReg(SrcReg1).addImm(Immed1); - BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFRI_cNotPt), - DestReg).addReg(SrcReg1).addImm(Immed2); - MII = MBB->erase(MI); - --MII; } } } - return true; } diff --git a/lib/Target/Hexagon/HexagonSubtarget.cpp b/lib/Target/Hexagon/HexagonSubtarget.cpp index 25ef4d4c9d..f0e51e54e8 100644 --- a/lib/Target/Hexagon/HexagonSubtarget.cpp +++ b/lib/Target/Hexagon/HexagonSubtarget.cpp @@ -13,6 +13,7 @@ #include "HexagonSubtarget.h" #include "Hexagon.h" +#include "HexagonRegisterInfo.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/ErrorHandling.h" using namespace llvm; @@ -31,6 +32,12 @@ EnableMemOps( cl::Hidden, cl::ZeroOrMore, cl::ValueDisallowed, cl::desc("Generate V4 MEMOP in code generation for Hexagon target")); +static cl::opt<bool> +EnableIEEERndNear( + "enable-hexagon-ieee-rnd-near", + cl::Hidden, cl::ZeroOrMore, cl::init(false), + cl::desc("Generate non-chopped conversion from fp to int for Hexagon target.")); + HexagonSubtarget::HexagonSubtarget(StringRef TT, StringRef CPU, StringRef FS): HexagonGenSubtargetInfo(TT, CPU, FS), HexagonArchVersion(V2), @@ -45,6 +52,8 @@ HexagonSubtarget::HexagonSubtarget(StringRef TT, StringRef CPU, StringRef FS): break; case HexagonSubtarget::V4: break; + case HexagonSubtarget::V5: + break; default: llvm_unreachable("Unknown Architecture Version."); } @@ -59,4 +68,10 @@ HexagonSubtarget::HexagonSubtarget(StringRef TT, StringRef CPU, StringRef FS): UseMemOps = true; else UseMemOps = false; + + if (EnableIEEERndNear) + ModeIEEERndNear = true; + else + ModeIEEERndNear = false; } + diff --git a/lib/Target/Hexagon/HexagonSubtarget.h b/lib/Target/Hexagon/HexagonSubtarget.h index 3079086986..5d9d6d890d 100644 --- a/lib/Target/Hexagon/HexagonSubtarget.h +++ b/lib/Target/Hexagon/HexagonSubtarget.h @@ -22,16 +22,18 @@ #include "HexagonGenSubtargetInfo.inc" #define Hexagon_SMALL_DATA_THRESHOLD 8 +#define Hexagon_SLOTS 4 namespace llvm { class HexagonSubtarget : public HexagonGenSubtargetInfo { bool UseMemOps; + bool ModeIEEERndNear; public: enum HexagonArchEnum { - V1, V2, V3, V4 + V1, V2, V3, V4, V5 }; HexagonArchEnum HexagonArchVersion; @@ -55,7 +57,11 @@ public: bool hasV3TOps () const { return HexagonArchVersion >= V3; } bool hasV3TOpsOnly () const { return HexagonArchVersion == V3; } bool hasV4TOps () const { return HexagonArchVersion >= V4; } + bool hasV4TOpsOnly () const { return HexagonArchVersion == V4; } bool useMemOps () const { return HexagonArchVersion >= V4 && UseMemOps; } + bool hasV5TOps () const { return HexagonArchVersion >= V5; } + bool hasV5TOpsOnly () const { return HexagonArchVersion == V5; } + bool modeIEEERndNear () const { return ModeIEEERndNear; } bool isSubtargetV2() const { return HexagonArchVersion == V2;} const std::string &getCPUString () const { return CPUString; } diff --git a/lib/Target/Hexagon/HexagonTargetMachine.cpp b/lib/Target/Hexagon/HexagonTargetMachine.cpp index d20c52c875..3c858f702c 100644 --- a/lib/Target/Hexagon/HexagonTargetMachine.cpp +++ b/lib/Target/Hexagon/HexagonTargetMachine.cpp @@ -55,7 +55,9 @@ 
HexagonTargetMachine::HexagonTargetMachine(const Target &T, StringRef TT,
                                            CodeModel::Model CM,
                                            CodeGenOpt::Level OL)
   : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
-    DataLayout("e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-a0:0") ,
+    DataLayout("e-p:32:32:32-"
+               "i64:64:64-i32:32:32-i16:16:16-i1:32:32-"
+               "f64:64:64-f32:32:32-a0:0-n32") ,
     Subtarget(TT, CPU, FS), InstrInfo(Subtarget), TLInfo(*this),
     TSInfo(*this),
     FrameLowering(Subtarget),
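
The target data-layout string gains f32:32:32 and f64:64:64 entries (ABI:preferred alignment for the new floating-point types) plus n32, which marks 32 bits as the native integer width. What those alignments mean on the C side can be sanity-checked with a trivial sketch for a Hexagon build (not part of the patch):

    // f32:32:32 says float has 4-byte ABI alignment; f64:64:64 says double
    // has 8-byte ABI alignment. These asserts should hold when compiling
    // for a Hexagon V5 target that uses the layout string above.
    static_assert(alignof(float) == 4,  "matches f32:32:32");
    static_assert(alignof(double) == 8, "matches f64:64:64");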
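
The rewritten mul (i64) pattern in HexagonIntrinsicsDerived.td builds the 64-bit product from one MPYU64 of the low words plus two HEXAGON_M2_maci multiply-accumulates folded into its high word. A rough scalar model of that arithmetic in plain C++ (the helper name is illustrative, not part of the backend):

    #include <cstdint>

    // The low words give the full 32x32->64 unsigned product (MPYU64); the
    // two cross products only contribute to the high word (two M2_maci
    // steps); high and low halves are then reassembled (COMBINE_rr).
    uint64_t mul64_model(uint64_t a, uint64_t b) {
      uint32_t a_lo = (uint32_t)a, a_hi = (uint32_t)(a >> 32);
      uint32_t b_lo = (uint32_t)b, b_hi = (uint32_t)(b >> 32);
      uint64_t lo_prod = (uint64_t)a_lo * b_lo;         // MPYU64
      uint32_t hi = (uint32_t)(lo_prod >> 32);          // subreg_hireg
      hi += a_lo * b_hi;                                // M2_maci #1
      hi += b_lo * a_hi;                                // M2_maci #2
      return ((uint64_t)hi << 32) | (uint32_t)lo_prod;  // COMBINE_rr
    }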
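
The new HexagonIntrinsicsV5.td maps the int_hexagon_F2_* intrinsics onto the V5 scalar floating-point instructions (sfadd, sfmpy, sffma, the compares, and the conversions). From C or C++ these are normally reached through compiler builtins; a sketch, assuming the usual __builtin_HEXAGON_<intrinsic-id> naming (check the toolchain headers, and note that ordinary float arithmetic should also select these instructions on a V5 target):

    // Hypothetical direct use of the V5 single-precision intrinsics.
    float fma_by_hand(float a, float b, float c) {
      float prod = __builtin_HEXAGON_F2_sfmpy(a, b);  // Rd = sfmpy(Rs, Rt)
      return __builtin_HEXAGON_F2_sfadd(prod, c);     // Rd = sfadd(Rs, Rt)
    }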
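
HexagonIntrinsicsV5.td carries both plain and _chop variants of the float-to-integer conversions (convert_sf2w, convert_df2d, and friends), and HexagonSubtarget.cpp adds the -enable-hexagon-ieee-rnd-near option (init(false), so off by default) for generating the non-chopped form. The difference, modeled with standard C++ (helper names are mine):

    #include <cmath>
    #include <cstdint>

    // ":chop" conversions truncate toward zero, matching a plain C++ cast.
    int32_t sf2w_chop(float x)     { return (int32_t)x; }                  // 2.7f -> 2
    // The non-chop conversions round in the current IEEE rounding mode
    // (round-to-nearest-even by default), which is what the new flag asks for.
    int32_t sf2w_rnd_near(float x) { return (int32_t)std::nearbyint(x); }  // 2.7f -> 3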
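
HexagonSplitTFRCondSets.cpp now also splits the floating-point conditional transfers (TFR_condset_rr_f, _rr64_f, _ri_f, _ir_f, _ii_f), in every case lowering one mux-like pseudo into a pair of predicated transfers. What the rewrite preserves, as a scalar sketch rather than the pass itself:

    #include <cstdint>

    // Before the pass: dst = mux(p, src1, src2) as a single pseudo.
    int32_t condset(bool p, int32_t src1, int32_t src2) {
      return p ? src1 : src2;
    }

    // After the pass: one copy predicated on p (TFR_cPt / TFRI_cPt) and one
    // on !p (TFR_cNotPt / TFRI_cNotPt); the pass additionally skips a copy
    // whose source and destination are already the same register.
    void condset_split(int32_t &dst, bool p, int32_t src1, int32_t src2) {
      if (p)  dst = src1;
      if (!p) dst = src2;
    }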