aboutsummaryrefslogtreecommitdiff
path: root/lib
diff options
context:
space:
mode:
authorScott Michel <scottm@aero.org>2008-03-05 23:02:02 +0000
committerScott Michel <scottm@aero.org>2008-03-05 23:02:02 +0000
commitad2715e0d787feaecb66060ea638e373dee7f6fb (patch)
tree9b5b6a19405a2a8706e442a246d058ba01b446db /lib
parent53dec47f3b6ab0f4fdc533b422c6cf404d5d6771 (diff)
- Fix support for "special" i64 immediates that can be loaded
using IL, ILA, et al. v2i64 and i64 are now supported by the select bits (SELB) instruction. - Add missing comparison operations (testcase forthcoming) - More multiclass refactoring. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@47973 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib')
-rw-r--r--lib/Target/CellSPU/SPUISelLowering.cpp57
-rw-r--r--lib/Target/CellSPU/SPUInstrInfo.td938
-rw-r--r--lib/Target/CellSPU/SPUNodes.td2
-rw-r--r--lib/Target/CellSPU/SPUOperands.td11
4 files changed, 361 insertions, 647 deletions
diff --git a/lib/Target/CellSPU/SPUISelLowering.cpp b/lib/Target/CellSPU/SPUISelLowering.cpp
index 99243d3a62..de1fff0cca 100644
--- a/lib/Target/CellSPU/SPUISelLowering.cpp
+++ b/lib/Target/CellSPU/SPUISelLowering.cpp
@@ -243,15 +243,23 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
setOperationAction(ISD::CTLZ , MVT::i32, Legal);
- // SPU does not have select or setcc
+ // SPU has a version of select
setOperationAction(ISD::SELECT, MVT::i1, Expand);
setOperationAction(ISD::SELECT, MVT::i8, Expand);
- setOperationAction(ISD::SELECT, MVT::i16, Expand);
- setOperationAction(ISD::SELECT, MVT::i32, Expand);
+ setOperationAction(ISD::SELECT, MVT::i16, Legal);
+ setOperationAction(ISD::SELECT, MVT::i32, Legal);
setOperationAction(ISD::SELECT, MVT::i64, Expand);
setOperationAction(ISD::SELECT, MVT::f32, Expand);
setOperationAction(ISD::SELECT, MVT::f64, Expand);
+ setOperationAction(ISD::SETCC, MVT::i1, Expand);
+ setOperationAction(ISD::SETCC, MVT::i8, Expand);
+ setOperationAction(ISD::SETCC, MVT::i16, Legal);
+ setOperationAction(ISD::SETCC, MVT::i32, Legal);
+ setOperationAction(ISD::SETCC, MVT::i64, Expand);
+ setOperationAction(ISD::SETCC, MVT::f32, Expand);
+ setOperationAction(ISD::SETCC, MVT::f64, Expand);
+
// Zero extension and sign extension for i64 have to be
// custom legalized
setOperationAction(ISD::ZERO_EXTEND, MVT::i64, Custom);
@@ -838,7 +846,6 @@ LowerConstant(SDOperand Op, SelectionDAG &DAG) {
SDOperand T = DAG.getConstant(CN->getValue(), MVT::i64);
return DAG.getNode(SPUISD::EXTRACT_ELT0, VT,
DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i64, T, T));
-
} else {
cerr << "LowerConstant: unhandled constant type "
<< MVT::getValueTypeString(VT)
@@ -981,6 +988,7 @@ LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG, int &VarArgsFrameIndex)
break;
case MVT::v2f64:
case MVT::v4f32:
+ case MVT::v2i64:
case MVT::v4i32:
case MVT::v8i16:
case MVT::v16i8:
@@ -1359,24 +1367,9 @@ SDOperand SPU::get_vec_u18imm(SDNode *N, SelectionDAG &DAG,
SDOperand SPU::get_vec_i16imm(SDNode *N, SelectionDAG &DAG,
MVT::ValueType ValueType) {
if (ConstantSDNode *CN = getVecImm(N)) {
- if (ValueType == MVT::i32) {
- int Value = (int) CN->getValue();
- int SExtValue = ((Value & 0xffff) << 16) >> 16;
-
- if (Value == SExtValue)
- return DAG.getConstant(Value, ValueType);
- } else if (ValueType == MVT::i16) {
- short Value = (short) CN->getValue();
- int SExtValue = ((int) Value << 16) >> 16;
-
- if (Value == (short) SExtValue)
- return DAG.getConstant(Value, ValueType);
- } else if (ValueType == MVT::i64) {
- int64_t Value = CN->getValue();
- int64_t SExtValue = ((Value & 0xffff) << (64 - 16)) >> (64 - 16);
-
- if (Value == SExtValue)
- return DAG.getConstant(Value, ValueType);
+ int64_t Value = CN->getSignExtended();
+ if (Value >= -(1 << 15) && Value <= ((1 << 15) - 1)) {
+ return DAG.getConstant(Value, ValueType);
}
}
@@ -1389,9 +1382,8 @@ SDOperand SPU::get_vec_i16imm(SDNode *N, SelectionDAG &DAG,
SDOperand SPU::get_vec_i10imm(SDNode *N, SelectionDAG &DAG,
MVT::ValueType ValueType) {
if (ConstantSDNode *CN = getVecImm(N)) {
- int Value = (int) CN->getValue();
- if ((ValueType == MVT::i32 && isS10Constant(Value))
- || (ValueType == MVT::i16 && isS10Constant((short) Value)))
+ int64_t Value = CN->getSignExtended();
+ if (isS10Constant(Value))
return DAG.getConstant(Value, ValueType);
}
@@ -1634,7 +1626,14 @@ static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) {
uint32_t upper = uint32_t(val >> 32);
uint32_t lower = uint32_t(val);
- if (val != 0) {
+ if (val == 0) {
+ SDOperand Zero = DAG.getTargetConstant(0, MVT::i64);
+ return DAG.getNode(ISD::BUILD_VECTOR, VT, Zero, Zero);
+ } else if (val == 0xffffffffffffffffULL) {
+ // For -1, this and has a chance of matching immAllOnesV.
+ SDOperand NegOne = DAG.getTargetConstant(-1, MVT::i64);
+ return DAG.getNode(ISD::BUILD_VECTOR, VT, NegOne, NegOne);
+ } else {
SDOperand LO32;
SDOperand HI32;
SmallVector<SDOperand, 16> ShufBytes;
@@ -1708,12 +1707,6 @@ static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) {
return DAG.getNode(SPUISD::SHUFB, VT, HI32, LO32,
DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8,
&ShufBytes[0], ShufBytes.size()));
- } else {
- // For zero, this can be lowered efficiently via v4i32 BUILD_VECTOR
- SDOperand Zero = DAG.getConstant(0, MVT::i32);
- return DAG.getNode(ISD::BIT_CONVERT, VT,
- DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32,
- Zero, Zero, Zero, Zero));
}
}
}
diff --git a/lib/Target/CellSPU/SPUInstrInfo.td b/lib/Target/CellSPU/SPUInstrInfo.td
index cfe47c6d32..b76e03dc34 100644
--- a/lib/Target/CellSPU/SPUInstrInfo.td
+++ b/lib/Target/CellSPU/SPUInstrInfo.td
@@ -196,15 +196,13 @@ class StoreAFormVec<ValueType vectype>
: RI16Form<0b0010010, (outs), (ins VECREG:$rT, addr256k:$src),
"stqa\t$rT, $src",
LoadStore,
- [(store (vectype VECREG:$rT), aform_addr:$src)]>
-{ }
+ [(store (vectype VECREG:$rT), aform_addr:$src)]>;
class StoreAForm<RegisterClass rclass>
: RI16Form<0b001001, (outs), (ins rclass:$rT, addr256k:$src),
"stqa\t$rT, $src",
LoadStore,
- [(store rclass:$rT, aform_addr:$src)]>
-{ }
+ [(store rclass:$rT, aform_addr:$src)]>;
multiclass StoreAForms
{
@@ -326,87 +324,89 @@ def ILHr8:
[(set R8C:$rT, immSExt8:$val)]>;
// IL does sign extension!
-def ILr64:
- RI16Form<0b100000010, (outs R64C:$rT), (ins s16imm_i64:$val),
- "il\t$rT, $val", ImmLoad,
- [(set R64C:$rT, immSExt16:$val)]>;
-
-def ILv2i64:
- RI16Form<0b100000010, (outs VECREG:$rT), (ins s16imm_i64:$val),
- "il\t$rT, $val", ImmLoad,
- [(set VECREG:$rT, (v2i64 v2i64SExt16Imm:$val))]>;
-
-def ILv4i32:
- RI16Form<0b100000010, (outs VECREG:$rT), (ins s16imm:$val),
- "il\t$rT, $val", ImmLoad,
- [(set VECREG:$rT, (v4i32 v4i32SExt16Imm:$val))]>;
-
-def ILr32:
- RI16Form<0b100000010, (outs R32C:$rT), (ins s16imm_i32:$val),
- "il\t$rT, $val", ImmLoad,
- [(set R32C:$rT, immSExt16:$val)]>;
-
-def ILf32:
- RI16Form<0b100000010, (outs R32FP:$rT), (ins s16imm_f32:$val),
- "il\t$rT, $val", ImmLoad,
- [(set R32FP:$rT, fpimmSExt16:$val)]>;
-
-def ILf64:
- RI16Form<0b100000010, (outs R64FP:$rT), (ins s16imm_f64:$val),
- "il\t$rT, $val", ImmLoad,
- [(set R64FP:$rT, fpimmSExt16:$val)]>;
-
-def ILHUv4i32:
- RI16Form<0b010000010, (outs VECREG:$rT), (ins u16imm:$val),
- "ilhu\t$rT, $val", ImmLoad,
- [(set VECREG:$rT, (v4i32 immILHUvec:$val))]>;
-
-def ILHUr32:
- RI16Form<0b010000010, (outs R32C:$rT), (ins u16imm:$val),
- "ilhu\t$rT, $val", ImmLoad,
- [(set R32C:$rT, hi16:$val)]>;
-
-// ILHUf32: Used to custom lower float constant loads
-def ILHUf32:
- RI16Form<0b010000010, (outs R32FP:$rT), (ins f16imm:$val),
- "ilhu\t$rT, $val", ImmLoad,
- [(set R32FP:$rT, hi16_f32:$val)]>;
-
-// ILHUhi: Used for loading high portion of an address. Note the symbolHi
-// printer used for the operand.
-def ILHUhi:
- RI16Form<0b010000010, (outs R32C:$rT), (ins symbolHi:$val),
- "ilhu\t$rT, $val", ImmLoad,
- [(set R32C:$rT, hi16:$val)]>;
+
+class ILInst<dag OOL, dag IOL, list<dag> pattern>:
+ RI16Form<0b100000010, OOL, IOL, "il\t$rT, $val",
+ ImmLoad, pattern>;
+
+class ILVecInst<ValueType vectype, Operand immtype, PatLeaf xform>:
+ ILInst<(outs VECREG:$rT), (ins immtype:$val),
+ [(set (vectype VECREG:$rT), (vectype xform:$val))]>;
+
+class ILRegInst<RegisterClass rclass, Operand immtype, PatLeaf xform>:
+ ILInst<(outs rclass:$rT), (ins immtype:$val),
+ [(set rclass:$rT, xform:$val)]>;
+
+multiclass ImmediateLoad
+{
+ def v2i64: ILVecInst<v2i64, s16imm_i64, v2i64SExt16Imm>;
+ def v4i32: ILVecInst<v4i32, s16imm_i32, v4i32SExt16Imm>;
+
+ // TODO: Need v2f64, v4f32
+
+ def r64: ILRegInst<R64C, s16imm_i64, immSExt16>;
+ def r32: ILRegInst<R32C, s16imm_i32, immSExt16>;
+ def f32: ILRegInst<R32FP, s16imm_f32, fpimmSExt16>;
+ def f64: ILRegInst<R64FP, s16imm_f64, fpimmSExt16>;
+}
+
+defm IL : ImmediateLoad;
+
+class ILHUInst<dag OOL, dag IOL, list<dag> pattern>:
+ RI16Form<0b010000010, OOL, IOL, "ilhu\t$rT, $val",
+ ImmLoad, pattern>;
+
+class ILHUVecInst<ValueType vectype, Operand immtype, PatLeaf xform>:
+ ILHUInst<(outs VECREG:$rT), (ins immtype:$val),
+ [(set (vectype VECREG:$rT), (vectype xform:$val))]>;
+
+class ILHURegInst<RegisterClass rclass, Operand immtype, PatLeaf xform>:
+ ILHUInst<(outs rclass:$rT), (ins immtype:$val),
+ [(set rclass:$rT, xform:$val)]>;
+
+multiclass ImmLoadHalfwordUpper
+{
+ def v2i64: ILHUVecInst<v2i64, u16imm_i64, immILHUvec_i64>;
+ def v4i32: ILHUVecInst<v4i32, u16imm, immILHUvec>;
+
+ def r64: ILHURegInst<R64C, u16imm_i64, hi16>;
+ def r32: ILHURegInst<R32C, u16imm, hi16>;
+
+ // Loads the high portion of an address
+ def hi: ILHURegInst<R32C, symbolHi, hi16>;
+
+ // Used in custom lowering constant SFP loads:
+ def f32: ILHURegInst<R32FP, f16imm, hi16_f32>;
+}
+
+defm ILHU : ImmLoadHalfwordUpper;
// Immediate load address (can also be used to load 18-bit unsigned constants,
// see the zext 16->32 pattern)
+
class ILAInst<dag OOL, dag IOL, list<dag> pattern>:
RI18Form<0b1000010, OOL, IOL, "ila\t$rT, $val",
LoadNOP, pattern>;
-multiclass ImmLoadAddress
-{
- def v2i64: ILAInst<(outs VECREG:$rT), (ins u18imm:$val),
- [(set (v2i64 VECREG:$rT), v2i64Uns18Imm:$val)]>;
-
- def v4i32: ILAInst<(outs VECREG:$rT), (ins u18imm:$val),
- [(set (v4i32 VECREG:$rT), v4i32Uns18Imm:$val)]>;
+class ILAVecInst<ValueType vectype, Operand immtype, PatLeaf xform>:
+ ILAInst<(outs VECREG:$rT), (ins immtype:$val),
+ [(set (vectype VECREG:$rT), (vectype xform:$val))]>;
- def r64: ILAInst<(outs R64C:$rT), (ins u18imm_i64:$val),
- [(set R64C:$rT, imm18:$val)]>;
+class ILARegInst<RegisterClass rclass, Operand immtype, PatLeaf xform>:
+ ILAInst<(outs rclass:$rT), (ins immtype:$val),
+ [(set rclass:$rT, xform:$val)]>;
- def r32: ILAInst<(outs R32C:$rT), (ins u18imm:$val),
- [(set R32C:$rT, imm18:$val)]>;
-
- def f32: ILAInst<(outs R32FP:$rT), (ins f18imm:$val),
- [(set R32FP:$rT, fpimm18:$val)]>;
+multiclass ImmLoadAddress
+{
+ def v2i64: ILAVecInst<v2i64, u18imm, v2i64Uns18Imm>;
+ def v4i32: ILAVecInst<v4i32, u18imm, v4i32Uns18Imm>;
- def f64: ILAInst<(outs R64FP:$rT), (ins f18imm_f64:$val),
- [(set R64FP:$rT, fpimm18:$val)]>;
+ def r64: ILARegInst<R64C, u18imm_i64, imm18>;
+ def r32: ILARegInst<R32C, u18imm, imm18>;
+ def f32: ILARegInst<R32FP, f18imm, fpimm18>;
+ def f64: ILARegInst<R64FP, f18imm_f64, fpimm18>;
- def lo: ILAInst<(outs R32C:$rT), (ins symbolLo:$val),
- [(set R32C:$rT, imm18:$val)]>;
+ def lo: ILARegInst<R32C, symbolLo, imm18>;
def lsa: ILAInst<(outs R32C:$rT), (ins symbolLSA:$val),
[/* no pattern */]>;
@@ -419,43 +419,41 @@ defm ILA : ImmLoadAddress;
// Note that these are really two operand instructions, but they're encoded
// as three operands with the first two arguments tied-to each other.
-def IOHLvec:
- RI16Form<0b100000110, (outs VECREG:$rT), (ins VECREG:$rS, u16imm:$val),
- "iohl\t$rT, $val", ImmLoad,
- [/* insert intrinsic here */]>,
- RegConstraint<"$rS = $rT">,
- NoEncode<"$rS">;
-
-def IOHLr32:
- RI16Form<0b100000110, (outs R32C:$rT), (ins R32C:$rS, i32imm:$val),
- "iohl\t$rT, $val", ImmLoad,
- [/* insert intrinsic here */]>,
- RegConstraint<"$rS = $rT">,
- NoEncode<"$rS">;
-
-def IOHLf32:
- RI16Form<0b100000110, (outs R32FP:$rT), (ins R32FP:$rS, f32imm:$val),
- "iohl\t$rT, $val", ImmLoad,
- [/* insert intrinsic here */]>,
- RegConstraint<"$rS = $rT">,
- NoEncode<"$rS">;
-
-def IOHLlo:
- RI16Form<0b100000110, (outs R32C:$rT), (ins R32C:$rS, symbolLo:$val),
- "iohl\t$rT, $val", ImmLoad,
- [/* no pattern */]>,
- RegConstraint<"$rS = $rT">,
- NoEncode<"$rS">;
+class IOHLInst<dag OOL, dag IOL, list<dag> pattern>:
+ RI16Form<0b100000110, OOL, IOL, "iohl\t$rT, $val",
+ ImmLoad, pattern>,
+ RegConstraint<"$rS = $rT">,
+ NoEncode<"$rS">;
+
+class IOHLVecInst<ValueType vectype, Operand immtype /* , PatLeaf xform */>:
+ IOHLInst<(outs VECREG:$rT), (ins VECREG:$rS, immtype:$val),
+ [/* no pattern */]>;
+
+class IOHLRegInst<RegisterClass rclass, Operand immtype /* , PatLeaf xform */>:
+ IOHLInst<(outs rclass:$rT), (ins rclass:$rS, immtype:$val),
+ [/* no pattern */]>;
+
+multiclass ImmOrHalfwordLower
+{
+ def v2i64: IOHLVecInst<v2i64, u16imm_i64>;
+ def v4i32: IOHLVecInst<v4i32, u16imm>;
+
+ def r32: IOHLRegInst<R32C, i32imm>;
+ def f32: IOHLRegInst<R32FP, f32imm>;
+
+ def lo: IOHLRegInst<R32C, symbolLo>;
+}
+
+defm IOHL: ImmOrHalfwordLower;
// Form select mask for bytes using immediate, used in conjunction with the
// SELB instruction:
-class FSMBIVec<ValueType vectype>
- : RI16Form<0b101001100, (outs VECREG:$rT), (ins u16imm:$val),
- "fsmbi\t$rT, $val",
- SelectOp,
- [(set (vectype VECREG:$rT), (SPUfsmbi immU16:$val))]>
-{ }
+class FSMBIVec<ValueType vectype>:
+ RI16Form<0b101001100, (outs VECREG:$rT), (ins u16imm:$val),
+ "fsmbi\t$rT, $val",
+ SelectOp,
+ [(set (vectype VECREG:$rT), (SPUfsmbi (i32 immU16:$val)))]>;
multiclass FormSelectMaskBytesImm
{
@@ -470,22 +468,22 @@ defm FSMBI : FormSelectMaskBytesImm;
// fsmb: Form select mask for bytes. N.B. Input operand, $rA, is 16-bits
def FSMB:
RRForm_1<0b01101101100, (outs VECREG:$rT), (ins R16C:$rA),
- "fsmb\t$rT, $rA", SelectOp,
- []>;
+ "fsmb\t$rT, $rA", SelectOp,
+ [(set (v16i8 VECREG:$rT), (SPUfsmbi R16C:$rA))]>;
// fsmh: Form select mask for halfwords. N.B., Input operand, $rA, is
// only 8-bits wide (even though it's input as 16-bits here)
def FSMH:
RRForm_1<0b10101101100, (outs VECREG:$rT), (ins R16C:$rA),
"fsmh\t$rT, $rA", SelectOp,
- []>;
+ [(set (v8i16 VECREG:$rT), (SPUfsmbi R16C:$rA))]>;
// fsm: Form select mask for words. Like the other fsm* instructions,
// only the lower 4 bits of $rA are significant.
def FSM:
RRForm_1<0b00101101100, (outs VECREG:$rT), (ins R16C:$rA),
"fsm\t$rT, $rA", SelectOp,
- []>;
+ [(set (v4i32 VECREG:$rT), (SPUfsmbi R16C:$rA))]>;
//===----------------------------------------------------------------------===//
// Integer and Logical Operations:
@@ -926,6 +924,10 @@ class ANDVecInst<ValueType vectype>:
[(set (vectype VECREG:$rT), (and (vectype VECREG:$rA),
(vectype VECREG:$rB)))]>;
+class ANDRegInst<RegisterClass rclass>:
+ ANDInst<(outs rclass:$rT), (ins rclass:$rA, rclass:$rB),
+ [(set rclass:$rT, (and rclass:$rA, rclass:$rB))]>;
+
multiclass BitwiseAnd
{
def v16i8: ANDVecInst<v16i8>;
@@ -933,17 +935,11 @@ multiclass BitwiseAnd
def v4i32: ANDVecInst<v4i32>;
def v2i64: ANDVecInst<v2i64>;
- def r64: ANDInst<(outs R64C:$rT), (ins R64C:$rA, R64C:$rB),
- [(set R64C:$rT, (and R64C:$rA, R64C:$rB))]>;
-
- def r32: ANDInst<(outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
- [(set R32C:$rT, (and R32C:$rA, R32C:$rB))]>;
-
- def r16: ANDInst<(outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
- [(set R16C:$rT, (and R16C:$rA, R16C:$rB))]>;
-
- def r8: ANDInst<(outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
- [(set R8C:$rT, (and R8C:$rA, R8C:$rB))]>;
+ def r128: ANDRegInst<GPRC>;
+ def r64: ANDRegInst<R64C>;
+ def r32: ANDRegInst<R32C>;
+ def r16: ANDRegInst<R16C>;
+ def r8: ANDRegInst<R8C>;
//===---------------------------------------------
// Special instructions to perform the fabs instruction
@@ -1323,61 +1319,49 @@ def ORXv4i32:
[]>;
// XOR:
-def XORv16i8:
- RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
- "xor\t$rT, $rA, $rB", IntegerOp,
- [(set (v16i8 VECREG:$rT), (xor (v16i8 VECREG:$rA), (v16i8 VECREG:$rB)))]>;
-def XORv8i16:
- RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
- "xor\t$rT, $rA, $rB", IntegerOp,
- [(set (v8i16 VECREG:$rT), (xor (v8i16 VECREG:$rA), (v8i16 VECREG:$rB)))]>;
+class XORInst<dag OOL, dag IOL, list<dag> pattern> :
+ RRForm<0b10010010000, OOL, IOL, "xor\t$rT, $rA, $rB",
+ IntegerOp, pattern>;
-def XORv4i32:
- RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
- "xor\t$rT, $rA, $rB", IntegerOp,
- [(set (v4i32 VECREG:$rT), (xor (v4i32 VECREG:$rA), (v4i32 VECREG:$rB)))]>;
+class XORVecInst<ValueType vectype>:
+ XORInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
+ [(set (vectype VECREG:$rT), (xor (vectype VECREG:$rA),
+ (vectype VECREG:$rB)))]>;
-def XORr32:
- RRForm<0b10010010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
- "xor\t$rT, $rA, $rB", IntegerOp,
- [(set R32C:$rT, (xor R32C:$rA, R32C:$rB))]>;
+class XORRegInst<RegisterClass rclass>:
+ XORInst<(outs rclass:$rT), (ins rclass:$rA, rclass:$rB),
+ [(set rclass:$rT, (xor rclass:$rA, rclass:$rB))]>;
-//==----------------------------------------------------------
-// Special forms for floating point instructions.
-// Bitwise ORs and ANDs don't make sense for normal floating
-// point numbers. These operations (fneg and fabs), however,
-// require bitwise logical ops to manipulate the sign bit.
-def XORfneg32:
- RRForm<0b10010010000, (outs R32FP:$rT), (ins R32FP:$rA, R32C:$rB),
- "xor\t$rT, $rA, $rB", IntegerOp,
- [/* Intentionally does not match a pattern, see fneg32 */]>;
-
-// KLUDGY! Better way to do this without a VECREG? bitconvert?
-// VECREG is assumed to contain two identical 64-bit masks, so
-// it doesn't matter which word we select for the xor
-def XORfneg64:
- RRForm<0b10010010000, (outs R64FP:$rT), (ins R64FP:$rA, VECREG:$rB),
- "xor\t$rT, $rA, $rB", IntegerOp,
- [/* Intentionally does not match a pattern, see fneg64 */]>;
-
-// Could use XORv4i32, but will use this for clarity
-def XORfnegvec:
- RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
- "xor\t$rT, $rA, $rB", IntegerOp,
- [/* Intentionally does not match a pattern, see fneg{32,64} */]>;
+multiclass BitwiseExclusiveOr
+{
+ def v16i8: XORVecInst<v16i8>;
+ def v8i16: XORVecInst<v8i16>;
+ def v4i32: XORVecInst<v4i32>;
+ def v2i64: XORVecInst<v2i64>;
-//==----------------------------------------------------------
+ def r128: XORRegInst<GPRC>;
+ def r64: XORRegInst<R64C>;
+ def r32: XORRegInst<R32C>;
+ def r16: XORRegInst<R16C>;
+ def r8: XORRegInst<R8C>;
-def XORr16:
- RRForm<0b10010010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
- "xor\t$rT, $rA, $rB", IntegerOp,
- [(set R16C:$rT, (xor R16C:$rA, R16C:$rB))]>;
+ // Special forms for floating point instructions.
+ // fneg and fabs require bitwise logical ops to manipulate the sign bit.
-def XORr8:
- RRForm<0b10010010000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
- "xor\t$rT, $rA, $rB", IntegerOp,
- [(set R8C:$rT, (xor R8C:$rA, R8C:$rB))]>;
+ def fneg32: XORInst<(outs R32FP:$rT), (ins R32FP:$rA, R32C:$rB),
+ [/* no pattern */]>;
+
+ def fneg64: XORInst<(outs R64FP:$rT), (ins R64FP:$rA, VECREG:$rB),
+ [/* no pattern */]>;
+
+ def fnegvec: XORInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
+ [/* no pattern, see fneg{32,64} */]>;
+}
+
+defm XOR : BitwiseExclusiveOr;
+
+//==----------------------------------------------------------
class XORBIInst<dag OOL, dag IOL, list<dag> pattern>:
RI10Form<0b01100000, OOL, IOL, "xorbi\t$rT, $rA, $val",
@@ -1486,433 +1470,156 @@ def NORr8:
"nor\t$rT, $rA, $rB", IntegerOp,
[(set R8C:$rT, (not (or R8C:$rA, R8C:$rB)))]>;
-// EQV: Equivalence (1 for each same bit, otherwise 0)
-def EQVv16i8:
- RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
- "eqv\t$rT, $rA, $rB", IntegerOp,
- [(set (v16i8 VECREG:$rT), (or (and (v16i8 VECREG:$rA),
- (v16i8 VECREG:$rB)),
- (and (vnot (v16i8 VECREG:$rA)),
- (vnot (v16i8 VECREG:$rB)))))]>;
-
-def : Pat<(xor (v16i8 VECREG:$rA), (vnot (v16i8 VECREG:$rB))),
- (EQVv16i8 VECREG:$rA, VECREG:$rB)>;
-
-def : Pat<(xor (vnot (v16i8 VECREG:$rA)), (v16i8 VECREG:$rB)),
- (EQVv16i8 VECREG:$rA, VECREG:$rB)>;
-
-def EQVv8i16:
- RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
- "eqv\t$rT, $rA, $rB", IntegerOp,
- [(set (v8i16 VECREG:$rT), (or (and (v8i16 VECREG:$rA),
- (v8i16 VECREG:$rB)),
- (and (vnot (v8i16 VECREG:$rA)),
- (vnot (v8i16 VECREG:$rB)))))]>;
-
-def : Pat<(xor (v8i16 VECREG:$rA), (vnot (v8i16 VECREG:$rB))),
- (EQVv8i16 VECREG:$rA, VECREG:$rB)>;
-
-def : Pat<(xor (vnot (v8i16 VECREG:$rA)), (v8i16 VECREG:$rB)),
- (EQVv8i16 VECREG:$rA, VECREG:$rB)>;
-
-def EQVv4i32:
- RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
- "eqv\t$rT, $rA, $rB", IntegerOp,
- [(set (v4i32 VECREG:$rT), (or (and (v4i32 VECREG:$rA),
- (v4i32 VECREG:$rB)),
- (and (vnot (v4i32 VECREG:$rA)),
- (vnot (v4i32 VECREG:$rB)))))]>;
-
-def : Pat<(xor (v4i32 VECREG:$rA), (vnot (v4i32 VECREG:$rB))),
- (EQVv4i32 VECREG:$rA, VECREG:$rB)>;
-
-def : Pat<(xor (vnot (v4i32 VECREG:$rA)), (v4i32 VECREG:$rB)),
- (EQVv4i32 VECREG:$rA, VECREG:$rB)>;
-
-def EQVr32:
- RRForm<0b10010010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
- "eqv\t$rT, $rA, $rB", IntegerOp,
- [(set R32C:$rT, (or (and R32C:$rA, R32C:$rB),
- (and (not R32C:$rA), (not R32C:$rB))))]>;
-
-def : Pat<(xor R32C:$rA, (not R32C:$rB)),
- (EQVr32 R32C:$rA, R32C:$rB)>;
-
-def : Pat<(xor (not R32C:$rA), R32C:$rB),
- (EQVr32 R32C:$rA, R32C:$rB)>;
-
-def EQVr16:
- RRForm<0b10010010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
- "eqv\t$rT, $rA, $rB", IntegerOp,
- [(set R16C:$rT, (or (and R16C:$rA, R16C:$rB),
- (and (not R16C:$rA), (not R16C:$rB))))]>;
-
-def : Pat<(xor R16C:$rA, (not R16C:$rB)),
- (EQVr16 R16C:$rA, R16C:$rB)>;
-
-def : Pat<(xor (not R16C:$rA), R16C:$rB),
- (EQVr16 R16C:$rA, R16C:$rB)>;
-
-def EQVr8:
- RRForm<0b10010010000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
- "eqv\t$rT, $rA, $rB", IntegerOp,
- [(set R8C:$rT, (or (and R8C:$rA, R8C:$rB),
- (and (not R8C:$rA), (not R8C:$rB))))]>;
-
-def : Pat<(xor R8C:$rA, (not R8C:$rB)),
- (EQVr8 R8C:$rA, R8C:$rB)>;
-
-def : Pat<(xor (not R8C:$rA), R8C:$rB),
- (EQVr8 R8C:$rA, R8C:$rB)>;
-
-// gcc optimizes (p & q) | (~p & ~q) -> ~(p | q) | (p & q), so match that
-// pattern also:
-def : Pat<(or (vnot (or (v16i8 VECREG:$rA), (v16i8 VECREG:$rB))),
- (and (v16i8 VECREG:$rA), (v16i8 VECREG:$rB))),
- (EQVv16i8 VECREG:$rA, VECREG:$rB)>;
-
-def : Pat<(or (vnot (or (v8i16 VECREG:$rA), (v8i16 VECREG:$rB))),
- (and (v8i16 VECREG:$rA), (v8i16 VECREG:$rB))),
- (EQVv8i16 VECREG:$rA, VECREG:$rB)>;
-
-def : Pat<(or (vnot (or (v4i32 VECREG:$rA), (v4i32 VECREG:$rB))),
- (and (v4i32 VECREG:$rA), (v4i32 VECREG:$rB))),
- (EQVv4i32 VECREG:$rA, VECREG:$rB)>;
-
-def : Pat<(or (not (or R32C:$rA, R32C:$rB)), (and R32C:$rA, R32C:$rB)),
- (EQVr32 R32C:$rA, R32C:$rB)>;
-
-def : Pat<(or (not (or R16C:$rA, R16C:$rB)), (and R16C:$rA, R16C:$rB)),
- (EQVr16 R16C:$rA, R16C:$rB)>;
-
-def : Pat<(or (not (or R8C:$rA, R8C:$rB)), (and R8C:$rA, R8C:$rB)),
- (EQVr8 R8C:$rA, R8C:$rB)>;
-
// Select bits:
-def SELBv16i8:
- RRRForm<0b1000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
- "selb\t$rT, $rA, $rB, $rC", IntegerOp,
- [(set (v16i8 VECREG:$rT),
- (SPUselb (v16i8 VECREG:$rA), (v16i8 VECREG:$rB),
- (v16i8 VECREG:$rC)))]>;
-
-def : Pat<(or (and (v16i8 VECREG:$rA), (v16i8 VECREG:$rC)),
- (and (v16i8 VECREG:$rB), (vnot (v16i8 VECREG:$rC)))),
- (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v16i8 VECREG:$rC), (v16i8 VECREG:$rA)),
- (and (v16i8 VECREG:$rB), (vnot (v16i8 VECREG:$rC)))),
- (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v16i8 VECREG:$rA), (v16i8 VECREG:$rC)),
- (and (vnot (v16i8 VECREG:$rC)), (v16i8 VECREG:$rB))),
- (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v16i8 VECREG:$rC), (v16i8 VECREG:$rA)),
- (and (vnot (v16i8 VECREG:$rC)), (v16i8 VECREG:$rB))),
- (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v16i8 VECREG:$rA), (vnot (v16i8 VECREG:$rC))),
- (and (v16i8 VECREG:$rB), (v16i8 VECREG:$rC))),
- (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v16i8 VECREG:$rA), (vnot (v16i8 VECREG:$rC))),
- (and (v16i8 VECREG:$rC), (v16i8 VECREG:$rB))),
- (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (vnot (v16i8 VECREG:$rC)), (v16i8 VECREG:$rA)),
- (and (v16i8 VECREG:$rB), (v16i8 VECREG:$rC))),
- (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (vnot (v16i8 VECREG:$rC)), (v16i8 VECREG:$rA)),
- (and (v16i8 VECREG:$rC), (v16i8 VECREG:$rB))),
- (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v16i8 VECREG:$rA), (v16i8 VECREG:$rC)),
- (and (v16i8 VECREG:$rB), (vnot (v16i8 VECREG:$rC)))),
- (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v16i8 VECREG:$rC), (v16i8 VECREG:$rA)),
- (and (v16i8 VECREG:$rB), (vnot (v16i8 VECREG:$rC)))),
- (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v16i8 VECREG:$rA), (v16i8 VECREG:$rC)),
- (and (vnot (v16i8 VECREG:$rC)), (v16i8 VECREG:$rB))),
- (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v16i8 VECREG:$rC), (v16i8 VECREG:$rA)),
- (and (vnot (v16i8 VECREG:$rC)), (v16i8 VECREG:$rB))),
- (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v16i8 VECREG:$rA), (vnot (v16i8 VECREG:$rC))),
- (and (v16i8 VECREG:$rB), (v16i8 VECREG:$rC))),
- (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v16i8 VECREG:$rA), (vnot (v16i8 VECREG:$rC))),
- (and (v16i8 VECREG:$rC), (v16i8 VECREG:$rB))),
- (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (vnot (v16i8 VECREG:$rC)), (v16i8 VECREG:$rA)),
- (and (v16i8 VECREG:$rB), (v16i8 VECREG:$rC))),
- (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (vnot (v16i8 VECREG:$rC)), (v16i8 VECREG:$rA)),
- (and (v16i8 VECREG:$rC), (v16i8 VECREG:$rB))),
- (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def SELBv8i16:
- RRRForm<0b1000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
- "selb\t$rT, $rA, $rB, $rC", IntegerOp,
- [(set (v8i16 VECREG:$rT),
- (SPUselb (v8i16 VECREG:$rA), (v8i16 VECREG:$rB),
- (v8i16 VECREG:$rC)))]>;
-
-def : Pat<(or (and (v8i16 VECREG:$rA), (v8i16 VECREG:$rC)),
- (and (v8i16 VECREG:$rB), (vnot (v8i16 VECREG:$rC)))),
- (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v8i16 VECREG:$rC), (v8i16 VECREG:$rA)),
- (and (v8i16 VECREG:$rB), (vnot (v8i16 VECREG:$rC)))),
- (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v8i16 VECREG:$rA), (v8i16 VECREG:$rC)),
- (and (vnot (v8i16 VECREG:$rC)), (v8i16 VECREG:$rB))),
- (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v8i16 VECREG:$rC), (v8i16 VECREG:$rA)),
- (and (vnot (v8i16 VECREG:$rC)), (v8i16 VECREG:$rB))),
- (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v8i16 VECREG:$rA), (vnot (v8i16 VECREG:$rC))),
- (and (v8i16 VECREG:$rB), (v8i16 VECREG:$rC))),
- (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v8i16 VECREG:$rA), (vnot (v8i16 VECREG:$rC))),
- (and (v8i16 VECREG:$rC), (v8i16 VECREG:$rB))),
- (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (vnot (v8i16 VECREG:$rC)), (v8i16 VECREG:$rA)),
- (and (v8i16 VECREG:$rB), (v8i16 VECREG:$rC))),
- (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (vnot (v8i16 VECREG:$rC)), (v8i16 VECREG:$rA)),
- (and (v8i16 VECREG:$rC), (v8i16 VECREG:$rB))),
- (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v8i16 VECREG:$rA), (v8i16 VECREG:$rC)),
- (and (v8i16 VECREG:$rB), (vnot (v8i16 VECREG:$rC)))),
- (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v8i16 VECREG:$rC), (v8i16 VECREG:$rA)),
- (and (v8i16 VECREG:$rB), (vnot (v8i16 VECREG:$rC)))),
- (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v8i16 VECREG:$rA), (v8i16 VECREG:$rC)),
- (and (vnot (v8i16 VECREG:$rC)), (v8i16 VECREG:$rB))),
- (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v8i16 VECREG:$rC), (v8i16 VECREG:$rA)),
- (and (vnot (v8i16 VECREG:$rC)), (v8i16 VECREG:$rB))),
- (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v8i16 VECREG:$rA), (vnot (v8i16 VECREG:$rC))),
- (and (v8i16 VECREG:$rB), (v8i16 VECREG:$rC))),
- (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v8i16 VECREG:$rA), (vnot (v8i16 VECREG:$rC))),
- (and (v8i16 VECREG:$rC), (v8i16 VECREG:$rB))),
- (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (vnot (v8i16 VECREG:$rC)), (v8i16 VECREG:$rA)),
- (and (v8i16 VECREG:$rB), (v8i16 VECREG:$rC))),
- (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (vnot (v8i16 VECREG:$rC)), (v8i16 VECREG:$rA)),
- (and (v8i16 VECREG:$rC), (v8i16 VECREG:$rB))),
- (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def SELBv4i32:
- RRRForm<0b1000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
- "selb\t$rT, $rA, $rB, $rC", IntegerOp,
- [(set (v4i32 VECREG:$rT),
- (SPUselb (v4i32 VECREG:$rA), (v4i32 VECREG:$rB),
- (v4i32 VECREG:$rC)))]>;
-
-def : Pat<(or (and (v4i32 VECREG:$rA), (v4i32 VECREG:$rC)),
- (and (v4i32 VECREG:$rB), (vnot (v4i32 VECREG:$rC)))),
- (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v4i32 VECREG:$rC), (v4i32 VECREG:$rA)),
- (and (v4i32 VECREG:$rB), (vnot (v4i32 VECREG:$rC)))),
- (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v4i32 VECREG:$rA), (v4i32 VECREG:$rC)),
- (and (vnot (v4i32 VECREG:$rC)), (v4i32 VECREG:$rB))),
- (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v4i32 VECREG:$rC), (v4i32 VECREG:$rA)),
- (and (vnot (v4i32 VECREG:$rC)), (v4i32 VECREG:$rB))),
- (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v4i32 VECREG:$rA), (vnot (v4i32 VECREG:$rC))),
- (and (v4i32 VECREG:$rB), (v4i32 VECREG:$rC))),
- (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v4i32 VECREG:$rA), (vnot (v4i32 VECREG:$rC))),
- (and (v4i32 VECREG:$rC), (v4i32 VECREG:$rB))),
- (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (vnot (v4i32 VECREG:$rC)), (v4i32 VECREG:$rA)),
- (and (v4i32 VECREG:$rB), (v4i32 VECREG:$rC))),
- (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (vnot (v4i32 VECREG:$rC)), (v4i32 VECREG:$rA)),
- (and (v4i32 VECREG:$rC), (v4i32 VECREG:$rB))),
- (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v4i32 VECREG:$rA), (v4i32 VECREG:$rC)),
- (and (v4i32 VECREG:$rB), (vnot (v4i32 VECREG:$rC)))),
- (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v4i32 VECREG:$rC), (v4i32 VECREG:$rA)),
- (and (v4i32 VECREG:$rB), (vnot (v4i32 VECREG:$rC)))),
- (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v4i32 VECREG:$rA), (v4i32 VECREG:$rC)),
- (and (vnot (v4i32 VECREG:$rC)), (v4i32 VECREG:$rB))),
- (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v4i32 VECREG:$rC), (v4i32 VECREG:$rA)),
- (and (vnot (v4i32 VECREG:$rC)), (v4i32 VECREG:$rB))),
- (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v4i32 VECREG:$rA), (vnot (v4i32 VECREG:$rC))),
- (and (v4i32 VECREG:$rB), (v4i32 VECREG:$rC))),
- (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (v4i32 VECREG:$rA), (vnot (v4i32 VECREG:$rC))),
- (and (v4i32 VECREG:$rC), (v4i32 VECREG:$rB))),
- (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (vnot (v4i32 VECREG:$rC)), (v4i32 VECREG:$rA)),
- (and (v4i32 VECREG:$rB), (v4i32 VECREG:$rC))),
- (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def : Pat<(or (and (vnot (v4i32 VECREG:$rC)), (v4i32 VECREG:$rA)),
- (and (v4i32 VECREG:$rC), (v4i32 VECREG:$rB))),
- (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
-
-def SELBr32:
- RRRForm<0b1000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB, R32C:$rC),
- "selb\t$rT, $rA, $rB, $rC", IntegerOp,
- []>;
-
-// And the various patterns that can be matched... (all 8 of them :-)
-def : Pat<(or (and R32C:$rA, R32C:$rC),
- (and R32C:$rB, (not R32C:$rC))),
- (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;
-
-def : Pat<(or (and R32C:$rC, R32C:$rA),
- (and R32C:$rB, (not R32C:$rC))),
- (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;
-
-def : Pat<(or (and R32C:$rA, R32C:$rC),
- (and (not R32C:$rC), R32C:$rB)),
- (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;
-
-def : Pat<(or (and R32C:$rC, R32C:$rA),
- (and (not R32C:$rC), R32C:$rB)),
- (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;
-
-def : Pat<(or (and R32C:$rA, (not R32C:$rC)),
- (and R32C:$rB, R32C:$rC)),
- (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;
-
-def : Pat<(or (and R32C:$rA, (not R32C:$rC)),
- (and R32C:$rC, R32C:$rB)),
- (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;
-
-def : Pat<(or (and (not R32C:$rC), R32C:$rA),
- (and R32C:$rB, R32C:$rC)),