author     Dale Johannesen <dalej@apple.com>   2008-08-28 02:44:49 +0000
committer  Dale Johannesen <dalej@apple.com>   2008-08-28 02:44:49 +0000
commit     e00a8a2a2e11a37fd1ddf2504bd22d225d0994d0 (patch)
tree       484cf9b89070b672dd21616a49c0f15109bd51b8
parent     f2c785edf0d7ceb4491333146e289fdbbba1dddf (diff)
Split the ATOMIC NodeTypes to include the size, e.g. ATOMIC_LOAD_ADD_{8,16,32,64} instead of ATOMIC_LOAD_ADD, and increased the hardcoded constant OpActionsCapacity to match. Large but boring; no functional change.

This is to support partial-word atomics on ppc; i8 is not a valid type there, so by the time we get to lowering, the ATOMIC_LOAD nodes look the same whether the type was i8 or i32. The information could instead be added to the AtomicSDNode, but that is already the largest SDNode; I don't fully understand the SDNode allocation, but it is sensitive to the largest node size, so increasing that must be bad. This is the alternative.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@55457 91177308-0d34-0410-b5e6-96231b3b80d8
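For illustration only (not part of the patch): a minimal sketch of how the size-suffixed opcode is picked from the operand's value type, mirroring the switches this change adds in SelectionDAGISel.cpp. The helper name SizedAtomicAddOpcode is hypothetical; the ISD/MVT names are the 2008-era APIs used in the diff below.

  #include "llvm/CodeGen/SelectionDAGNodes.h"   // ISD opcodes
  #include "llvm/CodeGen/ValueTypes.h"          // MVT
  #include <cassert>
  using namespace llvm;

  // Map an integer value type to the matching size-suffixed atomic add opcode.
  static unsigned SizedAtomicAddOpcode(MVT VT) {
    switch (VT.getSimpleVT()) {
    case MVT::i8:  return ISD::ATOMIC_LOAD_ADD_8;
    case MVT::i16: return ISD::ATOMIC_LOAD_ADD_16;
    case MVT::i32: return ISD::ATOMIC_LOAD_ADD_32;
    case MVT::i64: return ISD::ATOMIC_LOAD_ADD_64;
    default:
      assert(0 && "Invalid atomic type");
      return 0;
    }
  }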
-rw-r--r--  include/llvm/CodeGen/SelectionDAGNodes.h      | 207
-rw-r--r--  include/llvm/Target/TargetLowering.h           |   2
-rw-r--r--  lib/CodeGen/SelectionDAG/LegalizeDAG.cpp       | 126
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAG.cpp      | 176
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp  | 197
-rw-r--r--  lib/Target/TargetSelectionDAG.td               | 268
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp             |  35
-rw-r--r--  lib/Target/X86/X86Instr64bit.td                |  16
-rw-r--r--  lib/Target/X86/X86InstrInfo.td                 |  40
9 files changed, 717 insertions(+), 350 deletions(-)
diff --git a/include/llvm/CodeGen/SelectionDAGNodes.h b/include/llvm/CodeGen/SelectionDAGNodes.h
index b509667528..8693173ca4 100644
--- a/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -58,6 +58,8 @@ struct SDVTList {
/// ISD namespace - This namespace contains an enum which represents all of the
/// SelectionDAG node types and value types.
///
+/// If you add new elements here you should increase OpActionsCapacity in
+/// TargetLowering.h by the number of new elements.
namespace ISD {
//===--------------------------------------------------------------------===//
@@ -589,38 +591,64 @@ namespace ISD {
// this corresponds to the atomic.lcs intrinsic.
// cmp is compared to *ptr, and if equal, swap is stored in *ptr.
// the return is always the original value in *ptr
- ATOMIC_CMP_SWAP,
-
- // Val, OUTCHAIN = ATOMIC_LOAD_ADD(INCHAIN, ptr, amt)
- // this corresponds to the atomic.las intrinsic.
- // *ptr + amt is stored to *ptr atomically.
- // the return is always the original value in *ptr
- ATOMIC_LOAD_ADD,
+ ATOMIC_CMP_SWAP_8,
+ ATOMIC_CMP_SWAP_16,
+ ATOMIC_CMP_SWAP_32,
+ ATOMIC_CMP_SWAP_64,
// Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt)
// this corresponds to the atomic.swap intrinsic.
// amt is stored to *ptr atomically.
// the return is always the original value in *ptr
- ATOMIC_SWAP,
+ ATOMIC_SWAP_8,
+ ATOMIC_SWAP_16,
+ ATOMIC_SWAP_32,
+ ATOMIC_SWAP_64,
- // Val, OUTCHAIN = ATOMIC_LOAD_SUB(INCHAIN, ptr, amt)
- // this corresponds to the atomic.lss intrinsic.
- // *ptr - amt is stored to *ptr atomically.
- // the return is always the original value in *ptr
- ATOMIC_LOAD_SUB,
-
// Val, OUTCHAIN = ATOMIC_L[OpName]S(INCHAIN, ptr, amt)
// this corresponds to the atomic.[OpName] intrinsic.
// op(*ptr, amt) is stored to *ptr atomically.
// the return is always the original value in *ptr
- ATOMIC_LOAD_AND,
- ATOMIC_LOAD_OR,
- ATOMIC_LOAD_XOR,
- ATOMIC_LOAD_NAND,
- ATOMIC_LOAD_MIN,
- ATOMIC_LOAD_MAX,
- ATOMIC_LOAD_UMIN,
- ATOMIC_LOAD_UMAX,
+ ATOMIC_LOAD_ADD_8,
+ ATOMIC_LOAD_SUB_8,
+ ATOMIC_LOAD_AND_8,
+ ATOMIC_LOAD_OR_8,
+ ATOMIC_LOAD_XOR_8,
+ ATOMIC_LOAD_NAND_8,
+ ATOMIC_LOAD_MIN_8,
+ ATOMIC_LOAD_MAX_8,
+ ATOMIC_LOAD_UMIN_8,
+ ATOMIC_LOAD_UMAX_8,
+ ATOMIC_LOAD_ADD_16,
+ ATOMIC_LOAD_SUB_16,
+ ATOMIC_LOAD_AND_16,
+ ATOMIC_LOAD_OR_16,
+ ATOMIC_LOAD_XOR_16,
+ ATOMIC_LOAD_NAND_16,
+ ATOMIC_LOAD_MIN_16,
+ ATOMIC_LOAD_MAX_16,
+ ATOMIC_LOAD_UMIN_16,
+ ATOMIC_LOAD_UMAX_16,
+ ATOMIC_LOAD_ADD_32,
+ ATOMIC_LOAD_SUB_32,
+ ATOMIC_LOAD_AND_32,
+ ATOMIC_LOAD_OR_32,
+ ATOMIC_LOAD_XOR_32,
+ ATOMIC_LOAD_NAND_32,
+ ATOMIC_LOAD_MIN_32,
+ ATOMIC_LOAD_MAX_32,
+ ATOMIC_LOAD_UMIN_32,
+ ATOMIC_LOAD_UMAX_32,
+ ATOMIC_LOAD_ADD_64,
+ ATOMIC_LOAD_SUB_64,
+ ATOMIC_LOAD_AND_64,
+ ATOMIC_LOAD_OR_64,
+ ATOMIC_LOAD_XOR_64,
+ ATOMIC_LOAD_NAND_64,
+ ATOMIC_LOAD_MIN_64,
+ ATOMIC_LOAD_MAX_64,
+ ATOMIC_LOAD_UMIN_64,
+ ATOMIC_LOAD_UMAX_64,
// BUILTIN_OP_END - This must be the last enum value in this list.
BUILTIN_OP_END
@@ -1512,20 +1540,59 @@ public:
// Methods to support isa and dyn_cast
static bool classof(const MemSDNode *) { return true; }
static bool classof(const SDNode *N) {
- return N->getOpcode() == ISD::LOAD ||
- N->getOpcode() == ISD::STORE ||
- N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
- N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
- N->getOpcode() == ISD::ATOMIC_SWAP ||
- N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
- N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
- N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
- N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
- N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
- N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
- N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
- N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
- N->getOpcode() == ISD::ATOMIC_LOAD_UMAX;
+ return N->getOpcode() == ISD::LOAD ||
+ N->getOpcode() == ISD::STORE ||
+ N->getOpcode() == ISD::ATOMIC_CMP_SWAP_8 ||
+ N->getOpcode() == ISD::ATOMIC_SWAP_8 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_ADD_8 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_SUB_8 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_AND_8 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_OR_8 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_XOR_8 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_NAND_8 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_MIN_8 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_MAX_8 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_UMIN_8 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_UMAX_8 ||
+
+ N->getOpcode() == ISD::ATOMIC_CMP_SWAP_16 ||
+ N->getOpcode() == ISD::ATOMIC_SWAP_16 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_ADD_16 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_SUB_16 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_AND_16 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_OR_16 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_XOR_16 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_NAND_16 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_MIN_16 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_MAX_16 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_UMIN_16 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_UMAX_16 ||
+
+ N->getOpcode() == ISD::ATOMIC_CMP_SWAP_32 ||
+ N->getOpcode() == ISD::ATOMIC_SWAP_32 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_ADD_32 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_SUB_32 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_AND_32 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_OR_32 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_XOR_32 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_NAND_32 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_MIN_32 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_MAX_32 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_UMIN_32 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_UMAX_32 ||
+
+ N->getOpcode() == ISD::ATOMIC_CMP_SWAP_64 ||
+ N->getOpcode() == ISD::ATOMIC_SWAP_64 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_ADD_64 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_SUB_64 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_AND_64 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_OR_64 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_XOR_64 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_NAND_64 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_MIN_64 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_MAX_64 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_UMIN_64 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_UMAX_64;
}
};
@@ -1567,23 +1634,65 @@ class AtomicSDNode : public MemSDNode {
const SDValue &getBasePtr() const { return getOperand(1); }
const SDValue &getVal() const { return getOperand(2); }
- bool isCompareAndSwap() const { return getOpcode() == ISD::ATOMIC_CMP_SWAP; }
+ bool isCompareAndSwap() const {
+ unsigned Op = getOpcode();
+ return Op == ISD::ATOMIC_CMP_SWAP_8 ||
+ Op == ISD::ATOMIC_CMP_SWAP_16 ||
+ Op == ISD::ATOMIC_CMP_SWAP_32 ||
+ Op == ISD::ATOMIC_CMP_SWAP_64;
+ }
// Methods to support isa and dyn_cast
static bool classof(const AtomicSDNode *) { return true; }
static bool classof(const SDNode *N) {
- return N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
- N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
- N->getOpcode() == ISD::ATOMIC_SWAP ||
- N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
- N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
- N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
- N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
- N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
- N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
- N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
- N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
- N->getOpcode() == ISD::ATOMIC_LOAD_UMAX;
+ return N->getOpcode() == ISD::ATOMIC_CMP_SWAP_8 ||
+ N->getOpcode() == ISD::ATOMIC_SWAP_8 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_ADD_8 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_SUB_8 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_AND_8 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_OR_8 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_XOR_8 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_NAND_8 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_MIN_8 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_MAX_8 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_UMIN_8 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_UMAX_8 ||
+ N->getOpcode() == ISD::ATOMIC_CMP_SWAP_16 ||
+ N->getOpcode() == ISD::ATOMIC_SWAP_16 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_ADD_16 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_SUB_16 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_AND_16 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_OR_16 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_XOR_16 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_NAND_16 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_MIN_16 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_MAX_16 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_UMIN_16 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_UMAX_16 ||
+ N->getOpcode() == ISD::ATOMIC_CMP_SWAP_32 ||
+ N->getOpcode() == ISD::ATOMIC_SWAP_32 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_ADD_32 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_SUB_32 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_AND_32 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_OR_32 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_XOR_32 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_NAND_32 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_MIN_32 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_MAX_32 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_UMIN_32 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_UMAX_32 ||
+ N->getOpcode() == ISD::ATOMIC_CMP_SWAP_64 ||
+ N->getOpcode() == ISD::ATOMIC_SWAP_64 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_ADD_64 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_SUB_64 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_AND_64 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_OR_64 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_XOR_64 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_NAND_64 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_MIN_64 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_MAX_64 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_UMIN_64 ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_UMAX_64;
}
};
diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index bb3105b5e8..c0c153c2fe 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -1386,7 +1386,7 @@ private:
MVT TransformToType[MVT::LAST_VALUETYPE];
// Defines the capacity of the TargetLowering::OpActions table
- static const int OpActionsCapacity = 176;
+ static const int OpActionsCapacity = 212;
/// OpActions - For each operation and each value type, keep a LegalizeAction
/// that indicates how instruction selection should deal with the operation.
diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index f779894529..91c65e8fbe 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -1181,7 +1181,10 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
break;
}
- case ISD::ATOMIC_CMP_SWAP: {
+ case ISD::ATOMIC_CMP_SWAP_8:
+ case ISD::ATOMIC_CMP_SWAP_16:
+ case ISD::ATOMIC_CMP_SWAP_32:
+ case ISD::ATOMIC_CMP_SWAP_64: {
unsigned int num_operands = 4;
assert(Node->getNumOperands() == num_operands && "Invalid Atomic node!");
SDValue Ops[4];
@@ -1201,17 +1204,50 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
AddLegalizedOperand(SDValue(Node, 1), Result.getValue(1));
return Result.getValue(Op.getResNo());
}
- case ISD::ATOMIC_LOAD_ADD:
- case ISD::ATOMIC_LOAD_SUB:
- case ISD::ATOMIC_LOAD_AND:
- case ISD::ATOMIC_LOAD_OR:
- case ISD::ATOMIC_LOAD_XOR:
- case ISD::ATOMIC_LOAD_NAND:
- case ISD::ATOMIC_LOAD_MIN:
- case ISD::ATOMIC_LOAD_MAX:
- case ISD::ATOMIC_LOAD_UMIN:
- case ISD::ATOMIC_LOAD_UMAX:
- case ISD::ATOMIC_SWAP: {
+ case ISD::ATOMIC_LOAD_ADD_8:
+ case ISD::ATOMIC_LOAD_SUB_8:
+ case ISD::ATOMIC_LOAD_AND_8:
+ case ISD::ATOMIC_LOAD_OR_8:
+ case ISD::ATOMIC_LOAD_XOR_8:
+ case ISD::ATOMIC_LOAD_NAND_8:
+ case ISD::ATOMIC_LOAD_MIN_8:
+ case ISD::ATOMIC_LOAD_MAX_8:
+ case ISD::ATOMIC_LOAD_UMIN_8:
+ case ISD::ATOMIC_LOAD_UMAX_8:
+ case ISD::ATOMIC_SWAP_8:
+ case ISD::ATOMIC_LOAD_ADD_16:
+ case ISD::ATOMIC_LOAD_SUB_16:
+ case ISD::ATOMIC_LOAD_AND_16:
+ case ISD::ATOMIC_LOAD_OR_16:
+ case ISD::ATOMIC_LOAD_XOR_16:
+ case ISD::ATOMIC_LOAD_NAND_16:
+ case ISD::ATOMIC_LOAD_MIN_16:
+ case ISD::ATOMIC_LOAD_MAX_16:
+ case ISD::ATOMIC_LOAD_UMIN_16:
+ case ISD::ATOMIC_LOAD_UMAX_16:
+ case ISD::ATOMIC_SWAP_16:
+ case ISD::ATOMIC_LOAD_ADD_32:
+ case ISD::ATOMIC_LOAD_SUB_32:
+ case ISD::ATOMIC_LOAD_AND_32:
+ case ISD::ATOMIC_LOAD_OR_32:
+ case ISD::ATOMIC_LOAD_XOR_32:
+ case ISD::ATOMIC_LOAD_NAND_32:
+ case ISD::ATOMIC_LOAD_MIN_32:
+ case ISD::ATOMIC_LOAD_MAX_32:
+ case ISD::ATOMIC_LOAD_UMIN_32:
+ case ISD::ATOMIC_LOAD_UMAX_32:
+ case ISD::ATOMIC_SWAP_32:
+ case ISD::ATOMIC_LOAD_ADD_64:
+ case ISD::ATOMIC_LOAD_SUB_64:
+ case ISD::ATOMIC_LOAD_AND_64:
+ case ISD::ATOMIC_LOAD_OR_64:
+ case ISD::ATOMIC_LOAD_XOR_64:
+ case ISD::ATOMIC_LOAD_NAND_64:
+ case ISD::ATOMIC_LOAD_MIN_64:
+ case ISD::ATOMIC_LOAD_MAX_64:
+ case ISD::ATOMIC_LOAD_UMIN_64:
+ case ISD::ATOMIC_LOAD_UMAX_64:
+ case ISD::ATOMIC_SWAP_64: {
unsigned int num_operands = 3;
assert(Node->getNumOperands() == num_operands && "Invalid Atomic node!");
SDValue Ops[3];
@@ -4155,7 +4191,10 @@ SDValue SelectionDAGLegalize::PromoteOp(SDValue Op) {
break;
}
- case ISD::ATOMIC_CMP_SWAP: {
+ case ISD::ATOMIC_CMP_SWAP_8:
+ case ISD::ATOMIC_CMP_SWAP_16:
+ case ISD::ATOMIC_CMP_SWAP_32:
+ case ISD::ATOMIC_CMP_SWAP_64: {
AtomicSDNode* AtomNode = cast<AtomicSDNode>(Node);
Tmp2 = PromoteOp(Node->getOperand(2));
Tmp3 = PromoteOp(Node->getOperand(3));
@@ -4167,17 +4206,50 @@ SDValue SelectionDAGLegalize::PromoteOp(SDValue Op) {
AddLegalizedOperand(Op.getValue(1), LegalizeOp(Result.getValue(1)));
break;
}
- case ISD::ATOMIC_LOAD_ADD:
- case ISD::ATOMIC_LOAD_SUB:
- case ISD::ATOMIC_LOAD_AND:
- case ISD::ATOMIC_LOAD_OR:
- case ISD::ATOMIC_LOAD_XOR:
- case ISD::ATOMIC_LOAD_NAND:
- case ISD::ATOMIC_LOAD_MIN:
- case ISD::ATOMIC_LOAD_MAX:
- case ISD::ATOMIC_LOAD_UMIN:
- case ISD::ATOMIC_LOAD_UMAX:
- case ISD::ATOMIC_SWAP: {
+ case ISD::ATOMIC_LOAD_ADD_8:
+ case ISD::ATOMIC_LOAD_SUB_8:
+ case ISD::ATOMIC_LOAD_AND_8:
+ case ISD::ATOMIC_LOAD_OR_8:
+ case ISD::ATOMIC_LOAD_XOR_8:
+ case ISD::ATOMIC_LOAD_NAND_8:
+ case ISD::ATOMIC_LOAD_MIN_8:
+ case ISD::ATOMIC_LOAD_MAX_8:
+ case ISD::ATOMIC_LOAD_UMIN_8:
+ case ISD::ATOMIC_LOAD_UMAX_8:
+ case ISD::ATOMIC_SWAP_8:
+ case ISD::ATOMIC_LOAD_ADD_16:
+ case ISD::ATOMIC_LOAD_SUB_16:
+ case ISD::ATOMIC_LOAD_AND_16:
+ case ISD::ATOMIC_LOAD_OR_16:
+ case ISD::ATOMIC_LOAD_XOR_16:
+ case ISD::ATOMIC_LOAD_NAND_16:
+ case ISD::ATOMIC_LOAD_MIN_16:
+ case ISD::ATOMIC_LOAD_MAX_16:
+ case ISD::ATOMIC_LOAD_UMIN_16:
+ case ISD::ATOMIC_LOAD_UMAX_16:
+ case ISD::ATOMIC_SWAP_16:
+ case ISD::ATOMIC_LOAD_ADD_32:
+ case ISD::ATOMIC_LOAD_SUB_32:
+ case ISD::ATOMIC_LOAD_AND_32:
+ case ISD::ATOMIC_LOAD_OR_32:
+ case ISD::ATOMIC_LOAD_XOR_32:
+ case ISD::ATOMIC_LOAD_NAND_32:
+ case ISD::ATOMIC_LOAD_MIN_32:
+ case ISD::ATOMIC_LOAD_MAX_32:
+ case ISD::ATOMIC_LOAD_UMIN_32:
+ case ISD::ATOMIC_LOAD_UMAX_32:
+ case ISD::ATOMIC_SWAP_32:
+ case ISD::ATOMIC_LOAD_ADD_64:
+ case ISD::ATOMIC_LOAD_SUB_64:
+ case ISD::ATOMIC_LOAD_AND_64:
+ case ISD::ATOMIC_LOAD_OR_64:
+ case ISD::ATOMIC_LOAD_XOR_64:
+ case ISD::ATOMIC_LOAD_NAND_64:
+ case ISD::ATOMIC_LOAD_MIN_64:
+ case ISD::ATOMIC_LOAD_MAX_64:
+ case ISD::ATOMIC_LOAD_UMIN_64:
+ case ISD::ATOMIC_LOAD_UMAX_64:
+ case ISD::ATOMIC_SWAP_64: {
AtomicSDNode* AtomNode = cast<AtomicSDNode>(Node);
Tmp2 = PromoteOp(Node->getOperand(2));
Result = DAG.getAtomic(Node->getOpcode(), AtomNode->getChain(),
@@ -6092,7 +6164,11 @@ void SelectionDAGLegalize::ExpandOp(SDValue Op, SDValue &Lo, SDValue &Hi){
break;
}
- case ISD::ATOMIC_CMP_SWAP: {
+ // FIXME: should the LOAD_BIN and SWAP atomics get here too? Probably.
+ case ISD::ATOMIC_CMP_SWAP_8:
+ case ISD::ATOMIC_CMP_SWAP_16:
+ case ISD::ATOMIC_CMP_SWAP_32:
+ case ISD::ATOMIC_CMP_SWAP_64: {
SDValue Tmp = TLI.LowerOperation(Op, DAG);
assert(Tmp.Val && "Node must be custom expanded!");
ExpandOp(Tmp.getValue(0), Lo, Hi);
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 0bd1a4d2d0..45026dca41 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -439,18 +439,54 @@ static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
ID.AddInteger(ST->getRawFlags());
break;
}
- case ISD::ATOMIC_CMP_SWAP:
- case ISD::ATOMIC_LOAD_ADD:
- case ISD::ATOMIC_SWAP:
- case ISD::ATOMIC_LOAD_SUB:
- case ISD::ATOMIC_LOAD_AND:
- case ISD::ATOMIC_LOAD_OR:
- case ISD::ATOMIC_LOAD_XOR:
- case ISD::ATOMIC_LOAD_NAND:
- case ISD::ATOMIC_LOAD_MIN:
- case ISD::ATOMIC_LOAD_MAX:
- case ISD::ATOMIC_LOAD_UMIN:
- case ISD::ATOMIC_LOAD_UMAX: {
+ case ISD::ATOMIC_CMP_SWAP_8:
+ case ISD::ATOMIC_SWAP_8:
+ case ISD::ATOMIC_LOAD_ADD_8:
+ case ISD::ATOMIC_LOAD_SUB_8:
+ case ISD::ATOMIC_LOAD_AND_8:
+ case ISD::ATOMIC_LOAD_OR_8:
+ case ISD::ATOMIC_LOAD_XOR_8:
+ case ISD::ATOMIC_LOAD_NAND_8:
+ case ISD::ATOMIC_LOAD_MIN_8:
+ case ISD::ATOMIC_LOAD_MAX_8:
+ case ISD::ATOMIC_LOAD_UMIN_8:
+ case ISD::ATOMIC_LOAD_UMAX_8:
+ case ISD::ATOMIC_CMP_SWAP_16:
+ case ISD::ATOMIC_SWAP_16:
+ case ISD::ATOMIC_LOAD_ADD_16:
+ case ISD::ATOMIC_LOAD_SUB_16:
+ case ISD::ATOMIC_LOAD_AND_16:
+ case ISD::ATOMIC_LOAD_OR_16:
+ case ISD::ATOMIC_LOAD_XOR_16:
+ case ISD::ATOMIC_LOAD_NAND_16:
+ case ISD::ATOMIC_LOAD_MIN_16:
+ case ISD::ATOMIC_LOAD_MAX_16:
+ case ISD::ATOMIC_LOAD_UMIN_16:
+ case ISD::ATOMIC_LOAD_UMAX_16:
+ case ISD::ATOMIC_CMP_SWAP_32:
+ case ISD::ATOMIC_SWAP_32:
+ case ISD::ATOMIC_LOAD_ADD_32:
+ case ISD::ATOMIC_LOAD_SUB_32:
+ case ISD::ATOMIC_LOAD_AND_32:
+ case ISD::ATOMIC_LOAD_OR_32:
+ case ISD::ATOMIC_LOAD_XOR_32:
+ case ISD::ATOMIC_LOAD_NAND_32:
+ case ISD::ATOMIC_LOAD_MIN_32:
+ case ISD::ATOMIC_LOAD_MAX_32:
+ case ISD::ATOMIC_LOAD_UMIN_32:
+ case ISD::ATOMIC_LOAD_UMAX_32:
+ case ISD::ATOMIC_CMP_SWAP_64:
+ case ISD::ATOMIC_SWAP_64:
+ case ISD::ATOMIC_LOAD_ADD_64:
+ case ISD::ATOMIC_LOAD_SUB_64:
+ case ISD::ATOMIC_LOAD_AND_64:
+ case ISD::ATOMIC_LOAD_OR_64:
+ case ISD::ATOMIC_LOAD_XOR_64:
+ case ISD::ATOMIC_LOAD_NAND_64:
+ case ISD::ATOMIC_LOAD_MIN_64:
+ case ISD::ATOMIC_LOAD_MAX_64:
+ case ISD::ATOMIC_LOAD_UMIN_64:
+ case ISD::ATOMIC_LOAD_UMAX_64: {
const AtomicSDNode *AT = cast<AtomicSDNode>(N);
ID.AddInteger(AT->getRawFlags());
break;
@@ -3149,7 +3185,10 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, SDValue Chain,
SDValue Ptr, SDValue Cmp,
SDValue Swp, const Value* PtrVal,
unsigned Alignment) {
- assert(Opcode == ISD::ATOMIC_CMP_SWAP && "Invalid Atomic Op");
+ assert((Opcode == ISD::ATOMIC_CMP_SWAP_8 ||
+ Opcode == ISD::ATOMIC_CMP_SWAP_16 ||
+ Opcode == ISD::ATOMIC_CMP_SWAP_32 ||
+ Opcode == ISD::ATOMIC_CMP_SWAP_64) && "Invalid Atomic Op");
assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
MVT VT = Cmp.getValueType();
@@ -3175,13 +3214,50 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, SDValue Chain,
SDValue Ptr, SDValue Val,
const Value* PtrVal,
unsigned Alignment) {
- assert(( Opcode == ISD::ATOMIC_LOAD_ADD || Opcode == ISD::ATOMIC_LOAD_SUB
- || Opcode == ISD::ATOMIC_SWAP || Opcode == ISD::ATOMIC_LOAD_AND
- || Opcode == ISD::ATOMIC_LOAD_OR || Opcode == ISD::ATOMIC_LOAD_XOR
- || Opcode == ISD::ATOMIC_LOAD_NAND
- || Opcode == ISD::ATOMIC_LOAD_MIN || Opcode == ISD::ATOMIC_LOAD_MAX
- || Opcode == ISD::ATOMIC_LOAD_UMIN || Opcode == ISD::ATOMIC_LOAD_UMAX)
- && "Invalid Atomic Op");
+ assert((Opcode == ISD::ATOMIC_LOAD_ADD_8 ||
+ Opcode == ISD::ATOMIC_LOAD_SUB_8 ||
+ Opcode == ISD::ATOMIC_LOAD_AND_8 ||
+ Opcode == ISD::ATOMIC_LOAD_OR_8 ||
+ Opcode == ISD::ATOMIC_LOAD_XOR_8 ||
+ Opcode == ISD::ATOMIC_LOAD_NAND_8 ||
+ Opcode == ISD::ATOMIC_LOAD_MIN_8 ||
+ Opcode == ISD::ATOMIC_LOAD_MAX_8 ||
+ Opcode == ISD::ATOMIC_LOAD_UMIN_8 ||
+ Opcode == ISD::ATOMIC_LOAD_UMAX_8 ||
+ Opcode == ISD::ATOMIC_SWAP_8 ||
+ Opcode == ISD::ATOMIC_LOAD_ADD_16 ||
+ Opcode == ISD::ATOMIC_LOAD_SUB_16 ||
+ Opcode == ISD::ATOMIC_LOAD_AND_16 ||
+ Opcode == ISD::ATOMIC_LOAD_OR_16 ||
+ Opcode == ISD::ATOMIC_LOAD_XOR_16 ||
+ Opcode == ISD::ATOMIC_LOAD_NAND_16 ||
+ Opcode == ISD::ATOMIC_LOAD_MIN_16 ||
+ Opcode == ISD::ATOMIC_LOAD_MAX_16 ||
+ Opcode == ISD::ATOMIC_LOAD_UMIN_16 ||
+ Opcode == ISD::ATOMIC_LOAD_UMAX_16 ||
+ Opcode == ISD::ATOMIC_SWAP_16 ||
+ Opcode == ISD::ATOMIC_LOAD_ADD_32 ||
+ Opcode == ISD::ATOMIC_LOAD_SUB_32 ||
+ Opcode == ISD::ATOMIC_LOAD_AND_32 ||
+ Opcode == ISD::ATOMIC_LOAD_OR_32 ||
+ Opcode == ISD::ATOMIC_LOAD_XOR_32 ||
+ Opcode == ISD::ATOMIC_LOAD_NAND_32 ||
+ Opcode == ISD::ATOMIC_LOAD_MIN_32 ||
+ Opcode == ISD::ATOMIC_LOAD_MAX_32 ||
+ Opcode == ISD::ATOMIC_LOAD_UMIN_32 ||
+ Opcode == ISD::ATOMIC_LOAD_UMAX_32 ||
+ Opcode == ISD::ATOMIC_SWAP_32 ||
+ Opcode == ISD::ATOMIC_LOAD_ADD_64 ||
+ Opcode == ISD::ATOMIC_LOAD_SUB_64 ||
+ Opcode == ISD::ATOMIC_LOAD_AND_64 ||
+ Opcode == ISD::ATOMIC_LOAD_OR_64 ||
+ Opcode == ISD::ATOMIC_LOAD_XOR_64 ||
+ Opcode == ISD::ATOMIC_LOAD_NAND_64 ||
+ Opcode == ISD::ATOMIC_LOAD_MIN_64 ||
+ Opcode == ISD::ATOMIC_LOAD_MAX_64 ||
+ Opcode == ISD::ATOMIC_LOAD_UMIN_64 ||
+ Opcode == ISD::ATOMIC_LOAD_UMAX_64 ||
+ Opcode == ISD::ATOMIC_SWAP_64) && "Invalid Atomic Op");
MVT VT = Val.getValueType();
@@ -4721,18 +4797,54 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
#endif
case ISD::PREFETCH: return "Prefetch";
case ISD::MEMBARRIER: return "MemBarrier";
- case ISD::ATOMIC_CMP_SWAP: return "AtomicCmpSwap";
- case ISD::ATOMIC_LOAD_ADD: return "AtomicLoadAdd";
- case ISD::ATOMIC_LOAD_SUB: return "AtomicLoadSub";
- case ISD::ATOMIC_LOAD_AND: return "AtomicLoadAnd";
- case ISD::ATOMIC_LOAD_OR: return "AtomicLoadOr";
- case ISD::ATOMIC_LOAD_XOR: return "AtomicLoadXor";
- case ISD::ATOMIC_LOAD_NAND: return "AtomicLoadNand";
- case ISD::ATOMIC_LOAD_MIN: return "AtomicLoadMin";
- case ISD::ATOMIC_LOAD_MAX: return "AtomicLoadMax";
- case ISD::ATOMIC_LOAD_UMIN: return "AtomicLoadUMin";
- case ISD::ATOMIC_LOAD_UMAX: return "AtomicLoadUMax";
- case ISD::ATOMIC_SWAP: return "AtomicSWAP";
+ case ISD::ATOMIC_CMP_SWAP_8: return "AtomicCmpSwap8";
+ case ISD::ATOMIC_SWAP_8: return "AtomicSwap8";
+ case ISD::ATOMIC_LOAD_ADD_8: return "AtomicLoadAdd8";
+ case ISD::ATOMIC_LOAD_SUB_8: return "AtomicLoadSub8";
+ case ISD::ATOMIC_LOAD_AND_8: return "AtomicLoadAnd8";
+ case ISD::ATOMIC_LOAD_OR_8: return "AtomicLoadOr8";
+ case ISD::ATOMIC_LOAD_XOR_8: return "AtomicLoadXor8";
+ case ISD::ATOMIC_LOAD_NAND_8: return "AtomicLoadNand8";
+ case ISD::ATOMIC_LOAD_MIN_8: return "AtomicLoadMin8";
+ case ISD::ATOMIC_LOAD_MAX_8: return "AtomicLoadMax8";
+ case ISD::ATOMIC_LOAD_UMIN_8: return "AtomicLoadUMin8";
+ case ISD::ATOMIC_LOAD_UMAX_8: return "AtomicLoadUMax8";
+ case ISD::ATOMIC_CMP_SWAP_16: return "AtomicCmpSwap16";
+ case ISD::ATOMIC_SWAP_16: return "AtomicSwap16";
+ case ISD::ATOMIC_LOAD_ADD_16: return "AtomicLoadAdd16";
+ case ISD::ATOMIC_LOAD_SUB_16: return "AtomicLoadSub16";
+ case ISD::ATOMIC_LOAD_AND_16: return "AtomicLoadAnd16";
+ case ISD::ATOMIC_LOAD_OR_16: return "AtomicLoadOr16";
+ case ISD::ATOMIC_LOAD_XOR_16: return "AtomicLoadXor16";
+ case ISD::ATOMIC_LOAD_NAND_16: return "AtomicLoadNand16";
+ case ISD::ATOMIC_LOAD_MIN_16: return "AtomicLoadMin16";
+ case ISD::ATOMIC_LOAD_MAX_16: return "AtomicLoadMax16";
+ case ISD::ATOMIC_LOAD_UMIN_16: return "AtomicLoadUMin16";
+ case ISD::ATOMIC_LOAD_UMAX_16: return "AtomicLoadUMax16";
+ case ISD::ATOMIC_CMP_SWAP_32: return "AtomicCmpSwap32";
+ case ISD::ATOMIC_SWAP_32: return "AtomicSwap32";
+ case ISD::ATOMIC_LOAD_ADD_32: return "AtomicLoadAdd32";
+ case ISD::ATOMIC_LOAD_SUB_32: return "AtomicLoadSub32";
+ case ISD::ATOMIC_LOAD_AND_32: return "AtomicLoadAnd32";
+ case ISD::ATOMIC_LOAD_OR_32: return "AtomicLoadOr32";
+ case ISD::ATOMIC_LOAD_XOR_32: return "AtomicLoadXor32";
+ case ISD::ATOMIC_LOAD_NAND_32: return "AtomicLoadNand32";
+ case ISD::ATOMIC_LOAD_MIN_32: return "AtomicLoadMin32";
+ case ISD::ATOMIC_LOAD_MAX_32: return "AtomicLoadMax32";
+ case ISD::ATOMIC_LOAD_UMIN_32: return "AtomicLoadUMin32";
+ case ISD::ATOMIC_LOAD_UMAX_32: return "AtomicLoadUMax32";
+ case ISD::ATOMIC_CMP_SWAP_64: return "AtomicCmpSwap64";
+ case ISD::ATOMIC_SWAP_64: return "AtomicSwap64";
+ case ISD::ATOMIC_LOAD_ADD_64: return "AtomicLoadAdd64";
+ case ISD::ATOMIC_LOAD_SUB_64: return "AtomicLoadSub64";
+ case ISD::ATOMIC_LOAD_AND_64: return "AtomicLoadAnd64";
+ case ISD::ATOMIC_LOAD_OR_64: return "AtomicLoadOr64";
+ case ISD::ATOMIC_LOAD_XOR_64: return "AtomicLoadXor64";
+ case ISD::ATOMIC_LOAD_NAND_64: return "AtomicLoadNand64";
+ case ISD::ATOMIC_LOAD_MIN_64: return "AtomicLoadMin64";
+ case ISD::ATOMIC_LOAD_MAX_64: return "AtomicLoadMax64";
+ case ISD::ATOMIC_LOAD_UMIN_64: return "AtomicLoadUMin64";
+ case ISD::ATOMIC_LOAD_UMAX_64: return "AtomicLoadUMax64";
case ISD::PCMARKER: return "PCMarker";
case ISD::READCYCLECOUNTER: return "ReadCycleCounter";
case ISD::SRCVALUE: return "SrcValue";
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index e0ecda4613..83fe4f5bfc 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -3664,37 +3664,198 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
}
case Intrinsic::atomic_cmp_swap: {
SDValue Root = getRoot();
- SDValue L = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, Root,
- getValue(I.getOperand(1)),
- getValue(I.getOperand(2)),
- getValue(I.getOperand(3)),
- I.getOperand(1));
+ SDValue L;
+ switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) {
+ case MVT::i8:
+ L = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP_8, Root,
+ getValue(I.getOperand(1)),
+ getValue(I.getOperand(2)),
+ getValue(I.getOperand(3)),
+ I.getOperand(1));
+ break;
+ case MVT::i16:
+ L = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP_16, Root,
+ getValue(I.getOperand(1)),
+ getValue(I.getOperand(2)),
+ getValue(I.getOperand(3)),
+ I.getOperand(1));
+ break;
+ case MVT::i32:
+ L = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP_32, Root,
+ getValue(I.getOperand(1)),
+ getValue(I.getOperand(2)),
+ getValue(I.getOperand(3)),
+ I.getOperand(1));
+ break;
+ case MVT::i64:
+ L = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP_64, Root,
+ getValue(I.getOperand(1)),
+ getValue(I.getOperand(2)),
+ getValue(I.getOperand(3)),
+ I.getOperand(1));
+ break;
+ default:
+ assert(0 && "Invalid atomic type");
+ abort();
+ }
setValue(&I, L);
DAG.setRoot(L.getValue(1));
return 0;
}
case Intrinsic::atomic_load_add:
- return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD);
+ switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) {
+ case MVT::i8:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD_8);
+ case MVT::i16:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD_16);
+ case MVT::i32:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD_32);
+ case MVT::i64:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD_64);
+ default:
+ assert(0 && "Invalid atomic type");
+ abort();
+ }
case Intrinsic::atomic_load_sub:
- return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB);
- case Intrinsic::atomic_load_and:
- return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND);
+ switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) {
+ case MVT::i8:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB_8);
+ case MVT::i16:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB_16);
+ case MVT::i32:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB_32);
+ case MVT::i64:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB_64);
+ default:
+ assert(0 && "Invalid atomic type");
+ abort();
+ }
case Intrinsic::atomic_load_or:
- return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR);
+ switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) {
+ case MVT::i8:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR_8);
+ case MVT::i16:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR_16);
+ case MVT::i32:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR_32);
+ case MVT::i64:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR_64);
+ default:
+ assert(0 && "Invalid atomic type");
+ abort();
+ }
case Intrinsic::atomic_load_xor:
- return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR);
+ switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) {
+ case MVT::i8:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR_8);
+ case MVT::i16:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR_16);
+ case MVT::i32:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR_32);
+ case MVT::i64:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR_64);
+ default:
+ assert(0 && "Invalid atomic type");
+ abort();
+ }
+ case Intrinsic::atomic_load_and:
+ switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) {
+ case MVT::i8:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND_8);
+ case MVT::i16:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND_16);
+ case MVT::i32:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND_32);
+ case MVT::i64:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND_64);
+ default:
+ assert(0 && "Invalid atomic type");
+ abort();
+ }
case Intrinsic::atomic_load_nand:
- return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND);
- case Intrinsic::atomic_load_min:
- return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN);
+ switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) {
+ case MVT::i8:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND_8);
+ case MVT::i16:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND_16);
+ case MVT::i32:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND_32);
+ case MVT::i64:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND_64);
+ default:
+ assert(0 && "Invalid atomic type");
+ abort();
+ }
case Intrinsic::atomic_load_max:
- return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX);
+ switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) {
+ case MVT::i8:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX_8);
+ case MVT::i16:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX_16);
+ case MVT::i32:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX_32);
+ case MVT::i64:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX_64);
+ default:
+ assert(0 && "Invalid atomic type");
+ abort();
+ }
+ case Intrinsic::atomic_load_min:
+ switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) {
+ case MVT::i8:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN_8);
+ case MVT::i16:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN_16);
+ case MVT::i32:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN_32);
+ case MVT::i64:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN_64);
+ default:
+ assert(0 && "Invalid atomic type");
+ abort();
+ }
case Intrinsic::atomic_load_umin:
- return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN);
+ switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) {
+ case MVT::i8:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN_8);
+ case MVT::i16:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN_16);
+ case MVT::i32:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN_32);
+ case MVT::i64:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN_64);
+ default:
+ assert(0 && "Invalid atomic type");
+ abort();
+ }
case Intrinsic::atomic_load_umax:
- return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX);
+ switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) {
+ case MVT::i8:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX_8);
+ case MVT::i16:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX_16);
+ case MVT::i32:
+ return implVisitBinaryAtomic(I, ISD::ATO