Diffstat (limited to 'lib')
-rw-r--r--  lib/AsmParser/LLLexer.cpp                         |   5
-rw-r--r--  lib/AsmParser/LLParser.cpp                        |  97
-rw-r--r--  lib/AsmParser/LLParser.h                          |   2
-rw-r--r--  lib/AsmParser/LLToken.h                           |   6
-rw-r--r--  lib/Bitcode/Reader/BitcodeReader.cpp              |  59
-rw-r--r--  lib/Bitcode/Writer/BitcodeWriter.cpp              |  39
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp  |   6
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h    |   2
-rw-r--r--  lib/VMCore/AsmWriter.cpp                          |  25
-rw-r--r--  lib/VMCore/Instruction.cpp                        |   2
-rw-r--r--  lib/VMCore/Instructions.cpp                       | 111
-rw-r--r--  lib/VMCore/Verifier.cpp                           |  36
12 files changed, 389 insertions(+), 1 deletion(-)
diff --git a/lib/AsmParser/LLLexer.cpp b/lib/AsmParser/LLLexer.cpp
index 970d7aa7ed..d16cac1af2 100644
--- a/lib/AsmParser/LLLexer.cpp
+++ b/lib/AsmParser/LLLexer.cpp
@@ -579,6 +579,9 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(oeq); KEYWORD(one); KEYWORD(olt); KEYWORD(ogt); KEYWORD(ole);
KEYWORD(oge); KEYWORD(ord); KEYWORD(uno); KEYWORD(ueq); KEYWORD(une);
+ KEYWORD(xchg); KEYWORD(nand); KEYWORD(max); KEYWORD(min); KEYWORD(umax);
+ KEYWORD(umin);
+
KEYWORD(x);
KEYWORD(blockaddress);
@@ -645,6 +648,8 @@ lltok::Kind LLLexer::LexIdentifier() {
INSTKEYWORD(alloca, Alloca);
INSTKEYWORD(load, Load);
INSTKEYWORD(store, Store);
+ INSTKEYWORD(cmpxchg, AtomicCmpXchg);
+ INSTKEYWORD(atomicrmw, AtomicRMW);
INSTKEYWORD(fence, Fence);
INSTKEYWORD(getelementptr, GetElementPtr);
diff --git a/lib/AsmParser/LLParser.cpp b/lib/AsmParser/LLParser.cpp
index fe4bb2e637..f412c1c89a 100644
--- a/lib/AsmParser/LLParser.cpp
+++ b/lib/AsmParser/LLParser.cpp
@@ -2952,12 +2952,18 @@ int LLParser::ParseInstruction(Instruction *&Inst, BasicBlock *BB,
case lltok::kw_alloca: return ParseAlloc(Inst, PFS);
case lltok::kw_load: return ParseLoad(Inst, PFS, false);
case lltok::kw_store: return ParseStore(Inst, PFS, false);
+ case lltok::kw_cmpxchg: return ParseCmpXchg(Inst, PFS, false);
+ case lltok::kw_atomicrmw: return ParseAtomicRMW(Inst, PFS, false);
case lltok::kw_fence: return ParseFence(Inst, PFS);
case lltok::kw_volatile:
if (EatIfPresent(lltok::kw_load))
return ParseLoad(Inst, PFS, true);
else if (EatIfPresent(lltok::kw_store))
return ParseStore(Inst, PFS, true);
+ else if (EatIfPresent(lltok::kw_cmpxchg))
+ return ParseCmpXchg(Inst, PFS, true);
+ else if (EatIfPresent(lltok::kw_atomicrmw))
+ return ParseAtomicRMW(Inst, PFS, true);
else
return TokError("expected 'load' or 'store'");
case lltok::kw_getelementptr: return ParseGetElementPtr(Inst, PFS);
@@ -3725,6 +3731,97 @@ int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS,
return AteExtraComma ? InstExtraComma : InstNormal;
}
+/// ParseCmpXchg
+/// ::= 'volatile'? 'cmpxchg' TypeAndValue ',' TypeAndValue ',' TypeAndValue
+/// 'singlethread'? AtomicOrdering
+int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS,
+ bool isVolatile) {
+ Value *Ptr, *Cmp, *New; LocTy PtrLoc, CmpLoc, NewLoc;
+ bool AteExtraComma = false;
+ AtomicOrdering Ordering = NotAtomic;
+ SynchronizationScope Scope = CrossThread;
+ if (ParseTypeAndValue(Ptr, PtrLoc, PFS) ||
+ ParseToken(lltok::comma, "expected ',' after cmpxchg address") ||
+ ParseTypeAndValue(Cmp, CmpLoc, PFS) ||
+ ParseToken(lltok::comma, "expected ',' after cmpxchg cmp operand") ||
+ ParseTypeAndValue(New, NewLoc, PFS) ||
+ ParseScopeAndOrdering(true /*Always atomic*/, Scope, Ordering))
+ return true;
+
+ if (Ordering == Unordered)
+ return TokError("cmpxchg cannot be unordered");
+ if (!Ptr->getType()->isPointerTy())
+ return Error(PtrLoc, "cmpxchg operand must be a pointer");
+ if (cast<PointerType>(Ptr->getType())->getElementType() != Cmp->getType())
+ return Error(CmpLoc, "compare value and pointer type do not match");
+ if (cast<PointerType>(Ptr->getType())->getElementType() != New->getType())
+ return Error(NewLoc, "new value and pointer type do not match");
+ if (!New->getType()->isIntegerTy())
+ return Error(NewLoc, "cmpxchg operand must be an integer");
+ unsigned Size = New->getType()->getPrimitiveSizeInBits();
+ if (Size < 8 || (Size & (Size - 1)))
+ return Error(NewLoc, "cmpxchg operand must be power-of-two byte-sized"
+ " integer");
+
+ AtomicCmpXchgInst *CXI =
+ new AtomicCmpXchgInst(Ptr, Cmp, New, Ordering, Scope);
+ CXI->setVolatile(isVolatile);
+ Inst = CXI;
+ return AteExtraComma ? InstExtraComma : InstNormal;
+}
+
+/// ParseAtomicRMW
+/// ::= 'volatile'? 'atomicrmw' BinOp TypeAndValue ',' TypeAndValue
+/// 'singlethread'? AtomicOrdering
+int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS,
+ bool isVolatile) {
+ Value *Ptr, *Val; LocTy PtrLoc, ValLoc;
+ bool AteExtraComma = false;
+ AtomicOrdering Ordering = NotAtomic;
+ SynchronizationScope Scope = CrossThread;
+ AtomicRMWInst::BinOp Operation;
+ switch (Lex.getKind()) {
+ default: return TokError("expected binary operation in atomicrmw");
+ case lltok::kw_xchg: Operation = AtomicRMWInst::Xchg; break;
+ case lltok::kw_add: Operation = AtomicRMWInst::Add; break;
+ case lltok::kw_sub: Operation = AtomicRMWInst::Sub; break;
+ case lltok::kw_and: Operation = AtomicRMWInst::And; break;
+ case lltok::kw_nand: Operation = AtomicRMWInst::Nand; break;
+ case lltok::kw_or: Operation = AtomicRMWInst::Or; break;
+ case lltok::kw_xor: Operation = AtomicRMWInst::Xor; break;
+ case lltok::kw_max: Operation = AtomicRMWInst::Max; break;
+ case lltok::kw_min: Operation = AtomicRMWInst::Min; break;
+ case lltok::kw_umax: Operation = AtomicRMWInst::UMax; break;
+ case lltok::kw_umin: Operation = AtomicRMWInst::UMin; break;
+ }
+ Lex.Lex(); // Eat the operation.
+
+ if (ParseTypeAndValue(Ptr, PtrLoc, PFS) ||
+ ParseToken(lltok::comma, "expected ',' after atomicrmw address") ||
+ ParseTypeAndValue(Val, ValLoc, PFS) ||
+ ParseScopeAndOrdering(true /*Always atomic*/, Scope, Ordering))
+ return true;
+
+ if (Ordering == Unordered)
+ return TokError("atomicrmw cannot be unordered");
+ if (!Ptr->getType()->isPointerTy())
+ return Error(PtrLoc, "atomicrmw operand must be a pointer");
+ if (cast<PointerType>(Ptr->getType())->getElementType() != Val->getType())
+ return Error(ValLoc, "atomicrmw value and pointer type do not match");
+ if (!Val->getType()->isIntegerTy())
+ return Error(ValLoc, "atomicrmw operand must be an integer");
+ unsigned Size = Val->getType()->getPrimitiveSizeInBits();
+ if (Size < 8 || (Size & (Size - 1)))
+ return Error(ValLoc, "atomicrmw operand must be power-of-two byte-sized"
+ " integer");
+
+ AtomicRMWInst *RMWI =
+ new AtomicRMWInst(Operation, Ptr, Val, Ordering, Scope);
+ RMWI->setVolatile(isVolatile);
+ Inst = RMWI;
+ return AteExtraComma ? InstExtraComma : InstNormal;
+}
+
/// ParseFence
/// ::= 'fence' 'singlethread'? AtomicOrdering
int LLParser::ParseFence(Instruction *&Inst, PerFunctionState &PFS) {
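For reference, a minimal sketch of the textual forms the two new productions above are written to accept (value and function names are illustrative, not taken from the patch; the orderings and the optional 'singlethread' scope are the ones already recognized for fence):

  define i32 @example(i32* %ptr, i32 %cmp, i32 %new) {
  entry:
    ; compare-and-swap: yields the value that was stored at %ptr
    %old = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst
    ; read-modify-write: atomically adds 1, yields the prior value
    %prev = atomicrmw add i32* %ptr, i32 1 monotonic
    ; volatile, single-threaded variant, per the 'volatile'? and
    ; 'singlethread'? terms in the grammar comments
    %swapped = volatile atomicrmw xchg i32* %ptr, i32 %new singlethread acquire
    ret i32 %old
  }
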
diff --git a/lib/AsmParser/LLParser.h b/lib/AsmParser/LLParser.h
index 6d2a929cc4..7fd01b6a21 100644
--- a/lib/AsmParser/LLParser.h
+++ b/lib/AsmParser/LLParser.h
@@ -365,6 +365,8 @@ namespace llvm {
int ParseAlloc(Instruction *&I, PerFunctionState &PFS);
int ParseLoad(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
int ParseStore(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
+ int ParseCmpXchg(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
+ int ParseAtomicRMW(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
int ParseFence(Instruction *&I, PerFunctionState &PFS);
int ParseGetElementPtr(Instruction *&I, PerFunctionState &PFS);
int ParseExtractValue(Instruction *&I, PerFunctionState &PFS);
diff --git a/lib/AsmParser/LLToken.h b/lib/AsmParser/LLToken.h
index f4c834ac23..a9e79c542c 100644
--- a/lib/AsmParser/LLToken.h
+++ b/lib/AsmParser/LLToken.h
@@ -110,6 +110,9 @@ namespace lltok {
kw_uge, kw_oeq, kw_one, kw_olt, kw_ogt, kw_ole, kw_oge, kw_ord, kw_uno,
kw_ueq, kw_une,
+ // atomicrmw operations that aren't also instruction keywords.
+ kw_xchg, kw_nand, kw_max, kw_min, kw_umax, kw_umin,
+
// Instruction Opcodes (Opcode in UIntVal).
kw_add, kw_fadd, kw_sub, kw_fsub, kw_mul, kw_fmul,
kw_udiv, kw_sdiv, kw_fdiv,
@@ -126,7 +129,8 @@ namespace lltok {
kw_ret, kw_br, kw_switch, kw_indirectbr, kw_invoke, kw_unwind, kw_resume,
kw_unreachable,
- kw_alloca, kw_load, kw_store, kw_fence, kw_getelementptr,
+ kw_alloca, kw_load, kw_store, kw_fence, kw_cmpxchg, kw_atomicrmw,
+ kw_getelementptr,
kw_extractelement, kw_insertelement, kw_shufflevector,
kw_extractvalue, kw_insertvalue, kw_blockaddress,
diff --git a/lib/Bitcode/Reader/BitcodeReader.cpp b/lib/Bitcode/Reader/BitcodeReader.cpp
index bab33ed257..e0af683a24 100644
--- a/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -131,6 +131,23 @@ static int GetDecodedBinaryOpcode(unsigned Val, Type *Ty) {
}
}
+static AtomicRMWInst::BinOp GetDecodedRMWOperation(unsigned Val) {
+ switch (Val) {
+ default: return AtomicRMWInst::BAD_BINOP;
+ case bitc::RMW_XCHG: return AtomicRMWInst::Xchg;
+ case bitc::RMW_ADD: return AtomicRMWInst::Add;
+ case bitc::RMW_SUB: return AtomicRMWInst::Sub;
+ case bitc::RMW_AND: return AtomicRMWInst::And;
+ case bitc::RMW_NAND: return AtomicRMWInst::Nand;
+ case bitc::RMW_OR: return AtomicRMWInst::Or;
+ case bitc::RMW_XOR: return AtomicRMWInst::Xor;
+ case bitc::RMW_MAX: return AtomicRMWInst::Max;
+ case bitc::RMW_MIN: return AtomicRMWInst::Min;
+ case bitc::RMW_UMAX: return AtomicRMWInst::UMax;
+ case bitc::RMW_UMIN: return AtomicRMWInst::UMin;
+ }
+}
+
static AtomicOrdering GetDecodedOrdering(unsigned Val) {
switch (Val) {
case bitc::ORDERING_NOTATOMIC: return NotAtomic;
@@ -2595,6 +2612,48 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
InstructionList.push_back(I);
break;
}
+ case bitc::FUNC_CODE_INST_CMPXCHG: {
+ // CMPXCHG:[ptrty, ptr, cmp, new, vol, ordering, synchscope]
+ unsigned OpNum = 0;
+ Value *Ptr, *Cmp, *New;
+ if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) ||
+ getValue(Record, OpNum,
+ cast<PointerType>(Ptr->getType())->getElementType(), Cmp) ||
+ getValue(Record, OpNum,
+ cast<PointerType>(Ptr->getType())->getElementType(), New) ||
+ OpNum+3 != Record.size())
+ return Error("Invalid CMPXCHG record");
+ AtomicOrdering Ordering = GetDecodedOrdering(Record[OpNum+1]);
+ if (Ordering == NotAtomic)
+ return Error("Invalid CMPXCHG record");
+ SynchronizationScope SynchScope = GetDecodedSynchScope(Record[OpNum+2]);
+ I = new AtomicCmpXchgInst(Ptr, Cmp, New, Ordering, SynchScope);
+ cast<AtomicCmpXchgInst>(I)->setVolatile(Record[OpNum]);
+ InstructionList.push_back(I);
+ break;
+ }
+ case bitc::FUNC_CODE_INST_ATOMICRMW: {
+ // ATOMICRMW:[ptrty, ptr, val, op, vol, ordering, synchscope]
+ unsigned OpNum = 0;
+ Value *Ptr, *Val;
+ if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) ||
+ getValue(Record, OpNum,
+ cast<PointerType>(Ptr->getType())->getElementType(), Val) ||
+ OpNum+4 != Record.size())
+ return Error("Invalid ATOMICRMW record");
+ AtomicRMWInst::BinOp Operation = GetDecodedRMWOperation(Record[OpNum]);
+ if (Operation < AtomicRMWInst::FIRST_BINOP ||
+ Operation > AtomicRMWInst::LAST_BINOP)
+ return Error("Invalid ATOMICRMW record");
+ AtomicOrdering Ordering = GetDecodedOrdering(Record[OpNum+2]);
+ if (Ordering == NotAtomic)
+ return Error("Invalid ATOMICRMW record");
+ SynchronizationScope SynchScope = GetDecodedSynchScope(Record[OpNum+3]);
+ I = new AtomicRMWInst(Operation, Ptr, Val, Ordering, SynchScope);
+ cast<AtomicRMWInst>(I)->setVolatile(Record[OpNum+1]);
+ InstructionList.push_back(I);
+ break;
+ }
case bitc::FUNC_CODE_INST_FENCE: { // FENCE:[ordering, synchscope]
if (2 != Record.size())
return Error("Invalid FENCE record");
diff --git a/lib/Bitcode/Writer/BitcodeWriter.cpp b/lib/Bitcode/Writer/BitcodeWriter.cpp
index 87154fc9c6..8fcaf1111f 100644
--- a/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -101,6 +101,23 @@ static unsigned GetEncodedBinaryOpcode(unsigned Opcode) {
}
}
+static unsigned GetEncodedRMWOperation(AtomicRMWInst::BinOp Op) {
+ switch (Op) {
+ default: llvm_unreachable("Unknown RMW operation!");
+ case AtomicRMWInst::Xchg: return bitc::RMW_XCHG;
+ case AtomicRMWInst::Add: return bitc::RMW_ADD;
+ case AtomicRMWInst::Sub: return bitc::RMW_SUB;
+ case AtomicRMWInst::And: return bitc::RMW_AND;
+ case AtomicRMWInst::Nand: return bitc::RMW_NAND;
+ case AtomicRMWInst::Or: return bitc::RMW_OR;
+ case AtomicRMWInst::Xor: return bitc::RMW_XOR;
+ case AtomicRMWInst::Max: return bitc::RMW_MAX;
+ case AtomicRMWInst::Min: return bitc::RMW_MIN;
+ case AtomicRMWInst::UMax: return bitc::RMW_UMAX;
+ case AtomicRMWInst::UMin: return bitc::RMW_UMIN;
+ }
+}
+
static unsigned GetEncodedOrdering(AtomicOrdering Ordering) {
switch (Ordering) {
default: llvm_unreachable("Unknown atomic ordering");
@@ -1186,6 +1203,28 @@ static void WriteInstruction(const Instruction &I, unsigned InstID,
Vals.push_back(Log2_32(cast<StoreInst>(I).getAlignment())+1);
Vals.push_back(cast<StoreInst>(I).isVolatile());
break;
+ case Instruction::AtomicCmpXchg:
+ Code = bitc::FUNC_CODE_INST_CMPXCHG;
+ PushValueAndType(I.getOperand(0), InstID, Vals, VE); // ptrty + ptr
+ Vals.push_back(VE.getValueID(I.getOperand(1))); // cmp.
+ Vals.push_back(VE.getValueID(I.getOperand(2))); // newval.
+ Vals.push_back(cast<AtomicCmpXchgInst>(I).isVolatile());
+ Vals.push_back(GetEncodedOrdering(
+ cast<AtomicCmpXchgInst>(I).getOrdering()));
+ Vals.push_back(GetEncodedSynchScope(
+ cast<AtomicCmpXchgInst>(I).getSynchScope()));
+ break;
+ case Instruction::AtomicRMW:
+ Code = bitc::FUNC_CODE_INST_ATOMICRMW;
+ PushValueAndType(I.getOperand(0), InstID, Vals, VE); // ptrty + ptr
+ Vals.push_back(VE.getValueID(I.getOperand(1))); // val.
+ Vals.push_back(GetEncodedRMWOperation(
+ cast<AtomicRMWInst>(I).getOperation()));
+ Vals.push_back(cast<AtomicRMWInst>(I).isVolatile());
+ Vals.push_back(GetEncodedOrdering(cast<AtomicRMWInst>(I).getOrdering()));
+ Vals.push_back(GetEncodedSynchScope(
+ cast<AtomicRMWInst>(I).getSynchScope()));
+ break;
case Instruction::Fence:
Code = bitc::FUNC_CODE_INST_FENCE;
Vals.push_back(GetEncodedOrdering(cast<FenceInst>(I).getOrdering()));
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index c5c9790456..6740bacbff 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -3222,6 +3222,12 @@ void SelectionDAGBuilder::visitStore(const StoreInst &I) {
DAG.setRoot(StoreNode);
}
+void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
+}
+
+void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
+}
+
void SelectionDAGBuilder::visitFence(const FenceInst &I) {
DebugLoc dl = getCurDebugLoc();
SDValue Ops[3];
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
index 11c4a48384..0360ad28fc 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -506,6 +506,8 @@ private:
void visitAlloca(const AllocaInst &I);
void visitLoad(const LoadInst &I);
void visitStore(const StoreInst &I);
+ void visitAtomicCmpXchg(const AtomicCmpXchgInst &I);
+ void visitAtomicRMW(const AtomicRMWInst &I);
void visitFence(const FenceInst &I);
void visitPHI(const PHINode &I);
void visitCall(const CallInst &I);
diff --git a/lib/VMCore/AsmWriter.cpp b/lib/VMCore/AsmWriter.cpp
index e6cd418c32..e3e2484def 100644
--- a/lib/VMCore/AsmWriter.cpp
+++ b/lib/VMCore/AsmWriter.cpp
@@ -658,6 +658,23 @@ static const char *getPredicateText(unsigned predicate) {
return pred;
}
+static void writeAtomicRMWOperation(raw_ostream &Out,
+ AtomicRMWInst::BinOp Op) {
+ switch (Op) {
+ default: Out << " <unknown operation " << Op << ">"; break;
+ case AtomicRMWInst::Xchg: Out << " xchg"; break;
+ case AtomicRMWInst::Add: Out << " add"; break;
+ case AtomicRMWInst::Sub: Out << " sub"; break;
+ case AtomicRMWInst::And: Out << " and"; break;
+ case AtomicRMWInst::Nand: Out << " nand"; break;
+ case AtomicRMWInst::Or: Out << " or"; break;
+ case AtomicRMWInst::Xor: Out << " xor"; break;
+ case AtomicRMWInst::Max: Out << " max"; break;
+ case AtomicRMWInst::Min: Out << " min"; break;
+ case AtomicRMWInst::UMax: Out << " umax"; break;
+ case AtomicRMWInst::UMin: Out << " umin"; break;
+ }
+}
static void WriteOptimizationInfo(raw_ostream &Out, const User *U) {
if (const OverflowingBinaryOperator *OBO =
@@ -1670,6 +1687,10 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
if (const CmpInst *CI = dyn_cast<CmpInst>(&I))
Out << ' ' << getPredicateText(CI->getPredicate());
+ // Print out the atomicrmw operation
+ if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I))
+ writeAtomicRMWOperation(Out, RMWI->getOperation());
+
// Print out the type of the operands...
const Value *Operand = I.getNumOperands() ? I.getOperand(0) : 0;
@@ -1936,6 +1957,10 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
Out << ", align " << cast<LoadInst>(I).getAlignment();
} else if (isa<StoreInst>(I) && cast<StoreInst>(I).getAlignment()) {
Out << ", align " << cast<StoreInst>(I).getAlignment();
+ } else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(&I)) {
+ writeAtomic(CXI->getOrdering(), CXI->getSynchScope());
+ } else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I)) {
+ writeAtomic(RMWI->getOrdering(), RMWI->getSynchScope());
} else if (const FenceInst *FI = dyn_cast<FenceInst>(&I)) {
writeAtomic(FI->getOrdering(), FI->getSynchScope());
}
diff --git a/lib/VMCore/Instruction.cpp b/lib/VMCore/Instruction.cpp
index 09d16e7d44..ad433ef22a 100644
--- a/lib/VMCore/Instruction.cpp
+++ b/lib/VMCore/Instruction.cpp
@@ -128,6 +128,8 @@ const char *Instruction::getOpcodeName(unsigned OpCode) {
case Alloca: return "alloca";
case Load: return "load";
case Store: return "store";
+ case AtomicCmpXchg: return "cmpxchg";
+ case AtomicRMW: return "atomicrmw";
case Fence: return "fence";
case GetElementPtr: return "getelementptr";
diff --git a/lib/VMCore/Instructions.cpp b/lib/VMCore/Instructions.cpp
index 9fdff0773a..abee7b741a 100644
--- a/lib/VMCore/Instructions.cpp
+++ b/lib/VMCore/Instructions.cpp
@@ -1106,6 +1106,101 @@ void StoreInst::setAlignment(unsigned Align) {
}
//===----------------------------------------------------------------------===//
+// AtomicCmpXchgInst Implementation
+//===----------------------------------------------------------------------===//
+
+void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
+ AtomicOrdering Ordering,
+ SynchronizationScope SynchScope) {
+ Op<0>() = Ptr;
+ Op<1>() = Cmp;
+ Op<2>() = NewVal;
+ setOrdering(Ordering);
+ setSynchScope(SynchScope);
+
+ assert(getOperand(0) && getOperand(1) && getOperand(2) &&
+ "All operands must be non-null!");
+ assert(getOperand(0)->getType()->isPointerTy() &&
+ "Ptr must have pointer type!");
+ assert(getOperand(1)->getType() ==
+ cast<PointerType>(getOperand(0)->getType())->getElementType()
+ && "Ptr must be a pointer to Cmp type!");
+ assert(getOperand(2)->getType() ==
+ cast<PointerType>(getOperand(0)->getType())->getElementType()
+ && "Ptr must be a pointer to NewVal type!");
+ assert(Ordering != NotAtomic &&
+ "AtomicCmpXchg instructions must be atomic!");
+}
+
+AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
+ AtomicOrdering Ordering,
+ SynchronizationScope SynchScope,
+ Instruction *InsertBefore)
+ : Instruction(Cmp->getType(), AtomicCmpXchg,
+ OperandTraits<AtomicCmpXchgInst>::op_begin(this),
+ OperandTraits<AtomicCmpXchgInst>::operands(this),
+ InsertBefore) {
+ Init(Ptr, Cmp, NewVal, Ordering, SynchScope);
+}
+
+AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
+ AtomicOrdering Ordering,
+ SynchronizationScope SynchScope,
+ BasicBlock *InsertAtEnd)
+ : Instruction(Cmp->getType(), AtomicCmpXchg,
+ OperandTraits<AtomicCmpXchgInst>::op_begin(this),
+ OperandTraits<AtomicCmpXchgInst>::operands(this),
+ InsertAtEnd) {
+ Init(Ptr, Cmp, NewVal, Ordering, SynchScope);
+}
+
+//===----------------------------------------------------------------------===//
+// AtomicRMWInst Implementation
+//===----------------------------------------------------------------------===//
+
+void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
+ AtomicOrdering Ordering,
+ SynchronizationScope SynchScope) {
+ Op<0>() = Ptr;
+ Op<1>() = Val;
+ setOperation(Operation);
+ setOrdering(Ordering);
+ setSynchScope(SynchScope);
+
+ assert(getOperand(0) && getOperand(1) &&
+ "All operands must be non-null!");
+ assert(getOperand(0)->getType()->isPointerTy() &&
+ "Ptr must have pointer type!");
+ assert(getOperand(1)->getType() ==
+ cast<PointerType>(getOperand(0)->getType())->getElementType()
+ && "Ptr must be a pointer to Val type!");
+ assert(Ordering != NotAtomic &&
+ "AtomicRMW instructions must be atomic!");
+}
+
+AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
+ AtomicOrdering Ordering,
+ SynchronizationScope SynchScope,
+ Instruction *InsertBefore)
+ : Instruction(Val->getType(), AtomicRMW,
+ OperandTraits<AtomicRMWInst>::op_begin(this),
+ OperandTraits<AtomicRMWInst>::operands(this),
+ InsertBefore) {
+ Init(Operation, Ptr, Val, Ordering, SynchScope);
+}
+
+AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
+ AtomicOrdering Ordering,
+ SynchronizationScope SynchScope,
+ BasicBlock *InsertAtEnd)
+ : Instruction(Val->getType(), AtomicRMW,
+ OperandTraits<AtomicRMWInst>::op_begin(this),
+ OperandTraits<AtomicRMWInst>::operands(this),
+ InsertAtEnd) {
+ Init(Operation, Ptr, Val, Ordering, SynchScope);
+}
+
+//===----------------------------------------------------------------------===//
// FenceInst Implementation
//===----------------------------------------------------------------------===//
@@ -3148,6 +3243,22 @@ StoreInst *StoreInst::clone_impl() const {
isVolatile(), getAlignment());
}
+AtomicCmpXchgInst *AtomicCmpXchgInst::clone_impl() const {
+ AtomicCmpXchgInst *Result =
+ new AtomicCmpXchgInst(getOperand(0), getOperand(1), getOperand(2),
+ getOrdering(), getSynchScope());
+ Result->setVolatile(isVolatile());
+ return Result;
+}
+
+AtomicRMWInst *AtomicRMWInst::clone_impl() const {
+ AtomicRMWInst *Result =
+ new AtomicRMWInst(getOperation(),getOperand(0), getOperand(1),
+ getOrdering(), getSynchScope());
+ Result->setVolatile(isVolatile());
+ return Result;
+}
+
FenceInst *FenceInst::clone_impl() const {
return new FenceInst(getContext(), getOrdering(), getSynchScope());
}
diff --git a/lib/VMCore/Verifier.cpp b/lib/VMCore/Verifier.cpp
index 9ec2edf3fc..905e9a2623 100644
--- a/lib/VMCore/Verifier.cpp
+++ b/lib/VMCore/Verifier.cpp
@@ -288,6 +288,8 @@ namespace {
void visitUserOp1(Instruction &I);
void visitUserOp2(Instruction &I) { visitUserOp1(I); }
void visitIntrinsicFunctionCall(Intrinsic::ID ID, CallInst &CI);
+ void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
+ void visitAtomicRMWInst(AtomicRMWInst &RMWI);
void visitFenceInst(FenceInst &FI);
void visitAllocaInst(AllocaInst &AI);
void visitExtractValueInst(ExtractValueInst &EVI);
@@ -1327,6 +1329,40 @@ void Verifier::visitAllocaInst(AllocaInst &AI) {
visitInstruction(AI);
}
+void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
+ Assert1(CXI.getOrdering() != NotAtomic,
+ "cmpxchg instructions must be atomic.", &CXI);
+ Assert1(CXI.getOrdering() != Unordered,
+ "cmpxchg instructions cannot be unordered.", &CXI);
+ PointerType *PTy = dyn_cast<PointerType>(CXI.getOperand(0)->getType());
+ Assert1(PTy, "First cmpxchg operand must be a pointer.", &CXI);
+ Type *ElTy = PTy->getElementType();
+ Assert2(ElTy == CXI.getOperand(1)->getType(),
+ "Expected value type does not match pointer operand type!",
+ &CXI, ElTy);
+ Assert2(ElTy == CXI.getOperand(2)->getType(),
+ "Stored value type does not match pointer operand type!",
+ &CXI, ElTy);
+ visitInstruction(CXI);
+}
+
+void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
+ Assert1(RMWI.getOrdering() != NotAtomic,
+ "atomicrmw instructions must be atomic.", &RMWI);
+ Assert1(RMWI.getOrdering() != Unordered,
+ "atomicrmw instructions cannot be unordered.", &RMWI);
+ PointerType *PTy = dyn_cast<PointerType>(RMWI.getOperand(0)->getType());
+ Assert1(PTy, "First atomicrmw operand must be a pointer.", &RMWI);
+ Type *ElTy = PTy->getElementType();
+ Assert2(ElTy == RMWI.getOperand(1)->getType(),
+ "Argument value type does not match pointer operand type!",
+ &RMWI, ElTy);
+ Assert1(AtomicRMWInst::FIRST_BINOP <= RMWI.getOperation() &&
+ RMWI.getOperation() <= AtomicRMWInst::LAST_BINOP,
+ "Invalid binary operation!", &RMWI);
+ visitInstruction(RMWI);
+}
+
void Verifier::visitFenceInst(FenceInst &FI) {
const AtomicOrdering Ordering = FI.getOrdering();
Assert1(Ordering == Acquire || Ordering == Release ||
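Conversely, a brief sketch of malformed forms these verifier checks (and the matching checks in LLParser) are meant to reject; names are again illustrative:

  ; rejected: cmpxchg and atomicrmw may not be unordered
  %bad1 = cmpxchg i32* %p, i32 %c, i32 %n unordered
  ; rejected: the value type must match the pointer's element type
  %bad2 = atomicrmw add i32* %p, i8 1 seq_cst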