author     Sylvestre Ledru <sylvestre@debian.org>  2012-09-27 10:14:43 +0000
committer  Sylvestre Ledru <sylvestre@debian.org>  2012-09-27 10:14:43 +0000
commit     94c22716d60ff5edf6a98a3c67e0faa001be1142 (patch)
tree       2442b2b9658c29e72b1dd7abae926751854c68dc /lib
parent     7e2c793a2b5c746344652b6579e958ee42fafdcc (diff)
Revert "Fix a typo 'iff' => 'if'". "iff" is an abbreviation of "if and only if"; see http://en.wikipedia.org/wiki/If_and_only_if. This reverts commit 164767.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@164768 91177308-0d34-0410-b5e6-96231b3b80d8
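Background on the terminology (an editorial gloss, not part of the commit message): "iff" asserts a biconditional, which is strictly stronger than the one-way conditional "if". In logic notation:

    A \iff B \;\equiv\; (A \Rightarrow B) \land (B \Rightarrow A), \qquad \text{whereas "A if B" claims only } B \Rightarrow A

So a comment such as "Comparison is true iff the LHS <s 0" also promises that the comparison is false in every other case, which is exactly the guarantee the reverted change had erased.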
Diffstat (limited to 'lib')
42 files changed, 128 insertions, 128 deletions
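Many of the restored comments state bit-level biconditionals. As one concrete illustration (a minimal, self-contained sketch written for this page, not code from the commit), the DAGCombiner rule "fold (a+b) -> (a|b) iff a and b share no bits" holds in both directions for unsigned machine integers, because a + b == (a | b) + (a & b):

#include <cassert>
#include <cstdint>

int main() {
  // Disjoint bit patterns: addition and bitwise-or coincide.
  uint32_t a = 0xF0, b = 0x0F;
  assert((a + b) == (a | b));                 // both are 0xFF

  // Overlapping bit (0x08 is set in both): the carry makes them differ,
  // which is the "only if" direction that the revert restores.
  uint32_t c = 0x18, d = 0x09;
  assert((c + d) == 0x21 && (c | d) == 0x19);

  // The identity behind both cases: the results agree exactly when a & b == 0.
  assert(a + b == (a | b) + (a & b));
  assert(c + d == (c | d) + (c & d));
  return 0;
}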
diff --git a/lib/Analysis/InstructionSimplify.cpp b/lib/Analysis/InstructionSimplify.cpp
index 07b685082f..379a35ad37 100644
--- a/lib/Analysis/InstructionSimplify.cpp
+++ b/lib/Analysis/InstructionSimplify.cpp
@@ -2004,7 +2004,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
       // LHS >u RHS.
     case ICmpInst::ICMP_UGT:
     case ICmpInst::ICMP_UGE:
-      // Comparison is true if the LHS <s 0.
+      // Comparison is true iff the LHS <s 0.
       if (MaxRecurse)
         if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SLT, SrcOp,
                                         Constant::getNullValue(SrcTy),
@@ -2013,7 +2013,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
       break;
     case ICmpInst::ICMP_ULT:
     case ICmpInst::ICMP_ULE:
-      // Comparison is true if the LHS >=s 0.
+      // Comparison is true iff the LHS >=s 0.
       if (MaxRecurse)
         if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SGE, SrcOp,
                                         Constant::getNullValue(SrcTy),
@@ -2171,31 +2171,31 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
   // Simplify comparisons involving max/min.
   Value *A, *B;
   CmpInst::Predicate P = CmpInst::BAD_ICMP_PREDICATE;
-  CmpInst::Predicate EqP; // Chosen so that "A == max/min(A,B)" if "A EqP B".
+  CmpInst::Predicate EqP; // Chosen so that "A == max/min(A,B)" iff "A EqP B".
   // Signed variants on "max(a,b)>=a -> true".
   if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
     if (A != RHS) std::swap(A, B); // smax(A, B) pred A.
-    EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" if "A sge B".
+    EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
     // We analyze this as smax(A, B) pred A.
     P = Pred;
   } else if (match(RHS, m_SMax(m_Value(A), m_Value(B))) &&
              (A == LHS || B == LHS)) {
     if (A != LHS) std::swap(A, B); // A pred smax(A, B).
-    EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" if "A sge B".
+    EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
     // We analyze this as smax(A, B) swapped-pred A.
     P = CmpInst::getSwappedPredicate(Pred);
   } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) &&
              (A == RHS || B == RHS)) {
     if (A != RHS) std::swap(A, B); // smin(A, B) pred A.
-    EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" if "A sle B".
+    EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
     // We analyze this as smax(-A, -B) swapped-pred -A.
     // Note that we do not need to actually form -A or -B thanks to EqP.
     P = CmpInst::getSwappedPredicate(Pred);
   } else if (match(RHS, m_SMin(m_Value(A), m_Value(B))) &&
              (A == LHS || B == LHS)) {
     if (A != LHS) std::swap(A, B); // A pred smin(A, B).
-    EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" if "A sle B".
+    EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
     // We analyze this as smax(-A, -B) pred -A.
     // Note that we do not need to actually form -A or -B thanks to EqP.
     P = Pred;
@@ -2246,26 +2246,26 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
   P = CmpInst::BAD_ICMP_PREDICATE;
   if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
     if (A != RHS) std::swap(A, B); // umax(A, B) pred A.
-    EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" if "A uge B".
+    EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
     // We analyze this as umax(A, B) pred A.
     P = Pred;
   } else if (match(RHS, m_UMax(m_Value(A), m_Value(B))) &&
              (A == LHS || B == LHS)) {
     if (A != LHS) std::swap(A, B); // A pred umax(A, B).
-    EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" if "A uge B".
+    EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
     // We analyze this as umax(A, B) swapped-pred A.
     P = CmpInst::getSwappedPredicate(Pred);
   } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) &&
              (A == RHS || B == RHS)) {
     if (A != RHS) std::swap(A, B); // umin(A, B) pred A.
-    EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" if "A ule B".
+    EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
     // We analyze this as umax(-A, -B) swapped-pred -A.
     // Note that we do not need to actually form -A or -B thanks to EqP.
     P = CmpInst::getSwappedPredicate(Pred);
   } else if (match(RHS, m_UMin(m_Value(A), m_Value(B))) &&
              (A == LHS || B == LHS)) {
     if (A != LHS) std::swap(A, B); // A pred umin(A, B).
-    EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" if "A ule B".
+    EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
     // We analyze this as umax(-A, -B) pred -A.
     // Note that we do not need to actually form -A or -B thanks to EqP.
     P = Pred;
diff --git a/lib/Analysis/Interval.cpp b/lib/Analysis/Interval.cpp
index 125d42e2dc..ca9cdcaf24 100644
--- a/lib/Analysis/Interval.cpp
+++ b/lib/Analysis/Interval.cpp
@@ -27,7 +27,7 @@ using namespace llvm;
 // isLoop - Find out if there is a back edge in this interval...
 //
 bool Interval::isLoop() const {
-  // There is a loop in this interval if one of the predecessors of the header
+  // There is a loop in this interval iff one of the predecessors of the header
   // node lives in the interval.
   for (::pred_iterator I = ::pred_begin(HeaderNode), E = ::pred_end(HeaderNode);
        I != E; ++I)
diff --git a/lib/Analysis/LazyValueInfo.cpp b/lib/Analysis/LazyValueInfo.cpp
index d6dd627ba3..ec618fad22 100644
--- a/lib/Analysis/LazyValueInfo.cpp
+++ b/lib/Analysis/LazyValueInfo.cpp
@@ -1109,14 +1109,14 @@ LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
       // If this is an equality comparison, we can try to fold it knowing that
       // "V != C1".
       if (Pred == ICmpInst::ICMP_EQ) {
-        // !C1 == C -> false if C1 == C.
+        // !C1 == C -> false iff C1 == C.
         Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
                                               Result.getNotConstant(), C, TD,
                                               TLI);
         if (Res->isNullValue())
           return False;
       } else if (Pred == ICmpInst::ICMP_NE) {
-        // !C1 != C -> true if C1 == C.
+        // !C1 != C -> true iff C1 == C.
         Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
                                               Result.getNotConstant(), C, TD,
                                               TLI);
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index fe5860e6c3..9b9c889496 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -1367,7 +1367,7 @@ const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
 /// This form often exposes folding opportunities that are hidden in
 /// the original operand list.
 ///
-/// Return true if it appears that any interesting folding opportunities
+/// Return true iff it appears that any interesting folding opportunities
 /// may be exposed. This helps getAddRecExpr short-circuit extra work in
 /// the common case where no interesting opportunities are present, and
 /// is also used as a check to avoid infinite recursion.
@@ -5598,7 +5598,7 @@ static bool HasSameValue(const SCEV *A, const SCEV *B) {
 }
 
 /// SimplifyICmpOperands - Simplify LHS and RHS in a comparison with
-/// predicate Pred. Return true if any changes were made.
+/// predicate Pred. Return true iff any changes were made.
 ///
 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
                                            const SCEV *&LHS, const SCEV *&RHS,
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index 19fde0e419..491224a4b6 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -470,7 +470,7 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
     return;
   }
   case Instruction::Shl:
-    // (shl X, C1) & C2 == 0 if (X & C2 >>u C1) == 0
+    // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
     if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
       uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
       ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
@@ -482,7 +482,7 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
     }
     break;
   case Instruction::LShr:
-    // (ushr X, C1) & C2 == 0 if (-1 >> C1) & C2 == 0
+    // (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
     if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
       // Compute the new bits that are at the top now.
       uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
@@ -498,7 +498,7 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
     }
     break;
   case Instruction::AShr:
-    // (ashr X, C1) & C2 == 0 if (-1 >> C1) & C2 == 0
+    // (ashr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
     if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
       // Compute the new bits that are at the top now.
       uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 2598f4b81e..2931d2de97 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -578,7 +578,7 @@ SDValue DAGCombiner::ReassociateOps(unsigned Opc, DebugLoc DL,
       return DAG.getNode(Opc, DL, VT, N0.getOperand(0), OpNode);
     }
     if (N0.hasOneUse()) {
-      // reassoc. (op (op x, c1), y) -> (op (op x, y), c1) if x+c1 has one use
+      // reassoc. (op (op x, c1), y) -> (op (op x, y), c1) iff x+c1 has one use
       SDValue OpNode = DAG.getNode(Opc, N0.getDebugLoc(), VT,
                                    N0.getOperand(0), N1);
       AddToWorkList(OpNode.getNode());
@@ -596,7 +596,7 @@ SDValue DAGCombiner::ReassociateOps(unsigned Opc, DebugLoc DL,
       return DAG.getNode(Opc, DL, VT, N1.getOperand(0), OpNode);
     }
     if (N1.hasOneUse()) {
-      // reassoc. (op y, (op x, c1)) -> (op (op x, y), c1) if x+c1 has one use
+      // reassoc. (op y, (op x, c1)) -> (op (op x, y), c1) iff x+c1 has one use
       SDValue OpNode = DAG.getNode(Opc, N0.getDebugLoc(), VT,
                                    N1.getOperand(0), N0);
       AddToWorkList(OpNode.getNode());
@@ -1455,7 +1455,7 @@ SDValue DAGCombiner::visitADD(SDNode *N) {
   if (!VT.isVector() && SimplifyDemandedBits(SDValue(N, 0)))
     return SDValue(N, 0);
 
-  // fold (a+b) -> (a|b) if a and b share no bits.
+  // fold (a+b) -> (a|b) iff a and b share no bits.
   if (VT.isInteger() && !VT.isVector()) {
     APInt LHSZero, LHSOne;
     APInt RHSZero, RHSOne;
@@ -1549,7 +1549,7 @@ SDValue DAGCombiner::visitADDC(SDNode *N) {
     return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE,
                                         N->getDebugLoc(), MVT::Glue));
 
-  // fold (addc a, b) -> (or a, b), CARRY_FALSE if a and b share no bits.
+  // fold (addc a, b) -> (or a, b), CARRY_FALSE iff a and b share no bits.
   APInt LHSZero, LHSOne;
   APInt RHSZero, RHSOne;
   DAG.ComputeMaskedBits(N0, LHSZero, LHSOne);
@@ -1937,7 +1937,7 @@ SDValue DAGCombiner::visitUDIV(SDNode *N) {
       return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0,
                          DAG.getConstant(N1C->getAPIntValue().logBase2(),
                                          getShiftAmountTy(N0.getValueType())));
-  // fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) if c is power of 2
+  // fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
   if (N1.getOpcode() == ISD::SHL) {
     if (ConstantSDNode *SHC = dyn_cast<ConstantSDNode>(N1.getOperand(0))) {
       if (SHC->getAPIntValue().isPowerOf2()) {
@@ -2642,7 +2642,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
       return SDValue(N, 0); // Return N so it doesn't get rechecked!
     }
   }
-  // fold (zext_inreg (sextload x)) -> (zextload x) if load has one use
+  // fold (zext_inreg (sextload x)) -> (zextload x) iff load has one use
   if (ISD::isSEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
       N0.hasOneUse()) {
     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
@@ -3038,7 +3038,7 @@ SDValue DAGCombiner::visitOR(SDNode *N) {
   // fold (or x, -1) -> -1
   if (N1C && N1C->isAllOnesValue())
     return N1;
-  // fold (or x, c) -> c if (x & ~c) == 0
+  // fold (or x, c) -> c iff (x & ~c) == 0
   if (N1C && DAG.MaskedValueIsZero(N0, ~N1C->getAPIntValue()))
     return N1;
 
@@ -3055,7 +3055,7 @@ SDValue DAGCombiner::visitOR(SDNode *N) {
   if (ROR.getNode() != 0)
     return ROR;
   // Canonicalize (or (and X, c1), c2) -> (and (or X, c2), c1|c2)
-  // if (c1 & c2) == 0.
+  // iff (c1 & c2) == 0.
   if (N1C && N0.getOpcode() == ISD::AND && N0.getNode()->hasOneUse() &&
       isa<ConstantSDNode>(N0.getOperand(1))) {
     ConstantSDNode *C1 = cast<ConstantSDNode>(N0.getOperand(1));
@@ -3392,7 +3392,7 @@ SDValue DAGCombiner::visitXOR(SDNode *N) {
       return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT, V);
   }
 
-  // fold (not (or x, y)) -> (and (not x), (not y)) if x or y are setcc
+  // fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are setcc
   if (N1C && N1C->getAPIntValue() == 1 && VT == MVT::i1 &&
       (N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) {
     SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
@@ -3404,7 +3404,7 @@ SDValue DAGCombiner::visitXOR(SDNode *N) {
       return DAG.getNode(NewOpcode, N->getDebugLoc(), VT, LHS, RHS);
     }
   }
-  // fold (not (or x, y)) -> (and (not x), (not y)) if x or y are constants
+  // fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are constants
   if (N1C && N1C->isAllOnesValue() &&
       (N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) {
     SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
@@ -3882,7 +3882,7 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
       return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT,
                          N0.getOperand(0), N1);
   }
 
-  // fold (srl (ctlz x), "5") -> x if x has one bit set (the low bit).
+  // fold (srl (ctlz x), "5") -> x iff x has one bit set (the low bit).
   if (N1C && N0.getOpcode() == ISD::CTLZ &&
       N1C->getAPIntValue() == Log2_32(VT.getSizeInBits())) {
     APInt KnownZero, KnownOne;
@@ -4816,7 +4816,7 @@ SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
   if (N0.getOpcode() == ISD::TRUNCATE) {
     SDValue TruncOp = N0.getOperand(0);
     if (TruncOp.getValueType() == VT)
-      return TruncOp; // x if x size == zext size.
+      return TruncOp; // x iff x size == zext size.
     if (TruncOp.getValueType().bitsGT(VT))
       return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, TruncOp);
     return DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, TruncOp);
@@ -5168,12 +5168,12 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
     return NarrowLoad;
 
   // fold (sext_in_reg (srl X, 24), i8) -> (sra X, 24)
-  // fold (sext_in_reg (srl X, 23), i8) -> (sra X, 23) if possible.
+  // fold (sext_in_reg (srl X, 23), i8) -> (sra X, 23) iff possible.
   // We already fold "(sext_in_reg (srl X, 25), i8) -> srl X, 25" above.
   if (N0.getOpcode() == ISD::SRL) {
     if (ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1)))
       if (ShAmt->getZExtValue()+EVTBits <= VTBits) {
-        // We can turn this into an SRA if the input to the SRL is already sign
+        // We can turn this into an SRA iff the input to the SRL is already sign
         // extended enough.
         unsigned InSignBits = DAG.ComputeNumSignBits(N0.getOperand(0));
         if (VTBits-(ShAmt->getZExtValue()+EVTBits) < InSignBits)
@@ -5199,7 +5199,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
     CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
     return SDValue(N, 0); // Return N so it doesn't get rechecked!
   }
-  // fold (sext_inreg (zextload x)) -> (sextload x) if load has one use
+  // fold (sext_inreg (zextload x)) -> (sextload x) iff load has one use
   if (ISD::isZEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
       N0.hasOneUse() &&
       EVT == cast<LoadSDNode>(N0)->getMemoryVT() &&
@@ -5506,7 +5506,7 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
     }
   }
 
-  // bitconvert(build_pair(ld, ld)) -> ld if load locations are consecutive.
+  // bitconvert(build_pair(ld, ld)) -> ld iff load locations are consecutive.
   if (N0.getOpcode() == ISD::BUILD_PAIR) {
     SDValue CombineLD = CombineConsecutiveLoads(N0.getNode(), VT);
     if (CombineLD.getNode())
@@ -6151,8 +6151,8 @@ SDValue DAGCombiner::visitFCOPYSIGN(SDNode *N) {
   if (N1CFP) {
     const APFloat& V = N1CFP->getValueAPF();
-    // copysign(x, c1) -> fabs(x) if ispos(c1)
-    // copysign(x, c1) -> fneg(fabs(x)) if isneg(c1)
+    // copysign(x, c1) -> fabs(x) iff ispos(c1)
+    // copysign(x, c1) -> fneg(fabs(x)) iff isneg(c1)
     if (!V.isNegative()) {
       if (!LegalOperations || TLI.isOperationLegal(ISD::FABS, VT))
         return DAG.getNode(ISD::FABS, N->getDebugLoc(), VT, N0);
@@ -8764,7 +8764,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
     EVT XType = N0.getValueType();
     EVT AType = N2.getValueType();
     if (XType.bitsGE(AType)) {
-      // and (sra X, size(X)-1, A) -> "and (srl X, C2), A" if A is a
+      // and (sra X, size(X)-1, A) -> "and (srl X, C2), A" iff A is a
       // single-bit constant.
       if (N2C && ((N2C->getAPIntValue() & (N2C->getAPIntValue()-1)) == 0)) {
         unsigned ShCtV = N2C->getAPIntValue().logBase2();
diff --git a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index d93b35dfb8..1cccf1a057 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -447,7 +447,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_SADDSUBO(SDNode *N, unsigned ResNo) {
   if (ResNo == 1)
     return PromoteIntRes_Overflow(N);
 
-  // The operation overflowed if the result in the larger type is not the
+  // The operation overflowed iff the result in the larger type is not the
   // sign extension of its truncation to the original type.
   SDValue LHS = SExtPromotedInteger(N->getOperand(0));
   SDValue RHS = SExtPromotedInteger(N->getOperand(1));
@@ -610,7 +610,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_UADDSUBO(SDNode *N, unsigned ResNo) {
   if (ResNo == 1)
     return PromoteIntRes_Overflow(N);
 
-  // The operation overflowed if the result in the larger type is not the
+  // The operation overflowed iff the result in the larger type is not the
   // zero extension of its truncation to the original type.
   SDValue LHS = ZExtPromotedInteger(N->getOperand(0));
   SDValue RHS = ZExtPromotedInteger(N->getOperand(1));
@@ -655,17 +655,17 @@ SDValue DAGTypeLegalizer::PromoteIntRes_XMULO(SDNode *N, unsigned ResNo) {
   }
 
   SDValue Mul = DAG.getNode(ISD::MUL, DL, LHS.getValueType(), LHS, RHS);
 
-  // Overflow occurred if the high part of the result does not
+  // Overflow occurred iff the high part of the result does not
   // zero/sign-extend the low part.
   SDValue Overflow;
   if (N->getOpcode() == ISD::UMULO) {
-    // Unsigned overflow occurred if the high part is non-zero.
+    // Unsigned overflow occurred iff the high part is non-zero.
     SDValue Hi = DAG.getNode(ISD::SRL, DL, Mul.getValueType(), Mul,
                              DAG.getIntPtrConstant(SmallVT.getSizeInBits()));
     Overflow = DAG.getSetCC(DL, N->getValueType(1), Hi,
                             DAG.getConstant(0, Hi.getValueType()), ISD::SETNE);
   } else {
-    // Signed overflow occurred if the high part does not sign extend the low.
+    // Signed overflow occurred iff the high part does not sign extend the low.
     SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Mul.getValueType(),
                                Mul, DAG.getValueType(SmallVT));
     Overflow = DAG.getSetCC(DL, N->getValueType(1), SExt, Mul, ISD::SETNE);
@@ -2240,8 +2240,8 @@ void DAGTypeLegalizer::ExpandIntRes_UADDSUBO(SDNode *N,
                             LHS, RHS);
   SplitInteger(Sum, Lo, Hi);
 
-  // Calculate the overflow: addition overflows if a + b < a, and subtraction
-  // overflows if a - b > a.
+  // Calculate the overflow: addition overflows iff a + b < a, and subtraction
+  // overflows iff a - b > a.
   SDValue Ofl = DAG.getSetCC(dl, N->getValueType(1), Sum, LHS,
                              N->getOpcode () == ISD::UADDO ?
                              ISD::SETULT : ISD::SETUGT);
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 99d2aada41..a48a6256e5 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -1825,7 +1825,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
     KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
     return;
   case ISD::SHL:
-    // (shl X, C1) & C2 == 0 if (X & C2 >>u C1) == 0
+    // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
     if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
       unsigned ShAmt = SA->getZExtValue();
@@ -1842,7 +1842,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
     }
     return;
   case ISD::SRL:
-    // (ushr X, C1) & C2 == 0 if (-1 >> C1) & C2 == 0
+    // (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
     if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
       unsigned ShAmt = SA->getZExtValue();
@@ -2356,7 +2356,7 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
 /// ISD::ADD with a ConstantSDNode on the right-hand side, or if it is an
 /// ISD::OR with a ConstantSDNode that is guaranteed to have the same
 /// semantics as an ADD. This handles the equivalence:
-///     X|Cst == X+Cst if X&Cst = 0.
+///     X|Cst == X+Cst iff X&Cst = 0.
 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
   if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
       !isa<ConstantSDNode>(Op.getOperand(1)))
@@ -2882,7 +2882,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
     assert(VT.isFloatingPoint() &&
            EVT.isFloatingPoint() && "Cannot FP_ROUND_INREG integer types");
     assert(EVT.isVector() == VT.isVector() &&
-           "FP_ROUND_INREG type should be vector if the operand "
+           "FP_ROUND_INREG type should be vector iff the operand "
            "type is vector!");
     assert((!EVT.isVector() ||
            EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
@@ -2918,7 +2918,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
     assert(VT.isInteger() && EVT.isInteger() &&
            "Cannot *_EXTEND_INREG FP types");
     assert(EVT.isVector() == VT.isVector() &&
-           "SIGN_EXTEND_INREG type should be vector if the operand "
+           "SIGN_EXTEND_INREG type should be vector iff the operand "
            "type is vector!");
     assert((!EVT.isVector() ||
            EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
diff --git a/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 5ed1c03eb1..be3ecf34f7 100644
--- a/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -1330,7 +1330,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
 
     // If all of the unknown bits are known to be zero on one side or the other
     // (but not both) turn this into an *inclusive* or.
-    //    e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) if C1&C2 == 0
+    //    e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
     if ((NewMask & ~KnownZero & ~KnownZero2) == 0)
       return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, dl, Op.getValueType(),
                                                Op.getOperand(0),
@@ -1344,7 +1344,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
     // If all of the demanded bits on one side are known, and all of the set
     // bits on that side are also known to be set on the other side, turn this
     // into an AND, as we know the bits will be cleared.
-    //    e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 if (C1&C2) == C2
+    //    e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
     // NB: it is okay if more bits are known than are requested
     if ((NewMask & (KnownZero|KnownOne)) == NewMask) { // all known on one side
       if (KnownOne == KnownOne2) { // set bits are the same on both sides
@@ -1970,7 +1970,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
         return DAG.getSetCC(dl, VT, And, DAG.getConstant(0, CTVT), CC);
       }
 
-      // TODO: (ctpop x) == 1 -> x && (x & x-1) == 0 if ctpop is illegal.
+      // TODO: (ctpop x) == 1 -> x && (x & x-1) == 0 iff ctpop is illegal.
     }
 
     // (zext x) == C --> x == (trunc C)
@@ -2503,7 +2503,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
                                        N0.getValueType()), Cond);
       }
 
-      // Turn (X^C1) == C2 into X == C1^C2 if X&~C1 = 0.
+      // Turn (X^C1) == C2 into X == C1^C2 iff X&~C1 = 0.
       if (N0.getOpcode() == ISD::XOR)
         // If we know that all of the inverted bits are zero, don't bother
         // performing the inversion.
diff --git a/lib/MC/WinCOFFObjectWriter.cpp b/lib/MC/WinCOFFObjectWriter.cpp
index a2a98251c4..f706cac8d3 100644
--- a/lib/MC/WinCOFFObjectWriter.cpp
+++ b/lib/MC/WinCOFFObjectWriter.cpp
@@ -289,7 +289,7 @@ size_t StringTable::size() const {
   return Data.size();
 }
 
-/// Add String to the table if it is not already there.
+/// Add String to the table iff it is not already there.
 /// @returns the index into the string table where the string is now located.
 size_t StringTable::insert(llvm::StringRef String) {
   map::iterator i = Map.find(String);
diff --git a/lib/Support/APInt.cpp b/lib/Support/APInt.cpp
index 1703eeae27..38cfaed9d2 100644
--- a/lib/Support/APInt.cpp
+++ b/lib/Support/APInt.cpp
@@ -1860,7 +1860,7 @@ APInt APInt::udiv(const APInt& RHS) const {
     // 0 / X ===> 0
     return APInt(BitWidth, 0);
   else if (lhsWords < rhsWords || this->ult(RHS)) {
-    // X / Y ===> 0, if X < Y
+    // X / Y ===> 0, iff X < Y
     return APInt(BitWidth, 0);
   } else if (*this == RHS) {
     // X / X ===> 1
@@ -1897,7 +1897,7 @@ APInt APInt::urem(const APInt& RHS) const {
     // 0 % Y ===> 0
     return APInt(BitWidth, 0);
   } else if (lhsWords < rhsWords || this->ult(RHS)) {
-    // X % Y ===> X, if X < Y
+    // X % Y ===> X, iff X < Y
     return *this;
   } else if (*this == RHS) {
     // X % X == 0;
@@ -1929,8 +1929,8 @@ void DAGTypeLegalizer... void APInt::udivrem(const APInt &LHS, const APInt &RHS,
   }
 
   if (lhsWords < rhsWords || LHS.ult(RHS)) {
-    Remainder = LHS; // X % Y ===> X, if X < Y
-    Quotient = 0;    // X / Y ===> 0, if X < Y
+    Remainder = LHS; // X % Y ===> X, iff X < Y
+    Quotient = 0;    // X / Y ===> 0, iff X < Y
     return;
   }
diff --git a/lib/Support/Statistic.cpp b/lib/Support/Statistic.cpp
index 3e2a183fe1..d8a6ad35ba 100644
--- a/lib/Support/Statistic.cpp
+++ b/lib/Support/Statistic.cpp
@@ -96,7 +96,7 @@ struct NameCompare {
 }
 
-// Print information when destroyed, if command line option is specified.
+// Print information when destroyed, iff command line option is specified.
 StatisticInfo::~StatisticInfo() {
   llvm::PrintStatistics();
 }
diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp
index 1458e33c62..8933a02701 100644
--- a/lib/Target/ARM/ARMFastISel.cpp
+++ b/lib/Target/ARM/ARMFastISel.cpp
@@ -276,7 +276,7 @@ ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
   if (TII.isPredicable(MI) || isARMNEONPred(MI))
     AddDefaultPred(MIB);
 
-  // Do we optionally set a predicate? Preds is size > 0 if the predicate
+  // Do we optionally set a predicate? Preds is size > 0 iff the predicate
   // defines CPSR. All other OptionalDefines in ARM are the CCR register.
   bool CPSR = false;
   if (DefinesOptionalPredicate(MI, &CPSR)) {
diff --git a/lib/Target/ARM/ARMISelDAGToDAG.cpp b/lib/Target/ARM/ARMISelDAGToDAG.cpp
index e9f0c8ad5a..1eea0cc61d 100644
--- a/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -2117,7 +2117,7 @@ SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
 
   if (N->getOpcode() == ISD::AND) {
     if (isOpcWithIntImmediate(N, ISD::AND, And_imm)) {
-      // The immediate is a mask of the low bits if imm & (imm+1) == 0
+      // The immediate is a mask of the low bits iff imm & (imm+1) == 0
       if (And_imm & (And_imm + 1))
         return NULL;
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 33c48350a7..f8455a4b0e 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -727,7 +727,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
   if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
       !Subtarget->isThumb1Only()) {
     // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
-    // if target supports vfp2.
+    // iff target supports vfp2.
     setOperationAction(ISD::BITCAST, MVT::i64, Custom);
     setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
   }
@@ -7692,12 +7692,12 @@ static SDValue PerformORCombine(SDNode *N,
   DebugLoc DL = N->getDebugLoc();
   // 1) or (and A, mask), val => ARMbfi A, val, mask
-  //      if (val & mask) == val
+  //      iff (val & mask) == val
   //
   // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
-  // 2a) if isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2)
+  // 2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2)
   //          && mask == ~mask2
-  // 2b) if isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2)
+  // 2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2)
   //          && ~mask == mask2
   //  (i.e., copy a bitfield value into another bitfield of the same width)
diff --git a/lib/Target/ARM/ARMInstrThumb.td b/lib/Target/ARM/ARMInstrThumb.td
index ca3d6d7dac..e171f8b092 100644
--- a/lib/Target/ARM/ARMInstrThumb.td
+++ b/lib/Target/ARM/ARMInstrThumb.td
@@ -698,7 +698,7 @@ def tLDMIA : T1I<(outs), (ins tGPR:$Rn, pred:$p, reglist:$regs, variable_ops),
 }
 
 // Writeback version is just a pseudo, as there's no encoding difference.
-// Writeback happens if the base register is not in the destination register
+// Writeback happens iff the base register is not in the destination register
 // list.
 def tLDMIA_UPD : InstTemplate<AddrModeNone, 0, IndexModeNone, Pseudo, GenericDomain,
diff --git a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index a16931ebd8..aa5ba46ab2 100644
--- a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -5316,7 +5316,7 @@ validateInstruction(MCInst &Inst,
     // instruction. We'll make the transformation in processInstruction()
     // if necessary.
     //
-    // Thumb LDM instructions are writeback if the base register is not
+    // Thumb LDM instructions are writeback iff the base register is not
     // in the register list.
     unsigned Rn = Inst.getOperand(0).getReg();
     bool hasWritebackToken =
@@ -7023,7 +7023,7 @@ processInstruction(MCInst &Inst,
       Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
     break;
   case ARM::tADDi8:
-    // If the immediate is in the range 0-7, we want tADDi3 if Rd was
+    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
     // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
     // to encoding T2 if <Rd> is specified and encoding T2 is preferred
     // to encoding T1 if <Rd> is omitted."
@@ -7033,7 +7033,7 @@ processInstruction(MCInst &Inst,
     }
     break;
   case ARM::tSUBi8:
-    // If the immediate is in the range 0-7, we want tADDi3 if Rd was
+    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
     // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
     // to encoding T2 if <Rd> is specified and encoding T2 is preferred
     // to encoding T1 if <Rd> is omitted."
diff --git a/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp b/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
index e704548d39..5efc6a36b8 100644
--- a/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
+++ b/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
@@ -221,7 +221,7 @@ bool MSP430DAGToDAGISel::MatchAddress(SDValue N, MSP430ISelAddressMode &AM) {
   }
 
   case ISD::OR:
-    // Handle "X | C" as "X + C" if X is known to have C bits clear.
+    // Handle "X | C" as "X + C" iff X is known to have C bits clear.
     if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
       MSP430ISelAddressMode Backup = AM;
       uint64_t Offset = CN->getSExtValue();
diff --git a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index 4f8aa4ccf1..e8f4d16997 100644
--- a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -74,7 +74,7 @@ namespace {
       return CurDAG->getTargetConstant(Imm, PPCLowering.getPointerTy());
     }
 
-    /// isRunOfOnes - Returns true if Val consists of one contiguous run of 1s
+    /// isRunOfOnes - Returns true iff Val consists of one contiguous run of 1s
     /// with any number of 0s on either side. The 1s are allowed to wrap from
    /// LSB to MSB, so 0x000FFF0, 0x0000FFFF, and 0xFF0000FF are all runs.
     /// 0x0F0F0000 is not, since all 1s are not contiguous.
diff --git a/lib/Target/X86/X86FloatingPoint.cpp b/lib/Target/X86/X86FloatingPoint.cpp
index 2d027ddfc6..791f5982af 100644
--- a/lib/Target/X86/X86FloatingPoint.cpp
+++ b/lib/Target/X86/X86FloatingPoint.cpp
@@ -1280,7 +1280,7 @@ void FPS::handleCompareFP(MachineBasicBlock::iterator &I) {
 }
 
 /// handleCondMovFP - Handle two address conditional move instructions. These
-/// instructions move a st(i) register to st(0) if a condition is true. These
+/// instructions move a st(i) register to st(0) iff a condition is true. These
 /// instructions require that the first operand is at the top of the stack, but
 /// otherwise don't modify the stack at all.
 void FPS::handleCondMovFP(MachineBasicBlock::iterator &I) {
diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp
index 81be243a0e..40605afb69 100644
--- a/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -1202,7 +1202,7 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
   }
 
   case ISD::OR:
-    // Handle "X | C" as "X + C" if X is known to have C bits clear.
+    // Handle "X | C" as "X + C" iff X is known to have C bits clear.
     if (CurDAG->isBaseWithConstantOffset(N)) {
       X86ISelAddressMode Backup = AM;
       ConstantSDNode *CN = cast<ConstantSDNode>(N.getOperand(1));
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 69c7c9a03c..f46989fb02 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -1652,7 +1652,7 @@ static bool isSafeToClobberEFLAGS(MachineBasicBlock &MBB,
     Iter = I;
     for (unsigned i = 0; i < 4; ++i) {
       // If we make it to the beginning of the block, it's safe to clobber
-      // EFLAGS if EFLAGS is not live-in.
+      // EFLAGS iff EFLAGS is not live-in.
       if (Iter == B)
         return !MBB.isLiveIn(X86::EFLAGS);
diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp
index 8231d1baef..3695ce2324 100644
--- a/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/lib/Target/XCore/XCoreISelLowering.cpp
@@ -1361,7 +1361,7 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
       return DAG.getMergeValues(Ops, 2, dl);
     }
 
-    // fold (ladd x, 0, y) -> 0, add x, y if carry is unused and y has only the
+    // fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only the
     // low bit set
     if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 0)) {
       APInt KnownZero, KnownOne;
@@ -1385,7 +1385,7 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
     ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
     EVT VT = N0.getValueType();
 
-    // fold (lsub 0, 0, x) -> x, -x if x has only the low bit set
+    // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
     if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
       APInt KnownZero, KnownOne;
       APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
@@ -1400,7 +1400,7 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
       }
     }
 
-    // fold (lsub x, 0, y) -> 0, sub x, y if borrow is unused and y has only the
+    // fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only the
     // low bit set
     if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 0)) {
       APInt KnownZero, KnownOne;
diff --git a/lib/Transforms/IPO/ConstantMerge.cpp b/lib/Transforms/IPO/ConstantMerge.cpp
index 988c9e5359..d8fae8a4b2 100644
--- a/lib/Transforms/IPO/ConstantMerge.cpp
+++ b/lib/Transforms/IPO/ConstantMerge.cpp
@@ -43,7 +43,7 @@ namespace {
     // duplicate constants.
     bool runOnModule(Module &M);
 
-    // Return true if we can determine the alignment of this global variable.
+    // Return true iff we can determine the alignment of this global variable.
     bool hasKnownAlignment(GlobalVariable *GV) const;
 
     // Return the alignment of the global, including converting the default
diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp
index bde6064838..b888e95982 100644
--- a/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/lib/Transforms/IPO/GlobalOpt.cpp
@@ -172,7 +172,7 @@ static AtomicOrdering StrongerOrdering(AtomicOrdering X, AtomicOrdering Y) {
   return (AtomicOrdering)std::max(X, Y);
 }
 
-/// SafeToDestroyConstant - It is safe to destroy a constant if it is only used
+/// SafeToDestroyConstant - It is safe to destroy a constant iff it is only used
 /// by constants itself. Note that constants cannot be cyclic, so this test is
 /// pretty easy to implement recursively.
 ///
diff --git a/lib/Transforms/IPO/PruneEH.cpp b/lib/Transforms/IPO/PruneEH.cpp
index ae1e3c7b9e..c8cc8fd193 100644
--- a/lib/Transforms/IPO/PruneEH.cpp
+++ b/lib/Transforms/IPO/PruneEH.cpp
@@ -8,7 +8,7 @@
 //===----------------------------------------------------------------------===//
 //
 // This file implements a simple interprocedural pass which walks the
-// call-graph, turning invoke instructions into calls, if the callee cannot
+// call-graph, turning invoke instructions into calls, iff the callee cannot
 // throw an exception, and marking functions 'nounwind' if they cannot throw.
 // It implements this as a bottom-up traversal of the call-graph.
 //
diff --git a/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index 874bb8f292..99b62f8d05 100644
--- a/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -200,7 +200,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
     if (dyn_castFoldableMul(RHS, C2) == LHS)
       return BinaryOperator::CreateMul(LHS, AddOne(C2));
 
-    // A+B --> A|B if A and B have no bits set in common.
+    // A+B --> A|B iff A and B have no bits set in common.
     if (IntegerType *IT = dyn_cast<IntegerType>(I.getType())) {
       APInt LHSKnownOne(IT->getBitWidth(), 0);
       APInt LHSKnownZero(IT->getBitWidth(), 0);
@@ -216,7 +216,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
       }
     }
 
-    // W*X + Y*Z --> W * (X+Z) if W == Y
+    // W*X + Y*Z --> W * (X+Z) iff W == Y
     {
       Value *W, *X, *Y, *Z;
       if (match(LHS, m_Mul(m_Value(W), m_Value(X))) &&
diff --git a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 85e18a3591..7d0af0d802 100644
--- a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -315,7 +315,7 @@ Value *InstCombiner::InsertRangeTest(Value *V, Constant *Lo, Constant *Hi,
   return Builder->CreateICmpUGT(Add, LowerBound);
 }
 
-// isRunOfOnes - Returns true if Val consists of one contiguous run of 1s with
+// isRunOfOnes - Returns true iff Val consists of one contiguous run of 1s with
 // any number of 0s on either side. The 1s are allowed to wrap from LSB to
 // MSB, so 0x000FFF0, 0x0000FFFF, and 0xFF0000FF are all runs. 0x0F0F0000 is
 // not, since all 1s are not contiguous.
@@ -335,9 +335,9 @@ static bool isRunOfOnes(ConstantInt *Val, uint32_t &MB, uint32_t &ME) {
 /// where isSub determines whether the operator is a sub. If we can fold one of
 /// the following xforms:
 ///
-/// ((A & N) +/- B) & Mask -> (A +/- B) & Mask if N&Mask == Mask
-/// ((A | N) +/- B) & Mask -> (A +/- B) & Mask if N&Mask == 0
-/// ((A ^ N) +/- B) & Mask -> (A +/- B) & Mask if N&Mask == 0
+/// ((A & N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == Mask
+/// ((A | N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
+/// ((A ^ N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
 ///
 /// return (A +/- B).
 ///
@@ -752,7 +752,7 @@ Value *InstCombiner::FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
 
   // (trunc x) == C1 & (and x, CA) == C2 -> (and x, CA|CMAX) == C1|C2
   // where CMAX is the all ones value for the truncated type,
-  // if the lower bits of C2 and CA are zero.
+  // iff the lower bits of C2 and CA are zero.
   if (LHSCC == ICmpInst::ICMP_EQ && LHSCC == RHSCC &&
       LHS->hasOneUse() && RHS->hasOneUse()) {
     Value *V;
@@ -1062,9 +1062,9 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
         break;
       }
       case Instruction::Add:
-        // ((A & N) + B) & AndRHS -> (A + B) & AndRHS if N&AndRHS == AndRHS.
-        // ((A | N) + B) & AndRHS -> (A + B) & AndRHS if N&AndRHS == 0
-        // ((A ^ N) + B) & AndRHS -> (A + B) & AndRHS if N&AndRHS == 0
+        // ((A & N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == AndRHS.
+        // ((A | N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
+        // ((A ^ N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
         if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, false, I))
           return BinaryOperator::CreateAnd(V, AndRHS);
         if (Value *V = FoldLogicalPlusAnd(Op0RHS, Op0LHS, AndRHS, false, I))
@@ -1072,13 +1072,13 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
         break;
 
       case Instruction::Sub:
-        // ((A & N) - B) & AndRHS -> (A - B) & AndRHS if N&AndRHS == AndRHS.
-        // ((A | N) - B) & AndRHS -> (A - B) & AndRHS if N&AndRHS == 0
-        // ((A ^ N) - B) & AndRHS -> (A - B) & AndRHS if N&AndRHS == 0
+        // ((A & N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == AndRHS.
+        // ((A | N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
+        // ((A ^ N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
         if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, true, I))
           return BinaryOperator::CreateAnd(V, AndRHS);
 
-        // (A - N) & AndRHS -> -N & AndRHS if A&AndRHS==0 and AndRHS
+        // (A - N) & AndRHS -> -N & AndRHS iff A&AndRHS==0 and AndRHS
         // has 1's for all bits that the subtraction with A might affect.
         if (Op0I->hasOneUse() && !match(Op0LHS, m_Zero())) {
           uint32_t BitWidth = AndRHSMask.getBitWidth();
@@ -1472,7 +1472,7 @@ Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
   }
 
   // (icmp ult (X + CA), C1) | (icmp eq X, C2) -> (icmp ule (X + CA), C1)
-  //   if C2 + CA == C1.
+  //   iff C2 + CA == C1.
   if (LHSCC == ICmpInst::ICMP_ULT && RHSCC == ICmpInst::ICMP_EQ) {
     ConstantInt *AddCst;
     if (match(Val, m_Add(m_Specific(Val2), m_ConstantInt(AddCst))))
@@ -1735,7 +1735,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
   if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
     ConstantInt *C1 = 0; Value *X = 0;
     // (X & C1) | C2 --> (X | C2) & (C1|C2)
-    // if (C1 & C2) == 0.
+    // iff (C1 & C2) == 0.
     if (match(Op0, m_And(m_Value(X), m_ConstantInt(C1))) &&
         (RHS->getValue() & C1->getValue()) != 0 &&
         Op0->hasOneUse()) {
@@ -1779,7 +1779,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
       return BSwap;
   }
 
-  // (X^C)|Y -> (X|Y)^C if Y&C == 0
+  // (X^C)|Y -> (X|Y)^C iff Y&C == 0
   if (Op0->hasOneUse() &&
       match(Op0, m_Xor(m_Value(A), m_ConstantInt(C1))) &&
       MaskedValueIsZero(Op1, C1->getValue())) {
@@ -1788,7 +1788,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
     return BinaryOperator::CreateXor(NOr, C1);
   }
 
-  // Y|(X^C) -> (X|Y)^C if Y&C == 0
+  // Y|(X^C) -> (X|Y)^C iff Y&C == 0
   if (Op1->hasOneUse() &&
       match(Op1, m_Xor(m_Value(A), m_ConstantInt(C1))) &&
       MaskedValueIsZero(Op0, C1->getValue())) {
@@ -1830,7 +1830,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
 
       if ((C1->getValue() & C2->getValue()) == 0) {
         // ((V | N) & C1) | (V & C2) --> (V|N) & (C1|C2)
-        // if (C1&C2) == 0 and (N&~C1) == 0
+        // iff (C1&C2) == 0 and (N&~C1) == 0
         if (match(A, m_Or(m_Value(V1), m_Value(V2))) &&
             ((V1 == B && MaskedValueIsZero(V2, ~C1->getValue())) ||  // (V|N)
              (V2 == B && MaskedValueIsZero(V1, ~C1->getValue()))))   // (N|V)
@@ -1846,7 +1846,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
                                               C1->getValue()|C2->getValue()));
 
         // ((V|C3)&C1) | ((V|C4)&C2) --> (V|C3|C4)&(C1|C2)
-        // if (C1&C2) == 0 and (C3&~C1) == 0 and (C4&~C2) == 0.
+        // iff (C1&C2) == 0 and (C3&~C1) == 0 and (C4&~C2) == 0.
         ConstantInt *C3 = 0, *C4 = 0;
         if (match(A, m_Or(m_Value(V1), m_ConstantInt(C3))) &&
             (C3->getValue() & ~C1->getValue()) == 0 &&
@@ -2146,7 +2146,7 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
           }
         }
       } else if (Op0I->getOpcode() == Instruction::Or) {
-        // (X|C1)^C2 -> X^(C1|C2) if X&~C1 == 0
+        // (X|C1)^C2 -> X^(C1|C2) iff X&~C1 == 0
         if (MaskedValueIsZero(Op0I->getOperand(0), Op0CI->getValue())) {
           Constant *NewRHS = ConstantExpr::getOr(Op0CI, RHS);
           // Anything in both C1 and C2 is known to be zero, remove it from
diff --git a/lib/Transforms/InstCombine/InstCombineCasts.cpp b/lib/Transforms/InstCombine/InstCombineCasts.cpp
index b8b61d721a..555b4428d2 100644
--- a/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -381,7 +381,7 @@ static bool CanEvaluateTruncated(Value *V, Type *Ty) {
     break;
   case Instruction::LShr:
     // If this is a truncate of a logical shr, we can truncate it to a smaller
-    // lshr if we know that the bits we would otherwise be shifting in are
+    // lshr iff we know that the bits we would otherwise be shifting in are
    // already zeros.
     if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
       uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
@@ -527,14 +527,14 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
       return ReplaceInstUsesWith(CI, In);
     }
 
-    // zext (X == 0) to i32 --> X^1 if X has only the low bit set.
-    // zext (X == 0) to i32 --> (X>>1)^1 if X has only the 2nd bit set.
-    // zext (X == 1) to i32 --> X if X has only the low bit set.
-    // zext (X == 2) to i32 --> X>>1 if X has only the 2nd bit set.
-    // zext (X != 0) to i32 --> X if X has only the low bit set.
-    // zext (X != 0) to i32 --> X>>1 if X has only the 2nd bit set.
-    // zext (X != 1) to i32 --> X^1 if X has only the low bit set.
-    // zext (X != 2) to i32 --> (X>>1)^1 if X has only the 2nd bit set.
+    // zext (X == 0) to i32 --> X^1 iff X has only the low bit set.
+    // zext (X == 0) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
+    // zext (X == 1) to i32 --> X iff X has only the low bit set.
+    // zext (X == 2) to i32 --> X>>1 iff X has only the 2nd bit set.
+    // zext (X != 0) to i32 --> X iff X has only the low bit set.
+    // zext (X != 0) to i32 --> X>>1 iff X has only the 2nd bit set.
+    // zext (X != 1) to i32 --> X^1 iff X has only the low bit set.
+    // zext (X != 2) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
     if ((Op1CV == 0 || Op1CV.isPowerOf2()) &&
         // This only works for EQ and NE
        ICI->isEquality()) {
diff --git a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index 89972f610c..a446e427e5 100644
--- a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -368,7 +368,7 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
     LI.setAlignment(EffectiveLoadAlign);
   }
 
-  // load (cast X) --> cast (load X) if safe.
+  // load (cast X) --> cast (load X) iff safe.
   if (isa<CastInst>(Op))
     if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
       return Res;
diff --git a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index 6d81d6dff8..3361a1e7fb 100644
--- a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -550,7 +550,7 @@ Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
     APInt Mask(APInt::getSignBit(I.getType()->getPrimitiveSizeInBits()));
     if (MaskedValueIsZero(Op0, Mask)) {
       if (MaskedValueIsZero(Op1, Mask)) {
-        // X sdiv Y -> X udiv Y, if X and Y don't have sign bit set
+        // X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set
         return BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
       }
@@ -692,7 +692,7 @@ Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
   if (I.getType()->isIntegerTy()) {
     APInt Mask(APInt::getSignBit(I.getType()->getPrimitiveSizeInBits()));
     if (MaskedValueIsZero(Op1, Mask) && MaskedValueIsZero(Op0, Mask)) {
-      // X srem Y -> X urem Y, if X and Y don't have sign bit set
+      // X srem Y -> X urem Y, iff X and Y don't have sign bit set
       return BinaryOperator::CreateURem(Op0, Op1, I.getName());
     }
   }
diff --git a/lib/Transforms/InstCombine/InstCombineShifts.cpp b/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 598b4d3bd8..4bb2403299 100644
--- a/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -37,7 +37,7 @@ Instruction *InstCombiner::commonShiftTransforms(BinaryOperator &I) {
     if (Instruction *Res = FoldShiftByConstant(Op0, CUI, I))
       return Res;
 
-  // X shift (A srem B) -> X shift (A and B-1) if B is a power of 2.
+  // X shift (A srem B) -> X shift (A and B-1) iff B is a power of 2.
   // Because shifts by negative values (which could occur if A were negative)
   // are undefined.
   Value *A; const APInt *B;
@@ -85,7 +85,7 @@ static bool CanEvaluateShifted(Value *V, unsigned NumBits, bool isLeftShift,
     // TODO: Check that the input bits are already zero with MaskedValueIsZero
 #if 0
     // If this is a truncate of a logical shr, we can truncate it to a smaller
-    // lshr if we know that the bits we would otherwise be shifting in are
+    // lshr iff we know that the bits we would otherwise be shifting in are
     // already zeros.
     uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
     uint32_t BitWidth = Ty->getScalarSizeInBits();
diff --git a/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index 9857f6ab5a..54be8ed3fa 100644
--- a/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -304,7 +304,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
 
     // If all of the demanded bits are known to be zero on one side or the
     // other, turn this into an *inclusive* or.
-    //    e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) if C1&C2 == 0
+    //    e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
     if ((DemandedMask & ~RHSKnownZero & ~LHSKnownZero) == 0) {
       Instruction *Or =
         BinaryOperator::CreateOr(I->getOperand(0), I->getOperand(1),
@@ -315,7 +315,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
     // If all of the demanded bits on one side are known, and all of the set
     // bits on that side are also known to be set on the other side, turn this
     // into an AND, as we know the bits will be cleared.
-    //    e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 if (C1&C2) == C2
+    //    e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
     if ((DemandedMask & (RHSKnownZero|RHSKnownOne)) == DemandedMask) { // all known
       if ((RHSKnownOne & LHSKnownOne) == RHSKnownOne) {
diff --git a/lib/Transforms/InstCombine/InstructionCombining.cpp b/lib/Transforms/InstCombine/InstructionCombining.cpp
index faa51aa0ba..ff758c40af 100644
--- a/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -1724,7 +1724,7 @@ Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) {
         continue;
       // At this point we know that LFilter has at least one element.
       if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
-        // Filter is a subset of LFilter if Filter contains only zeros (as we
+        // Filter is a subset of LFilter iff Filter contains only zeros (as we
        // already know that Filter is not longer than LFilter).
         if (isa<ConstantAggregateZero>(Filter)) {
           assert(FElts <= LElts && "Should have handled this case earlier!");
@@ -1738,7 +1738,7 @@ Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) {
       ConstantArray *LArray = cast<ConstantArray>(LFilter);
       if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
         // Since Filter is non-empty and contains only zeros, it is a subset of
-        // LFilter if LFilter contains a zero.
+        // LFilter iff LFilter contains a zero.
         assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
         for (unsigned l = 0; l != LElts; ++l)
           if (LArray->getOperand(l)->isNullValue()) {
diff --git a/lib/Transforms/Instrumentation/PathProfiling.cpp b/lib/Transforms/Instrumentation/PathProfiling.cpp
index 38f785be0b..cc27146ebc 100644
--- a/lib/Transforms/Instrumentation/PathProfiling.cpp
+++ b/lib/Transforms/Instrumentation/PathProfiling.cpp
@@ -469,7 +469,7 @@ void BLInstrumentationEdge::setIncrement(long increment) {
   _increment = increment;
 }
 
-// True if the edge has already been instrumented.
+// True iff the edge has already been instrumented.
 bool BLInstrumentationEdge::hasInstrumentation() {
   return(_hasInstrumentation);
 }
diff --git a/lib/Transforms/Utils/AddrModeMatcher.cpp b/lib/Transforms/Utils/AddrModeMatcher.cpp
index 40e151c76a..0f9dfddf57 100644
--- a/lib/Transforms/Utils/AddrModeMatcher.cpp
+++ b/lib/Transforms/Utils/AddrModeMatcher.cpp
@@ -201,7 +201,7 @@ bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode,
     break;
   }
   //case Instruction::Or:
-  //  TODO: We can handle "Or Val, Imm" if this OR is equivalent to an ADD.
+  //  TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
   //break;
   case Instruction::Mul:
   case Instruction::Shl: {
diff --git a/lib/Transforms/Utils/BreakCriticalEdges.cpp b/lib/Transforms/Utils/BreakCriticalEdges.cpp
index 2a604b642d..6b04e3d17b 100644
--- a/lib/Transforms/Utils/BreakCriticalEdges.cpp
+++ b/lib/Transforms/Utils/BreakCriticalEdges.cpp
@@ -104,7 +104,7 @@ bool llvm::isCriticalEdge(const TerminatorInst *TI, unsigned SuccNum,
       return I != E;
 
   // If AllowIdenticalEdges is true, then we allow this edge to be considered
-  // non-critical if all preds come from TI's block.
+  // non-critical iff all preds come from TI's block.
   while (I != E) {
     const BasicBlock *P = *I;
     if (P != FirstPred)
diff --git a/lib/Transforms/Vectorize/BBVectorize.cpp b/lib/Transforms/Vectorize/BBVectorize.cpp
index 8c26dd5a51..c09dcd2ea9 100644
--- a/lib/Transforms/Vectorize/BBVectorize.cpp
+++ b/lib/Transforms/Vectorize/BBVectorize.cpp
@@ -557,7 +557,7 @@ namespace {
       // Now we have a map of all of the pairable instructions and we need to
       // select the best possible pairing. A good pairing is one such that the
       // users of the pair are also paired. This defines a (directed) forest
-      // over the pairs such that two pairs are connected if the second pair
+      // over the pairs such that two pairs are connected iff the second pair
      // uses the first.
 
       // Note that it only matters that both members of the second pair use some
diff --git a/lib/VMCore/Dominators.cpp b/lib/VMCore/Dominators.cpp
index f7582028fc..77b2403d87 100644
--- a/lib/VMCore/Dominators.cpp
+++ b/lib/VMCore/Dominators.cpp
@@ -193,7 +193,7 @@ bool DominatorTree::dominates(const BasicBlockEdge &BBE,
   //      . NormalDest
   //      .
   //
-  // Given the definition of dominance, NormalDest is dominated by X if X
+  // Given the definition of dominance, NormalDest is dominated by X iff X
   // dominates all of NormalDest's predecessors (X, B, C in the example). X
   // trivially dominates itself, so we only have to find if it dominates the
   // other predecessors. Since the only way out of X is via NormalDest, X can
diff --git a/lib/VMCore/Pass.cpp b/lib/VMCore/Pass.cpp
index 2d483a09fc..994a7ffcee 100644
--- a/lib/VMCore/Pass.cpp
+++ b/lib/VMCore/Pass.cpp
@@ -258,7 +258,7 @@ namespace {
   };
 }
 
-// setPreservesCFG - This function should be called to by the pass, if they do
+// setPreservesCFG - This function should be called to by the pass, iff they do
 // not:
 //
 //  1. Add or remove basic blocks from the function
diff --git a/lib/VMCore/PassManager.cpp b/lib/VMCore/PassManager.cpp
index 476fdf0906..53f11499e4 100644
--- a/lib/VMCore/PassManager.cpp
+++ b/lib/VMCore/PassManager.cpp
@@ -1700,7 +1700,7 @@ EnableTiming("time-passes", cl::location(TimePassesIsEnabled),
 void TimingInfo::createTheTimeInfo() {
   if (!TimePassesIsEnabled || TheTimeInfo) return;
 
-  // Constructed the first time this is called, if -time-passes is enabled.
+  // Constructed the first time this is called, iff -time-passes is enabled.
   // This guarantees that the object will be constructed before static globals,
   // thus it will be destroyed before them.
   static ManagedStatic<TimingInfo> TTI;
diff --git a/lib/VMCore/Type.cpp b/lib/VMCore/Type.cpp
index 3ef47277a1..1a7a650989 100644
--- a/lib/VMCore/Type.cpp
+++ b/lib/VMCore/Type.cpp
@@ -170,7 +170,7 @@ int Type::getFPMantissaWidth() const {
 }
 
 /// isSizedDerivedType - Derived types like structures and arrays are sized
-/// if all of the members of the type are sized as well. Since asking for
+/// iff all of the members of the type are sized as well. Since asking for
 /// their size is relatively uncommon, move this operation out of line.
 bool Type::isSizedDerivedType() const {
   if (this->isIntegerTy())