author     Mon P Wang <wangmp@apple.com>    2010-04-02 18:43:02 +0000
committer  Mon P Wang <wangmp@apple.com>    2010-04-02 18:43:02 +0000
commit     e754d3fb852abdeaf910c7331eed60f6303597c1 (patch)
tree       29dd19b0d64199a333a2c057e99452ae065ed3d7 /lib
parent     e33c848fa481b038d5ad0c7c898c33b2b27ec71e (diff)
Revert r100191 since it breaks objc in clang
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@100199 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib')
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAG.cpp          47
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp   32
-rw-r--r--  lib/Target/ARM/ARMISelLowering.cpp                  9
-rw-r--r--  lib/Target/ARM/ARMISelLowering.h                    2
-rw-r--r--  lib/Target/PowerPC/PPCISelLowering.cpp               2
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp                  14
-rw-r--r--  lib/Target/X86/X86ISelLowering.h                     5
-rw-r--r--  lib/Target/XCore/XCoreISelLowering.cpp               2
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCalls.cpp     32
-rw-r--r--  lib/Transforms/Scalar/MemCpyOptimizer.cpp           55
-rw-r--r--  lib/Transforms/Scalar/ScalarReplAggregates.cpp      32
-rw-r--r--  lib/Transforms/Scalar/SimplifyLibCalls.cpp          29
-rw-r--r--  lib/Transforms/Utils/BuildLibCalls.cpp              43
-rw-r--r--  lib/Transforms/Utils/InlineFunction.cpp             11
-rw-r--r--  lib/VMCore/AutoUpgrade.cpp                          70
15 files changed, 116 insertions, 269 deletions
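
Context for the hunks below: r100191 had added an isVolatile flag (and address-space-overloaded pointer types) to the llvm.memcpy/memmove/memset lowering paths, and this revert drops that parameter again from SelectionDAG, the targets, and the transforms. The following is only a rough sketch of the restored call shape, modeled on the byval-copy helpers changed in this patch; Chain, dl, Dst, Src, and Flags are assumed to be in scope, and it is not code taken from the tree.

    // Sketch: emitting a byval-argument copy with the getMemcpy signature
    // this revert restores -- there is no isVolatile argument any more.
    SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
    SDValue Copy = DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode,
                                 Flags.getByValAlign(),
                                 /*AlwaysInline=*/false,
                                 /*DstSV=*/NULL, /*DstSVOff=*/0,
                                 /*SrcSV=*/NULL, /*SrcSVOff=*/0);
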
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index b511c018c3..0ba65ab7f9 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -3263,8 +3263,7 @@ static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
SDValue Chain, SDValue Dst,
SDValue Src, uint64_t Size,
- unsigned Align, bool isVol,
- bool AlwaysInline,
+ unsigned Align, bool AlwaysInline,
const Value *DstSV, uint64_t DstSVOff,
const Value *SrcSV, uint64_t SrcSVOff) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -3320,7 +3319,7 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
Value = getMemsetStringVal(VT, dl, DAG, TLI, Str, SrcOff);
Store = DAG.getStore(Chain, dl, Value,
getMemBasePlusOffset(Dst, DstOff, DAG),
- DstSV, DstSVOff + DstOff, isVol, false, Align);
+ DstSV, DstSVOff + DstOff, false, false, Align);
} else {
// The type might not be legal for the target. This should only happen
// if the type is smaller than a legal type, as on PPC, so the right
@@ -3331,11 +3330,11 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
assert(NVT.bitsGE(VT));
Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
getMemBasePlusOffset(Src, SrcOff, DAG),
- SrcSV, SrcSVOff + SrcOff, VT, isVol, false,
+ SrcSV, SrcSVOff + SrcOff, VT, false, false,
MinAlign(SrcAlign, SrcOff));
Store = DAG.getTruncStore(Chain, dl, Value,
getMemBasePlusOffset(Dst, DstOff, DAG),
- DstSV, DstSVOff + DstOff, VT, isVol, false,
+ DstSV, DstSVOff + DstOff, VT, false, false,
Align);
}
OutChains.push_back(Store);
@@ -3350,8 +3349,7 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
SDValue Chain, SDValue Dst,
SDValue Src, uint64_t Size,
- unsigned Align, bool isVol,
- bool AlwaysInline,
+ unsigned Align,bool AlwaysInline,
const Value *DstSV, uint64_t DstSVOff,
const Value *SrcSV, uint64_t SrcSVOff) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -3399,7 +3397,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
Value = DAG.getLoad(VT, dl, Chain,
getMemBasePlusOffset(Src, SrcOff, DAG),
- SrcSV, SrcSVOff + SrcOff, isVol, false, SrcAlign);
+ SrcSV, SrcSVOff + SrcOff, false, false, SrcAlign);
LoadValues.push_back(Value);
LoadChains.push_back(Value.getValue(1));
SrcOff += VTSize;
@@ -3414,7 +3412,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
Store = DAG.getStore(Chain, dl, LoadValues[i],
getMemBasePlusOffset(Dst, DstOff, DAG),
- DstSV, DstSVOff + DstOff, isVol, false, Align);
+ DstSV, DstSVOff + DstOff, false, false, Align);
OutChains.push_back(Store);
DstOff += VTSize;
}
@@ -3426,7 +3424,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
SDValue Chain, SDValue Dst,
SDValue Src, uint64_t Size,
- unsigned Align, bool isVol,
+ unsigned Align,
const Value *DstSV, uint64_t DstSVOff) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -3465,7 +3463,7 @@ static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
SDValue Value = getMemsetValue(Src, VT, DAG, dl);
SDValue Store = DAG.getStore(Chain, dl, Value,
getMemBasePlusOffset(Dst, DstOff, DAG),
- DstSV, DstSVOff + DstOff, isVol, false, 0);
+ DstSV, DstSVOff + DstOff, false, false, 0);
OutChains.push_back(Store);
DstOff += VTSize;
}
@@ -3476,7 +3474,7 @@ static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
SDValue Src, SDValue Size,
- unsigned Align, bool isVol, bool AlwaysInline,
+ unsigned Align, bool AlwaysInline,
const Value *DstSV, uint64_t DstSVOff,
const Value *SrcSV, uint64_t SrcSVOff) {
@@ -3490,7 +3488,7 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
ConstantSize->getZExtValue(),Align,
- isVol, false, DstSV, DstSVOff, SrcSV, SrcSVOff);
+ false, DstSV, DstSVOff, SrcSV, SrcSVOff);
if (Result.getNode())
return Result;
}
@@ -3499,7 +3497,7 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
// code. If the target chooses to do this, this is the next best.
SDValue Result =
TLI.EmitTargetCodeForMemcpy(*this, dl, Chain, Dst, Src, Size, Align,
- isVol, AlwaysInline,
+ AlwaysInline,
DstSV, DstSVOff, SrcSV, SrcSVOff);
if (Result.getNode())
return Result;
@@ -3509,12 +3507,11 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
if (AlwaysInline) {
assert(ConstantSize && "AlwaysInline requires a constant size!");
return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
- ConstantSize->getZExtValue(), Align, isVol,
- true, DstSV, DstSVOff, SrcSV, SrcSVOff);
+ ConstantSize->getZExtValue(), Align, true,
+ DstSV, DstSVOff, SrcSV, SrcSVOff);
}
// Emit a library call.
- assert(!isVol && "library memcpy does not support volatile");
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
Entry.Ty = TLI.getTargetData()->getIntPtrType(*getContext());
@@ -3535,7 +3532,7 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
SDValue Src, SDValue Size,
- unsigned Align, bool isVol,
+ unsigned Align,
const Value *DstSV, uint64_t DstSVOff,
const Value *SrcSV, uint64_t SrcSVOff) {
@@ -3549,8 +3546,8 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
SDValue Result =
getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
- ConstantSize->getZExtValue(), Align, isVol,
- false, DstSV, DstSVOff, SrcSV, SrcSVOff);
+ ConstantSize->getZExtValue(),
+ Align, false, DstSV, DstSVOff, SrcSV, SrcSVOff);
if (Result.getNode())
return Result;
}
@@ -3558,13 +3555,12 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
// Then check to see if we should lower the memmove with target-specific
// code. If the target chooses to do this, this is the next best.
SDValue Result =
- TLI.EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size, Align, isVol,
+ TLI.EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size, Align,
DstSV, DstSVOff, SrcSV, SrcSVOff);
if (Result.getNode())
return Result;
// Emit a library call.
- assert(!isVol && "library memmove does not support volatile");
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
Entry.Ty = TLI.getTargetData()->getIntPtrType(*getContext());
@@ -3585,7 +3581,7 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
SDValue Src, SDValue Size,
- unsigned Align, bool isVol,
+ unsigned Align,
const Value *DstSV, uint64_t DstSVOff) {
// Check to see if we should lower the memset to stores first.
@@ -3598,7 +3594,7 @@ SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
SDValue Result =
getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
- Align, isVol, DstSV, DstSVOff);
+ Align, DstSV, DstSVOff);
if (Result.getNode())
return Result;
}
@@ -3606,13 +3602,12 @@ SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
// Then check to see if we should lower the memset with target-specific
// code. If the target chooses to do this, this is the next best.
SDValue Result =
- TLI.EmitTargetCodeForMemset(*this, dl, Chain, Dst, Src, Size, Align, isVol,
+ TLI.EmitTargetCodeForMemset(*this, dl, Chain, Dst, Src, Size, Align,
DstSV, DstSVOff);
if (Result.getNode())
return Result;
// Emit a library call.
- assert(!isVol && "library memset does not support volatile");
const Type *IntPtrTy = TLI.getTargetData()->getIntPtrType(*getContext());
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index fabb7bd634..922c6e8b02 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -3731,50 +3731,28 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
case Intrinsic::longjmp:
return "_longjmp"+!TLI.usesUnderscoreLongJmp();
case Intrinsic::memcpy: {
- // Assert for address < 256 since we support only user defined address
- // spaces.
- assert(cast<PointerType>(I.getOperand(1)->getType())->getAddressSpace()
- < 256 &&
- cast<PointerType>(I.getOperand(2)->getType())->getAddressSpace()
- < 256 &&
- "Unknown address space");
SDValue Op1 = getValue(I.getOperand(1));
SDValue Op2 = getValue(I.getOperand(2));
SDValue Op3 = getValue(I.getOperand(3));
unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
- bool isVol = cast<ConstantInt>(I.getOperand(5))->getZExtValue();
- DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, isVol, false,
+ DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false,
I.getOperand(1), 0, I.getOperand(2), 0));
return 0;
}
case Intrinsic::memset: {
- // Assert for address < 256 since we support only user defined address
- // spaces.
- assert(cast<PointerType>(I.getOperand(1)->getType())->getAddressSpace()
- < 256 &&
- "Unknown address space");
SDValue Op1 = getValue(I.getOperand(1));
SDValue Op2 = getValue(I.getOperand(2));
SDValue Op3 = getValue(I.getOperand(3));
unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
- bool isVol = cast<ConstantInt>(I.getOperand(5))->getZExtValue();
- DAG.setRoot(DAG.getMemset(getRoot(), dl, Op1, Op2, Op3, Align, isVol,
+ DAG.setRoot(DAG.getMemset(getRoot(), dl, Op1, Op2, Op3, Align,
I.getOperand(1), 0));
return 0;
}
case Intrinsic::memmove: {
- // Assert for address < 256 since we support only user defined address
- // spaces.
- assert(cast<PointerType>(I.getOperand(1)->getType())->getAddressSpace()
- < 256 &&
- cast<PointerType>(I.getOperand(2)->getType())->getAddressSpace()
- < 256 &&
- "Unknown address space");
SDValue Op1 = getValue(I.getOperand(1));
SDValue Op2 = getValue(I.getOperand(2));
SDValue Op3 = getValue(I.getOperand(3));
unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
- bool isVol = cast<ConstantInt>(I.getOperand(5))->getZExtValue();
// If the source and destination are known to not be aliases, we can
// lower memmove as memcpy.
@@ -3783,12 +3761,12 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
Size = C->getZExtValue();
if (AA->alias(I.getOperand(1), Size, I.getOperand(2), Size) ==
AliasAnalysis::NoAlias) {
- DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, isVol,
- false, I.getOperand(1), 0, I.getOperand(2), 0));
+ DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false,
+ I.getOperand(1), 0, I.getOperand(2), 0));
return 0;
}
- DAG.setRoot(DAG.getMemmove(getRoot(), dl, Op1, Op2, Op3, Align, isVol,
+ DAG.setRoot(DAG.getMemmove(getRoot(), dl, Op1, Op2, Op3, Align,
I.getOperand(1), 0, I.getOperand(2), 0));
return 0;
}
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 77fb0c3cdb..b6c81f6910 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -861,8 +861,7 @@ CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
DebugLoc dl) {
SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
- /*isVolatile=*/false, /*AlwaysInline=*/false,
- NULL, 0, NULL, 0);
+ /*AlwaysInline=*/false, NULL, 0, NULL, 0);
}
/// LowerMemOpCallTo - Store the argument to the stack.
@@ -2054,7 +2053,7 @@ ARMTargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
SDValue Chain,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
- bool isVolatile, bool AlwaysInline,
+ bool AlwaysInline,
const Value *DstSV, uint64_t DstSVOff,
const Value *SrcSV, uint64_t SrcSVOff){
// Do repeated 4-byte loads and stores. To be improved.
@@ -2090,7 +2089,7 @@ ARMTargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
Loads[i] = DAG.getLoad(VT, dl, Chain,
DAG.getNode(ISD::ADD, dl, MVT::i32, Src,
DAG.getConstant(SrcOff, MVT::i32)),
- SrcSV, SrcSVOff + SrcOff, isVolatile, false, 0);
+ SrcSV, SrcSVOff + SrcOff, false, false, 0);
TFOps[i] = Loads[i].getValue(1);
SrcOff += VTSize;
}
@@ -2101,7 +2100,7 @@ ARMTargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
TFOps[i] = DAG.getStore(Chain, dl, Loads[i],
DAG.getNode(ISD::ADD, dl, MVT::i32, Dst,
DAG.getConstant(DstOff, MVT::i32)),
- DstSV, DstSVOff + DstOff, isVolatile, false, 0);
+ DstSV, DstSVOff + DstOff, false, false, 0);
DstOff += VTSize;
}
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);
diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h
index fa33ad3075..f8f8adc70a 100644
--- a/lib/Target/ARM/ARMISelLowering.h
+++ b/lib/Target/ARM/ARMISelLowering.h
@@ -305,7 +305,7 @@ namespace llvm {
SDValue Chain,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
- bool isVolatile, bool AlwaysInline,
+ bool AlwaysInline,
const Value *DstSV, uint64_t DstSVOff,
const Value *SrcSV, uint64_t SrcSVOff);
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index 32a372d036..e67666d804 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -2392,7 +2392,7 @@ CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
DebugLoc dl) {
SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
- false, false, NULL, 0, NULL, 0);
+ false, NULL, 0, NULL, 0);
}
/// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 5a52ac2b76..b24d5a1707 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -1433,8 +1433,7 @@ CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
DebugLoc dl) {
SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
- /*isVolatile*/false, /*AlwaysInline=*/true,
- NULL, 0, NULL, 0);
+ /*AlwaysInline=*/true, NULL, 0, NULL, 0);
}
/// IsTailCallConvention - Return true if the calling convention is one that
@@ -6551,7 +6550,6 @@ X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
SDValue Chain,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
- bool isVolatile,
const Value *DstSV,
uint64_t DstSVOff) {
ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
@@ -6680,7 +6678,7 @@ X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
DAG.getConstant(Offset, AddrVT)),
Src,
DAG.getConstant(BytesLeft, SizeVT),
- Align, isVolatile, DstSV, DstSVOff + Offset);
+ Align, DstSV, DstSVOff + Offset);
}
// TODO: Use a Tokenfactor, as in memcpy, instead of a single chain.
@@ -6691,7 +6689,7 @@ SDValue
X86TargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
SDValue Chain, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
- bool isVolatile, bool AlwaysInline,
+ bool AlwaysInline,
const Value *DstSV, uint64_t DstSVOff,
const Value *SrcSV, uint64_t SrcSVOff) {
// This requires the copy size to be a constant, preferrably
@@ -6750,7 +6748,7 @@ X86TargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
DAG.getNode(ISD::ADD, dl, SrcVT, Src,
DAG.getConstant(Offset, SrcVT)),
DAG.getConstant(BytesLeft, SizeVT),
- Align, isVolatile, AlwaysInline,
+ Align, AlwaysInline,
DstSV, DstSVOff + Offset,
SrcSV, SrcSVOff + Offset));
}
@@ -6833,8 +6831,8 @@ SDValue X86TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) {
DebugLoc dl = Op.getDebugLoc();
return DAG.getMemcpy(Chain, dl, DstPtr, SrcPtr,
- DAG.getIntPtrConstant(24), 8, /*isVolatile*/false,
- false, DstSV, 0, SrcSV, 0);
+ DAG.getIntPtrConstant(24), 8, false,
+ DstSV, 0, SrcSV, 0);
}
SDValue
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index 6c9a7a5980..4549cba6f3 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -737,13 +737,12 @@ namespace llvm {
SDValue Chain,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
- bool isVolatile,
const Value *DstSV, uint64_t DstSVOff);
SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
SDValue Chain,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
- bool isVolatile, bool AlwaysInline,
+ bool AlwaysInline,
const Value *DstSV, uint64_t DstSVOff,
const Value *SrcSV, uint64_t SrcSVOff);
@@ -753,7 +752,7 @@ namespace llvm {
/// block, the number of args, and whether or not the second arg is
/// in memory or not.
MachineBasicBlock *EmitPCMP(MachineInstr *BInstr, MachineBasicBlock *BB,
- unsigned argNum, bool inMem) const;
+ unsigned argNum, bool inMem) const;
/// Utility function to emit atomic bitwise operations (and, or, xor).
/// It takes the bitwise instruction to expand, the associated machine basic
diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp
index 27e5233246..bf1a457627 100644
--- a/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/lib/Target/XCore/XCoreISelLowering.cpp
@@ -1443,7 +1443,7 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
return DAG.getMemmove(Chain, dl, ST->getBasePtr(),
LD->getBasePtr(),
DAG.getConstant(StoreBits/8, MVT::i32),
- Alignment, false, ST->getSrcValue(),
+ Alignment, ST->getSrcValue(),
ST->getSrcValueOffset(), LD->getSrcValue(),
LD->getSrcValueOffset());
}
diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp
index e025b05376..76c815da86 100644
--- a/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -136,14 +136,8 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
return 0; // If not 1/2/4/8 bytes, exit.
// Use an integer load+store unless we can find something better.
- unsigned SrcAddrSp =
- cast<PointerType>(MI->getOperand(2)->getType())->getAddressSpace();
- unsigned DstAddrSp =
- cast<PointerType>(MI->getOperand(1)->getType())->getAddressSpace();
-
- const IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
- Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
- Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);
+ Type *NewPtrTy =
+ PointerType::getUnqual(IntegerType::get(MI->getContext(), Size<<3));
// Memcpy forces the use of i8* for the source and destination. That means
// that if you're using memcpy to move one double around, you'll get a cast
@@ -173,10 +167,8 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
break;
}
- if (SrcETy->isSingleValueType()) {
- NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
- NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);
- }
+ if (SrcETy->isSingleValueType())
+ NewPtrTy = PointerType::getUnqual(SrcETy);
}
}
@@ -186,12 +178,11 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
SrcAlign = std::max(SrcAlign, CopyAlign);
DstAlign = std::max(DstAlign, CopyAlign);
- Value *Src = Builder->CreateBitCast(MI->getOperand(2), NewSrcPtrTy);
- Value *Dest = Builder->CreateBitCast(MI->getOperand(1), NewDstPtrTy);
- Instruction *L = new LoadInst(Src, "tmp", MI->isVolatile(), SrcAlign);
+ Value *Src = Builder->CreateBitCast(MI->getOperand(2), NewPtrTy);
+ Value *Dest = Builder->CreateBitCast(MI->getOperand(1), NewPtrTy);
+ Instruction *L = new LoadInst(Src, "tmp", false, SrcAlign);
InsertNewInstBefore(L, *MI);
- InsertNewInstBefore(new StoreInst(L, Dest, MI->isVolatile(), DstAlign),
- *MI);
+ InsertNewInstBefore(new StoreInst(L, Dest, false, DstAlign), *MI);
// Set the size of the copy to 0, it will be deleted on the next iteration.
MI->setOperand(3, Constant::getNullValue(MemOpLength->getType()));
@@ -284,11 +275,10 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
if (GVSrc->isConstant()) {
Module *M = CI.getParent()->getParent()->getParent();
Intrinsic::ID MemCpyID = Intrinsic::memcpy;
- const Type *Tys[3] = { CI.getOperand(1)->getType(),
- CI.getOperand(2)->getType(),
- CI.getOperand(3)->getType() };
+ const Type *Tys[1];
+ Tys[0] = CI.getOperand(3)->getType();
CI.setOperand(0,
- Intrinsic::getDeclaration(M, MemCpyID, Tys, 3));
+ Intrinsic::getDeclaration(M, MemCpyID, Tys, 1));
Changed = true;
}
}
diff --git a/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 3b305ae766..62e2977058 100644
--- a/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -413,6 +413,7 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
// interesting as a small compile-time optimization.
Ranges.addStore(0, SI);
+ Function *MemSetF = 0;
// Now that we have full information about ranges, loop over the ranges and
// emit memset's for anything big enough to be worthwhile.
@@ -432,40 +433,29 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
// memset block. This ensure that the memset is dominated by any addressing
// instruction needed by the start of the block.
BasicBlock::iterator InsertPt = BI;
-
+
+ if (MemSetF == 0) {
+ const Type *Ty = Type::getInt64Ty(Context);
+ MemSetF = Intrinsic::getDeclaration(M, Intrinsic::memset, &Ty, 1);
+ }
+
// Get the starting pointer of the block.
StartPtr = Range.StartPtr;
-
- // Determine alignment
- unsigned Alignment = Range.Alignment;
- if (Alignment == 0) {
- const Type *EltType =
- cast<PointerType>(StartPtr->getType())->getElementType();
- Alignment = TD->getABITypeAlignment(EltType);
- }
-
+
// Cast the start ptr to be i8* as memset requires.
- const PointerType* StartPTy = cast<PointerType>(StartPtr->getType());
- const PointerType *i8Ptr = Type::getInt8PtrTy(Context,
- StartPTy->getAddressSpace());
- if (StartPTy!= i8Ptr)
+ const Type *i8Ptr = Type::getInt8PtrTy(Context);
+ if (StartPtr->getType() != i8Ptr)
StartPtr = new BitCastInst(StartPtr, i8Ptr, StartPtr->getName(),
InsertPt);
-
+
Value *Ops[] = {
StartPtr, ByteVal, // Start, value
// size
ConstantInt::get(Type::getInt64Ty(Context), Range.End-Range.Start),
// align
- ConstantInt::get(Type::getInt32Ty(Context), Alignment),
- // volatile
- ConstantInt::get(Type::getInt1Ty(Context), 0),
+ ConstantInt::get(Type::getInt32Ty(Context), Range.Alignment)
};
- const Type *Tys[] = { Ops[0]->getType(), Ops[2]->getType() };
-
- Function *MemSetF = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys, 2);
-
- Value *C = CallInst::Create(MemSetF, Ops, Ops+5, "", InsertPt);
+ Value *C = CallInst::Create(MemSetF, Ops, Ops+4, "", InsertPt);
DEBUG(dbgs() << "Replace stores:\n";
for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
dbgs() << *Range.TheStores[i];
@@ -690,19 +680,16 @@ bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
return false;
// If all checks passed, then we can transform these memcpy's
- const Type *ArgTys[3] = { M->getRawDest()->getType(),
- MDep->getRawSource()->getType(),
- M->getLength()->getType() };
+ const Type *Ty = M->getLength()->getType();
Function *MemCpyFun = Intrinsic::getDeclaration(
M->getParent()->getParent()->getParent(),
- M->getIntrinsicID(), ArgTys, 3);
+ M->getIntrinsicID(), &Ty, 1);
- Value *Args[5] = {
- M->getRawDest(), MDep->getRawSource(), M->getLength(),
- M->getAlignmentCst(), M->getVolatileCst()
+ Value *Args[4] = {
+ M->getRawDest(), MDep->getRawSource(), M->getLength(), M->getAlignmentCst()
};
- CallInst *C = CallInst::Create(MemCpyFun, Args, Args+5, "", M);
+ CallInst *C = CallInst::Create(MemCpyFun, Args, Args+4, "", M);
// If C and M don't interfere, then this is a valid transformation. If they
@@ -741,10 +728,8 @@ bool MemCpyOpt::processMemMove(MemMoveInst *M) {
// If not, then we know we can transform this.
Module *Mod = M->getParent()->getParent()->getParent();
- const Type *ArgTys[3] = { M->getRawDest()->getType(),
- M->getRawSource()->getType(),
- M->getLength()->getType() };
- M->setOperand(0,Intrinsic::getDeclaration(Mod, Intrinsic::memcpy, ArgTys, 3));
+ const Type *Ty = M->getLength()->getType();
+ M->setOperand(0, Intrinsic::getDeclaration(Mod, Intrinsic::memcpy, &Ty, 1));
// MemDep may have over conservative information about this instruction, just
// conservatively flush it from the cache.
diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index 6211beb70b..bbe6270655 100644
--- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -858,17 +858,8 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
EltPtr = new BitCastInst(EltPtr, BytePtrTy, EltPtr->getName(), MI);
// Cast the other pointer (if we have one) to BytePtrTy.
- if (OtherElt && OtherElt->getType() != BytePtrTy) {
- // Preserve address space of OtherElt
- const PointerType* OtherPTy = cast<PointerType>(OtherElt->getType());
- const PointerType* PTy = cast<PointerType>(BytePtrTy);
- if (OtherPTy->getElementType() != PTy->getElementType()) {
- Type *NewOtherPTy = PointerType::get(PTy->getElementType(),
- OtherPTy->getAddressSpace());
- OtherElt = new BitCastInst(OtherElt, NewOtherPTy,
- OtherElt->getNameStr(), MI);
- }
- }
+ if (OtherElt && OtherElt->getType() != BytePtrTy)
+ OtherElt = new BitCastInst(OtherElt, BytePtrTy, OtherElt->getName(), MI);
unsigned EltSize = TD->getTypeAllocSize(EltTy);
@@ -879,28 +870,17 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
SROADest ? OtherElt : EltPtr, // Src ptr
ConstantInt::get(MI->getOperand(3)->getType(), EltSize), // Size
// Align
- ConstantInt::get(Type::getInt32Ty(MI->getContext()), OtherEltAlign),
- MI->getVolatileCst()
+ ConstantInt::get(Type::getInt32Ty(MI->getContext()), OtherEltAlign)
};
- // In case we fold the address space overloaded memcpy of A to B
- // with memcpy of B to C, change the function to be a memcpy of A to C.
- const Type *Tys[] = { Ops[0]->getType(), Ops[1]->getType(),
- Ops[2]->getType() };
- Module *M = MI->getParent()->getParent()->getParent();
- TheFn = Intrinsic::getDeclaration(M, MI->getIntrinsicID(), Tys, 3);
- CallInst::Create(TheFn, Ops, Ops + 5, "", MI);
+ CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
} else {
assert(isa<MemSetInst>(MI));
Value *Ops[] = {
EltPtr, MI->getOperand(2), // Dest, Value,
ConstantInt::get(MI->getOperand(3)->getType(), EltSize), // Size
- Zero, // Align
- ConstantInt::get(Type::getInt1Ty(MI->getContext()), 0) // isVolatile
+ Zero // Align
};
- const Type *Tys[] = { Ops[0]->getType(), Ops[2]->getType() };
- Module *M = MI->getParent()->getParent()->getParent();
- TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys, 2);
- CallInst::Create(TheFn, Ops, Ops + 5, "", MI);
+ CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
}
}
DeadInsts.push_back(MI);
diff --git a/lib/Transforms/Scalar/SimplifyLibCalls.cpp b/lib/Transforms/Scalar/SimplifyLibCalls.cpp
index b053cfc3b4..5941ea6571 100644
--- a/