author     Mon P Wang <wangmp@apple.com>    2010-03-30 20:55:56 +0000
committer  Mon P Wang <wangmp@apple.com>    2010-03-30 20:55:56 +0000
commit     808bab0169ab7d2e8dfdc72dd2c991cd8ff2396d (patch)
tree       38c7008c1e4c69ab277cae82ffd341ce3eca121f /lib
parent     04e3b1ef788cfac266896c6e89050c4ff60114e2 (diff)
Added support for address spaces and an isVolatile field to memcpy, memmove, and memset,
e.g., llvm.memcpy.i32(i8*, i8*, i32, i32) -> llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1). An update of LangRef will occur in a subsequent checkin.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@99928 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib')
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAG.cpp        | 49
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 32
-rw-r--r--  lib/Target/ARM/ARMISelLowering.cpp               |  9
-rw-r--r--  lib/Target/ARM/ARMISelLowering.h                 |  2
-rw-r--r--  lib/Target/PowerPC/PPCISelLowering.cpp           |  2
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp               | 14
-rw-r--r--  lib/Target/X86/X86ISelLowering.h                 |  5
-rw-r--r--  lib/Target/XCore/XCoreISelLowering.cpp           |  2
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCalls.cpp  | 32
-rw-r--r--  lib/Transforms/Scalar/MemCpyOptimizer.cpp        | 55
-rw-r--r--  lib/Transforms/Scalar/ScalarReplAggregates.cpp   | 32
-rw-r--r--  lib/Transforms/Scalar/SimplifyLibCalls.cpp       | 29
-rw-r--r--  lib/Transforms/Utils/BuildLibCalls.cpp           | 43
-rw-r--r--  lib/Transforms/Utils/InlineFunction.cpp          | 11
-rw-r--r--  lib/VMCore/AutoUpgrade.cpp                       | 70
15 files changed, 270 insertions, 117 deletions
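As the commit message notes, the memory intrinsics are now overloaded on the destination pointer, source pointer, and length types, and carry a trailing i1 isvolatile operand. The following is a minimal sketch (not part of this patch) of how a client would now build such a call, in the spirit of the EmitMemCpy change in lib/Transforms/Utils/BuildLibCalls.cpp below; the function name and the Dst/Src/Len values are illustrative assumptions.

// Sketch only: build a call to the new five-operand, triple-overloaded
// llvm.memcpy. M, B, Dst, Src, and Len are assumed to exist in the caller.
#include "llvm/Constants.h"
#include "llvm/Intrinsics.h"
#include "llvm/Module.h"
#include "llvm/Support/IRBuilder.h"
using namespace llvm;

static Value *EmitVolatileAwareMemCpy(Module *M, IRBuilder<> &B,
                                      Value *Dst, Value *Src, Value *Len,
                                      unsigned Align, bool isVolatile) {
  // The intrinsic is overloaded on both pointer types and the length type,
  // so pointers in non-default address spaces keep their address space.
  const Type *ArgTys[3] = { Dst->getType(), Src->getType(), Len->getType() };
  Value *MemCpy = Intrinsic::getDeclaration(M, Intrinsic::memcpy, ArgTys, 3);
  // Five operands: dst, src, len, align (i32), and the new isvolatile (i1).
  return B.CreateCall5(MemCpy, Dst, Src, Len,
                       ConstantInt::get(B.getInt32Ty(), Align),
                       ConstantInt::get(B.getInt1Ty(), isVolatile));
}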
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 4a207ede91..a7f762ce73 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -3275,7 +3275,8 @@ bool MeetsMaxMemopRequirement(std::vector<EVT> &MemOps,
static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
SDValue Chain, SDValue Dst,
SDValue Src, uint64_t Size,
- unsigned Align, bool AlwaysInline,
+ unsigned Align, bool isVol,
+ bool AlwaysInline,
const Value *DstSV, uint64_t DstSVOff,
const Value *SrcSV, uint64_t SrcSVOff) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -3312,7 +3313,7 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
Value = getMemsetStringVal(VT, dl, DAG, TLI, Str, SrcOff);
Store = DAG.getStore(Chain, dl, Value,
getMemBasePlusOffset(Dst, DstOff, DAG),
- DstSV, DstSVOff + DstOff, false, false, DstAlign);
+ DstSV, DstSVOff + DstOff, isVol, false, DstAlign);
} else {
// The type might not be legal for the target. This should only happen
// if the type is smaller than a legal type, as on PPC, so the right
@@ -3323,10 +3324,10 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
assert(NVT.bitsGE(VT));
Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
getMemBasePlusOffset(Src, SrcOff, DAG),
- SrcSV, SrcSVOff + SrcOff, VT, false, false, Align);
+ SrcSV, SrcSVOff + SrcOff, VT, isVol, false, Align);
Store = DAG.getTruncStore(Chain, dl, Value,
getMemBasePlusOffset(Dst, DstOff, DAG),
- DstSV, DstSVOff + DstOff, VT, false, false,
+ DstSV, DstSVOff + DstOff, VT, isVol, false,
DstAlign);
}
OutChains.push_back(Store);
@@ -3341,7 +3342,8 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
SDValue Chain, SDValue Dst,
SDValue Src, uint64_t Size,
- unsigned Align, bool AlwaysInline,
+ unsigned Align, bool isVol,
+ bool AlwaysInline,
const Value *DstSV, uint64_t DstSVOff,
const Value *SrcSV, uint64_t SrcSVOff){
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -3372,7 +3374,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
Value = DAG.getLoad(VT, dl, Chain,
getMemBasePlusOffset(Src, SrcOff, DAG),
- SrcSV, SrcSVOff + SrcOff, false, false, Align);
+ SrcSV, SrcSVOff + SrcOff, isVol, false, Align);
LoadValues.push_back(Value);
LoadChains.push_back(Value.getValue(1));
SrcOff += VTSize;
@@ -3387,7 +3389,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
Store = DAG.getStore(Chain, dl, LoadValues[i],
getMemBasePlusOffset(Dst, DstOff, DAG),
- DstSV, DstSVOff + DstOff, false, false, DstAlign);
+ DstSV, DstSVOff + DstOff, isVol, false, DstAlign);
OutChains.push_back(Store);
DstOff += VTSize;
}
@@ -3399,7 +3401,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
SDValue Chain, SDValue Dst,
SDValue Src, uint64_t Size,
- unsigned Align,
+ unsigned Align, bool isVol,
const Value *DstSV, uint64_t DstSVOff) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -3422,7 +3424,7 @@ static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
SDValue Value = getMemsetValue(Src, VT, DAG, dl);
SDValue Store = DAG.getStore(Chain, dl, Value,
getMemBasePlusOffset(Dst, DstOff, DAG),
- DstSV, DstSVOff + DstOff, false, false, 0);
+ DstSV, DstSVOff + DstOff, isVol, false, 0);
OutChains.push_back(Store);
DstOff += VTSize;
}
@@ -3433,7 +3435,7 @@ static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
SDValue Src, SDValue Size,
- unsigned Align, bool AlwaysInline,
+ unsigned Align, bool isVol, bool AlwaysInline,
const Value *DstSV, uint64_t DstSVOff,
const Value *SrcSV, uint64_t SrcSVOff) {
@@ -3447,8 +3449,8 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
SDValue Result =
getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
- ConstantSize->getZExtValue(),
- Align, false, DstSV, DstSVOff, SrcSV, SrcSVOff);
+ ConstantSize->getZExtValue(), Align, isVol,
+ false, DstSV, DstSVOff, SrcSV, SrcSVOff);
if (Result.getNode())
return Result;
}
@@ -3457,7 +3459,7 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
// code. If the target chooses to do this, this is the next best.
SDValue Result =
TLI.EmitTargetCodeForMemcpy(*this, dl, Chain, Dst, Src, Size, Align,
- AlwaysInline,
+ isVol, AlwaysInline,
DstSV, DstSVOff, SrcSV, SrcSVOff);
if (Result.getNode())
return Result;
@@ -3467,11 +3469,12 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
if (AlwaysInline) {
assert(ConstantSize && "AlwaysInline requires a constant size!");
return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
- ConstantSize->getZExtValue(), Align, true,
- DstSV, DstSVOff, SrcSV, SrcSVOff);
+ ConstantSize->getZExtValue(), Align, isVol,
+ true, DstSV, DstSVOff, SrcSV, SrcSVOff);
}
// Emit a library call.
+ assert(!isVol && "library memcpy does not support volatile");
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
Entry.Ty = TLI.getTargetData()->getIntPtrType(*getContext());
@@ -3492,7 +3495,7 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
SDValue Src, SDValue Size,
- unsigned Align,
+ unsigned Align, bool isVol,
const Value *DstSV, uint64_t DstSVOff,
const Value *SrcSV, uint64_t SrcSVOff) {
@@ -3506,8 +3509,8 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
SDValue Result =
getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
- ConstantSize->getZExtValue(),
- Align, false, DstSV, DstSVOff, SrcSV, SrcSVOff);
+ ConstantSize->getZExtValue(), Align, isVol,
+ false, DstSV, DstSVOff, SrcSV, SrcSVOff);
if (Result.getNode())
return Result;
}
@@ -3515,12 +3518,13 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
// Then check to see if we should lower the memmove with target-specific
// code. If the target chooses to do this, this is the next best.
SDValue Result =
- TLI.EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size, Align,
+ TLI.EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size, Align, isVol,
DstSV, DstSVOff, SrcSV, SrcSVOff);
if (Result.getNode())
return Result;
// Emit a library call.
+ assert(!isVol && "library memmove does not support volatile");
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
Entry.Ty = TLI.getTargetData()->getIntPtrType(*getContext());
@@ -3541,7 +3545,7 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
SDValue Src, SDValue Size,
- unsigned Align,
+ unsigned Align, bool isVol,
const Value *DstSV, uint64_t DstSVOff) {
// Check to see if we should lower the memset to stores first.
@@ -3554,7 +3558,7 @@ SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
SDValue Result =
getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
- Align, DstSV, DstSVOff);
+ Align, isVol, DstSV, DstSVOff);
if (Result.getNode())
return Result;
}
@@ -3562,12 +3566,13 @@ SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
// Then check to see if we should lower the memset with target-specific
// code. If the target chooses to do this, this is the next best.
SDValue Result =
- TLI.EmitTargetCodeForMemset(*this, dl, Chain, Dst, Src, Size, Align,
+ TLI.EmitTargetCodeForMemset(*this, dl, Chain, Dst, Src, Size, Align, isVol,
DstSV, DstSVOff);
if (Result.getNode())
return Result;
// Emit a library call.
+ assert(!isVol && "library memset does not support volatile");
const Type *IntPtrTy = TLI.getTargetData()->getIntPtrType(*getContext());
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 7a0daf69c8..3184ed7eba 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -3731,28 +3731,50 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
case Intrinsic::longjmp:
return "_longjmp"+!TLI.usesUnderscoreLongJmp();
case Intrinsic::memcpy: {
+ // Assert for address < 256 since we support only user defined address
+ // spaces.
+ assert(cast<PointerType>(I.getOperand(1)->getType())->getAddressSpace()
+ < 256 &&
+ cast<PointerType>(I.getOperand(2)->getType())->getAddressSpace()
+ < 256 &&
+ "Unknown address space");
SDValue Op1 = getValue(I.getOperand(1));
SDValue Op2 = getValue(I.getOperand(2));
SDValue Op3 = getValue(I.getOperand(3));
unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
- DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false,
+ bool isVol = cast<ConstantInt>(I.getOperand(5))->getZExtValue();
+ DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, isVol, false,
I.getOperand(1), 0, I.getOperand(2), 0));
return 0;
}
case Intrinsic::memset: {
+ // Assert for address < 256 since we support only user defined address
+ // spaces.
+ assert(cast<PointerType>(I.getOperand(1)->getType())->getAddressSpace()
+ < 256 &&
+ "Unknown address space");
SDValue Op1 = getValue(I.getOperand(1));
SDValue Op2 = getValue(I.getOperand(2));
SDValue Op3 = getValue(I.getOperand(3));
unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
- DAG.setRoot(DAG.getMemset(getRoot(), dl, Op1, Op2, Op3, Align,
+ bool isVol = cast<ConstantInt>(I.getOperand(5))->getZExtValue();
+ DAG.setRoot(DAG.getMemset(getRoot(), dl, Op1, Op2, Op3, Align, isVol,
I.getOperand(1), 0));
return 0;
}
case Intrinsic::memmove: {
+ // Assert for address < 256 since we support only user defined address
+ // spaces.
+ assert(cast<PointerType>(I.getOperand(1)->getType())->getAddressSpace()
+ < 256 &&
+ cast<PointerType>(I.getOperand(2)->getType())->getAddressSpace()
+ < 256 &&
+ "Unknown address space");
SDValue Op1 = getValue(I.getOperand(1));
SDValue Op2 = getValue(I.getOperand(2));
SDValue Op3 = getValue(I.getOperand(3));
unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
+ bool isVol = cast<ConstantInt>(I.getOperand(5))->getZExtValue();
// If the source and destination are known to not be aliases, we can
// lower memmove as memcpy.
@@ -3761,12 +3783,12 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
Size = C->getZExtValue();
if (AA->alias(I.getOperand(1), Size, I.getOperand(2), Size) ==
AliasAnalysis::NoAlias) {
- DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false,
- I.getOperand(1), 0, I.getOperand(2), 0));
+ DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, isVol,
+ false, I.getOperand(1), 0, I.getOperand(2), 0));
return 0;
}
- DAG.setRoot(DAG.getMemmove(getRoot(), dl, Op1, Op2, Op3, Align,
+ DAG.setRoot(DAG.getMemmove(getRoot(), dl, Op1, Op2, Op3, Align, isVol,
I.getOperand(1), 0, I.getOperand(2), 0));
return 0;
}
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index b6c81f6910..77fb0c3cdb 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -861,7 +861,8 @@ CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
DebugLoc dl) {
SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
- /*AlwaysInline=*/false, NULL, 0, NULL, 0);
+ /*isVolatile=*/false, /*AlwaysInline=*/false,
+ NULL, 0, NULL, 0);
}
/// LowerMemOpCallTo - Store the argument to the stack.
@@ -2053,7 +2054,7 @@ ARMTargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
SDValue Chain,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
- bool AlwaysInline,
+ bool isVolatile, bool AlwaysInline,
const Value *DstSV, uint64_t DstSVOff,
const Value *SrcSV, uint64_t SrcSVOff){
// Do repeated 4-byte loads and stores. To be improved.
@@ -2089,7 +2090,7 @@ ARMTargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
Loads[i] = DAG.getLoad(VT, dl, Chain,
DAG.getNode(ISD::ADD, dl, MVT::i32, Src,
DAG.getConstant(SrcOff, MVT::i32)),
- SrcSV, SrcSVOff + SrcOff, false, false, 0);
+ SrcSV, SrcSVOff + SrcOff, isVolatile, false, 0);
TFOps[i] = Loads[i].getValue(1);
SrcOff += VTSize;
}
@@ -2100,7 +2101,7 @@ ARMTargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
TFOps[i] = DAG.getStore(Chain, dl, Loads[i],
DAG.getNode(ISD::ADD, dl, MVT::i32, Dst,
DAG.getConstant(DstOff, MVT::i32)),
- DstSV, DstSVOff + DstOff, false, false, 0);
+ DstSV, DstSVOff + DstOff, isVolatile, false, 0);
DstOff += VTSize;
}
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);
diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h
index f8f8adc70a..fa33ad3075 100644
--- a/lib/Target/ARM/ARMISelLowering.h
+++ b/lib/Target/ARM/ARMISelLowering.h
@@ -305,7 +305,7 @@ namespace llvm {
SDValue Chain,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
- bool AlwaysInline,
+ bool isVolatile, bool AlwaysInline,
const Value *DstSV, uint64_t DstSVOff,
const Value *SrcSV, uint64_t SrcSVOff);
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index 2c072c1290..24b8442c7d 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -2392,7 +2392,7 @@ CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
DebugLoc dl) {
SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
- false, NULL, 0, NULL, 0);
+ false, false, NULL, 0, NULL, 0);
}
/// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index b94f76efa6..2cfd2d4db6 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -1422,7 +1422,8 @@ CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
DebugLoc dl) {
SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
- /*AlwaysInline=*/true, NULL, 0, NULL, 0);
+ /*isVolatile*/false, /*AlwaysInline=*/true,
+ NULL, 0, NULL, 0);
}
/// IsTailCallConvention - Return true if the calling convention is one that
@@ -6539,6 +6540,7 @@ X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
SDValue Chain,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
+ bool isVolatile,
const Value *DstSV,
uint64_t DstSVOff) {
ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
@@ -6667,7 +6669,7 @@ X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
DAG.getConstant(Offset, AddrVT)),
Src,
DAG.getConstant(BytesLeft, SizeVT),
- Align, DstSV, DstSVOff + Offset);
+ Align, isVolatile, DstSV, DstSVOff + Offset);
}
// TODO: Use a Tokenfactor, as in memcpy, instead of a single chain.
@@ -6678,7 +6680,7 @@ SDValue
X86TargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
SDValue Chain, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
- bool AlwaysInline,
+ bool isVolatile, bool AlwaysInline,
const Value *DstSV, uint64_t DstSVOff,
const Value *SrcSV, uint64_t SrcSVOff) {
// This requires the copy size to be a constant, preferrably
@@ -6737,7 +6739,7 @@ X86TargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
DAG.getNode(ISD::ADD, dl, SrcVT, Src,
DAG.getConstant(Offset, SrcVT)),
DAG.getConstant(BytesLeft, SizeVT),
- Align, AlwaysInline,
+ Align, isVolatile, AlwaysInline,
DstSV, DstSVOff + Offset,
SrcSV, SrcSVOff + Offset));
}
@@ -6820,8 +6822,8 @@ SDValue X86TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) {
DebugLoc dl = Op.getDebugLoc();
return DAG.getMemcpy(Chain, dl, DstPtr, SrcPtr,
- DAG.getIntPtrConstant(24), 8, false,
- DstSV, 0, SrcSV, 0);
+ DAG.getIntPtrConstant(24), 8, /*isVolatile*/false,
+ false, DstSV, 0, SrcSV, 0);
}
SDValue
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index 46fa3cefdc..52e548e822 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -737,12 +737,13 @@ namespace llvm {
SDValue Chain,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
+ bool isVolatile,
const Value *DstSV, uint64_t DstSVOff);
SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
SDValue Chain,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
- bool AlwaysInline,
+ bool isVolatile, bool AlwaysInline,
const Value *DstSV, uint64_t DstSVOff,
const Value *SrcSV, uint64_t SrcSVOff);
@@ -752,7 +753,7 @@ namespace llvm {
/// block, the number of args, and whether or not the second arg is
/// in memory or not.
MachineBasicBlock *EmitPCMP(MachineInstr *BInstr, MachineBasicBlock *BB,
- unsigned argNum, bool inMem) const;
+ unsigned argNum, bool inMem) const;
/// Utility function to emit atomic bitwise operations (and, or, xor).
/// It takes the bitwise instruction to expand, the associated machine basic
diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp
index bf1a457627..27e5233246 100644
--- a/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/lib/Target/XCore/XCoreISelLowering.cpp
@@ -1443,7 +1443,7 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
return DAG.getMemmove(Chain, dl, ST->getBasePtr(),
LD->getBasePtr(),
DAG.getConstant(StoreBits/8, MVT::i32),
- Alignment, ST->getSrcValue(),
+ Alignment, false, ST->getSrcValue(),
ST->getSrcValueOffset(), LD->getSrcValue(),
LD->getSrcValueOffset());
}
diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 76c815da86..e025b05376 100644
--- a/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -136,8 +136,14 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
return 0; // If not 1/2/4/8 bytes, exit.
// Use an integer load+store unless we can find something better.
- Type *NewPtrTy =
- PointerType::getUnqual(IntegerType::get(MI->getContext(), Size<<3));
+ unsigned SrcAddrSp =
+ cast<PointerType>(MI->getOperand(2)->getType())->getAddressSpace();
+ unsigned DstAddrSp =
+ cast<PointerType>(MI->getOperand(1)->getType())->getAddressSpace();
+
+ const IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
+ Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
+ Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);
// Memcpy forces the use of i8* for the source and destination. That means
// that if you're using memcpy to move one double around, you'll get a cast
@@ -167,8 +173,10 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
break;
}
- if (SrcETy->isSingleValueType())
- NewPtrTy = PointerType::getUnqual(SrcETy);
+ if (SrcETy->isSingleValueType()) {
+ NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
+ NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);
+ }
}
}
@@ -178,11 +186,12 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
SrcAlign = std::max(SrcAlign, CopyAlign);
DstAlign = std::max(DstAlign, CopyAlign);
- Value *Src = Builder->CreateBitCast(MI->getOperand(2), NewPtrTy);
- Value *Dest = Builder->CreateBitCast(MI->getOperand(1), NewPtrTy);
- Instruction *L = new LoadInst(Src, "tmp", false, SrcAlign);
+ Value *Src = Builder->CreateBitCast(MI->getOperand(2), NewSrcPtrTy);
+ Value *Dest = Builder->CreateBitCast(MI->getOperand(1), NewDstPtrTy);
+ Instruction *L = new LoadInst(Src, "tmp", MI->isVolatile(), SrcAlign);
InsertNewInstBefore(L, *MI);
- InsertNewInstBefore(new StoreInst(L, Dest, false, DstAlign), *MI);
+ InsertNewInstBefore(new StoreInst(L, Dest, MI->isVolatile(), DstAlign),
+ *MI);
// Set the size of the copy to 0, it will be deleted on the next iteration.
MI->setOperand(3, Constant::getNullValue(MemOpLength->getType()));
@@ -275,10 +284,11 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
if (GVSrc->isConstant()) {
Module *M = CI.getParent()->getParent()->getParent();
Intrinsic::ID MemCpyID = Intrinsic::memcpy;
- const Type *Tys[1];
- Tys[0] = CI.getOperand(3)->getType();
+ const Type *Tys[3] = { CI.getOperand(1)->getType(),
+ CI.getOperand(2)->getType(),
+ CI.getOperand(3)->getType() };
CI.setOperand(0,
- Intrinsic::getDeclaration(M, MemCpyID, Tys, 1));
+ Intrinsic::getDeclaration(M, MemCpyID, Tys, 3));
Changed = true;
}
}
diff --git a/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 62e2977058..d655f0e888 100644
--- a/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -413,7 +413,6 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
// interesting as a small compile-time optimization.
Ranges.addStore(0, SI);
- Function *MemSetF = 0;
// Now that we have full information about ranges, loop over the ranges and
// emit memset's for anything big enough to be worthwhile.
@@ -433,29 +432,40 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
// memset block. This ensure that the memset is dominated by any addressing
// instruction needed by the start of the block.
BasicBlock::iterator InsertPt = BI;
-
- if (MemSetF == 0) {
- const Type *Ty = Type::getInt64Ty(Context);
- MemSetF = Intrinsic::getDeclaration(M, Intrinsic::memset, &Ty, 1);
- }
-
+
// Get the starting pointer of the block.
StartPtr = Range.StartPtr;
-
+
+ // Determine alignment
+ unsigned Alignment = Range.Alignment;
+ if (Alignment == 0) {
+ const Type *EltType =
+ cast<PointerType>(StartPtr->getType())->getElementType();
+ Alignment = TD->getABITypeAlignment(EltType);
+ }
+
// Cast the start ptr to be i8* as memset requires.
- const Type *i8Ptr = Type::getInt8PtrTy(Context);
- if (StartPtr->getType() != i8Ptr)
+ const PointerType* StartPTy = cast<PointerType>(StartPtr->getType());
+ const PointerType *i8Ptr = Type::getInt8PtrTy(Context,
+ StartPTy->getAddressSpace());
+ if (StartPTy!= i8Ptr)
StartPtr = new BitCastInst(StartPtr, i8Ptr, StartPtr->getName(),
InsertPt);
-
+
Value *Ops[] = {
StartPtr, ByteVal, // Start, value
// size
ConstantInt::get(Type::getInt64Ty(Context), Range.End-Range.Start),
// align
- ConstantInt::get(Type::getInt32Ty(Context), Range.Alignment)
+ ConstantInt::get(Type::getInt32Ty(Context), Alignment),
+ // volatile
+ ConstantInt::get(Type::getInt1Ty(Context), 0),
};
- Value *C = CallInst::Create(MemSetF, Ops, Ops+4, "", InsertPt);
+ const Type *Tys[] = { Ops[0]->getType(), Ops[2]->getType() };
+
+ Function *MemSetF = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys, 2);
+
+ Value *C = CallInst::Create(MemSetF, Ops, Ops+5, "", InsertPt);
DEBUG(dbgs() << "Replace stores:\n";
for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
dbgs() << *Range.TheStores[i];
@@ -680,16 +690,19 @@ bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
return false;
// If all checks passed, then we can transform these memcpy's
- const Type *Ty = M->getLength()->getType();
+ const Type *ArgTys[3] = { M->getRawDest()->getType(),
+ MDep->getRawSource()->getType(),
+ M->getLength()->getType() };
Function *MemCpyFun = Intrinsic::getDeclaration(
M->getParent()->getParent()->getParent(),
- M->getIntrinsicID(), &Ty, 1);
+ M->getIntrinsicID(), ArgTys, 3);
- Value *Args[4] = {
- M->getRawDest(), MDep->getRawSource(), M->getLength(), M->getAlignmentCst()
+ Value *Args[5] = {
+ M->getRawDest(), MDep->getRawSource(), M->getLength(),
+ M->getAlignmentCst(), M->getVolatileCst()
};
- CallInst *C = CallInst::Create(MemCpyFun, Args, Args+4, "", M);
+ CallInst *C = CallInst::Create(MemCpyFun, Args, Args+5, "", M);
// If C and M don't interfere, then this is a valid transformation. If they
@@ -728,8 +741,10 @@ bool MemCpyOpt::processMemMove(MemMoveInst *M) {
// If not, then we know we can transform this.
Module *Mod = M->getParent()->getParent()->getParent();
- const Type *Ty = M->getLength()->getType();
- M->setOperand(0, Intrinsic::getDeclaration(Mod, Intrinsic::memcpy, &Ty, 1));
+ const Type *ArgTys[3] = { M->getDest()->getType(), M->getSource()->getType(),
+ M->getLength()->getType() };
+ M->setOperand(0,
+ Intrinsic::getDeclaration(Mod, Intrinsic::memcpy, ArgTys, 3));
// MemDep may have over conservative information about this instruction, just
// conservatively flush it from the cache.
diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index bbe6270655..6211beb70b 100644
--- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -858,8 +858,17 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
EltPtr = new BitCastInst(EltPtr, BytePtrTy, EltPtr->getName(), MI);
// Cast the other pointer (if we have one) to BytePtrTy.
- if (OtherElt && OtherElt->getType() != BytePtrTy)
- OtherElt = new BitCastInst(OtherElt, BytePtrTy, OtherElt->getName(), MI);
+ if (OtherElt && OtherElt->getType() != BytePtrTy) {
+ // Preserve address space of OtherElt
+ const PointerType* OtherPTy = cast<PointerType>(OtherElt->getType());
+ const PointerType* PTy = cast<PointerType>(BytePtrTy);
+ if (OtherPTy->getElementType() != PTy->getElementType()) {
+ Type *NewOtherPTy = PointerType::get(PTy->getElementType(),
+ OtherPTy->getAddressSpace());
+ OtherElt = new BitCastInst(OtherElt, NewOtherPTy,
+ OtherElt->getNameStr(), MI);
+ }
+ }
unsigned EltSize = TD->getTypeAllocSize(EltTy);
@@ -870,17 +879,28 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
SROADest ? OtherElt : EltPtr, // Src ptr
ConstantInt::get(MI->getOperand(3)->getType(), EltSize), // Size
// Align
- ConstantInt::get(Type::getInt32Ty(MI->getContext()), OtherEltAlign)
+ ConstantInt::get(Type::getInt32Ty(MI->getContext()), OtherEltAlign),
+ MI->getVolatileCst()
};
- CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
+ // In case we fold the address space overloaded memcpy of A to B
+ // with memcpy of B to C, change the function to be a memcpy of A to C.
+ const Type *Tys[] = { Ops[0]->getType(), Ops[1]->getType(),
+ Ops[2]->getType() };
+ Module *M = MI->getParent()->getParent()->getParent();
+ TheFn = Intrinsic::getDeclaration(M, MI->getIntrinsicID(), Tys, 3);
+ CallInst::Create(TheFn, Ops, Ops + 5, "", MI);
} else {
assert(isa<MemSetInst>(MI));
Value *Ops[] = {
EltPtr, MI->getOperand(2), // Dest, Value,
ConstantInt::get(MI->getOperand(3)->getType(), EltSize), // Size
- Zero // Align
+ Zero, // Align
+ ConstantInt::get(Type::getInt1Ty(MI->getContext()), 0) // isVolatile
};
- CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
+ const Type *Tys[] = { Ops[0]->getType(), Ops[2]->getType() };
+ Module *M = MI->getParent()->getParent()->getParent();
+ TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys, 2);
+ CallInst::Create(TheFn, Ops, Ops + 5, "", MI);
}
}
DeadInsts.push_back(MI);
diff --git a/lib/Transforms/Scalar/SimplifyLibCalls.cpp b/lib/Transforms/Scalar/SimplifyLibCalls.cpp
index 5941ea6571..b053cfc3b4 100644
--- a/lib/Transforms/Scalar/SimplifyLibCalls.cpp
+++ b/lib/Transforms/Scalar/SimplifyLibCalls.cpp
@@ -142,7 +142,8 @@ struct StrCatOpt : public LibCallOptimization {
// We have enough information to now generate the memcpy call to do the
// concatenation for us. Make a memcpy to copy the nul byte with align = 1.
EmitMemCpy(CpyDst, Src,
- ConstantInt::get(TD->getIntPtrType(*Context), Len+1), 1, B, TD);
+ ConstantInt::get(TD->getIntPtrType(*Context), Len+1),
+ 1, false, B, TD);
}
};
@@ -383,7 +384,8 @@ struct StrCpyOpt : public LibCallOptimization {
CI->getOperand(3), B, TD);
else
EmitMemCpy(Dst, Src,
- ConstantInt::get(TD->getIntPtrType(*Context), Len), 1, B, TD);
+ ConstantInt::get(TD->getIntPtrType(*Context), Len),
+ 1, false, B, TD);
return Dst;
}
};
@@ -411,8 +413,8 @@ struct StrNCpyOpt : public LibCallOptimization {
if (SrcLen == 0) {
// strncpy(x, "", y) -> memset(x, '\0', y, 1)
- EmitMemSet(Dst, ConstantInt::get(Type::getInt8Ty(*Context), '\0'), LenOp,
- B, TD);
+ EmitMemSet(Dst, ConstantInt::get(Type::getInt8Ty(*Context), '\0'),
+ LenOp, false, B, TD);
return Dst;
}
@@ -432,7 +434,8 @@ struct StrNCpyOpt : public LibCallOptimization {
// strncpy(x, s, c) -> memcpy(x, s, c, 1) [s and c are constant]
EmitMemCpy(Dst, Src,
- ConstantInt::get(TD->getIntPtrType(*Context), Len), 1, B, TD);
+ ConstantInt::get(TD->getIntPtrType(*Context), Len),
+ 1, false, B, TD);
return Dst;
}
@@ -593,7 +596,7 @@ struct MemCpyOpt : public LibCallOptimization {
// memcpy(x, y, n) -> llvm.memcpy(x, y, n, 1)
EmitMemCpy(CI->getOperand(1), CI->getOperand(2),
- CI->getOperand(3), 1, B, TD);
+ CI->getOperand(3), 1, false, B, TD);
return CI->getOperand(1);
}
};
@@ -615,7 +618,7 @@ struct MemMoveOpt : public LibCallOptimization {
// memmove(x, y, n) -> llvm.memmove(x, y, n, 1)
EmitMemMove(CI->getOperand(1), CI->getOperand(2),
- CI->getOperand(3), 1, B, TD);
+ CI->getOperand(3), 1, false, B, TD);
return CI->getOperand(1);
}
};
@@ -637,8 +640,8 @@ struct MemSetOpt : public LibCallOptimization {
// memset(p, v, n) -> llvm.memset(p, v, n, 1)
Value *Val = B.CreateIntCast(CI->getOperand(2), Type::getInt8Ty(*Context),
- false);
- EmitMemSet(CI->getOperand(1), Val, CI->getOperand(3), B, TD);
+ false);
+ EmitMemSet(CI->getOperand(1), Val, CI->getOperand(3), false, B, TD);
return CI->getOperand(1);
}
};
@@ -999,7 +1002,7 @@ struct SPrintFOpt : public LibCallOptimization {
// sprintf(str, fmt) -> llvm.memcpy(str, fmt, strlen(fmt)+1, 1)
EmitMemCpy(CI->getOperand(1), CI->getOperand(2), // Copy the nul byte.
ConstantInt::get(TD->getIntPtrType(*Context),
- FormatStr.size()+1), 1, B, TD);
+ FormatStr.size()+1), 1, false, B, TD);
return ConstantInt::get(CI->getType(), FormatStr.size());
}
@@ -1013,11 +1016,11 @@ struct SPrintFOpt : public LibCallOptimization {
// sprintf(dst, "%c", chr) --> *(i8*)dst = chr; *((i8*)dst+1) = 0
if (!CI->getOperand(3)->getType()->isIntegerTy()) return 0;
Value *V = B.CreateTrunc(CI->getOperand(3),
- Type::getInt8Ty(*Context), "char");
+ Type::getInt8Ty(*Context), "char");
Value *Ptr = CastToCStr(CI->getOperand(1), B);
B.CreateStore(V, Ptr);
Ptr = B.CreateGEP(Ptr, ConstantInt::get(Type::getInt32Ty(*Context), 1),
- "nul");
+ "nul");
B.CreateStore(Constant::getNullValue(Type::getInt8Ty(*Context)), Ptr);
return ConstantInt::get(CI->getType(), 1);
@@ -1034,7 +1037,7 @@ struct SPrintFOpt : public LibCallOptimization {
Value *IncLen = B.CreateAdd(Len,
ConstantInt::get(Len->getType(), 1),
"leninc");
- EmitMemCpy(CI->getOperand(1), CI->getOperand(3), IncLen, 1, B, TD);
+ EmitMemCpy(CI->getOperand(1), CI->getOperand(3), IncLen, 1, false, B, TD);
// The sprintf result is the unincremented number of bytes in the string.
return B.CreateIntCast(Len, CI->getType(), false);
diff --git a/lib/Transforms/Utils/BuildLibCalls.cpp b/lib/Transforms/Utils/BuildLibCalls.cpp
index 0afccf42f6..fff817928f 100644
--- a/lib/Transforms/Utils/BuildLibCalls.cpp
+++ b/lib/Transforms/Utils/BuildLibCalls.cpp
@@ -109,15 +109,16 @@ Value *llvm::EmitStrNCpy(Value *Dst, Value *Src, Value *Len,
/// EmitMemCpy - Emit a call to the memcpy function to the builder. This always
/// expects that Len has type 'intptr_t' and Dst/Src are pointers.
-Value *llvm::EmitMemCpy(Value *Dst, Value *Src, Value *Len,
- unsigned Align, IRBuilder<> &B, const TargetData *TD) {
+Value *llvm::EmitMemCpy(Value *Dst, Value *Src, Value *Len, unsigned Align,
+ bool isVolatile, IRBuilder<> &B, const TargetData *TD) {
Module *M = B.GetInsertBlock()->getParent()->getParent();
- const Type *Ty = Len->getType();
- Value *MemCpy = Intrinsic::getDeclaration(M, Intrinsic::memcpy, &Ty, 1);
+ const Type *ArgTys[3] = { Dst->getType(), Src->getType(), Len->getType() };
+ Value *MemCpy = Intrinsic::getDeclaration(M, Intrinsic::memcpy, ArgTys, 3);
Dst = CastToCStr(Dst, B);
Src = CastToCStr(Src, B);
- return B.CreateCall4(MemCpy, Dst, Src, Len,
- ConstantInt::get(B.getInt32Ty(), Align));
+ return B.CreateCall5(MemCpy, Dst, Src, Len,
+ ConstantInt::get(B.getInt32Ty(), Align),
+ ConstantInt::get(B.getInt1Ty(), isVolatile));
}
/// EmitMemCpyChk - Emit a call to the __memcpy_chk function to the builder.
@@ -146,16 +147,18 @@ Value *llvm::EmitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize,
/// EmitMemMove - Emit a call to the memmove function to the builder. This
/// always expects that the size has type 'intptr_t' and Dst/Src are pointers.
-Value *llvm::EmitMemMove(Value *Dst, Value *Src, Value *Len,
- unsigned Align, IRBuilder<> &B, const TargetData *TD) {
+Value *llvm::EmitMemMove(Value *Dst, Value *Src, Value *Len, unsigned Align,
+ bool isVolatile, IRBuilder<> &B, const TargetData *TD) {
Module *M = B.GetInsertBlock()->getParent()->getParent();
LLVMContext &Context = B.GetInsertBlock()->getContext();
- const Type *Ty = TD->getIntPtrType(Context);
- Value *MemMove = Intrinsic::getDeclaration(M, Intrinsic::memmove, &Ty, 1);
+ const Type *ArgTys[3] = { Dst->getType(), Src->getType(),
+ TD->getIntPtrType(Context) };
+ Value *MemMove = Intrinsic::getDeclaration(M, Intrinsic::memmove, ArgTys, 3);
Dst = CastToCStr(Dst, B);
Src = CastToCStr(Src, B);
Value *A = ConstantInt::get(B.getInt32Ty(), Align);
- return B.CreateCall4(MemMove, Dst, Src, Len, A);
+ Value *Vol = ConstantInt::get(B.getInt1Ty(), isVolatile);
+ return B.CreateCall5(MemMove, Dst, Src, Len, A, Vol);
}
/// EmitMemChr - Emit a call to the memchr function. This assumes that Ptr is
@@ -206,15 +209,15 @@ Value *llvm::EmitMemCmp(Value *Ptr1, Value *Ptr2,
}
/// EmitMemSet - Emit a call to the memset function
-Value *llvm::EmitMemSet(Value *Dst, Value *Val,
- Value *Len, IRBuilder<> &B, const TargetData *TD) {
+Value *llvm::EmitMemSet(Value *Dst, Value *Val, Value *Len, bool isVolatile,
+ IRBuilder<> &B, const TargetData *TD) {
Module *M = B.GetInsertBlock()->getParent()->getParent();
Intrinsic::ID IID = Intrinsic::memset;
- const Type *Tys[1];
- Tys[0] = Len->getType();
- Value *MemSet = Intrinsic::getDeclaration(M, IID, Tys, 1);
+ const Type *Tys[2] = { Dst->getType(), Len->getType() };
+ Value *MemSet = Intrinsic::getDeclaration(M, IID, Tys, 2);
Value *Align = ConstantInt::get(B.getInt32Ty(), 1);
- return B.CreateCall4(MemSet, CastToCStr(Dst, B), Val, Len, Align);
+ Value *Vol = ConstantInt::get(B.getInt1Ty(), isVolatile);
+ return B.CreateCall5(MemSet, CastToCStr(Dst, B), Val, Len, Align, Vol);
}
/// EmitUnaryFloatFnCall - Emit a call to the unary function named 'Name' (e.g.
@@ -381,7 +384,7 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const TargetData *TD) {
if (Name == "__memcpy_chk") {
if (isFoldable(4, 3, false)) {
EmitMemCpy(CI->getOperand(1), CI->getOperand(2), CI->getOperand(3),
- 1, B, TD);
+ 1, false, B, TD);
replaceCall(CI->getOperand(1));
return true;
}
@@ -396,7 +399,7 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const TargetData *TD) {
if (Name == "__memmove_chk") {
if (isFoldable(4, 3, false)) {
EmitMemMove(CI->getOperand(1), CI->getOperand(2), CI->getOperand(3),
- 1, B, TD);
+ 1, false, B, TD);
replaceCall(CI->getOperand(1));
return true;
}
@@ -407,7 +410,7 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const TargetData *TD) {
if (isFoldable(4, 3, false)) {
Value *Val = B.CreateIntCast(CI->getOperand(2), B.getInt8Ty(),
false);
- EmitMemSet(CI->getOperand(1), Val, CI->getOperand(3), B, TD);
+ EmitMemSet(CI->getOperand(1), Val, CI->getOperand(3), false, B, TD);
replaceCall(CI->getOperand(1));
return true;
}
diff --git a/lib/Transforms/Utils/InlineFunction.cpp b/lib/Transforms/Utils/InlineFunction.cpp
index 17f8827fd5..75c9ccdd7a 100644
--- a/lib/Transforms/Utils/InlineFunction.cpp
+++ b/lib/Transforms/Utils/InlineFunction.cpp
@@ -297,10 +297,10 @@ bool llvm::InlineFunction(CallSite CS, CallGraph *CG, const TargetData *TD,
I->getName(),
&*Caller->begin()->begin());
// Emit a memcpy.
- const Type *Tys[] = { Type::getInt64Ty(Context) };
+ const Type *Tys[3] = {VoidPtrTy, VoidPtrTy, Type::getInt64Ty(Context)};
Function *MemCpyFn = Intrinsic::getDeclaration(Caller->getParent(),
Intrinsic::memcpy,
- Tys, 1);
+ Tys, 3);
Value *DestCast = new BitCastInst(NewAlloca, VoidPtrTy, "tmp", TheCall);
Value *SrcCast = new BitCastInst(*AI, VoidPtrTy, "tmp", TheCall);
@@ -309,17 +309,18 @@ bool llvm::InlineFunction(CallSite CS, CallGraph *CG, const TargetData *TD,
Size = ConstantExpr::getSizeOf(AggTy);
else
Size = ConstantInt::get(Type::getInt64Ty(Context),
- TD->getTypeStoreSize(AggTy));
+ TD->getTypeStoreSize(AggTy));
// Always generate a memcpy of alignment 1 here because we don't know
// the alignment of the src pointer. Other optimizations can infer
// better alignment.
Value *CallArgs[] = {
DestCast, SrcCast, Size,
- ConstantInt::get(Type::getInt32Ty(Context), 1)
+ ConstantInt::get(Type::getInt32Ty(Context), 1),
+ ConstantInt::get(Type::getInt1Ty(Context), 0)
};
CallInst *TheMemCpy =
- CallInst::Create(MemCpyFn, CallArgs, CallArgs+4, "", TheCall);
+ CallInst::Create(MemCpyFn, CallArgs, CallArgs+5, "", TheCall);
// If we have a call graph, update it.
if (CG) {
diff --git a/lib/VMCore/AutoUpgrade.cpp b/lib/VMCore/AutoUpgrade.cpp
index b9aa5c3467..4d06b66681 100644
--- a/lib/VMCore/AutoUpgrade.cpp
+++ b/lib/VMCore/AutoUpgrade.cpp
@@ -145,6 +145,54 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
}
break;
+ case 'm': {
+ // This upgrades the llvm.memcpy, llvm.memmove, and llvm.memset to the
+ // new format that allows overloading the pointer for different address
+ // space (e.g., llvm.memcpy.i16 => llvm.memcpy.p0i8.p0i8.i16)
+ const char* NewFnName = NULL;
+ if (Name.compare(5,8,"memcpy.i",8) == 0) {
+ if (Name[13] == '8')
+ NewFnName = "llvm.memcpy.p0i8.p0i8.i8";
+ else if (Name.compare(13,2,"16") == 0)
+ NewFnName = "llvm.memcpy.p0i8.p0i8.i16";
+ else if (Name.compare(13,2,"32") == 0)
+ NewFnName = "llvm.memcpy.p0i8.p0i8.i32";
+ else if (Name.compare(13,2,"64") == 0)
+ NewFnName = "llvm.memcpy.p0i8.p0i8.i64";
+ } else if (Name.compare(5,9,"memmove.i",9) == 0) {
+ if (Name[14] == '8')
+ NewFnName = "llvm.memmove.p0i8.p0i8.i8";
+ else if (Name.compare(14,2,"16") == 0)
+ NewFnName = "llvm.memmove.p0i8.p0i8.i16";
+ else if (Name.compare(14,2,"32") == 0)
+ NewFnName = "llvm.memmove.p0i8.p0i8.i32";
+ else if (Name.compare(14,2,"64") == 0)
+ NewFnName = "llvm.memmove.p0i8.p0i8.i64";
+ }
+ else if (Name.compare(5,8,"memset.i",8) == 0) {
+ if (Name[13] == '8')
+ NewFnName = "llvm.memset.p0i8.i8";
+ else if (Name.compare(13,2,"16") == 0)
+ NewFnName = "llvm.memset.p0i8.i16";
+ else if (Name.compare(13,2,"32") == 0)
+ NewFnName = "llvm.memset.p0i8.i32";
+ else if (Name.compare(13,2,"64") == 0)
+ NewFnName = "llvm.memset.p0i8.i64";
+ }
+ if (NewFnName) {
+ const FunctionType *FTy = F->getFunctionType();
+ NewFn = cast<Function>(M->getOrInsertFunction(NewFnName,
+ FTy->getReturnType(),
+ FTy->getParamType(0),
+ FTy->getParamType(1),
+ FTy->getParamType(2),
+ FTy->getParamType(3),
+ Type::getInt1Ty(F->getContext()),
+ (Type *)0));
+ return true;
+ }
+ break;
+ }
case 'p':
// This upgrades the llvm.part.select overloaded intrinsic names to only
// use one type specifier in the name. We only care about the old format
@@ -472,6 +520,28 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
CI->eraseFromParent();
}
break;
+ case Intrinsic::memcpy:
+ case Intrinsic::memmove:
+ case Intrinsic::memset: {
+ // Add isVolatile
+ const llvm::Type *I1Ty = llvm::Type::getInt1Ty(CI->getContext());
+ Value *Operands[5] = { CI->getOperand(1), CI->getOperand(2),
+ CI->getOperand(3), CI->getOperand(4),
+ llvm::ConstantInt::get(I1Ty, 0) };
+ CallInst *NewCI = CallInst::Create(NewFn, Operands, Operands+5,
+ CI->getName(), CI);
+ NewCI->setTailCall(CI->isTailCall());
+ NewCI->setCallingConv(CI->getCallingConv());
+ // Handle any uses of the old CallInst.
+ if (!CI->use_empty())
+ // Replace all uses of the old call with the new cast which has the
+ // correct type.
+ CI->replaceAllUsesWith(NewCI);
+
+ // Clean up the old call now that it has been completely upgraded.
+ CI->eraseFromParent();
+ break;
+ }
}
}
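For out-of-tree producers of old bitcode, the AutoUpgrade hook added above renames the fixed-name intrinsics (e.g. llvm.memcpy.i32 becomes llvm.memcpy.p0i8.p0i8.i32) and appends an i1 false isvolatile operand to each existing call. Below is a hedged sketch of driving that upgrade over a freshly loaded module via the existing UpgradeCallsToIntrinsic entry point, which is what the bitcode reader already does; the helper name is an illustrative assumption.

// Sketch only: let AutoUpgrade rewrite old-style memcpy/memmove/memset
// declarations and their calls into the new overloaded, five-operand form.
#include "llvm/AutoUpgrade.h"
#include "llvm/Module.h"
using namespace llvm;

static void upgradeMemIntrinsics(Module &M) {
  for (Module::iterator I = M.begin(), E = M.end(); I != E; ) {
    Function *F = &*I;
    ++I;  // advance first: the old declaration may be erased by the upgrade
    UpgradeCallsToIntrinsic(F);  // no-op for functions that need no upgrade
  }
}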