aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--lib/Analysis/NaCl/PNaClABIVerifyFunctions.cpp12
-rw-r--r--lib/Transforms/NaCl/StripAttributes.cpp9
-rw-r--r--test/NaCl/PNaClABI/abi-alignment.ll41
-rw-r--r--test/Transforms/NaCl/strip-attributes.ll18
4 files changed, 79 insertions, 1 deletions
diff --git a/lib/Analysis/NaCl/PNaClABIVerifyFunctions.cpp b/lib/Analysis/NaCl/PNaClABIVerifyFunctions.cpp
index 1fe79757a0..9a96d19ed4 100644
--- a/lib/Analysis/NaCl/PNaClABIVerifyFunctions.cpp
+++ b/lib/Analysis/NaCl/PNaClABIVerifyFunctions.cpp
@@ -315,6 +315,18 @@ const char *PNaClABIVerifyFunctions::checkInstruction(const Instruction *Inst) {
isa<MDNode>(Arg)))
return "bad intrinsic operand";
}
+ // Disallow alignments other than 1 on memcpy() etc., for the
+ // same reason that we disallow them on integer loads and
+ // stores.
+ if (const MemIntrinsic *MemOp = dyn_cast<MemIntrinsic>(Call)) {
+ // Avoid the getAlignment() method here because it aborts if
+ // the alignment argument is not a Constant.
+ Value *AlignArg = MemOp->getArgOperand(3);
+ if (!isa<ConstantInt>(AlignArg) ||
+ cast<ConstantInt>(AlignArg)->getZExtValue() != 1) {
+ return "bad alignment";
+ }
+ }
// Allow the instruction and skip the later checks.
return NULL;
}
diff --git a/lib/Transforms/NaCl/StripAttributes.cpp b/lib/Transforms/NaCl/StripAttributes.cpp
index 9c3dd6c83e..fb3a080e84 100644
--- a/lib/Transforms/NaCl/StripAttributes.cpp
+++ b/lib/Transforms/NaCl/StripAttributes.cpp
@@ -14,6 +14,7 @@
// calls.
// * Calling conventions from functions and function calls.
// * The "align" attribute on functions.
+// * The alignment argument of memcpy/memmove/memset intrinsic calls.
// * The "unnamed_addr" attribute on functions and global variables.
// * The distinction between "internal" and "private" linkage.
// * "protected" and "internal" visibility of functions and globals.
@@ -25,6 +26,7 @@
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/Pass.h"
@@ -182,6 +184,13 @@ void stripFunctionAttrs(DataLayout *DL, Function *Func) {
CheckAttributes(Call.getAttributes());
Call.setAttributes(AttributeSet());
Call.setCallingConv(CallingConv::C);
+
+ // Set memcpy(), memmove() and memset() to use pessimistic
+ // alignment assumptions.
+ if (MemIntrinsic *MemOp = dyn_cast<MemIntrinsic>(Inst)) {
+ Type *AlignTy = MemOp->getAlignmentCst()->getType();
+ MemOp->setAlignment(ConstantInt::get(AlignTy, 1));
+ }
} else if (OverflowingBinaryOperator *Op =
dyn_cast<OverflowingBinaryOperator>(Inst)) {
cast<BinaryOperator>(Op)->setHasNoUnsignedWrap(false);
diff --git a/test/NaCl/PNaClABI/abi-alignment.ll b/test/NaCl/PNaClABI/abi-alignment.ll
index 0ef1f86604..443fe44507 100644
--- a/test/NaCl/PNaClABI/abi-alignment.ll
+++ b/test/NaCl/PNaClABI/abi-alignment.ll
@@ -5,6 +5,11 @@
; "align" attributes, so are not tested here.
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1)
+declare void @llvm.memmove.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1)
+declare void @llvm.memset.p0i8.i32(i8*, i8, i32, i32, i1)
+
+
define void @allowed_cases(i32 %ptr, float %f, double %d) {
%ptr.i32 = inttoptr i32 %ptr to i32*
load i32* %ptr.i32, align 1
@@ -30,12 +35,21 @@ define void @allowed_cases(i32 %ptr, float %f, double %d) {
load atomic double* %ptr.double seq_cst, align 8
store atomic double %d, double* %ptr.double seq_cst, align 8
+  ; memcpy() et al take an alignment parameter, which is allowed to be 1.
+ %ptr.p = inttoptr i32 %ptr to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr.p, i8* %ptr.p,
+ i32 10, i32 1, i1 false)
+ call void @llvm.memmove.p0i8.p0i8.i32(i8* %ptr.p, i8* %ptr.p,
+ i32 10, i32 1, i1 false)
+ call void @llvm.memset.p0i8.i32(i8* %ptr.p, i8 99,
+ i32 10, i32 1, i1 false)
+
ret void
}
; CHECK-NOT: disallowed
-define void @rejected_cases(i32 %ptr, float %f, double %d) {
+define void @rejected_cases(i32 %ptr, float %f, double %d, i32 %align) {
%ptr.i32 = inttoptr i32 %ptr to i32*
load i32* %ptr.i32, align 4
store i32 123, i32* %ptr.i32, align 4
@@ -79,5 +93,30 @@ define void @rejected_cases(i32 %ptr, float %f, double %d) {
; CHECK-NEXT: disallowed: bad alignment: {{.*}} load atomic float{{.*}} align 8
; CHECK-NEXT: disallowed: bad alignment: {{.*}} load atomic double{{.*}} align 16
+ ; Non-pessimistic alignments for memcpy() et al are rejected.
+ %ptr.p = inttoptr i32 %ptr to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr.p, i8* %ptr.p,
+ i32 10, i32 4, i1 false)
+ call void @llvm.memmove.p0i8.p0i8.i32(i8* %ptr.p, i8* %ptr.p,
+ i32 10, i32 4, i1 false)
+ call void @llvm.memset.p0i8.i32(i8* %ptr.p, i8 99,
+ i32 10, i32 4, i1 false)
+; CHECK-NEXT: bad alignment: call void @llvm.memcpy
+; CHECK-NEXT: bad alignment: call void @llvm.memmove
+; CHECK-NEXT: bad alignment: call void @llvm.memset
+
+ ; Check that the verifier does not crash if the alignment argument
+ ; is not a constant.
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr.p, i8* %ptr.p,
+ i32 10, i32 %align, i1 false)
+ call void @llvm.memmove.p0i8.p0i8.i32(i8* %ptr.p, i8* %ptr.p,
+ i32 10, i32 %align, i1 false)
+ call void @llvm.memset.p0i8.i32(i8* %ptr.p, i8 99,
+ i32 10, i32 %align, i1 false)
+; CHECK-NEXT: bad alignment: call void @llvm.memcpy
+; CHECK-NEXT: bad alignment: call void @llvm.memmove
+; CHECK-NEXT: bad alignment: call void @llvm.memset
+
ret void
}
+; CHECK-NOT: disallowed
diff --git a/test/Transforms/NaCl/strip-attributes.ll b/test/Transforms/NaCl/strip-attributes.ll
index e8960d0f71..66224a8977 100644
--- a/test/Transforms/NaCl/strip-attributes.ll
+++ b/test/Transforms/NaCl/strip-attributes.ll
@@ -100,3 +100,21 @@ define void @reduce_alignment_assumptions() {
; CHECK-NEXT: load atomic i32* null seq_cst, align 4
; CHECK-NEXT: store atomic i32 100, i32* null seq_cst, align 4
; CHECK-NEXT: store atomic i32 100, i32* null seq_cst, align 4
+
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1)
+declare void @llvm.memmove.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1)
+declare void @llvm.memset.p0i8.i32(i8*, i8, i32, i32, i1)
+
+define void @reduce_memcpy_alignment_assumptions(i8* %ptr) {
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %ptr,
+ i32 20, i32 4, i1 false)
+ call void @llvm.memmove.p0i8.p0i8.i32(i8* %ptr, i8* %ptr,
+ i32 20, i32 4, i1 false)
+ call void @llvm.memset.p0i8.i32(i8* %ptr, i8 99,
+ i32 20, i32 4, i1 false)
+ ret void
+}
+; CHECK: define void @reduce_memcpy_alignment_assumptions
+; CHECK-NEXT: call void @llvm.memcpy.{{.*}} i32 20, i32 1, i1 false)
+; CHECK-NEXT: call void @llvm.memmove.{{.*}} i32 20, i32 1, i1 false)
+; CHECK-NEXT: call void @llvm.memset.{{.*}} i32 20, i32 1, i1 false)