author    Mark Seaborn <mseaborn@chromium.org>  2013-06-20 14:49:57 -0700
committer Mark Seaborn <mseaborn@chromium.org>  2013-06-20 14:49:57 -0700
commit    402eecfe0abd2f99580d81440f264067095b7a43
tree      fe58b1ab2ec7d7696db6d27f4eccb9b4a7cd7e58
parent    3f6504d96476bc8f717f077eda7a7061cb7672da
PNaCl ABI: Reduce the set of allowed "align" attributes on loads/stores
Change the ABI verifier to require "align 1" on non-atomic integer
accesses to prevent non-portable behaviour.  Allow larger alignments
on floating-point accesses as a concession to performance, because
ARM might not be able to do unaligned FP loads/stores efficiently.

Change StripAttributes to make pexes pass the ABI verifier.  Also
update comments in StripAttributes to match some recent changes.

BUG=https://code.google.com/p/nativeclient/issues/detail?id=3445
TEST=*.ll tests + PNaCl toolchain trybots

Review URL: https://codereview.chromium.org/17094009
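
To make the rule concrete before the diff, here is a minimal standalone
sketch of the policy the verifier enforces, written as plain C++ rather
than against the LLVM API. The enum, helper names, and sample checks
are illustrative only, not code from this patch.

    #include <cassert>

    enum ScalarType { Int8, Int16, Int32, Int64, Float, Double };

    static unsigned naturalAlignment(ScalarType Ty) {
      switch (Ty) {
      case Int8:   return 1;
      case Int16:  return 2;
      case Int32:  return 4;
      case Int64:  return 8;
      case Float:  return 4;
      case Double: return 8;
      }
      return 0;
    }

    // Mirror of the ABI rule: atomics must use the type's natural
    // alignment; non-atomic integer accesses must use "align 1"; float
    // and double may additionally use their natural alignment.
    static bool isAllowedAlignment(unsigned Align, ScalarType Ty,
                                   bool IsAtomic) {
      if (IsAtomic)
        return Align == naturalAlignment(Ty);
      if (Align == 1)
        return true;
      return (Ty == Float || Ty == Double) && Align == naturalAlignment(Ty);
    }

    int main() {
      assert(isAllowedAlignment(1, Int32, false));  // "align 1" is always OK
      assert(!isAllowedAlignment(4, Int32, false)); // non-atomic i32: align 4 rejected
      assert(isAllowedAlignment(4, Int32, true));   // atomic i32 must be align 4
      assert(isAllowedAlignment(8, Double, false)); // FP concession to performance
      assert(!isAllowedAlignment(2, Float, false)); // unusual alignments rejected
      return 0;
    }
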
-rw-r--r--  lib/Analysis/NaCl/PNaClABIVerifyFunctions.cpp  | 49
-rw-r--r--  lib/Transforms/NaCl/StripAttributes.cpp        | 37
-rw-r--r--  test/NaCl/PNaClABI/abi-alignment.ll            | 83
-rw-r--r--  test/NaCl/PNaClABI/abi-stripped-pointers.ll    | 14
-rw-r--r--  test/NaCl/PNaClABI/instructions.ll             |  4
-rw-r--r--  test/NaCl/PNaClABI/types-function.ll           |  2
-rw-r--r--  test/Transforms/NaCl/strip-attributes.ll       | 55
7 files changed, 224 insertions(+), 20 deletions(-)
diff --git a/lib/Analysis/NaCl/PNaClABIVerifyFunctions.cpp b/lib/Analysis/NaCl/PNaClABIVerifyFunctions.cpp
index b17fc1ab69..a090bdd9a9 100644
--- a/lib/Analysis/NaCl/PNaClABIVerifyFunctions.cpp
+++ b/lib/Analysis/NaCl/PNaClABIVerifyFunctions.cpp
@@ -138,6 +138,31 @@ static bool isValidScalarOperand(const Value *Val) {
isa<UndefValue>(Val));
}
+static bool isAllowedAlignment(unsigned Alignment, Type *Ty, bool IsAtomic) {
+ if (IsAtomic) {
+ // For atomic operations, the alignment must match the size of the type.
+ if (Ty->isIntegerTy()) {
+ unsigned Bits = Ty->getIntegerBitWidth();
+ return Bits % 8 == 0 && Alignment == Bits / 8;
+ }
+ return (Ty->isDoubleTy() && Alignment == 8) ||
+ (Ty->isFloatTy() && Alignment == 4);
+ }
+ // Non-atomic integer operations must always use "align 1", since we
+ // do not want the backend to generate code with non-portable
+ // undefined behaviour (such as misaligned access faults) if user
+ // code specifies "align 4" but uses a misaligned pointer. As a
+ // concession to performance, we allow larger alignment values for
+ // floating point types.
+ //
+ // To reduce the set of alignment values that need to be encoded in
+ // pexes, we disallow other alignment values. We require alignments
+ // to be explicit by disallowing Alignment == 0.
+ return Alignment == 1 ||
+ (Ty->isDoubleTy() && Alignment == 8) ||
+ (Ty->isFloatTy() && Alignment == 4);
+}
+
// Check the instruction's opcode and its operands. The operands may
// require opcode-specific checking.
//
@@ -215,18 +240,34 @@ const char *PNaClABIVerifyFunctions::checkInstruction(const Instruction *Inst) {
break;
// Memory accesses.
- case Instruction::Load:
- case Instruction::AtomicCmpXchg:
- case Instruction::AtomicRMW:
+ case Instruction::Load: {
+ const LoadInst *Load = cast<LoadInst>(Inst);
+ if (!isAllowedAlignment(Load->getAlignment(),
+ Load->getType(),
+ Load->isAtomic()))
+ return "bad alignment";
PtrOperandIndex = 0;
if (!isNormalizedPtr(Inst->getOperand(PtrOperandIndex)))
return "bad pointer";
break;
- case Instruction::Store:
+ }
+ case Instruction::Store: {
+ const StoreInst *Store = cast<StoreInst>(Inst);
+ if (!isAllowedAlignment(Store->getAlignment(),
+ Store->getValueOperand()->getType(),
+ Store->isAtomic()))
+ return "bad alignment";
PtrOperandIndex = 1;
if (!isNormalizedPtr(Inst->getOperand(PtrOperandIndex)))
return "bad pointer";
break;
+ }
+ case Instruction::AtomicCmpXchg:
+ case Instruction::AtomicRMW:
+ PtrOperandIndex = 0;
+ if (!isNormalizedPtr(Inst->getOperand(PtrOperandIndex)))
+ return "bad pointer";
+ break;
// Casts.
case Instruction::BitCast:
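
For context on the fields the new checks read: below is a small
illustrative probe, assuming the LLVM 3.x C++ API this tree builds
against, where getAlignment() returns the value of the "align"
attribute (0 when none was written) and isAtomic() distinguishes
"load atomic"/"store atomic". The helper is hypothetical, not part of
the patch.

    #include "llvm/IR/Instructions.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    // Print the fields that checkInstruction() feeds into
    // isAllowedAlignment() for a memory access instruction.
    static void dumpAccessInfo(const Instruction *Inst) {
      if (const LoadInst *Load = dyn_cast<LoadInst>(Inst)) {
        errs() << "load: align=" << Load->getAlignment()  // 0 = no "align" attr
               << " atomic=" << Load->isAtomic() << "\n";
      } else if (const StoreInst *Store = dyn_cast<StoreInst>(Inst)) {
        errs() << "store: align=" << Store->getAlignment()
               << " atomic=" << Store->isAtomic() << "\n";
      }
    }

Because an absent attribute reads back as 0 and the verifier now
rejects Alignment == 0, pexes must carry explicit alignments;
StripAttributes (next file) fills them in.
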
diff --git a/lib/Transforms/NaCl/StripAttributes.cpp b/lib/Transforms/NaCl/StripAttributes.cpp
index e1ce855679..9c3dd6c83e 100644
--- a/lib/Transforms/NaCl/StripAttributes.cpp
+++ b/lib/Transforms/NaCl/StripAttributes.cpp
@@ -15,13 +15,15 @@
// * Calling conventions from functions and function calls.
// * The "align" attribute on functions.
// * The "unnamed_addr" attribute on functions and global variables.
-//
-// TODO(mseaborn): Strip out the following too:
-//
-// * "align" attributes from integer memory accesses.
+// * The distinction between "internal" and "private" linkage.
+// * "protected" and "internal" visibility of functions and globals.
+// * The arithmetic attributes "nsw", "nuw" and "exact".
+// * It also reduces the set of possible "align" attributes on memory
+//   accesses.
//
//===----------------------------------------------------------------------===//
+#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
@@ -153,7 +155,19 @@ void stripGlobalValueAttrs(GlobalValue *GV) {
GV->setLinkage(GlobalValue::InternalLinkage);
}
-void stripFunctionAttrs(Function *Func) {
+static unsigned normalizeAlignment(DataLayout *DL, unsigned Alignment,
+ Type *Ty, bool IsAtomic) {
+ unsigned MaxAllowed = 1;
+ if (Ty->isDoubleTy() || Ty->isFloatTy() || IsAtomic)
+ MaxAllowed = DL->getTypeAllocSize(Ty);
+ // If the alignment is set to 0, this means "use the default
+ // alignment for the target", which we fill in explicitly.
+ if (Alignment == 0 || Alignment >= MaxAllowed)
+ return MaxAllowed;
+ return 1;
+}
+
+void stripFunctionAttrs(DataLayout *DL, Function *Func) {
CheckAttributes(Func->getAttributes());
Func->setAttributes(AttributeSet());
Func->setCallingConv(CallingConv::C);
@@ -175,12 +189,23 @@ void stripFunctionAttrs(Function *Func) {
} else if (PossiblyExactOperator *Op =
dyn_cast<PossiblyExactOperator>(Inst)) {
cast<BinaryOperator>(Op)->setIsExact(false);
+ } else if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) {
+ Load->setAlignment(normalizeAlignment(
+ DL, Load->getAlignment(),
+ Load->getType(),
+ Load->isAtomic()));
+ } else if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) {
+ Store->setAlignment(normalizeAlignment(
+ DL, Store->getAlignment(),
+ Store->getValueOperand()->getType(),
+ Store->isAtomic()));
}
}
}
}
bool StripAttributes::runOnModule(Module &M) {
+ DataLayout DL(&M);
for (Module::iterator Func = M.begin(), E = M.end(); Func != E; ++Func) {
// Avoid stripping attributes from intrinsics because the
// constructor for Functions just adds them back again. It would
@@ -188,7 +213,7 @@ bool StripAttributes::runOnModule(Module &M) {
// intrinsics and sometimes not.
if (!Func->isIntrinsic()) {
stripGlobalValueAttrs(Func);
- stripFunctionAttrs(Func);
+ stripFunctionAttrs(&DL, Func);
}
}
for (Module::global_iterator GV = M.global_begin(), E = M.global_end();
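
The normalization rule is compact enough to restate outside the pass.
Here is a self-contained sketch of the same collapse logic, with a
hypothetical driver showing the values it produces; the names and the
driver are illustrative, not code from the patch (TypeSize stands in
for DataLayout::getTypeAllocSize()).

    #include <cstdio>

    // FP and atomic accesses may keep up to the type's natural
    // alignment; everything else collapses to "align 1". An input of 0
    // means "use the target's default", which is made explicit.
    static unsigned normalizeAlignment(unsigned Align, unsigned TypeSize,
                                       bool IsFloat, bool IsAtomic) {
      unsigned MaxAllowed = (IsFloat || IsAtomic) ? TypeSize : 1;
      if (Align == 0 || Align >= MaxAllowed)
        return MaxAllowed;
      return 1;
    }

    int main() {
      // Non-atomic i32: every alignment collapses to 1.
      printf("%u\n", normalizeAlignment(4, 4, false, false)); // 1
      printf("%u\n", normalizeAlignment(0, 4, false, false)); // 1
      // float: 0 or >= 4 becomes 4; an odd value like 2 becomes 1.
      printf("%u\n", normalizeAlignment(0, 4, true, false));  // 4
      printf("%u\n", normalizeAlignment(8, 4, true, false));  // 4
      printf("%u\n", normalizeAlignment(2, 4, true, false));  // 1
      // Atomic i32: over-alignment is clamped to the natural 4.
      printf("%u\n", normalizeAlignment(8, 4, false, true));  // 4
      return 0;
    }
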
diff --git a/test/NaCl/PNaClABI/abi-alignment.ll b/test/NaCl/PNaClABI/abi-alignment.ll
new file mode 100644
index 0000000000..0ef1f86604
--- /dev/null
+++ b/test/NaCl/PNaClABI/abi-alignment.ll
@@ -0,0 +1,83 @@
+; RUN: pnacl-abicheck < %s | FileCheck %s
+
+; Test the "align" attributes that are allowed on load and store
+; instructions. Note that "cmpxchg" and "atomicrmw" do not take
+; "align" attributes, so are not tested here.
+
+
+define void @allowed_cases(i32 %ptr, float %f, double %d) {
+ %ptr.i32 = inttoptr i32 %ptr to i32*
+ load i32* %ptr.i32, align 1
+ store i32 123, i32* %ptr.i32, align 1
+
+ %ptr.float = inttoptr i32 %ptr to float*
+ load float* %ptr.float, align 1
+ load float* %ptr.float, align 4
+ store float %f, float* %ptr.float, align 1
+ store float %f, float* %ptr.float, align 4
+
+ %ptr.double = inttoptr i32 %ptr to double*
+ load double* %ptr.double, align 1
+ load double* %ptr.double, align 8
+ store double %d, double* %ptr.double, align 1
+ store double %d, double* %ptr.double, align 8
+
+ ; Stricter alignments are required for atomics.
+ load atomic i32* %ptr.i32 seq_cst, align 4
+ store atomic i32 123, i32* %ptr.i32 seq_cst, align 4
+ load atomic float* %ptr.float seq_cst, align 4
+ store atomic float %f, float* %ptr.float seq_cst, align 4
+ load atomic double* %ptr.double seq_cst, align 8
+ store atomic double %d, double* %ptr.double seq_cst, align 8
+
+ ret void
+}
+; CHECK-NOT: disallowed
+
+
+define void @rejected_cases(i32 %ptr, float %f, double %d) {
+ %ptr.i32 = inttoptr i32 %ptr to i32*
+ load i32* %ptr.i32, align 4
+ store i32 123, i32* %ptr.i32, align 4
+; CHECK: disallowed: bad alignment: {{.*}} load i32{{.*}} align 4
+; CHECK-NEXT: disallowed: bad alignment: store i32{{.*}} align 4
+
+ ; Unusual, not-very-useful alignments are rejected.
+ %ptr.float = inttoptr i32 %ptr to float*
+ load float* %ptr.float, align 2
+ load float* %ptr.float, align 8
+ store float %f, float* %ptr.float, align 2
+ store float %f, float* %ptr.float, align 8
+; CHECK-NEXT: disallowed: bad alignment: {{.*}} load float{{.*}} align 2
+; CHECK-NEXT: disallowed: bad alignment: {{.*}} load float{{.*}} align 8
+; CHECK-NEXT: disallowed: bad alignment: store float{{.*}} align 2
+; CHECK-NEXT: disallowed: bad alignment: store float{{.*}} align 8
+
+ %ptr.double = inttoptr i32 %ptr to double*
+ load double* %ptr.double, align 2
+ load double* %ptr.double, align 4
+ store double %d, double* %ptr.double, align 2
+ store double %d, double* %ptr.double, align 4
+; CHECK-NEXT: disallowed: bad alignment: {{.*}} load double{{.*}} align 2
+; CHECK-NEXT: disallowed: bad alignment: {{.*}} load double{{.*}} align 4
+; CHECK-NEXT: disallowed: bad alignment: store double{{.*}} align 2
+; CHECK-NEXT: disallowed: bad alignment: store double{{.*}} align 4
+
+ ; Too-small alignments for atomics are rejected.
+ load atomic i32* %ptr.i32 seq_cst, align 2
+ load atomic float* %ptr.float seq_cst, align 2
+ load atomic double* %ptr.double seq_cst, align 4
+; CHECK-NEXT: disallowed: bad alignment: {{.*}} load atomic i32{{.*}} align 2
+; CHECK-NEXT: disallowed: bad alignment: {{.*}} load atomic float{{.*}} align 2
+; CHECK-NEXT: disallowed: bad alignment: {{.*}} load atomic double{{.*}} align 4
+
+ ; Too-large alignments for atomics are also rejected.
+ load atomic i32* %ptr.i32 seq_cst, align 8
+ load atomic float* %ptr.float seq_cst, align 8
+ load atomic double* %ptr.double seq_cst, align 16
+; CHECK-NEXT: disallowed: bad alignment: {{.*}} load atomic i32{{.*}} align 8
+; CHECK-NEXT: disallowed: bad alignment: {{.*}} load atomic float{{.*}} align 8
+; CHECK-NEXT: disallowed: bad alignment: {{.*}} load atomic double{{.*}} align 16
+
+ ret void
+}
diff --git a/test/NaCl/PNaClABI/abi-stripped-pointers.ll b/test/NaCl/PNaClABI/abi-stripped-pointers.ll
index c994b7d009..d590d4902d 100644
--- a/test/NaCl/PNaClABI/abi-stripped-pointers.ll
+++ b/test/NaCl/PNaClABI/abi-stripped-pointers.ll
@@ -38,18 +38,18 @@ define void @allowed_cases(i32 %arg) {
ptrtoint [1 x i8]* %alloc to i32
; These instructions may use a NormalizedPtr, which may be a global.
- load i32* @ptr
- store i32 123, i32* @ptr
+ load i32* @ptr, align 1
+ store i32 123, i32* @ptr, align 1
cmpxchg i32* @ptr, i32 1, i32 2 seq_cst
atomicrmw add i32* @ptr, i32 3 seq_cst
; A NormalizedPtr may be a bitcast.
%ptr_bitcast = bitcast [4 x i8]* @var to i32*
- load i32* %ptr_bitcast
+ load i32* %ptr_bitcast, align 1
; A NormalizedPtr may be an inttoptr.
%ptr_from_int = inttoptr i32 123 to i32*
- load i32* %ptr_from_int
+ load i32* %ptr_from_int, align 1
; Check direct and indirect function calls.
%func_as_int = ptrtoint void ()* @func to i32
@@ -85,15 +85,15 @@ entry:
%a = alloca i32
; CHECK-NEXT: non-i8-array alloca
- store i32 0, i32* null
+ store i32 0, i32* null, align 1
; CHECK-NEXT: bad pointer
- store i32 0, i32* undef
+ store i32 0, i32* undef, align 1
; CHECK-NEXT: bad pointer
%bc = bitcast i32* @ptr to i31*
; CHECK-NEXT: bad result type
- store i31 0, i31* %bc
+ store i31 0, i31* %bc, align 1
; CHECK-NEXT: bad pointer
; Only one level of bitcasts is allowed.
diff --git a/test/NaCl/PNaClABI/instructions.ll b/test/NaCl/PNaClABI/instructions.ll
index b65829d865..11b344e5a7 100644
--- a/test/NaCl/PNaClABI/instructions.ll
+++ b/test/NaCl/PNaClABI/instructions.ll
@@ -73,8 +73,8 @@ define void @memory() {
; Memory operations
%a1 = alloca [4 x i8]
%ptr = inttoptr i32 0 to i32*
- %a2 = load i32* %ptr
- store i32 undef, i32* %ptr
+ %a2 = load i32* %ptr, align 1
+ store i32 undef, i32* %ptr, align 1
fence acq_rel
%a3 = cmpxchg i32* %ptr, i32 undef, i32 undef acq_rel
%a4 = atomicrmw add i32* %ptr, i32 1 acquire
diff --git a/test/NaCl/PNaClABI/types-function.ll b/test/NaCl/PNaClABI/types-function.ll
index 1482c727f0..a3fb55b23c 100644
--- a/test/NaCl/PNaClABI/types-function.ll
+++ b/test/NaCl/PNaClABI/types-function.ll
@@ -22,7 +22,7 @@ define void @types() {
%h3 = fadd double 0.0, fpext (half 0.0 to double)
; CHECK: bad pointer: store
- store i32 0, i32* bitcast (i17* @a2 to i32*), align 4
+ store i32 0, i32* bitcast (i17* @a2 to i32*), align 1
; CHECK: bad function callee operand: call void @func(i15 1)
call void @func(i15 1)
diff --git a/test/Transforms/NaCl/strip-attributes.ll b/test/Transforms/NaCl/strip-attributes.ll
index 8ef83f5cf8..e8960d0f71 100644
--- a/test/Transforms/NaCl/strip-attributes.ll
+++ b/test/Transforms/NaCl/strip-attributes.ll
@@ -21,6 +21,7 @@ define protected void @protected_visibility() {
}
; CHECK: define void @protected_visibility() {
+
define void @call_attrs() {
call fastcc void @func_attrs(i32 inreg 10, i32 zeroext 20) noreturn nounwind readonly
ret void
@@ -28,6 +29,7 @@ define void @call_attrs() {
; CHECK: define void @call_attrs()
; CHECK: call void @func_attrs(i32 10, i32 20){{$}}
+
; We currently don't attempt to strip attributes from intrinsic
; declarations because the reader automatically inserts attributes
; based on built-in knowledge of intrinsics, so it is difficult to get
@@ -45,3 +47,56 @@ define void @arithmetic_attrs() {
; CHECK-NEXT: %add = add i32 1, 2
; CHECK-NEXT: %shl = shl i32 3, 4
; CHECK-NEXT: %lshr = lshr i32 2, 1
+
+
+; Implicit default alignments are changed to explicit alignments.
+define void @default_alignment_attrs(float %f, double %d) {
+ load i8* null
+ load i32* null
+ load float* null
+ load double* null
+
+ store i8 100, i8* null
+ store i32 100, i32* null
+ store float %f, float* null
+ store double %d, double* null
+ ret void
+}
+; CHECK: define void @default_alignment_attrs
+; CHECK-NEXT: load i8* null, align 1
+; CHECK-NEXT: load i32* null, align 1
+; CHECK-NEXT: load float* null, align 4
+; CHECK-NEXT: load double* null, align 8
+; CHECK-NEXT: store i8 100, i8* null, align 1
+; CHECK-NEXT: store i32 100, i32* null, align 1
+; CHECK-NEXT: store float %f, float* null, align 4
+; CHECK-NEXT: store double %d, double* null, align 8
+
+define void @reduce_alignment_assumptions() {
+ load i32* null, align 4
+ load float* null, align 2
+ load float* null, align 4
+ load float* null, align 8
+ load double* null, align 2
+ load double* null, align 8
+ load double* null, align 16
+
+ ; Higher alignment assumptions must be retained for atomics.
+ load atomic i32* null seq_cst, align 4
+ load atomic i32* null seq_cst, align 8
+ store atomic i32 100, i32* null seq_cst, align 4
+ store atomic i32 100, i32* null seq_cst, align 8
+ ret void
+}
+; CHECK: define void @reduce_alignment_assumptions
+; CHECK-NEXT: load i32* null, align 1
+; CHECK-NEXT: load float* null, align 1
+; CHECK-NEXT: load float* null, align 4
+; CHECK-NEXT: load float* null, align 4
+; CHECK-NEXT: load double* null, align 1
+; CHECK-NEXT: load double* null, align 8
+; CHECK-NEXT: load double* null, align 8
+; CHECK-NEXT: load atomic i32* null seq_cst, align 4
+; CHECK-NEXT: load atomic i32* null seq_cst, align 4
+; CHECK-NEXT: store atomic i32 100, i32* null seq_cst, align 4
+; CHECK-NEXT: store atomic i32 100, i32* null seq_cst, align 4
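
Taken together, the two halves of this change keep an invariant worth
spelling out: every alignment the strip pass emits is one the verifier
accepts, given input LLVM itself considers valid (the LangRef makes
under-aligned atomics undefined). The property check below is a
self-contained sketch that restates both rules rather than calling the
patch's code; all names are illustrative.

    #include <cassert>

    static unsigned normalizeAlignment(unsigned Align, unsigned Size,
                                       bool IsFloat, bool IsAtomic) {
      unsigned MaxAllowed = (IsFloat || IsAtomic) ? Size : 1;
      return (Align == 0 || Align >= MaxAllowed) ? MaxAllowed : 1;
    }

    static bool isAllowedAlignment(unsigned Align, unsigned Size,
                                   bool IsFloat, bool IsAtomic) {
      if (IsAtomic)
        return Align == Size;
      return Align == 1 || (IsFloat && Align == Size);
    }

    int main() {
      const struct { unsigned Size; bool IsFloat; } Types[] = {
        {1, false}, {2, false}, {4, false}, {8, false}, // i8, i16, i32, i64
        {4, true},  {8, true},                          // float, double
      };
      for (const auto &T : Types)
        for (int Atomic = 0; Atomic <= 1; ++Atomic)
          for (unsigned Align = 0; Align <= 16; ++Align) {
            // Skip inputs that are already invalid: atomic accesses
            // must be at least naturally aligned.
            if (Atomic && Align != 0 && Align < T.Size)
              continue;
            assert(isAllowedAlignment(
                normalizeAlignment(Align, T.Size, T.IsFloat, Atomic),
                T.Size, T.IsFloat, Atomic));
          }
      return 0;
    }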