author     Dan Gohman <gohman@apple.com>  2007-07-20 16:34:21 +0000
committer  Dan Gohman <gohman@apple.com>  2007-07-20 16:34:21 +0000
commit     9941f7426ddef3a9f9de4b82630deaf4756fd924 (patch)
tree       34a2ad0fbf9a37a65205c14b975719c89ab25d06
parent     9bc5dce98d5607b2661e7233378e294a6dec1b78 (diff)
Optimize alignment of loads and stores.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40102 91177308-0d34-0410-b5e6-96231b3b80d8
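
Instcombine now computes an alignment it can prove for the pointer operand of
each load and store, and writes that value back onto the instruction when it
beats the alignment currently recorded there. For example, using a global from
the new test (the "after" line is what the test's grep expects, not captured
tool output):

  @x = external global <2 x i64>, align 16

  ; before: only align 1 was recorded
  %tmp1 = load <2 x i64>* @x, align 1

  ; after -instcombine: the global's own alignment is provable
  %tmp1 = load <2 x i64>* @x, align 16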
-rw-r--r--  lib/Transforms/Scalar/InstructionCombining.cpp      10
-rw-r--r--  test/Transforms/InstCombine/loadstore-alignment.ll   66
2 files changed, 76 insertions, 0 deletions
diff --git a/lib/Transforms/Scalar/InstructionCombining.cpp b/lib/Transforms/Scalar/InstructionCombining.cpp
index 816a1c626a..33fd843d63 100644
--- a/lib/Transforms/Scalar/InstructionCombining.cpp
+++ b/lib/Transforms/Scalar/InstructionCombining.cpp
@@ -8790,6 +8790,11 @@ static bool isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom) {
 Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
   Value *Op = LI.getOperand(0);
 
+  // Attempt to improve the alignment.
+  unsigned KnownAlign = GetKnownAlignment(Op, TD);
+  if (KnownAlign > LI.getAlignment())
+    LI.setAlignment(KnownAlign);
+
   // load (cast X) --> cast (load X) iff safe
   if (isa<CastInst>(Op))
     if (Instruction *Res = InstCombineLoadCast(*this, LI))
@@ -8985,6 +8990,11 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
     }
   }
 
+  // Attempt to improve the alignment.
+  unsigned KnownAlign = GetKnownAlignment(Ptr, TD);
+  if (KnownAlign > SI.getAlignment())
+    SI.setAlignment(KnownAlign);
+
   // Do really simple DSE, to catch cases where there are several consecutive
   // stores to the same location, separated by a few arithmetic operations.  This
   // situation often occurs with bitfield accesses.
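
Both hunks apply the same pattern: GetKnownAlignment (an existing helper; this
patch only adds calls to it) returns the alignment that can be proven for the
pointer operand, and the instruction's recorded alignment is raised when the
proven value is larger. The comparison only ever increases the alignment:
recording less alignment than is really available is merely conservative,
while recording more would be a miscompile. The provable cases the new test
exercises are addresses derived from suitably aligned globals and allocas,
e.g.:

  ; @xx is 16-byte aligned and each <2 x i64> element occupies 16 bytes,
  ; so this address is 16-byte aligned for any %i and %j:
  %t = getelementptr [13 x <2 x i64>]* @xx, i32 %i, i32 %j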
diff --git a/test/Transforms/InstCombine/loadstore-alignment.ll b/test/Transforms/InstCombine/loadstore-alignment.ll
new file mode 100644
index 0000000000..fd8b0e2e17
--- /dev/null
+++ b/test/Transforms/InstCombine/loadstore-alignment.ll
@@ -0,0 +1,66 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | grep {, align 16} | wc -l | grep 12
+
+@x = external global <2 x i64>, align 16
+@xx = external global [13 x <2 x i64>], align 16
+
+define <2 x i64> @static_hem() {
+ %t = getelementptr <2 x i64>* @x, i32 7
+ %tmp1 = load <2 x i64>* %t, align 1
+ ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @hem(i32 %i) {
+ %t = getelementptr <2 x i64>* @x, i32 %i
+ %tmp1 = load <2 x i64>* %t, align 1
+ ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @hem_2d(i32 %i, i32 %j) {
+ %t = getelementptr [13 x <2 x i64>]* @xx, i32 %i, i32 %j
+ %tmp1 = load <2 x i64>* %t, align 1
+ ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @foo() {
+ %tmp1 = load <2 x i64>* @x, align 1
+ ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @bar() {
+ %t = alloca <2 x i64>
+ call void @kip(<2 x i64>* %t)
+ %tmp1 = load <2 x i64>* %t, align 1
+ ret <2 x i64> %tmp1
+}
+
+define void @static_hem_store(<2 x i64> %y) {
+ %t = getelementptr <2 x i64>* @x, i32 7
+ store <2 x i64> %y, <2 x i64>* %t, align 1
+ ret void
+}
+
+define void @hem_store(i32 %i, <2 x i64> %y) {
+ %t = getelementptr <2 x i64>* @x, i32 %i
+ store <2 x i64> %y, <2 x i64>* %t, align 1
+ ret void
+}
+
+define void @hem_2d_store(i32 %i, i32 %j, <2 x i64> %y) {
+ %t = getelementptr [13 x <2 x i64>]* @xx, i32 %i, i32 %j
+ store <2 x i64> %y, <2 x i64>* %t, align 1
+ ret void
+}
+
+define void @foo_store(<2 x i64> %y) {
+ store <2 x i64> %y, <2 x i64>* @x, align 1
+ ret void
+}
+
+define void @bar_store(<2 x i64> %y) {
+ %t = alloca <2 x i64>
+ call void @kip(<2 x i64>* %t)
+ store <2 x i64> %y, <2 x i64>* %t, align 1
+ ret void
+}
+
+declare void @kip(<2 x i64>* %t)
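
For reference, this is the shape of output the RUN line counts (a sketch of
the expected llvm-dis text for the first test, not captured tool output):

  define <2 x i64> @static_hem() {
    %t = getelementptr <2 x i64>* @x, i32 7
    %tmp1 = load <2 x i64>* %t, align 16
    ret <2 x i64> %tmp1
  }

The two global declarations keep their ", align 16" and each of the ten loads
and stores is rewritten from align 1 to align 16, giving the twelve matches
the grep pipeline requires.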