author     Alexander Kornienko <alexfh@google.com>    2013-04-03 14:07:16 +0000
committer  Alexander Kornienko <alexfh@google.com>    2013-04-03 14:07:16 +0000
commit     e133bc868944822bf8961f825d3aa63d6fa48fb7 (patch)
tree       ebbd4a8040181471467a9737d90d94dc6b58b316 /test/CodeGen/Hexagon
parent     647735c781c5b37061ee03d6e9e6c7dda92218e2 (diff)
parent     080e3c523e87ec68ca1ea5db4cd49816028dd8bd (diff)
Updating branches/google/stable to r178511 (stable)
git-svn-id: https://llvm.org/svn/llvm-project/llvm/branches/google/stable@178655 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen/Hexagon')
-rw-r--r--  test/CodeGen/Hexagon/ashift-left-right.ll  |   21
-rw-r--r--  test/CodeGen/Hexagon/gp-rel.ll             |   33
-rw-r--r--  test/CodeGen/Hexagon/hwloop-dbg.ll         |    1
-rw-r--r--  test/CodeGen/Hexagon/memops.ll             | 1369
-rw-r--r--  test/CodeGen/Hexagon/memops1.ll            |   33
-rw-r--r--  test/CodeGen/Hexagon/memops2.ll            |   32
-rw-r--r--  test/CodeGen/Hexagon/memops3.ll            |   31
-rw-r--r--  test/CodeGen/Hexagon/misaligned-access.ll  |   16
8 files changed, 1535 insertions(+), 1 deletion(-)
diff --git a/test/CodeGen/Hexagon/ashift-left-right.ll b/test/CodeGen/Hexagon/ashift-left-right.ll
new file mode 100644
index 0000000000..7c41bc7bbf
--- /dev/null
+++ b/test/CodeGen/Hexagon/ashift-left-right.ll
@@ -0,0 +1,21 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
+
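+; Assumed intent, sketched in C for illustration (not part of the original test):
+;   int foo(int a, int b) { return (16 << a) * (b << 16); }
+;   int bar(int a, int b) { return (16 >> a) * (b >> 16); }
+; A shift by the constant 16 should select the halfword forms (aslh/asrh), while
+; a variable shift amount should select lsl/lsr, as the CHECK lines spell out.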
+define i32 @foo(i32 %a, i32 %b) nounwind readnone {
+; CHECK: lsl
+; CHECK: aslh
+entry:
+ %shl1 = shl i32 16, %a
+ %shl2 = shl i32 %b, 16
+ %ret = mul i32 %shl1, %shl2
+ ret i32 %ret
+}
+
+define i32 @bar(i32 %a, i32 %b) nounwind readnone {
+; CHECK: asrh
+; CHECK: lsr
+entry:
+ %shl1 = ashr i32 16, %a
+ %shl2 = ashr i32 %b, 16
+ %ret = mul i32 %shl1, %shl2
+ ret i32 %ret
+}
diff --git a/test/CodeGen/Hexagon/gp-rel.ll b/test/CodeGen/Hexagon/gp-rel.ll
new file mode 100644
index 0000000000..561869e8ef
--- /dev/null
+++ b/test/CodeGen/Hexagon/gp-rel.ll
@@ -0,0 +1,33 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
+; Check that gp-relative instructions are being generated.
+
+@a = common global i32 0, align 4
+@b = common global i32 0, align 4
+@c = common global i32 0, align 4
+
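+; Assumed C-level equivalent of @foo, for illustration only:
+;   int foo(int p) {
+;     int t = a + b;
+;     if (a == b) c = t + a;
+;     return (t == c) ? c : b;
+;   }
+; With a, b and c in small data, the loads should use GP-relative addressing
+; (memw(#a), memw(#b)) and the conditional store should target ##c.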
+define i32 @foo(i32 %p) #0 {
+entry:
+; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(#a)
+; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(#b)
+; CHECK: if{{ *}}(p{{[0-3]}}) memw(##c){{ *}}={{ *}}r{{[0-9]+}}
+ %0 = load i32* @a, align 4
+ %1 = load i32* @b, align 4
+ %add = add nsw i32 %1, %0
+ %cmp = icmp eq i32 %0, %1
+ br i1 %cmp, label %if.then, label %entry.if.end_crit_edge
+
+entry.if.end_crit_edge:
+ %.pre = load i32* @c, align 4
+ br label %if.end
+
+if.then:
+ %add1 = add nsw i32 %add, %0
+ store i32 %add1, i32* @c, align 4
+ br label %if.end
+
+if.end:
+ %2 = phi i32 [ %.pre, %entry.if.end_crit_edge ], [ %add1, %if.then ]
+ %cmp2 = icmp eq i32 %add, %2
+ %sel1 = select i1 %cmp2, i32 %2, i32 %1
+ ret i32 %sel1
+}
diff --git a/test/CodeGen/Hexagon/hwloop-dbg.ll b/test/CodeGen/Hexagon/hwloop-dbg.ll
index eaffa0797a..c2e8153b7d 100644
--- a/test/CodeGen/Hexagon/hwloop-dbg.ll
+++ b/test/CodeGen/Hexagon/hwloop-dbg.ll
@@ -33,7 +33,6 @@ for.end: ; preds = %for.body
declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
-!llvm.dbg.cu = !{!0}
!0 = metadata !{i32 786449, i32 0, i32 12, metadata !"hwloop-dbg.c", metadata !"/usr2/kparzysz/s.hex/t", metadata !"QuIC LLVM Hexagon Clang version 6.1-pre-unknown, (git://git-hexagon-aus.quicinc.com/llvm/clang-mainline.git e9382867661454cdf44addb39430741578e9765c) (llvm/llvm-mainline.git 36412bb1fcf03ed426d4437b41198bae066675ac)", i1 true, i1 true, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !1} ; [ DW_TAG_compile_unit ] [/usr2/kparzysz/s.hex/t/hwloop-dbg.c] [DW_LANG_C99]
!1 = metadata !{metadata !2}
diff --git a/test/CodeGen/Hexagon/memops.ll b/test/CodeGen/Hexagon/memops.ll
new file mode 100644
index 0000000000..5498848d85
--- /dev/null
+++ b/test/CodeGen/Hexagon/memops.ll
@@ -0,0 +1,1369 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
+; Generate MemOps for V4 and above.
+
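+; Assumed C-level equivalents of the patterns below, for illustration only:
+;   void memop_unsigned_char_add5(unsigned char *p)                 { *p += 5;    }
+;   void memop_unsigned_char_add(unsigned char *p, unsigned char x) { *p += x;    }
+;   void memop_unsigned_char_clrbit(unsigned char *p)               { *p &= 0xdf; }
+;   void memop_unsigned_char_setbit(unsigned char *p)               { *p |= 0x80; }
+; Each read-modify-write should fold into a single memop instruction, e.g.
+; memb(Rs+#offset) += #5 or memb(Rs+#offset) = clrbit(#5), as the CHECK lines show.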
+define void @memop_unsigned_char_add5(i8* nocapture %p) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
+ %0 = load i8* %p, align 1, !tbaa !0
+ %conv = zext i8 %0 to i32
+ %add = add nsw i32 %conv, 5
+ %conv1 = trunc i32 %add to i8
+ store i8 %conv1, i8* %p, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_unsigned_char_add(i8* nocapture %p, i8 zeroext %x) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
+ %conv = zext i8 %x to i32
+ %0 = load i8* %p, align 1, !tbaa !0
+ %conv1 = zext i8 %0 to i32
+ %add = add nsw i32 %conv1, %conv
+ %conv2 = trunc i32 %add to i8
+ store i8 %conv2, i8* %p, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_unsigned_char_sub(i8* nocapture %p, i8 zeroext %x) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
+ %conv = zext i8 %x to i32
+ %0 = load i8* %p, align 1, !tbaa !0
+ %conv1 = zext i8 %0 to i32
+ %sub = sub nsw i32 %conv1, %conv
+ %conv2 = trunc i32 %sub to i8
+ store i8 %conv2, i8* %p, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_unsigned_char_or(i8* nocapture %p, i8 zeroext %x) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
+ %0 = load i8* %p, align 1, !tbaa !0
+ %or3 = or i8 %0, %x
+ store i8 %or3, i8* %p, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_unsigned_char_and(i8* nocapture %p, i8 zeroext %x) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
+ %0 = load i8* %p, align 1, !tbaa !0
+ %and3 = and i8 %0, %x
+ store i8 %and3, i8* %p, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_unsigned_char_clrbit(i8* nocapture %p) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+ %0 = load i8* %p, align 1, !tbaa !0
+ %conv = zext i8 %0 to i32
+ %and = and i32 %conv, 223
+ %conv1 = trunc i32 %and to i8
+ store i8 %conv1, i8* %p, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_unsigned_char_setbit(i8* nocapture %p) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+ %0 = load i8* %p, align 1, !tbaa !0
+ %conv = zext i8 %0 to i32
+ %or = or i32 %conv, 128
+ %conv1 = trunc i32 %or to i8
+ store i8 %conv1, i8* %p, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_unsigned_char_add5_index(i8* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
+ %add.ptr = getelementptr inbounds i8* %p, i32 %i
+ %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %conv = zext i8 %0 to i32
+ %add = add nsw i32 %conv, 5
+ %conv1 = trunc i32 %add to i8
+ store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_unsigned_char_add_index(i8* nocapture %p, i32 %i, i8 zeroext %x) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
+ %conv = zext i8 %x to i32
+ %add.ptr = getelementptr inbounds i8* %p, i32 %i
+ %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %conv1 = zext i8 %0 to i32
+ %add = add nsw i32 %conv1, %conv
+ %conv2 = trunc i32 %add to i8
+ store i8 %conv2, i8* %add.ptr, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_unsigned_char_sub_index(i8* nocapture %p, i32 %i, i8 zeroext %x) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
+ %conv = zext i8 %x to i32
+ %add.ptr = getelementptr inbounds i8* %p, i32 %i
+ %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %conv1 = zext i8 %0 to i32
+ %sub = sub nsw i32 %conv1, %conv
+ %conv2 = trunc i32 %sub to i8
+ store i8 %conv2, i8* %add.ptr, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_unsigned_char_or_index(i8* nocapture %p, i32 %i, i8 zeroext %x) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
+ %add.ptr = getelementptr inbounds i8* %p, i32 %i
+ %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %or3 = or i8 %0, %x
+ store i8 %or3, i8* %add.ptr, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_unsigned_char_and_index(i8* nocapture %p, i32 %i, i8 zeroext %x) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
+ %add.ptr = getelementptr inbounds i8* %p, i32 %i
+ %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %and3 = and i8 %0, %x
+ store i8 %and3, i8* %add.ptr, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_unsigned_char_clrbit_index(i8* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+ %add.ptr = getelementptr inbounds i8* %p, i32 %i
+ %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %conv = zext i8 %0 to i32
+ %and = and i32 %conv, 223
+ %conv1 = trunc i32 %and to i8
+ store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_unsigned_char_setbit_index(i8* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+ %add.ptr = getelementptr inbounds i8* %p, i32 %i
+ %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %conv = zext i8 %0 to i32
+ %or = or i32 %conv, 128
+ %conv1 = trunc i32 %or to i8
+ store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_unsigned_char_add5_index5(i8* nocapture %p) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}+={{ *}}#5
+ %add.ptr = getelementptr inbounds i8* %p, i32 5
+ %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %conv = zext i8 %0 to i32
+ %add = add nsw i32 %conv, 5
+ %conv1 = trunc i32 %add to i8
+ store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_unsigned_char_add_index5(i8* nocapture %p, i8 zeroext %x) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}+={{ *}}r{{[0-9]+}}
+ %conv = zext i8 %x to i32
+ %add.ptr = getelementptr inbounds i8* %p, i32 5
+ %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %conv1 = zext i8 %0 to i32
+ %add = add nsw i32 %conv1, %conv
+ %conv2 = trunc i32 %add to i8
+ store i8 %conv2, i8* %add.ptr, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_unsigned_char_sub_index5(i8* nocapture %p, i8 zeroext %x) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}-={{ *}}r{{[0-9]+}}
+ %conv = zext i8 %x to i32
+ %add.ptr = getelementptr inbounds i8* %p, i32 5
+ %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %conv1 = zext i8 %0 to i32
+ %sub = sub nsw i32 %conv1, %conv
+ %conv2 = trunc i32 %sub to i8
+ store i8 %conv2, i8* %add.ptr, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_unsigned_char_or_index5(i8* nocapture %p, i8 zeroext %x) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}|={{ *}}r{{[0-9]+}}
+ %add.ptr = getelementptr inbounds i8* %p, i32 5
+ %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %or3 = or i8 %0, %x
+ store i8 %or3, i8* %add.ptr, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_unsigned_char_and_index5(i8* nocapture %p, i8 zeroext %x) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}&={{ *}}r{{[0-9]+}}
+ %add.ptr = getelementptr inbounds i8* %p, i32 5
+ %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %and3 = and i8 %0, %x
+ store i8 %and3, i8* %add.ptr, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_unsigned_char_clrbit_index5(i8* nocapture %p) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+ %add.ptr = getelementptr inbounds i8* %p, i32 5
+ %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %conv = zext i8 %0 to i32
+ %and = and i32 %conv, 223
+ %conv1 = trunc i32 %and to i8
+ store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_unsigned_char_setbit_index5(i8* nocapture %p) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+ %add.ptr = getelementptr inbounds i8* %p, i32 5
+ %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %conv = zext i8 %0 to i32
+ %or = or i32 %conv, 128
+ %conv1 = trunc i32 %or to i8
+ store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_signed_char_add5(i8* nocapture %p) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
+ %0 = load i8* %p, align 1, !tbaa !0
+ %conv2 = zext i8 %0 to i32
+ %add = add nsw i32 %conv2, 5
+ %conv1 = trunc i32 %add to i8
+ store i8 %conv1, i8* %p, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_signed_char_add(i8* nocapture %p, i8 signext %x) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
+ %conv4 = zext i8 %x to i32
+ %0 = load i8* %p, align 1, !tbaa !0
+ %conv13 = zext i8 %0 to i32
+ %add = add nsw i32 %conv13, %conv4
+ %conv2 = trunc i32 %add to i8
+ store i8 %conv2, i8* %p, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_signed_char_sub(i8* nocapture %p, i8 signext %x) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
+ %conv4 = zext i8 %x to i32
+ %0 = load i8* %p, align 1, !tbaa !0
+ %conv13 = zext i8 %0 to i32
+ %sub = sub nsw i32 %conv13, %conv4
+ %conv2 = trunc i32 %sub to i8
+ store i8 %conv2, i8* %p, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_signed_char_or(i8* nocapture %p, i8 signext %x) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
+ %0 = load i8* %p, align 1, !tbaa !0
+ %or3 = or i8 %0, %x
+ store i8 %or3, i8* %p, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_signed_char_and(i8* nocapture %p, i8 signext %x) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
+ %0 = load i8* %p, align 1, !tbaa !0
+ %and3 = and i8 %0, %x
+ store i8 %and3, i8* %p, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_signed_char_clrbit(i8* nocapture %p) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+ %0 = load i8* %p, align 1, !tbaa !0
+ %conv2 = zext i8 %0 to i32
+ %and = and i32 %conv2, 223
+ %conv1 = trunc i32 %and to i8
+ store i8 %conv1, i8* %p, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_signed_char_setbit(i8* nocapture %p) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+ %0 = load i8* %p, align 1, !tbaa !0
+ %conv2 = zext i8 %0 to i32
+ %or = or i32 %conv2, 128
+ %conv1 = trunc i32 %or to i8
+ store i8 %conv1, i8* %p, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_signed_char_add5_index(i8* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
+ %add.ptr = getelementptr inbounds i8* %p, i32 %i
+ %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %conv2 = zext i8 %0 to i32
+ %add = add nsw i32 %conv2, 5
+ %conv1 = trunc i32 %add to i8
+ store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_signed_char_add_index(i8* nocapture %p, i32 %i, i8 signext %x) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
+ %conv4 = zext i8 %x to i32
+ %add.ptr = getelementptr inbounds i8* %p, i32 %i
+ %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %conv13 = zext i8 %0 to i32
+ %add = add nsw i32 %conv13, %conv4
+ %conv2 = trunc i32 %add to i8
+ store i8 %conv2, i8* %add.ptr, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_signed_char_sub_index(i8* nocapture %p, i32 %i, i8 signext %x) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
+ %conv4 = zext i8 %x to i32
+ %add.ptr = getelementptr inbounds i8* %p, i32 %i
+ %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %conv13 = zext i8 %0 to i32
+ %sub = sub nsw i32 %conv13, %conv4
+ %conv2 = trunc i32 %sub to i8
+ store i8 %conv2, i8* %add.ptr, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_signed_char_or_index(i8* nocapture %p, i32 %i, i8 signext %x) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
+ %add.ptr = getelementptr inbounds i8* %p, i32 %i
+ %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %or3 = or i8 %0, %x
+ store i8 %or3, i8* %add.ptr, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_signed_char_and_index(i8* nocapture %p, i32 %i, i8 signext %x) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
+ %add.ptr = getelementptr inbounds i8* %p, i32 %i
+ %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %and3 = and i8 %0, %x
+ store i8 %and3, i8* %add.ptr, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_signed_char_clrbit_index(i8* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+ %add.ptr = getelementptr inbounds i8* %p, i32 %i
+ %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %conv2 = zext i8 %0 to i32
+ %and = and i32 %conv2, 223
+ %conv1 = trunc i32 %and to i8
+ store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_signed_char_setbit_index(i8* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+ %add.ptr = getelementptr inbounds i8* %p, i32 %i
+ %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %conv2 = zext i8 %0 to i32
+ %or = or i32 %conv2, 128
+ %conv1 = trunc i32 %or to i8
+ store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_signed_char_add5_index5(i8* nocapture %p) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}+={{ *}}#5
+ %add.ptr = getelementptr inbounds i8* %p, i32 5
+ %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %conv2 = zext i8 %0 to i32
+ %add = add nsw i32 %conv2, 5
+ %conv1 = trunc i32 %add to i8
+ store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_signed_char_add_index5(i8* nocapture %p, i8 signext %x) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}+={{ *}}r{{[0-9]+}}
+ %conv4 = zext i8 %x to i32
+ %add.ptr = getelementptr inbounds i8* %p, i32 5
+ %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %conv13 = zext i8 %0 to i32
+ %add = add nsw i32 %conv13, %conv4
+ %conv2 = trunc i32 %add to i8
+ store i8 %conv2, i8* %add.ptr, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_signed_char_sub_index5(i8* nocapture %p, i8 signext %x) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}-={{ *}}r{{[0-9]+}}
+ %conv4 = zext i8 %x to i32
+ %add.ptr = getelementptr inbounds i8* %p, i32 5
+ %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %conv13 = zext i8 %0 to i32
+ %sub = sub nsw i32 %conv13, %conv4
+ %conv2 = trunc i32 %sub to i8
+ store i8 %conv2, i8* %add.ptr, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_signed_char_or_index5(i8* nocapture %p, i8 signext %x) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}|={{ *}}r{{[0-9]+}}
+ %add.ptr = getelementptr inbounds i8* %p, i32 5
+ %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %or3 = or i8 %0, %x
+ store i8 %or3, i8* %add.ptr, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_signed_char_and_index5(i8* nocapture %p, i8 signext %x) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}&={{ *}}r{{[0-9]+}}
+ %add.ptr = getelementptr inbounds i8* %p, i32 5
+ %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %and3 = and i8 %0, %x
+ store i8 %and3, i8* %add.ptr, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_signed_char_clrbit_index5(i8* nocapture %p) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+ %add.ptr = getelementptr inbounds i8* %p, i32 5
+ %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %conv2 = zext i8 %0 to i32
+ %and = and i32 %conv2, 223
+ %conv1 = trunc i32 %and to i8
+ store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_signed_char_setbit_index5(i8* nocapture %p) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+ %add.ptr = getelementptr inbounds i8* %p, i32 5
+ %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %conv2 = zext i8 %0 to i32
+ %or = or i32 %conv2, 128
+ %conv1 = trunc i32 %or to i8
+ store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+ ret void
+}
+
+define void @memop_unsigned_short_add5(i16* nocapture %p) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
+ %0 = load i16* %p, align 2, !tbaa !2
+ %conv = zext i16 %0 to i32
+ %add = add nsw i32 %conv, 5
+ %conv1 = trunc i32 %add to i16
+ store i16 %conv1, i16* %p, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_unsigned_short_add(i16* nocapture %p, i16 zeroext %x) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
+ %conv = zext i16 %x to i32
+ %0 = load i16* %p, align 2, !tbaa !2
+ %conv1 = zext i16 %0 to i32
+ %add = add nsw i32 %conv1, %conv
+ %conv2 = trunc i32 %add to i16
+ store i16 %conv2, i16* %p, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_unsigned_short_sub(i16* nocapture %p, i16 zeroext %x) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
+ %conv = zext i16 %x to i32
+ %0 = load i16* %p, align 2, !tbaa !2
+ %conv1 = zext i16 %0 to i32
+ %sub = sub nsw i32 %conv1, %conv
+ %conv2 = trunc i32 %sub to i16
+ store i16 %conv2, i16* %p, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_unsigned_short_or(i16* nocapture %p, i16 zeroext %x) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
+ %0 = load i16* %p, align 2, !tbaa !2
+ %or3 = or i16 %0, %x
+ store i16 %or3, i16* %p, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_unsigned_short_and(i16* nocapture %p, i16 zeroext %x) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
+ %0 = load i16* %p, align 2, !tbaa !2
+ %and3 = and i16 %0, %x
+ store i16 %and3, i16* %p, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_unsigned_short_clrbit(i16* nocapture %p) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+ %0 = load i16* %p, align 2, !tbaa !2
+ %conv = zext i16 %0 to i32
+ %and = and i32 %conv, 65503
+ %conv1 = trunc i32 %and to i16
+ store i16 %conv1, i16* %p, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_unsigned_short_setbit(i16* nocapture %p) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+ %0 = load i16* %p, align 2, !tbaa !2
+ %conv = zext i16 %0 to i32
+ %or = or i32 %conv, 128
+ %conv1 = trunc i32 %or to i16
+ store i16 %conv1, i16* %p, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_unsigned_short_add5_index(i16* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
+ %add.ptr = getelementptr inbounds i16* %p, i32 %i
+ %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %conv = zext i16 %0 to i32
+ %add = add nsw i32 %conv, 5
+ %conv1 = trunc i32 %add to i16
+ store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_unsigned_short_add_index(i16* nocapture %p, i32 %i, i16 zeroext %x) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
+ %conv = zext i16 %x to i32
+ %add.ptr = getelementptr inbounds i16* %p, i32 %i
+ %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %conv1 = zext i16 %0 to i32
+ %add = add nsw i32 %conv1, %conv
+ %conv2 = trunc i32 %add to i16
+ store i16 %conv2, i16* %add.ptr, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_unsigned_short_sub_index(i16* nocapture %p, i32 %i, i16 zeroext %x) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
+ %conv = zext i16 %x to i32
+ %add.ptr = getelementptr inbounds i16* %p, i32 %i
+ %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %conv1 = zext i16 %0 to i32
+ %sub = sub nsw i32 %conv1, %conv
+ %conv2 = trunc i32 %sub to i16
+ store i16 %conv2, i16* %add.ptr, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_unsigned_short_or_index(i16* nocapture %p, i32 %i, i16 zeroext %x) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
+ %add.ptr = getelementptr inbounds i16* %p, i32 %i
+ %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %or3 = or i16 %0, %x
+ store i16 %or3, i16* %add.ptr, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_unsigned_short_and_index(i16* nocapture %p, i32 %i, i16 zeroext %x) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
+ %add.ptr = getelementptr inbounds i16* %p, i32 %i
+ %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %and3 = and i16 %0, %x
+ store i16 %and3, i16* %add.ptr, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_unsigned_short_clrbit_index(i16* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+ %add.ptr = getelementptr inbounds i16* %p, i32 %i
+ %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %conv = zext i16 %0 to i32
+ %and = and i32 %conv, 65503
+ %conv1 = trunc i32 %and to i16
+ store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_unsigned_short_setbit_index(i16* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+ %add.ptr = getelementptr inbounds i16* %p, i32 %i
+ %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %conv = zext i16 %0 to i32
+ %or = or i32 %conv, 128
+ %conv1 = trunc i32 %or to i16
+ store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_unsigned_short_add5_index5(i16* nocapture %p) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}+={{ *}}#5
+ %add.ptr = getelementptr inbounds i16* %p, i32 5
+ %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %conv = zext i16 %0 to i32
+ %add = add nsw i32 %conv, 5
+ %conv1 = trunc i32 %add to i16
+ store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_unsigned_short_add_index5(i16* nocapture %p, i16 zeroext %x) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}+={{ *}}r{{[0-9]+}}
+ %conv = zext i16 %x to i32
+ %add.ptr = getelementptr inbounds i16* %p, i32 5
+ %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %conv1 = zext i16 %0 to i32
+ %add = add nsw i32 %conv1, %conv
+ %conv2 = trunc i32 %add to i16
+ store i16 %conv2, i16* %add.ptr, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_unsigned_short_sub_index5(i16* nocapture %p, i16 zeroext %x) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}-={{ *}}r{{[0-9]+}}
+ %conv = zext i16 %x to i32
+ %add.ptr = getelementptr inbounds i16* %p, i32 5
+ %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %conv1 = zext i16 %0 to i32
+ %sub = sub nsw i32 %conv1, %conv
+ %conv2 = trunc i32 %sub to i16
+ store i16 %conv2, i16* %add.ptr, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_unsigned_short_or_index5(i16* nocapture %p, i16 zeroext %x) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}|={{ *}}r{{[0-9]+}}
+ %add.ptr = getelementptr inbounds i16* %p, i32 5
+ %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %or3 = or i16 %0, %x
+ store i16 %or3, i16* %add.ptr, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_unsigned_short_and_index5(i16* nocapture %p, i16 zeroext %x) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}&={{ *}}r{{[0-9]+}}
+ %add.ptr = getelementptr inbounds i16* %p, i32 5
+ %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %and3 = and i16 %0, %x
+ store i16 %and3, i16* %add.ptr, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_unsigned_short_clrbit_index5(i16* nocapture %p) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+ %add.ptr = getelementptr inbounds i16* %p, i32 5
+ %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %conv = zext i16 %0 to i32
+ %and = and i32 %conv, 65503
+ %conv1 = trunc i32 %and to i16
+ store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_unsigned_short_setbit_index5(i16* nocapture %p) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+ %add.ptr = getelementptr inbounds i16* %p, i32 5
+ %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %conv = zext i16 %0 to i32
+ %or = or i32 %conv, 128
+ %conv1 = trunc i32 %or to i16
+ store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_signed_short_add5(i16* nocapture %p) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
+ %0 = load i16* %p, align 2, !tbaa !2
+ %conv2 = zext i16 %0 to i32
+ %add = add nsw i32 %conv2, 5
+ %conv1 = trunc i32 %add to i16
+ store i16 %conv1, i16* %p, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_signed_short_add(i16* nocapture %p, i16 signext %x) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
+ %conv4 = zext i16 %x to i32
+ %0 = load i16* %p, align 2, !tbaa !2
+ %conv13 = zext i16 %0 to i32
+ %add = add nsw i32 %conv13, %conv4
+ %conv2 = trunc i32 %add to i16
+ store i16 %conv2, i16* %p, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_signed_short_sub(i16* nocapture %p, i16 signext %x) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
+ %conv4 = zext i16 %x to i32
+ %0 = load i16* %p, align 2, !tbaa !2
+ %conv13 = zext i16 %0 to i32
+ %sub = sub nsw i32 %conv13, %conv4
+ %conv2 = trunc i32 %sub to i16
+ store i16 %conv2, i16* %p, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_signed_short_or(i16* nocapture %p, i16 signext %x) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
+ %0 = load i16* %p, align 2, !tbaa !2
+ %or3 = or i16 %0, %x
+ store i16 %or3, i16* %p, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_signed_short_and(i16* nocapture %p, i16 signext %x) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
+ %0 = load i16* %p, align 2, !tbaa !2
+ %and3 = and i16 %0, %x
+ store i16 %and3, i16* %p, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_signed_short_clrbit(i16* nocapture %p) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+ %0 = load i16* %p, align 2, !tbaa !2
+ %conv2 = zext i16 %0 to i32
+ %and = and i32 %conv2, 65503
+ %conv1 = trunc i32 %and to i16
+ store i16 %conv1, i16* %p, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_signed_short_setbit(i16* nocapture %p) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+ %0 = load i16* %p, align 2, !tbaa !2
+ %conv2 = zext i16 %0 to i32
+ %or = or i32 %conv2, 128
+ %conv1 = trunc i32 %or to i16
+ store i16 %conv1, i16* %p, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_signed_short_add5_index(i16* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
+ %add.ptr = getelementptr inbounds i16* %p, i32 %i
+ %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %conv2 = zext i16 %0 to i32
+ %add = add nsw i32 %conv2, 5
+ %conv1 = trunc i32 %add to i16
+ store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_signed_short_add_index(i16* nocapture %p, i32 %i, i16 signext %x) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
+ %conv4 = zext i16 %x to i32
+ %add.ptr = getelementptr inbounds i16* %p, i32 %i
+ %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %conv13 = zext i16 %0 to i32
+ %add = add nsw i32 %conv13, %conv4
+ %conv2 = trunc i32 %add to i16
+ store i16 %conv2, i16* %add.ptr, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_signed_short_sub_index(i16* nocapture %p, i32 %i, i16 signext %x) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
+ %conv4 = zext i16 %x to i32
+ %add.ptr = getelementptr inbounds i16* %p, i32 %i
+ %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %conv13 = zext i16 %0 to i32
+ %sub = sub nsw i32 %conv13, %conv4
+ %conv2 = trunc i32 %sub to i16
+ store i16 %conv2, i16* %add.ptr, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_signed_short_or_index(i16* nocapture %p, i32 %i, i16 signext %x) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
+ %add.ptr = getelementptr inbounds i16* %p, i32 %i
+ %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %or3 = or i16 %0, %x
+ store i16 %or3, i16* %add.ptr, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_signed_short_and_index(i16* nocapture %p, i32 %i, i16 signext %x) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
+ %add.ptr = getelementptr inbounds i16* %p, i32 %i
+ %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %and3 = and i16 %0, %x
+ store i16 %and3, i16* %add.ptr, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_signed_short_clrbit_index(i16* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+ %add.ptr = getelementptr inbounds i16* %p, i32 %i
+ %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %conv2 = zext i16 %0 to i32
+ %and = and i32 %conv2, 65503
+ %conv1 = trunc i32 %and to i16
+ store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_signed_short_setbit_index(i16* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+ %add.ptr = getelementptr inbounds i16* %p, i32 %i
+ %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %conv2 = zext i16 %0 to i32
+ %or = or i32 %conv2, 128
+ %conv1 = trunc i32 %or to i16
+ store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_signed_short_add5_index5(i16* nocapture %p) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}+={{ *}}#5
+ %add.ptr = getelementptr inbounds i16* %p, i32 5
+ %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %conv2 = zext i16 %0 to i32
+ %add = add nsw i32 %conv2, 5
+ %conv1 = trunc i32 %add to i16
+ store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_signed_short_add_index5(i16* nocapture %p, i16 signext %x) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}+={{ *}}r{{[0-9]+}}
+ %conv4 = zext i16 %x to i32
+ %add.ptr = getelementptr inbounds i16* %p, i32 5
+ %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %conv13 = zext i16 %0 to i32
+ %add = add nsw i32 %conv13, %conv4
+ %conv2 = trunc i32 %add to i16
+ store i16 %conv2, i16* %add.ptr, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_signed_short_sub_index5(i16* nocapture %p, i16 signext %x) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}-={{ *}}r{{[0-9]+}}
+ %conv4 = zext i16 %x to i32
+ %add.ptr = getelementptr inbounds i16* %p, i32 5
+ %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %conv13 = zext i16 %0 to i32
+ %sub = sub nsw i32 %conv13, %conv4
+ %conv2 = trunc i32 %sub to i16
+ store i16 %conv2, i16* %add.ptr, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_signed_short_or_index5(i16* nocapture %p, i16 signext %x) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}|={{ *}}r{{[0-9]+}}
+ %add.ptr = getelementptr inbounds i16* %p, i32 5
+ %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %or3 = or i16 %0, %x
+ store i16 %or3, i16* %add.ptr, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_signed_short_and_index5(i16* nocapture %p, i16 signext %x) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}&={{ *}}r{{[0-9]+}}
+ %add.ptr = getelementptr inbounds i16* %p, i32 5
+ %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %and3 = and i16 %0, %x
+ store i16 %and3, i16* %add.ptr, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_signed_short_clrbit_index5(i16* nocapture %p) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+ %add.ptr = getelementptr inbounds i16* %p, i32 5
+ %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %conv2 = zext i16 %0 to i32
+ %and = and i32 %conv2, 65503
+ %conv1 = trunc i32 %and to i16
+ store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_signed_short_setbit_index5(i16* nocapture %p) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+ %add.ptr = getelementptr inbounds i16* %p, i32 5
+ %0 = load i16* %add.ptr, align 2, !tbaa !2
+ %conv2 = zext i16 %0 to i32
+ %or = or i32 %conv2, 128
+ %conv1 = trunc i32 %or to i16
+ store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+ ret void
+}
+
+define void @memop_signed_int_add5(i32* nocapture %p) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
+ %0 = load i32* %p, align 4, !tbaa !3
+ %add = add i32 %0, 5
+ store i32 %add, i32* %p, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_signed_int_add(i32* nocapture %p, i32 %x) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
+ %0 = load i32* %p, align 4, !tbaa !3
+ %add = add i32 %0, %x
+ store i32 %add, i32* %p, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_signed_int_sub(i32* nocapture %p, i32 %x) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
+ %0 = load i32* %p, align 4, !tbaa !3
+ %sub = sub i32 %0, %x
+ store i32 %sub, i32* %p, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_signed_int_or(i32* nocapture %p, i32 %x) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
+ %0 = load i32* %p, align 4, !tbaa !3
+ %or = or i32 %0, %x
+ store i32 %or, i32* %p, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_signed_int_and(i32* nocapture %p, i32 %x) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
+ %0 = load i32* %p, align 4, !tbaa !3
+ %and = and i32 %0, %x
+ store i32 %and, i32* %p, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_signed_int_clrbit(i32* nocapture %p) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+ %0 = load i32* %p, align 4, !tbaa !3
+ %and = and i32 %0, -33
+ store i32 %and, i32* %p, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_signed_int_setbit(i32* nocapture %p) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+ %0 = load i32* %p, align 4, !tbaa !3
+ %or = or i32 %0, 128
+ store i32 %or, i32* %p, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_signed_int_add5_index(i32* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
+ %add.ptr = getelementptr inbounds i32* %p, i32 %i
+ %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %add = add i32 %0, 5
+ store i32 %add, i32* %add.ptr, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_signed_int_add_index(i32* nocapture %p, i32 %i, i32 %x) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
+ %add.ptr = getelementptr inbounds i32* %p, i32 %i
+ %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %add = add i32 %0, %x
+ store i32 %add, i32* %add.ptr, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_signed_int_sub_index(i32* nocapture %p, i32 %i, i32 %x) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
+ %add.ptr = getelementptr inbounds i32* %p, i32 %i
+ %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %sub = sub i32 %0, %x
+ store i32 %sub, i32* %add.ptr, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_signed_int_or_index(i32* nocapture %p, i32 %i, i32 %x) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
+ %add.ptr = getelementptr inbounds i32* %p, i32 %i
+ %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %or = or i32 %0, %x
+ store i32 %or, i32* %add.ptr, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_signed_int_and_index(i32* nocapture %p, i32 %i, i32 %x) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
+ %add.ptr = getelementptr inbounds i32* %p, i32 %i
+ %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %and = and i32 %0, %x
+ store i32 %and, i32* %add.ptr, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_signed_int_clrbit_index(i32* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+ %add.ptr = getelementptr inbounds i32* %p, i32 %i
+ %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %and = and i32 %0, -33
+ store i32 %and, i32* %add.ptr, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_signed_int_setbit_index(i32* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+ %add.ptr = getelementptr inbounds i32* %p, i32 %i
+ %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %or = or i32 %0, 128
+ store i32 %or, i32* %add.ptr, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_signed_int_add5_index5(i32* nocapture %p) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}+={{ *}}#5
+ %add.ptr = getelementptr inbounds i32* %p, i32 5
+ %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %add = add i32 %0, 5
+ store i32 %add, i32* %add.ptr, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_signed_int_add_index5(i32* nocapture %p, i32 %x) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}+={{ *}}r{{[0-9]+}}
+ %add.ptr = getelementptr inbounds i32* %p, i32 5
+ %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %add = add i32 %0, %x
+ store i32 %add, i32* %add.ptr, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_signed_int_sub_index5(i32* nocapture %p, i32 %x) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}-={{ *}}r{{[0-9]+}}
+ %add.ptr = getelementptr inbounds i32* %p, i32 5
+ %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %sub = sub i32 %0, %x
+ store i32 %sub, i32* %add.ptr, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_signed_int_or_index5(i32* nocapture %p, i32 %x) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}|={{ *}}r{{[0-9]+}}
+ %add.ptr = getelementptr inbounds i32* %p, i32 5
+ %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %or = or i32 %0, %x
+ store i32 %or, i32* %add.ptr, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_signed_int_and_index5(i32* nocapture %p, i32 %x) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}&={{ *}}r{{[0-9]+}}
+ %add.ptr = getelementptr inbounds i32* %p, i32 5
+ %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %and = and i32 %0, %x
+ store i32 %and, i32* %add.ptr, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_signed_int_clrbit_index5(i32* nocapture %p) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+ %add.ptr = getelementptr inbounds i32* %p, i32 5
+ %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %and = and i32 %0, -33
+ store i32 %and, i32* %add.ptr, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_signed_int_setbit_index5(i32* nocapture %p) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+ %add.ptr = getelementptr inbounds i32* %p, i32 5
+ %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %or = or i32 %0, 128
+ store i32 %or, i32* %add.ptr, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_unsigned_int_add5(i32* nocapture %p) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
+ %0 = load i32* %p, align 4, !tbaa !3
+ %add = add nsw i32 %0, 5
+ store i32 %add, i32* %p, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_unsigned_int_add(i32* nocapture %p, i32 %x) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
+ %0 = load i32* %p, align 4, !tbaa !3
+ %add = add nsw i32 %0, %x
+ store i32 %add, i32* %p, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_unsigned_int_sub(i32* nocapture %p, i32 %x) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
+ %0 = load i32* %p, align 4, !tbaa !3
+ %sub = sub nsw i32 %0, %x
+ store i32 %sub, i32* %p, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_unsigned_int_or(i32* nocapture %p, i32 %x) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
+ %0 = load i32* %p, align 4, !tbaa !3
+ %or = or i32 %0, %x
+ store i32 %or, i32* %p, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_unsigned_int_and(i32* nocapture %p, i32 %x) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
+ %0 = load i32* %p, align 4, !tbaa !3
+ %and = and i32 %0, %x
+ store i32 %and, i32* %p, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_unsigned_int_clrbit(i32* nocapture %p) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+ %0 = load i32* %p, align 4, !tbaa !3
+ %and = and i32 %0, -33
+ store i32 %and, i32* %p, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_unsigned_int_setbit(i32* nocapture %p) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+ %0 = load i32* %p, align 4, !tbaa !3
+ %or = or i32 %0, 128
+ store i32 %or, i32* %p, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_unsigned_int_add5_index(i32* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
+ %add.ptr = getelementptr inbounds i32* %p, i32 %i
+ %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %add = add nsw i32 %0, 5
+ store i32 %add, i32* %add.ptr, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_unsigned_int_add_index(i32* nocapture %p, i32 %i, i32 %x) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
+ %add.ptr = getelementptr inbounds i32* %p, i32 %i
+ %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %add = add nsw i32 %0, %x
+ store i32 %add, i32* %add.ptr, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_unsigned_int_sub_index(i32* nocapture %p, i32 %i, i32 %x) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
+ %add.ptr = getelementptr inbounds i32* %p, i32 %i
+ %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %sub = sub nsw i32 %0, %x
+ store i32 %sub, i32* %add.ptr, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_unsigned_int_or_index(i32* nocapture %p, i32 %i, i32 %x) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
+ %add.ptr = getelementptr inbounds i32* %p, i32 %i
+ %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %or = or i32 %0, %x
+ store i32 %or, i32* %add.ptr, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_unsigned_int_and_index(i32* nocapture %p, i32 %i, i32 %x) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
+ %add.ptr = getelementptr inbounds i32* %p, i32 %i
+ %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %and = and i32 %0, %x
+ store i32 %and, i32* %add.ptr, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_unsigned_int_clrbit_index(i32* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+ %add.ptr = getelementptr inbounds i32* %p, i32 %i
+ %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %and = and i32 %0, -33
+ store i32 %and, i32* %add.ptr, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_unsigned_int_setbit_index(i32* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+ %add.ptr = getelementptr inbounds i32* %p, i32 %i
+ %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %or = or i32 %0, 128
+ store i32 %or, i32* %add.ptr, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_unsigned_int_add5_index5(i32* nocapture %p) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}+={{ *}}#5
+ %add.ptr = getelementptr inbounds i32* %p, i32 5
+ %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %add = add nsw i32 %0, 5
+ store i32 %add, i32* %add.ptr, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_unsigned_int_add_index5(i32* nocapture %p, i32 %x) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}+={{ *}}r{{[0-9]+}}
+ %add.ptr = getelementptr inbounds i32* %p, i32 5
+ %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %add = add nsw i32 %0, %x
+ store i32 %add, i32* %add.ptr, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_unsigned_int_sub_index5(i32* nocapture %p, i32 %x) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}-={{ *}}r{{[0-9]+}}
+ %add.ptr = getelementptr inbounds i32* %p, i32 5
+ %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %sub = sub nsw i32 %0, %x
+ store i32 %sub, i32* %add.ptr, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_unsigned_int_or_index5(i32* nocapture %p, i32 %x) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}|={{ *}}r{{[0-9]+}}
+ %add.ptr = getelementptr inbounds i32* %p, i32 5
+ %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %or = or i32 %0, %x
+ store i32 %or, i32* %add.ptr, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_unsigned_int_and_index5(i32* nocapture %p, i32 %x) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}&={{ *}}r{{[0-9]+}}
+ %add.ptr = getelementptr inbounds i32* %p, i32 5
+ %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %and = and i32 %0, %x
+ store i32 %and, i32* %add.ptr, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_unsigned_int_clrbit_index5(i32* nocapture %p) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+ %add.ptr = getelementptr inbounds i32* %p, i32 5
+ %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %and = and i32 %0, -33
+ store i32 %and, i32* %add.ptr, align 4, !tbaa !3
+ ret void
+}
+
+define void @memop_unsigned_int_setbit_index5(i32* nocapture %p) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+ %add.ptr = getelementptr inbounds i32* %p, i32 5
+ %0 = load i32* %add.ptr, align 4, !tbaa !3
+ %or = or i32 %0, 128
+ store i32 %or, i32* %add.ptr, align 4, !tbaa !3
+ ret void
+}
+
+!0 = metadata !{metadata !"omnipotent char", metadata !1}
+!1 = metadata !{metadata !"Simple C/C++ TBAA"}
+!2 = metadata !{metadata !"short", metadata !0}
+!3 = metadata !{metadata !"int", metadata !0}
diff --git a/test/CodeGen/Hexagon/memops1.ll b/test/CodeGen/Hexagon/memops1.ll
new file mode 100644
index 0000000000..2babdc848d
--- /dev/null
+++ b/test/CodeGen/Hexagon/memops1.ll
@@ -0,0 +1,33 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
+; Generate MemOps for V4 and above.
+
+
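+; Assumed C-level equivalent, for illustration only: both functions decrement an
+; element at offset 10, e.g.
+;   void f(int *p)        { p[10]--;     }
+;   void g(int *p, int i) { p[i + 10]--; }
+; which should fold to a single memw(Rs+#40) -= #1 memop.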
+define void @f(i32* %p) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#40){{ *}}-={{ *}}#1
+ %p.addr = alloca i32*, align 4
+ store i32* %p, i32** %p.addr, align 4
+ %0 = load i32** %p.addr, align 4
+ %add.ptr = getelementptr inbounds i32* %0, i32 10
+ %1 = load i32* %add.ptr, align 4
+ %sub = sub nsw i32 %1, 1
+ store i32 %sub, i32* %add.ptr, align 4
+ ret void
+}
+
+define void @g(i32* %p, i32 %i) nounwind {
+entry:
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#40){{ *}}-={{ *}}#1
+ %p.addr = alloca i32*, align 4
+ %i.addr = alloca i32, align 4
+ store i32* %p, i32** %p.addr, align 4
+ store i32 %i, i32* %i.addr, align 4
+ %0 = load i32** %p.addr, align 4
+ %1 = load i32* %i.addr, align 4
+ %add.ptr = getelementptr inbounds i32* %0, i32 %1
+ %add.ptr1 = getelementptr inbounds i32* %add.ptr, i32 10
+ %2 = load i32* %add.ptr1, align 4
+ %sub = sub nsw i32 %2, 1
+ store i32 %sub, i32* %add.ptr1, align 4
+ ret void
+}
diff --git a/test/CodeGen/Hexagon/memops2.ll b/test/CodeGen/Hexagon/memops2.ll
new file mode 100644
index 0000000000..b1b25445c0
--- /dev/null
+++ b/test/CodeGen/Hexagon/memops2.ll
@@ -0,0 +1,32 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
+; Generate MemOps for V4 and above.
+
+
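+; Assumed C-level equivalent, for illustration only:
+;   void f(unsigned short *p)        { p[10]--;     }
+;   void g(unsigned short *p, int i) { p[i + 10]--; }
+; The decrement appears as an add of 65535 after zero-extension, and should still
+; fold to memh(Rs+#20) -= #1.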
+define void @f(i16* nocapture %p) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}-={{ *}}#1
+ %add.ptr = getelementptr inbounds i16* %p, i32 10
+ %0 = load i16* %add.ptr, align 2, !tbaa !0
+ %conv2 = zext i16 %0 to i32
+ %sub = add nsw i32 %conv2, 65535
+ %conv1 = trunc i32 %sub to i16
+ store i16 %conv1, i16* %add.ptr, align 2, !tbaa !0
+ ret void
+}
+
+define void @g(i16* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}-={{ *}}#1
+ %add.ptr.sum = add i32 %i, 10
+ %add.ptr1 = getelementptr inbounds i16* %p, i32 %add.ptr.sum
+ %0 = load i16* %add.ptr1, align 2, !tbaa !0
+ %conv3 = zext i16 %0 to i32
+ %sub = add nsw i32 %conv3, 65535
+ %conv2 = trunc i32 %sub to i16
+ store i16 %conv2, i16* %add.ptr1, align 2, !tbaa !0
+ ret void
+}
+
+!0 = metadata !{metadata !"short", metadata !1}
+!1 = metadata !{metadata !"omnipotent char", metadata !2}
+!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/Hexagon/memops3.ll b/test/CodeGen/Hexagon/memops3.ll
new file mode 100644
index 0000000000..5b8bd6c87b
--- /dev/null
+++ b/test/CodeGen/Hexagon/memops3.ll
@@ -0,0 +1,31 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
+; Generate MemOps for V4 and above.
+
+
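+; Assumed C-level equivalent, for illustration only:
+;   void f(unsigned char *p)        { p[10]--;     }
+;   void g(unsigned char *p, int i) { p[i + 10]--; }
+; The decrement appears as an add of 255 after zero-extension, and should still
+; fold to memb(Rs+#10) -= #1.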
+define void @f(i8* nocapture %p) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}-={{ *}}#1
+ %add.ptr = getelementptr inbounds i8* %p, i32 10
+ %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %conv = zext i8 %0 to i32
+ %sub = add nsw i32 %conv, 255
+ %conv1 = trunc i32 %sub to i8
+ store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+ ret void
+}
+
+define void @g(i8* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}-={{ *}}#1
+ %add.ptr.sum = add i32 %i, 10
+ %add.ptr1 = getelementptr inbounds i8* %p, i32 %add.ptr.sum
+ %0 = load i8* %add.ptr1, align 1, !tbaa !0
+ %conv = zext i8 %0 to i32
+ %sub = add nsw i32 %conv, 255
+ %conv2 = trunc i32 %sub to i8
+ store i8 %conv2, i8* %add.ptr1, align 1, !tbaa !0
+ ret void
+}
+
+!0 = metadata !{metadata !"omnipotent char", metadata !1}
+!1 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/Hexagon/misaligned-access.ll b/test/CodeGen/Hexagon/misaligned-access.ll
new file mode 100644
index 0000000000..4dafb44cc3
--- /dev/null
+++ b/test/CodeGen/Hexagon/misaligned-access.ll
@@ -0,0 +1,16 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s
+; Check that the mis-aligned load doesn't cause the compiler to assert.
+
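+; Roughly equivalent C, assumed for illustration only:
+;   int temp = temp1;
+;   return _hi(*(long long *)&temp);
+; The i64 load claims align 8 from a 4-byte-aligned i32 slot; there is no FileCheck
+; here, so the test only verifies that llc completes without asserting.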
+declare i32 @_hi(i64) #1
+@temp1 = common global i32 0, align 4
+
+define i32 @CSDRSEARCH_executeSearchManager() #0 {
+entry:
+ %temp = alloca i32, align 4
+ %0 = load i32* @temp1, align 4
+ store i32 %0, i32* %temp, align 4
+ %1 = bitcast i32* %temp to i64*
+ %2 = load i64* %1, align 8
+ %call = call i32 @_hi(i64 %2)
+ ret i32 %call
+}