path: root/test
Diffstat (limited to 'test')
-rw-r--r-- test/CodeGen/AArch64/adc.ll | 54
-rw-r--r-- test/CodeGen/AArch64/addsub-shifted.ll | 295
-rw-r--r-- test/CodeGen/AArch64/addsub.ll | 127
-rw-r--r-- test/CodeGen/AArch64/addsub_ext.ll | 189
-rw-r--r-- test/CodeGen/AArch64/adrp-relocation.ll | 35
-rw-r--r-- test/CodeGen/AArch64/alloca.ll | 134
-rw-r--r-- test/CodeGen/AArch64/analyze-branch.ll | 231
-rw-r--r-- test/CodeGen/AArch64/atomic-ops-not-barriers.ll | 24
-rw-r--r-- test/CodeGen/AArch64/atomic-ops.ll | 1099
-rw-r--r-- test/CodeGen/AArch64/basic-pic.ll | 70
-rw-r--r-- test/CodeGen/AArch64/bitfield-insert-0.ll | 19
-rw-r--r-- test/CodeGen/AArch64/bitfield-insert.ll | 193
-rw-r--r-- test/CodeGen/AArch64/bitfield.ll | 218
-rw-r--r-- test/CodeGen/AArch64/blockaddress.ll | 18
-rw-r--r-- test/CodeGen/AArch64/bool-loads.ll | 55
-rw-r--r-- test/CodeGen/AArch64/breg.ll | 17
-rw-r--r-- test/CodeGen/AArch64/callee-save.ll | 86
-rw-r--r-- test/CodeGen/AArch64/compare-branch.ll | 38
-rw-r--r-- test/CodeGen/AArch64/cond-sel.ll | 213
-rw-r--r-- test/CodeGen/AArch64/directcond.ll | 84
-rw-r--r-- test/CodeGen/AArch64/dp-3source.ll | 163
-rw-r--r-- test/CodeGen/AArch64/dp1.ll | 152
-rw-r--r-- test/CodeGen/AArch64/dp2.ll | 169
-rw-r--r-- test/CodeGen/AArch64/elf-extern.ll | 21
-rw-r--r-- test/CodeGen/AArch64/extract.ll | 57
-rw-r--r-- test/CodeGen/AArch64/fastcc-reserved.ll | 58
-rw-r--r-- test/CodeGen/AArch64/fastcc.ll | 123
-rw-r--r-- test/CodeGen/AArch64/fcmp.ll | 81
-rw-r--r-- test/CodeGen/AArch64/fcvt-fixed.ll | 191
-rw-r--r-- test/CodeGen/AArch64/fcvt-int.ll | 151
-rw-r--r-- test/CodeGen/AArch64/flags-multiuse.ll | 35
-rw-r--r-- test/CodeGen/AArch64/floatdp_1source.ll | 138
-rw-r--r-- test/CodeGen/AArch64/floatdp_2source.ll | 60
-rw-r--r-- test/CodeGen/AArch64/fp-cond-sel.ll | 26
-rw-r--r-- test/CodeGen/AArch64/fp-dp3.ll | 102
-rw-r--r-- test/CodeGen/AArch64/fp128-folding.ll | 17
-rw-r--r-- test/CodeGen/AArch64/fp128.ll | 280
-rw-r--r-- test/CodeGen/AArch64/fpimm.ll | 34
-rw-r--r-- test/CodeGen/AArch64/func-argpassing.ll | 192
-rw-r--r-- test/CodeGen/AArch64/func-calls.ll | 140
-rw-r--r-- test/CodeGen/AArch64/global-alignment.ll | 69
-rw-r--r-- test/CodeGen/AArch64/got-abuse.ll | 23
-rw-r--r-- test/CodeGen/AArch64/i128-align.ll | 29
-rw-r--r-- test/CodeGen/AArch64/illegal-float-ops.ll | 221
-rw-r--r-- test/CodeGen/AArch64/init-array.ll | 9
-rw-r--r-- test/CodeGen/AArch64/inline-asm-constraints-badI.ll | 7
-rw-r--r-- test/CodeGen/AArch64/inline-asm-constraints-badK.ll | 7
-rw-r--r-- test/CodeGen/AArch64/inline-asm-constraints-badK2.ll | 7
-rw-r--r-- test/CodeGen/AArch64/inline-asm-constraints-badL.ll | 7
-rw-r--r-- test/CodeGen/AArch64/inline-asm-constraints.ll | 117
-rw-r--r-- test/CodeGen/AArch64/inline-asm-modifiers.ll | 125
-rw-r--r-- test/CodeGen/AArch64/jump-table.ll | 56
-rw-r--r-- test/CodeGen/AArch64/large-frame.ll | 117
-rw-r--r-- test/CodeGen/AArch64/ldst-regoffset.ll | 333
-rw-r--r-- test/CodeGen/AArch64/ldst-unscaledimm.ll | 218
-rw-r--r-- test/CodeGen/AArch64/ldst-unsignedimm.ll | 251
-rw-r--r-- test/CodeGen/AArch64/lit.local.cfg | 6
-rw-r--r-- test/CodeGen/AArch64/literal_pools.ll | 49
-rw-r--r-- test/CodeGen/AArch64/local_vars.ll | 57
-rw-r--r-- test/CodeGen/AArch64/logical-imm.ll | 84
-rw-r--r-- test/CodeGen/AArch64/logical_shifted_reg.ll | 224
-rw-r--r-- test/CodeGen/AArch64/logical_shifted_reg.s | 208
-rw-r--r-- test/CodeGen/AArch64/movw-consts.ll | 124
-rw-r--r-- test/CodeGen/AArch64/pic-eh-stubs.ll | 60
-rw-r--r-- test/CodeGen/AArch64/regress-bitcast-formals.ll | 11
-rw-r--r-- test/CodeGen/AArch64/regress-f128csel-flags.ll | 27
-rw-r--r-- test/CodeGen/AArch64/regress-tail-livereg.ll | 19
-rw-r--r-- test/CodeGen/AArch64/regress-tblgen-chains.ll | 36
-rw-r--r-- test/CodeGen/AArch64/regress-w29-reserved-with-fp.ll | 37
-rw-r--r-- test/CodeGen/AArch64/regress-wzr-allocatable.ll | 41
-rw-r--r-- test/CodeGen/AArch64/setcc-takes-i32.ll | 22
-rw-r--r-- test/CodeGen/AArch64/sibling-call.ll | 97
-rw-r--r-- test/CodeGen/AArch64/tail-call.ll | 94
-rw-r--r-- test/CodeGen/AArch64/tls-dynamic-together.ll | 18
-rw-r--r-- test/CodeGen/AArch64/tls-dynamics.ll | 121
-rw-r--r-- test/CodeGen/AArch64/tls-execs.ll | 63
-rw-r--r-- test/CodeGen/AArch64/tst-br.ll | 48
-rw-r--r-- test/CodeGen/AArch64/variadic.ll | 144
-rw-r--r-- test/CodeGen/AArch64/zero-reg.ll | 31
-rw-r--r-- test/DebugInfo/AArch64/cfi-frame.ll | 58
-rw-r--r-- test/DebugInfo/AArch64/eh_frame.ll | 51
-rw-r--r-- test/DebugInfo/AArch64/eh_frame_personality.ll | 46
-rw-r--r-- test/DebugInfo/AArch64/lit.local.cfg | 6
-rw-r--r-- test/DebugInfo/AArch64/variable-loc.ll | 87
-rw-r--r-- test/MC/AArch64/basic-a64-diagnostics.s | 3709
-rw-r--r-- test/MC/AArch64/basic-a64-instructions.s | 4790
-rw-r--r-- test/MC/AArch64/elf-globaladdress.ll | 111
-rw-r--r-- test/MC/AArch64/elf-objdump.s | 5
-rw-r--r-- test/MC/AArch64/elf-reloc-addsubimm.s | 13
-rw-r--r-- test/MC/AArch64/elf-reloc-condbr.s | 13
-rw-r--r-- test/MC/AArch64/elf-reloc-ldrlit.s | 28
-rw-r--r-- test/MC/AArch64/elf-reloc-ldstunsimm.s | 34
-rw-r--r-- test/MC/AArch64/elf-reloc-movw.s | 98
-rw-r--r-- test/MC/AArch64/elf-reloc-pcreladdressing.s | 29
-rw-r--r-- test/MC/AArch64/elf-reloc-tstb.s | 18
-rw-r--r-- test/MC/AArch64/elf-reloc-uncondbrimm.s | 18
-rw-r--r-- test/MC/AArch64/lit.local.cfg | 5
-rw-r--r-- test/MC/AArch64/mapping-across-sections.s | 28
-rw-r--r-- test/MC/AArch64/mapping-within-section.s | 23
-rw-r--r-- test/MC/AArch64/tls-relocs.s | 662
-rw-r--r-- test/MC/Disassembler/AArch64/basic-a64-instructions.txt | 4145
-rw-r--r-- test/MC/Disassembler/AArch64/basic-a64-undefined.txt | 43
-rw-r--r-- test/MC/Disassembler/AArch64/basic-a64-unpredictable.txt | 96
-rw-r--r-- test/MC/Disassembler/AArch64/ldp-offset-predictable.txt | 7
-rw-r--r-- test/MC/Disassembler/AArch64/ldp-postind.predictable.txt | 17
-rw-r--r-- test/MC/Disassembler/AArch64/ldp-preind.predictable.txt | 17
-rw-r--r-- test/MC/Disassembler/AArch64/lit.local.cfg | 6
107 files changed, 22769 insertions(+), 0 deletions(-)
diff --git a/test/CodeGen/AArch64/adc.ll b/test/CodeGen/AArch64/adc.ll
new file mode 100644
index 0000000000..45bf07928f
--- /dev/null
+++ b/test/CodeGen/AArch64/adc.ll
@@ -0,0 +1,54 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
+define i128 @test_simple(i128 %a, i128 %b, i128 %c) {
+; CHECK: test_simple:
+
+ %valadd = add i128 %a, %b
+; CHECK: adds [[ADDLO:x[0-9]+]], x0, x2
+; CHECK-NEXT: adcs [[ADDHI:x[0-9]+]], x1, x3
+
+ %valsub = sub i128 %valadd, %c
+; CHECK: subs x0, [[ADDLO]], x4
+; CHECK: sbcs x1, [[ADDHI]], x5
+
+ ret i128 %valsub
+; CHECK: ret
+}
+
+define i128 @test_imm(i128 %a) {
+; CHECK: test_imm:
+
+ %val = add i128 %a, 12
+; CHECK: adds x0, x0, #12
+; CHECK: adcs x1, x1, {{x[0-9]|xzr}}
+
+ ret i128 %val
+; CHECK: ret
+}
+
+define i128 @test_shifted(i128 %a, i128 %b) {
+; CHECK: test_shifted:
+
+ %rhs = shl i128 %b, 45
+
+ %val = add i128 %a, %rhs
+; CHECK: adds x0, x0, x2, lsl #45
+; CHECK: adcs x1, x1, {{x[0-9]}}
+
+ ret i128 %val
+; CHECK: ret
+}
+
+define i128 @test_extended(i128 %a, i16 %b) {
+; CHECK: test_extended:
+
+ %ext = sext i16 %b to i128
+ %rhs = shl i128 %ext, 3
+
+ %val = add i128 %a, %rhs
+; CHECK: adds x0, x0, w2, sxth #3
+; CHECK: adcs x1, x1, {{x[0-9]}}
+
+ ret i128 %val
+; CHECK: ret
+}
diff --git a/test/CodeGen/AArch64/addsub-shifted.ll b/test/CodeGen/AArch64/addsub-shifted.ll
new file mode 100644
index 0000000000..ed8ef0d59a
--- /dev/null
+++ b/test/CodeGen/AArch64/addsub-shifted.ll
@@ -0,0 +1,295 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
+@var32 = global i32 0
+@var64 = global i64 0
+
+define void @test_lsl_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
+; CHECK: test_lsl_arith:
+
+ %rhs1 = load volatile i32* @var32
+ %shift1 = shl i32 %rhs1, 18
+ %val1 = add i32 %lhs32, %shift1
+ store volatile i32 %val1, i32* @var32
+; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #18
+
+ %rhs2 = load volatile i32* @var32
+ %shift2 = shl i32 %rhs2, 31
+ %val2 = add i32 %shift2, %lhs32
+ store volatile i32 %val2, i32* @var32
+; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
+
+ %rhs3 = load volatile i32* @var32
+ %shift3 = shl i32 %rhs3, 5
+ %val3 = sub i32 %lhs32, %shift3
+ store volatile i32 %val3, i32* @var32
+; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #5
+
+; Subtraction is not commutative!
+ %rhs4 = load volatile i32* @var32
+ %shift4 = shl i32 %rhs4, 19
+ %val4 = sub i32 %shift4, %lhs32
+ store volatile i32 %val4, i32* @var32
+; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #19
+
+ %lhs4a = load volatile i32* @var32
+ %shift4a = shl i32 %lhs4a, 15
+ %val4a = sub i32 0, %shift4a
+ store volatile i32 %val4a, i32* @var32
+; CHECK: sub {{w[0-9]+}}, wzr, {{w[0-9]+}}, lsl #15
+
+ %rhs5 = load volatile i64* @var64
+ %shift5 = shl i64 %rhs5, 18
+ %val5 = add i64 %lhs64, %shift5
+ store volatile i64 %val5, i64* @var64
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #18
+
+ %rhs6 = load volatile i64* @var64
+ %shift6 = shl i64 %rhs6, 31
+ %val6 = add i64 %shift6, %lhs64
+ store volatile i64 %val6, i64* @var64
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #31
+
+ %rhs7 = load volatile i64* @var64
+ %shift7 = shl i64 %rhs7, 5
+ %val7 = sub i64 %lhs64, %shift7
+ store volatile i64 %val7, i64* @var64
+; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #5
+
+; Subtraction is not commutative!
+ %rhs8 = load volatile i64* @var64
+ %shift8 = shl i64 %rhs8, 19
+ %val8 = sub i64 %shift8, %lhs64
+ store volatile i64 %val8, i64* @var64
+; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #19
+
+ %lhs8a = load volatile i64* @var64
+ %shift8a = shl i64 %lhs8a, 60
+ %val8a = sub i64 0, %shift8a
+ store volatile i64 %val8a, i64* @var64
+; CHECK: sub {{x[0-9]+}}, xzr, {{x[0-9]+}}, lsl #60
+
+ ret void
+; CHECK: ret
+}
+
+define void @test_lsr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
+; CHECK: test_lsr_arith:
+
+ %shift1 = lshr i32 %rhs32, 18
+ %val1 = add i32 %lhs32, %shift1
+ store volatile i32 %val1, i32* @var32
+; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #18
+
+ %shift2 = lshr i32 %rhs32, 31
+ %val2 = add i32 %shift2, %lhs32
+ store volatile i32 %val2, i32* @var32
+; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #31
+
+ %shift3 = lshr i32 %rhs32, 5
+ %val3 = sub i32 %lhs32, %shift3
+ store volatile i32 %val3, i32* @var32
+; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #5
+
+; Subtraction is not commutative!
+ %shift4 = lshr i32 %rhs32, 19
+ %val4 = sub i32 %shift4, %lhs32
+ store volatile i32 %val4, i32* @var32
+; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #19
+
+ %shift4a = lshr i32 %lhs32, 15
+ %val4a = sub i32 0, %shift4a
+ store volatile i32 %val4a, i32* @var32
+; CHECK: sub {{w[0-9]+}}, wzr, {{w[0-9]+}}, lsr #15
+
+ %shift5 = lshr i64 %rhs64, 18
+ %val5 = add i64 %lhs64, %shift5
+ store volatile i64 %val5, i64* @var64
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #18
+
+ %shift6 = lshr i64 %rhs64, 31
+ %val6 = add i64 %shift6, %lhs64
+ store volatile i64 %val6, i64* @var64
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #31
+
+ %shift7 = lshr i64 %rhs64, 5
+ %val7 = sub i64 %lhs64, %shift7
+ store volatile i64 %val7, i64* @var64
+; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #5
+
+; Subtraction is not commutative!
+ %shift8 = lshr i64 %rhs64, 19
+ %val8 = sub i64 %shift8, %lhs64
+ store volatile i64 %val8, i64* @var64
+; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #19
+
+ %shift8a = lshr i64 %lhs64, 45
+ %val8a = sub i64 0, %shift8a
+ store volatile i64 %val8a, i64* @var64
+; CHECK: sub {{x[0-9]+}}, xzr, {{x[0-9]+}}, lsr #45
+
+ ret void
+; CHECK: ret
+}
+
+define void @test_asr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
+; CHECK: test_asr_arith:
+
+ %shift1 = ashr i32 %rhs32, 18
+ %val1 = add i32 %lhs32, %shift1
+ store volatile i32 %val1, i32* @var32
+; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #18
+
+ %shift2 = ashr i32 %rhs32, 31
+ %val2 = add i32 %shift2, %lhs32
+ store volatile i32 %val2, i32* @var32
+; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #31
+
+ %shift3 = ashr i32 %rhs32, 5
+ %val3 = sub i32 %lhs32, %shift3
+ store volatile i32 %val3, i32* @var32
+; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #5
+
+; Subtraction is not commutative!
+ %shift4 = ashr i32 %rhs32, 19
+ %val4 = sub i32 %shift4, %lhs32
+ store volatile i32 %val4, i32* @var32
+; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #19
+
+ %shift4a = ashr i32 %lhs32, 15
+ %val4a = sub i32 0, %shift4a
+ store volatile i32 %val4a, i32* @var32
+; CHECK: sub {{w[0-9]+}}, wzr, {{w[0-9]+}}, asr #15
+
+ %shift5 = ashr i64 %rhs64, 18
+ %val5 = add i64 %lhs64, %shift5
+ store volatile i64 %val5, i64* @var64
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #18
+
+ %shift6 = ashr i64 %rhs64, 31
+ %val6 = add i64 %shift6, %lhs64
+ store volatile i64 %val6, i64* @var64
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #31
+
+ %shift7 = ashr i64 %rhs64, 5
+ %val7 = sub i64 %lhs64, %shift7
+ store volatile i64 %val7, i64* @var64
+; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #5
+
+; Subtraction is not commutative!
+ %shift8 = ashr i64 %rhs64, 19
+ %val8 = sub i64 %shift8, %lhs64
+ store volatile i64 %val8, i64* @var64
+; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #19
+
+ %shift8a = ashr i64 %lhs64, 45
+ %val8a = sub i64 0, %shift8a
+ store volatile i64 %val8a, i64* @var64
+; CHECK: sub {{x[0-9]+}}, xzr, {{x[0-9]+}}, asr #45
+
+ ret void
+; CHECK: ret
+}
+
+define i32 @test_cmp(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
+; CHECK: test_cmp:
+
+ %shift1 = shl i32 %rhs32, 13
+ %tst1 = icmp uge i32 %lhs32, %shift1
+ br i1 %tst1, label %t2, label %end
+; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, lsl #13
+
+t2:
+ %shift2 = lshr i32 %rhs32, 20
+ %tst2 = icmp ne i32 %lhs32, %shift2
+ br i1 %tst2, label %t3, label %end
+; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, lsr #20
+
+t3:
+ %shift3 = ashr i32 %rhs32, 9
+ %tst3 = icmp ne i32 %lhs32, %shift3
+ br i1 %tst3, label %t4, label %end
+; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, asr #9
+
+t4:
+ %shift4 = shl i64 %rhs64, 43
+ %tst4 = icmp uge i64 %lhs64, %shift4
+ br i1 %tst4, label %t5, label %end
+; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, lsl #43
+
+t5:
+ %shift5 = lshr i64 %rhs64, 20
+ %tst5 = icmp ne i64 %lhs64, %shift5
+ br i1 %tst5, label %t6, label %end
+; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, lsr #20
+
+t6:
+ %shift6 = ashr i64 %rhs64, 59
+ %tst6 = icmp ne i64 %lhs64, %shift6
+ br i1 %tst6, label %t7, label %end
+; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, asr #59
+
+t7:
+ ret i32 1
+end:
+
+ ret i32 0
+; CHECK: ret
+}
+
+define i32 @test_cmn(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
+; CHECK: test_cmn:
+
+ %shift1 = shl i32 %rhs32, 13
+ %val1 = sub i32 0, %shift1
+ %tst1 = icmp uge i32 %lhs32, %val1
+ br i1 %tst1, label %t2, label %end
+ ; Important that this isn't lowered to a cmn instruction because if %rhs32 ==
+ ; 0 then the results will differ.
+; CHECK: sub [[RHS:w[0-9]+]], wzr, {{w[0-9]+}}, lsl #13
+; CHECK: cmp {{w[0-9]+}}, [[RHS]]
+
+t2:
+ %shift2 = lshr i32 %rhs32, 20
+ %val2 = sub i32 0, %shift2
+ %tst2 = icmp ne i32 %lhs32, %val2
+ br i1 %tst2, label %t3, label %end
+; CHECK: cmn {{w[0-9]+}}, {{w[0-9]+}}, lsr #20
+
+t3:
+ %shift3 = ashr i32 %rhs32, 9
+ %val3 = sub i32 0, %shift3
+ %tst3 = icmp eq i32 %lhs32, %val3
+ br i1 %tst3, label %t4, label %end
+; CHECK: cmn {{w[0-9]+}}, {{w[0-9]+}}, asr #9
+
+t4:
+ %shift4 = shl i64 %rhs64, 43
+ %val4 = sub i64 0, %shift4
+ %tst4 = icmp slt i64 %lhs64, %val4
+ br i1 %tst4, label %t5, label %end
+ ; Again, it's important that cmn isn't used here in case %rhs64 == 0.
+; CHECK: sub [[RHS:x[0-9]+]], xzr, {{x[0-9]+}}, lsl #43
+; CHECK: cmp {{x[0-9]+}}, [[RHS]]
+
+t5:
+ %shift5 = lshr i64 %rhs64, 20
+ %val5 = sub i64 0, %shift5
+ %tst5 = icmp ne i64 %lhs64, %val5
+ br i1 %tst5, label %t6, label %end
+; CHECK: cmn {{x[0-9]+}}, {{x[0-9]+}}, lsr #20
+
+t6:
+ %shift6 = ashr i64 %rhs64, 59
+ %val6 = sub i64 0, %shift6
+ %tst6 = icmp ne i64 %lhs64, %val6
+ br i1 %tst6, label %t7, label %end
+; CHECK: cmn {{x[0-9]+}}, {{x[0-9]+}}, asr #59
+
+t7:
+ ret i32 1
+end:
+
+ ret i32 0
+; CHECK: ret
+}
+
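The two "important that this isn't lowered to a cmn" comments in test_cmn above rest on a flags subtlety that is easy to miss for the unsigned case. A hand-worked sketch of the %rhs32 == 0 situation (editorial illustration with made-up register numbers, not output captured from this test):

        // %val1 = sub i32 0, (%rhs32 << 13);  %tst1 = icmp uge i32 %lhs32, %val1
        // If %rhs32 == 0, %val1 is 0 and the uge comparison is always true.
        sub     w9, wzr, w8, lsl #13    // w9 == 0 when w8 == 0
        cmp     w0, w9                  // w0 - 0: carry set (no borrow), so an unsigned-ge branch is taken
        // A folded "cmn w0, w8, lsl #13" would compute w0 + 0 instead; adding zero
        // leaves the carry flag clear, so the same unsigned-ge branch would go the wrong way.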
diff --git a/test/CodeGen/AArch64/addsub.ll b/test/CodeGen/AArch64/addsub.ll
new file mode 100644
index 0000000000..ccfb1c8f4a
--- /dev/null
+++ b/test/CodeGen/AArch64/addsub.ll
@@ -0,0 +1,127 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
+; Note that this should be refactored (for efficiency if nothing else)
+; when the PCS is implemented so we don't have to worry about the
+; loads and stores.
+
+@var_i32 = global i32 42
+@var_i64 = global i64 0
+
+; Add pure 12-bit immediates:
+define void @add_small() {
+; CHECK: add_small:
+
+; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, #4095
+ %val32 = load i32* @var_i32
+ %newval32 = add i32 %val32, 4095
+ store i32 %newval32, i32* @var_i32
+
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, #52
+ %val64 = load i64* @var_i64
+ %newval64 = add i64 %val64, 52
+ store i64 %newval64, i64* @var_i64
+
+ ret void
+}
+
+; Add 12-bit immediates, shifted left by 12 bits
+define void @add_med() {
+; CHECK: add_med:
+
+; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, #3567, lsl #12
+ %val32 = load i32* @var_i32
+ %newval32 = add i32 %val32, 14610432 ; =0xdef000
+ store i32 %newval32, i32* @var_i32
+
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, #4095, lsl #12
+ %val64 = load i64* @var_i64
+ %newval64 = add i64 %val64, 16773120 ; =0xfff000
+ store i64 %newval64, i64* @var_i64
+
+ ret void
+}
+
+; Subtract 12-bit immediates
+define void @sub_small() {
+; CHECK: sub_small:
+
+; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, #4095
+ %val32 = load i32* @var_i32
+ %newval32 = sub i32 %val32, 4095
+ store i32 %newval32, i32* @var_i32
+
+; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, #52
+ %val64 = load i64* @var_i64
+ %newval64 = sub i64 %val64, 52
+ store i64 %newval64, i64* @var_i64
+
+ ret void
+}
+
+; Subtract 12-bit immediates, shifted left by 12 bits
+define void @sub_med() {
+; CHECK: sub_med:
+
+; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, #3567, lsl #12
+ %val32 = load i32* @var_i32
+ %newval32 = sub i32 %val32, 14610432 ; =0xdef000
+ store i32 %newval32, i32* @var_i32
+
+; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, #4095, lsl #12
+ %val64 = load i64* @var_i64
+ %newval64 = sub i64 %val64, 16773120 ; =0xfff000
+ store i64 %newval64, i64* @var_i64
+
+ ret void
+}
+
+define void @testing() {
+; CHECK: testing:
+ %val = load i32* @var_i32
+
+; CHECK: cmp {{w[0-9]+}}, #4095
+; CHECK: b.ne .LBB4_6
+ %cmp_pos_small = icmp ne i32 %val, 4095
+ br i1 %cmp_pos_small, label %ret, label %test2
+
+test2:
+; CHECK: cmp {{w[0-9]+}}, #3567, lsl #12
+; CHECK: b.lo .LBB4_6
+ %newval2 = add i32 %val, 1
+ store i32 %newval2, i32* @var_i32
+ %cmp_pos_big = icmp ult i32 %val, 14610432
+ br i1 %cmp_pos_big, label %ret, label %test3
+
+test3:
+; CHECK: cmp {{w[0-9]+}}, #123
+; CHECK: b.lt .LBB4_6
+ %newval3 = add i32 %val, 2
+ store i32 %newval3, i32* @var_i32
+ %cmp_pos_slt = icmp slt i32 %val, 123
+ br i1 %cmp_pos_slt, label %ret, label %test4
+
+test4:
+; CHECK: cmp {{w[0-9]+}}, #321
+; CHECK: b.gt .LBB4_6
+ %newval4 = add i32 %val, 3
+ store i32 %newval4, i32* @var_i32
+ %cmp_pos_sgt = icmp sgt i32 %val, 321
+ br i1 %cmp_pos_sgt, label %ret, label %test5
+
+test5:
+; CHECK: cmn {{w[0-9]+}}, #444
+; CHECK: b.gt .LBB4_6
+ %newval5 = add i32 %val, 4
+ store i32 %newval5, i32* @var_i32
+ %cmp_neg_uge = icmp sgt i32 %val, -444
+ br i1 %cmp_neg_uge, label %ret, label %test6
+
+test6:
+ %newval6 = add i32 %val, 5
+ store i32 %newval6, i32* @var_i32
+ ret void
+
+ret:
+ ret void
+}
+; TODO: adds/subs
diff --git a/test/CodeGen/AArch64/addsub_ext.ll b/test/CodeGen/AArch64/addsub_ext.ll
new file mode 100644
index 0000000000..e9e3cf2c67
--- /dev/null
+++ b/test/CodeGen/AArch64/addsub_ext.ll
@@ -0,0 +1,189 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
+@var8 = global i8 0
+@var16 = global i16 0
+@var32 = global i32 0
+@var64 = global i64 0
+
+define void @addsub_i8rhs() {
+; CHECK: addsub_i8rhs:
+ %val8_tmp = load i8* @var8
+ %lhs32 = load i32* @var32
+ %lhs64 = load i64* @var64
+
+ ; Need this to prevent extension upon load and give a vanilla i8 operand.
+ %val8 = add i8 %val8_tmp, 123
+
+
+; Zero-extending to 32-bits
+ %rhs32_zext = zext i8 %val8 to i32
+ %res32_zext = add i32 %lhs32, %rhs32_zext
+ store volatile i32 %res32_zext, i32* @var32
+; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, uxtb
+
+ %rhs32_zext_shift = shl i32 %rhs32_zext, 3
+ %res32_zext_shift = add i32 %lhs32, %rhs32_zext_shift
+ store volatile i32 %res32_zext_shift, i32* @var32
+; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, uxtb #3
+
+
+; Zero-extending to 64-bits
+ %rhs64_zext = zext i8 %val8 to i64
+ %res64_zext = add i64 %lhs64, %rhs64_zext
+ store volatile i64 %res64_zext, i64* @var64
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, uxtb
+
+ %rhs64_zext_shift = shl i64 %rhs64_zext, 1
+ %res64_zext_shift = add i64 %lhs64, %rhs64_zext_shift
+ store volatile i64 %res64_zext_shift, i64* @var64
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, uxtb #1
+
+; Sign-extending to 32-bits
+ %rhs32_sext = sext i8 %val8 to i32
+ %res32_sext = add i32 %lhs32, %rhs32_sext
+ store volatile i32 %res32_sext, i32* @var32
+; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, sxtb
+
+ %rhs32_sext_shift = shl i32 %rhs32_sext, 1
+ %res32_sext_shift = add i32 %lhs32, %rhs32_sext_shift
+ store volatile i32 %res32_sext_shift, i32* @var32
+; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, sxtb #1
+
+; Sign-extending to 64-bits
+ %rhs64_sext = sext i8 %val8 to i64
+ %res64_sext = add i64 %lhs64, %rhs64_sext
+ store volatile i64 %res64_sext, i64* @var64
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, sxtb
+
+ %rhs64_sext_shift = shl i64 %rhs64_sext, 4
+ %res64_sext_shift = add i64 %lhs64, %rhs64_sext_shift
+ store volatile i64 %res64_sext_shift, i64* @var64
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, sxtb #4
+
+
+; CMP variants
+ %tst = icmp slt i32 %lhs32, %rhs32_zext
+ br i1 %tst, label %end, label %test2
+; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, uxtb
+
+test2:
+ %cmp_sext = sext i8 %val8 to i64
+ %tst2 = icmp eq i64 %lhs64, %cmp_sext
+ br i1 %tst2, label %other, label %end
+; CHECK: cmp {{x[0-9]+}}, {{w[0-9]+}}, sxtb
+
+other:
+ store volatile i32 %lhs32, i32* @var32
+ ret void
+
+end:
+ ret void
+}
+
+define void @addsub_i16rhs() {
+; CHECK: addsub_i16rhs:
+ %val16_tmp = load i16* @var16
+ %lhs32 = load i32* @var32
+ %lhs64 = load i64* @var64
+
+ ; Need this to prevent extension upon load and give a vanilla i16 operand.
+ %val16 = add i16 %val16_tmp, 123
+
+
+; Zero-extending to 32-bits
+ %rhs32_zext = zext i16 %val16 to i32
+ %res32_zext = add i32 %lhs32, %rhs32_zext
+ store volatile i32 %res32_zext, i32* @var32
+; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, uxth
+
+ %rhs32_zext_shift = shl i32 %rhs32_zext, 3
+ %res32_zext_shift = add i32 %lhs32, %rhs32_zext_shift
+ store volatile i32 %res32_zext_shift, i32* @var32
+; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, uxth #3
+
+
+; Zero-extending to 64-bits
+ %rhs64_zext = zext i16 %val16 to i64
+ %res64_zext = add i64 %lhs64, %rhs64_zext
+ store volatile i64 %res64_zext, i64* @var64
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, uxth
+
+ %rhs64_zext_shift = shl i64 %rhs64_zext, 1
+ %res64_zext_shift = add i64 %lhs64, %rhs64_zext_shift
+ store volatile i64 %res64_zext_shift, i64* @var64
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, uxth #1
+
+; Sign-extending to 32-bits
+ %rhs32_sext = sext i16 %val16 to i32
+ %res32_sext = add i32 %lhs32, %rhs32_sext
+ store volatile i32 %res32_sext, i32* @var32
+; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, sxth
+
+ %rhs32_sext_shift = shl i32 %rhs32_sext, 1
+ %res32_sext_shift = add i32 %lhs32, %rhs32_sext_shift
+ store volatile i32 %res32_sext_shift, i32* @var32
+; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, sxth #1
+
+; Sign-extending to 64-bits
+ %rhs64_sext = sext i16 %val16 to i64
+ %res64_sext = add i64 %lhs64, %rhs64_sext
+ store volatile i64 %res64_sext, i64* @var64
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, sxth
+
+ %rhs64_sext_shift = shl i64 %rhs64_sext, 4
+ %res64_sext_shift = add i64 %lhs64, %rhs64_sext_shift
+ store volatile i64 %res64_sext_shift, i64* @var64
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, sxth #4
+
+
+; CMP variants
+ %tst = icmp slt i32 %lhs32, %rhs32_zext
+ br i1 %tst, label %end, label %test2
+; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, uxth
+
+test2:
+ %cmp_sext = sext i16 %val16 to i64
+ %tst2 = icmp eq i64 %lhs64, %cmp_sext
+ br i1 %tst2, label %other, label %end
+; CHECK: cmp {{x[0-9]+}}, {{w[0-9]+}}, sxth
+
+other:
+ store volatile i32 %lhs32, i32* @var32
+ ret void
+
+end:
+ ret void
+}
+
+; N.b. we could probably check more here ("add w2, w3, w1, uxtw" for
+; example), but the remaining instructions are probably not idiomatic
+; in the face of "add/sub (shifted register)" so I don't intend to.
+define void @addsub_i32rhs() {
+; CHECK: addsub_i32rhs:
+ %val32_tmp = load i32* @var32
+ %lhs64 = load i64* @var64
+
+ %val32 = add i32 %val32_tmp, 123
+
+ %rhs64_zext = zext i32 %val32 to i64
+ %res64_zext = add i64 %lhs64, %rhs64_zext
+ store volatile i64 %res64_zext, i64* @var64
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, uxtw
+
+ %rhs64_zext_shift = shl i64 %rhs64_zext, 2
+ %res64_zext_shift = add i64 %lhs64, %rhs64_zext_shift
+ store volatile i64 %res64_zext_shift, i64* @var64
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, uxtw #2
+
+ %rhs64_sext = sext i32 %val32 to i64
+ %res64_sext = add i64 %lhs64, %rhs64_sext
+ store volatile i64 %res64_sext, i64* @var64
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, sxtw
+
+ %rhs64_sext_shift = shl i64 %rhs64_sext, 2
+ %res64_sext_shift = add i64 %lhs64, %rhs64_sext_shift
+ store volatile i64 %res64_sext_shift, i64* @var64
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, sxtw #2
+
+ ret void
+}
\ No newline at end of file
diff --git a/test/CodeGen/AArch64/adrp-relocation.ll b/test/CodeGen/AArch64/adrp-relocation.ll
new file mode 100644
index 0000000000..ee600f0092
--- /dev/null
+++ b/test/CodeGen/AArch64/adrp-relocation.ll
@@ -0,0 +1,35 @@
+; RUN: llc -march=aarch64 -verify-machineinstrs -filetype=obj < %s | elf-dump | FileCheck %s
+
+define fp128 @testfn() nounwind {
+entry:
+ ret fp128 0xL00000000000000004004500000000000
+}
+
+define fp128 @foo() nounwind {
+entry:
+ %bar = alloca fp128 ()*, align 8
+ store fp128 ()* @testfn, fp128 ()** %bar, align 8
+ %call = call fp128 @testfn()
+ ret fp128 %call
+}
+
+; The above should produce an ADRP/ADD pair to calculate the address of
+; testfn. The important point is that LLVM shouldn't think it can deal with the
+; relocation on the ADRP itself (even though it knows everything about the
+; relative offsets of testfn and foo) because its value depends on where this
+; object file's .text section gets relocated in memory.
+
+; CHECK: .rela.text
+
+; CHECK: # Relocation 0
+; CHECK-NEXT: (('r_offset', 0x0000000000000028)
+; CHECK-NEXT: ('r_sym', 0x00000009)
+; CHECK-NEXT: ('r_type', 0x00000113)
+; CHECK-NEXT: ('r_addend', 0x0000000000000000)
+; CHECK-NEXT: ),
+; CHECK-NEXT: Relocation 1
+; CHECK-NEXT: (('r_offset', 0x000000000000002c)
+; CHECK-NEXT: ('r_sym', 0x00000009)
+; CHECK-NEXT: ('r_type', 0x00000115)
+; CHECK-NEXT: ('r_addend', 0x0000000000000000)
+; CHECK-NEXT: ),
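To make the relocation numbers above easier to read: the address of @testfn should be materialised with the usual page/offset pair, and 0x113/0x115 ought to correspond to the ADRP and ADD relocations respectively. A sketch of the expected sequence (register choice and stack slot are illustrative, not taken from this test's output):

        adrp    x8, testfn               // R_AARCH64_ADR_PREL_PG_HI21: page of testfn, only known at link time
        add     x8, x8, #:lo12:testfn    // R_AARCH64_ADD_ABS_LO12_NC: low 12 bits of testfn
        str     x8, [sp]                 // store the function pointer into %bar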
diff --git a/test/CodeGen/AArch64/alloca.ll b/test/CodeGen/AArch64/alloca.ll
new file mode 100644
index 0000000000..0e8c14d7d2
--- /dev/null
+++ b/test/CodeGen/AArch64/alloca.ll
@@ -0,0 +1,134 @@
+; RUN: llc -march=aarch64 -verify-machineinstrs < %s | FileCheck %s
+
+declare void @use_addr(i8*)
+
+define void @test_simple_alloca(i64 %n) {
+; CHECK: test_simple_alloca:
+
+ %buf = alloca i8, i64 %n
+ ; Make sure we align the stack change to 16 bytes:
+; CHECK: add [[SPDELTA:x[0-9]+]], x0, #15
+; CHECK: and x0, [[SPDELTA]], #0xfffffffffffffff0
+
+ ; Make sure we change SP. It would be surprising if anything but x0 were used
+ ; for the final sp, but it could be if it was then moved into x0.
+; CHECK: mov [[TMP:x[0-9]+]], sp
+; CHECK: sub x0, [[TMP]], [[SPDELTA]]
+; CHECK: mov sp, x0
+
+ call void @use_addr(i8* %buf)
+; CHECK: bl use_addr
+
+ ret void
+ ; Make sure epilogue restores sp from fp
+; CHECK: sub sp, x29, #16
+; CHECK: ldp x29, x30, [sp, #16]
+; CHECK: add sp, sp, #32
+; CHECK: ret
+}
+
+declare void @use_addr_loc(i8*, i64*)
+
+define i64 @test_alloca_with_local(i64 %n) {
+; CHECK: test_alloca_with_local:
+; CHECK: sub sp, sp, #32
+; CHECK: stp x29, x30, [sp, #16]
+
+ %loc = alloca i64
+ %buf = alloca i8, i64 %n
+ ; Make sure we align the stack change to 16 bytes:
+; CHECK: add [[SPDELTA:x[0-9]+]], x0, #15
+; CHECK: and x0, [[SPDELTA]], #0xfffffffffffffff0
+
+ ; Make sure we change SP. It would be surprising if anything but x0 were used
+ ; for the final sp, but it could be if it was then moved into x0.
+; CHECK: mov [[TMP:x[0-9]+]], sp
+; CHECK: sub x0, [[TMP]], [[SPDELTA]]
+; CHECK: mov sp, x0
+
+; Obviously suboptimal code here, but it's needed to get &local into x1
+; CHECK: sub [[TMP:x[0-9]+]], x29, [[LOC_FROM_FP:#[0-9]+]]
+; CHECK: add x1, [[TMP]], #0
+
+ call void @use_addr_loc(i8* %buf, i64* %loc)
+; CHECK: bl use_addr
+
+ %val = load i64* %loc
+; CHECK: sub x[[TMP:[0-9]+]], x29, [[LOC_FROM_FP]]
+; CHECK: ldr x0, [x[[TMP]]]
+
+ ret i64 %val
+ ; Make sure epilogue restores sp from fp
+; CHECK: sub sp, x29, #16
+; CHECK: ldp x29, x30, [sp, #16]
+; CHECK: add sp, sp, #32
+; CHECK: ret
+}
+
+define void @test_variadic_alloca(i64 %n, ...) {
+; CHECK: test_variadic_alloca:
+
+; CHECK: sub sp, sp, #208
+; CHECK: stp x29, x30, [sp, #192]
+; CHECK: add x29, sp, #192
+; CHECK: sub x9, x29, #192
+; CHECK: add x8, x9, #0
+; CHECK: str q7, [x8, #112]
+; [...]
+; CHECK: str q1, [x8, #16]
+
+ %addr = alloca i8, i64 %n
+
+ call void @use_addr(i8* %addr)
+; CHECK: bl use_addr
+
+ ret void
+; CHECK: sub sp, x29, #192
+; CHECK: ldp x29, x30, [sp, #192]
+; CHECK: add sp, sp, #208
+}
+
+define void @test_alloca_large_frame(i64 %n) {
+; CHECK: test_alloca_large_frame:
+
+; CHECK: sub sp, sp, #496
+; CHECK: stp x29, x30, [sp, #480]
+; CHECK: add x29, sp, #480
+; CHECK: sub sp, sp, #48
+; CHECK: sub sp, sp, #1953, lsl #12
+
+ %addr1 = alloca i8, i64 %n
+ %addr2 = alloca i64, i64 1000000
+
+ call void @use_addr_loc(i8* %addr1, i64* %addr2)
+
+ ret void
+; CHECK: sub sp, x29, #480
+; CHECK: ldp x29, x30, [sp, #480]
+; CHECK: add sp, sp, #496
+}
+
+declare i8* @llvm.stacksave()
+declare void @llvm.stackrestore(i8*)
+
+define void @test_scoped_alloca(i64 %n) {
+; CHECK: test_scoped_alloca
+; CHECK: sub sp, sp, #32
+
+ %sp = call i8* @llvm.stacksave()
+; CHECK: mov [[SAVED_SP:x[0-9]+]], sp
+
+ %addr = alloca i8, i64 %n
+; CHECK: and [[SPDELTA:x[0-9]+]], {{x[0-9]+}}, #0xfffffffffffffff0
+; CHECK: mov [[OLDSP:x[0-9]+]], sp
+; CHECK: sub [[NEWSP:x[0-9]+]], [[OLDSP]], [[SPDELTA]]
+; CHECK: mov sp, [[NEWSP]]
+
+ call void @use_addr(i8* %addr)
+; CHECK: bl use_addr
+
+ call void @llvm.stackrestore(i8* %sp)
+; CHECK: mov sp, [[SAVED_SP]]
+
+ ret void
+}
\ No newline at end of file
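The 16-byte alignment that the comments in test_simple_alloca insist on is just the usual round-up of the dynamic size before the stack pointer is moved: the delta is (n + 15) & ~15. Spelled out as a sketch (register numbers are illustrative; the CHECK lines above allow any registers):

        add     x9, x0, #15                     // n + 15
        and     x9, x9, #0xfffffffffffffff0     // (n + 15) & ~15: round up to a multiple of 16
        mov     x10, sp
        sub     x0, x10, x9                     // carve the rounded buffer out of the stack
        mov     sp, x0                          // sp stays 16-byte aligned at all times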
diff --git a/test/CodeGen/AArch64/analyze-branch.ll b/test/CodeGen/AArch64/analyze-branch.ll
new file mode 100644
index 0000000000..1f6e96f1d5
--- /dev/null
+++ b/test/CodeGen/AArch64/analyze-branch.ll
@@ -0,0 +1,231 @@
+; RUN: llc -march=aarch64 < %s | FileCheck %s
+
+; This test checks that LLVM can do basic stripping and reapplying of branches
+; to basic blocks.
+
+declare void @test_true()
+declare void @test_false()
+
+; !0 corresponds to a branch being taken, !1 to not being taken.
+!0 = metadata !{metadata !"branch_weights", i32 64, i32 4}
+!1 = metadata !{metadata !"branch_weights", i32 4, i32 64}
+
+define void @test_Bcc_fallthrough_taken(i32 %in) nounwind {
+; CHECK: test_Bcc_fallthrough_taken:
+ %tst = icmp eq i32 %in, 42
+ br i1 %tst, label %true, label %false, !prof !0
+
+; CHECK: cmp {{w[0-9]+}}, #42
+
+; CHECK: b.ne [[FALSE:.LBB[0-9]+_[0-9]+]]
+; CHECK-NEXT: // BB#
+; CHECK-NEXT: bl test_true
+
+; CHECK: [[FALSE]]:
+; CHECK: bl test_false
+
+true:
+ call void @test_true()
+ ret void
+
+false:
+ call void @test_false()
+ ret void
+}
+
+define void @test_Bcc_fallthrough_nottaken(i32 %in) nounwind {
+; CHECK: test_Bcc_fallthrough_nottaken:
+ %tst = icmp eq i32 %in, 42
+ br i1 %tst, label %true, label %false, !prof !1
+
+; CHECK: cmp {{w[0-9]+}}, #42
+
+; CHECK: b.eq [[TRUE:.LBB[0-9]+_[0-9]+]]
+; CHECK-NEXT: // BB#
+; CHECK-NEXT: bl test_false
+
+; CHECK: [[TRUE]]:
+; CHECK: bl test_true
+
+true:
+ call void @test_true()
+ ret void
+
+false:
+ call void @test_false()
+ ret void
+}
+
+define void @test_CBZ_fallthrough_taken(i32 %in) nounwind {
+; CHECK: test_CBZ_fallthrough_taken:
+ %tst = icmp eq i32 %in, 0
+ br i1 %tst, label %true, label %false, !prof !0
+
+; CHECK: cbnz {{w[0-9]+}}, [[FALSE:.LBB[0-9]+_[0-9]+]]
+; CHECK-NEXT: // BB#
+; CHECK-NEXT: bl test_true
+
+; CHECK: [[FALSE]]:
+; CHECK: bl test_false
+
+true:
+ call void @test_true()
+ ret void
+
+false:
+ call void @test_false()
+ ret void
+}
+
+define void @test_CBZ_fallthrough_nottaken(i64 %in) nounwind {
+; CHECK: test_CBZ_fallthrough_nottaken:
+ %tst = icmp eq i64 %in, 0
+ br i1 %tst, label %true, label %false, !prof !1
+
+; CHECK: cbz {{x[0-9]+}}, [[TRUE:.LBB[0-9]+_[0-9]+]]
+; CHECK-NEXT: // BB#
+; CHECK-NEXT: bl test_false
+
+; CHECK: [[TRUE]]:
+; CHECK: bl test_true
+
+true:
+ call void @test_true()
+ ret void
+
+false:
+ call void @test_false()
+ ret void
+}
+
+define void @test_CBNZ_fallthrough_taken(i32 %in) nounwind {
+; CHECK: test_CBNZ_fallthrough_taken:
+ %tst = icmp ne i32 %in, 0
+ br i1 %tst, label %true, label %false, !prof !0
+
+; CHECK: cbz {{w[0-9]+}}, [[FALSE:.LBB[0-9]+_[0-9]+]]
+; CHECK-NEXT: // BB#
+; CHECK-NEXT: bl test_true
+
+; CHECK: [[FALSE]]:
+; CHECK: bl test_false
+
+true:
+ call void @test_true()
+ ret void
+
+false:
+ call void @test_false()
+ ret void
+}
+
+define void @test_CBNZ_fallthrough_nottaken(i64 %in) nounwind {
+; CHECK: test_CBNZ_fallthrough_nottaken:
+ %tst = icmp ne i64 %in, 0
+ br i1 %tst, label %true, label %false, !prof !1
+
+; CHECK: cbnz {{x[0-9]+}}, [[TRUE:.LBB[0-9]+_[0-9]+]]
+; CHECK-NEXT: // BB#
+; CHECK-NEXT: bl test_false
+
+; CHECK: [[TRUE]]:
+; CHECK: bl test_true
+
+true:
+ call void @test_true()
+ ret void
+
+false:
+ call void @test_false()
+ ret void
+}
+
+define void @test_TBZ_fallthrough_taken(i32 %in) nounwind {
+; CHECK: test_TBZ_fallthrough_taken:
+ %bit = and i32 %in, 32768
+ %tst = icmp eq i32 %bit, 0
+ br i1 %tst, label %true, label %false, !prof !0
+
+; CHECK: tbnz {{w[0-9]+}}, #15, [[FALSE:.LBB[0-9]+_[0-9]+]]
+; CHECK-NEXT: // BB#
+; CHECK-NEXT: bl test_true
+
+; CHECK: [[FALSE]]:
+; CHECK: bl test_false
+
+true:
+ call void @test_true()
+ ret void
+
+false:
+ call void @test_false()
+ ret void
+}
+
+define void @test_TBZ_fallthrough_nottaken(i64 %in) nounwind {
+; CHECK: test_TBZ_fallthrough_nottaken:
+ %bit = and i64 %in, 32768
+ %tst = icmp eq i64 %bit, 0
+ br i1 %tst, label %true, label %false, !prof !1
+
+; CHECK: tbz {{x[0-9]+}}, #15, [[TRUE:.LBB[0-9]+_[0-9]+]]
+; CHECK-NEXT: // BB#
+; CHECK-NEXT: bl test_false
+
+; CHECK: [[TRUE]]:
+; CHECK: bl test_true
+
+true:
+ call void @test_true()
+ ret void
+
+false:
+ call void @test_false()
+ ret void
+}
+
+
+define void @test_TBNZ_fallthrough_taken(i32 %in) nounwind {
+; CHECK: test_TBNZ_fallthrough_taken:
+ %bit = and i32 %in, 32768
+ %tst = icmp ne i32 %bit, 0
+ br i1 %tst, label %true, label %false, !prof !0
+
+; CHECK: tbz {{w[0-9]+}}, #15, [[FALSE:.LBB[0-9]+_[0-9]+]]
+; CHECK-NEXT: // BB#
+; CHECK-NEXT: bl test_true
+
+; CHECK: [[FALSE]]:
+; CHECK: bl test_false
+
+true:
+ call void @test_true()
+ ret void
+
+false:
+ call void @test_false()
+ ret void
+}
+
+define void @test_TBNZ_fallthrough_nottaken(i64 %in) nounwind {
+; CHECK: test_TBNZ_fallthrough_nottaken:
+ %bit = and i64 %in, 32768
+ %tst = icmp ne i64 %bit, 0
+ br i1 %tst, label %true, label %false, !prof !1
+
+; CHECK: tbnz {{x[0-9]+}}, #15, [[TRUE:.LBB[0-9]+_[0-9]+]]
+; CHECK-NEXT: // BB#
+; CHECK-NEXT: bl test_false
+
+; CHECK: [[TRUE]]:
+; CHECK: bl test_true
+
+true:
+ call void @test_true()
+ ret void
+
+false:
+ call void @test_false()
+ ret void
+}
+
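The !prof weights defined at the top of analyze-branch.ll are what let each pair of functions force a particular fallthrough: with !0 (64:4 in favour of the first successor) the likely block should be laid out straight after the conditional branch, and with !1 the other way round. For test_Bcc_fallthrough_taken that means roughly this shape (a sketch of the layout the CHECK lines pin down, not verbatim llc output):

        cmp     w0, #42
        b.ne    .LBB0_2          // unlikely edge branches out of line
        bl      test_true        // likely successor is the fallthrough
                                 // (return sequence omitted)
.LBB0_2:
        bl      test_false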
diff --git a/test/CodeGen/AArch64/atomic-ops-not-barriers.ll b/test/CodeGen/AArch64/atomic-ops-not-barriers.ll
new file mode 100644
index 0000000000..f383d76b74
--- /dev/null
+++ b/test/CodeGen/AArch64/atomic-ops-not-barriers.ll
@@ -0,0 +1,24 @@
+; RUN: llc -march=aarch64 < %s | FileCheck %s
+
+define i32 @foo(i32* %var, i1 %cond) {
+; CHECK: foo:
+ br i1 %cond, label %atomic_ver, label %simple_ver
+simple_ver:
+ %oldval = load i32* %var
+ %newval = add nsw i32 %oldval, -1
+ store i32 %newval, i32* %var
+ br label %somewhere
+atomic_ver:
+ %val = atomicrmw add i32* %var, i32 -1 seq_cst
+ br label %somewhere
+; CHECK: dmb
+; CHECK: ldxr
+; CHECK: dmb
+ ; The key point here is that the second dmb isn't immediately followed by the
+ ; simple_ver basic block, which LLVM attempted to do when DMB had been marked
+ ; with isBarrier. For now, look for something that looks like "somewhere".
+; CHECK-NEXT: mov
+somewhere:
+ %combined = phi i32 [ %val, %atomic_ver ], [ %newval, %simple_ver]
+ ret i32 %combined
+}
diff --git a/test/CodeGen/AArch64/atomic-ops.ll b/test/CodeGen/AArch64/atomic-ops.ll
new file mode 100644
index 0000000000..8a1e97626d
--- /dev/null
+++ b/test/CodeGen/AArch64/atomic-ops.ll
@@ -0,0 +1,1099 @@
+; RUN: llc -march=aarch64 -verify-machineinstrs < %s | FileCheck %s
+
+@var8 = global i8 0
+@var16 = global i16 0
+@var32 = global i32 0
+@var64 = global i64 0
+
+define i8 @test_atomic_load_add_i8(i8 %offset) nounwind {
+; CHECK: test_atomic_load_add_i8:
+ %old = atomicrmw add i8* @var8, i8 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: add [[NEW:w[0-9]+]], w[[OLD]], w0
+; CHECK-NEXT: stxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i8 %old
+}
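All of the atomicrmw expansions this file checks follow the same pre-LSE shape: a barrier, a load-exclusive/store-exclusive retry loop, and a trailing barrier. Written out once as a sketch for the byte-sized add case (register numbers and label are illustrative; the CHECK lines allow any):

        dmb     ish                     // leading barrier for the seq_cst ordering
        adrp    x9, var8
        add     x9, x9, #:lo12:var8     // address of the atomic variable
.LBB0_1:
        ldxrb   w8, [x9]                // load-exclusive the current value
        add     w10, w8, w0             // apply the rmw operation to it
        stxrb   w11, w10, [x9]          // attempt the store; w11 == 0 on success
        cmp     w11, #0
        b.ne    .LBB0_1                 // exclusive monitor was lost, so retry
        dmb     ish                     // trailing barrier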
+
+define i16 @test_atomic_load_add_i16(i16 %offset) nounwind {
+; CHECK: test_atomic_load_add_i16:
+ %old = atomicrmw add i16* @var16, i16 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: add [[NEW:w[0-9]+]], w[[OLD]], w0
+; CHECK-NEXT: stxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i16 %old
+}
+
+define i32 @test_atomic_load_add_i32(i32 %offset) nounwind {
+; CHECK: test_atomic_load_add_i32:
+ %old = atomicrmw add i32* @var32, i32 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: add [[NEW:w[0-9]+]], w[[OLD]], w0
+; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i32 %old
+}
+
+define i64 @test_atomic_load_add_i64(i64 %offset) nounwind {
+; CHECK: test_atomic_load_add_i64:
+ %old = atomicrmw add i64* @var64, i64 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; x0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: add [[NEW:x[0-9]+]], x[[OLD]], x0
+; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i64 %old
+}
+
+define i8 @test_atomic_load_sub_i8(i8 %offset) nounwind {
+; CHECK: test_atomic_load_sub_i8:
+ %old = atomicrmw sub i8* @var8, i8 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: sub [[NEW:w[0-9]+]], w[[OLD]], w0
+; CHECK-NEXT: stxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i8 %old
+}
+
+define i16 @test_atomic_load_sub_i16(i16 %offset) nounwind {
+; CHECK: test_atomic_load_sub_i16:
+ %old = atomicrmw sub i16* @var16, i16 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: sub [[NEW:w[0-9]+]], w[[OLD]], w0
+; CHECK-NEXT: stxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i16 %old
+}
+
+define i32 @test_atomic_load_sub_i32(i32 %offset) nounwind {
+; CHECK: test_atomic_load_sub_i32:
+ %old = atomicrmw sub i32* @var32, i32 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: sub [[NEW:w[0-9]+]], w[[OLD]], w0
+; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i32 %old
+}
+
+define i64 @test_atomic_load_sub_i64(i64 %offset) nounwind {
+; CHECK: test_atomic_load_sub_i64:
+ %old = atomicrmw sub i64* @var64, i64 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; x0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: sub [[NEW:x[0-9]+]], x[[OLD]], x0
+; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i64 %old
+}
+
+define i8 @test_atomic_load_and_i8(i8 %offset) nounwind {
+; CHECK: test_atomic_load_and_i8:
+ %old = atomicrmw and i8* @var8, i8 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: and [[NEW:w[0-9]+]], w[[OLD]], w0
+; CHECK-NEXT: stxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i8 %old
+}
+
+define i16 @test_atomic_load_and_i16(i16 %offset) nounwind {
+; CHECK: test_atomic_load_and_i16:
+ %old = atomicrmw and i16* @var16, i16 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: and [[NEW:w[0-9]+]], w[[OLD]], w0
+; CHECK-NEXT: stxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i16 %old
+}
+
+define i32 @test_atomic_load_and_i32(i32 %offset) nounwind {
+; CHECK: test_atomic_load_and_i32:
+ %old = atomicrmw and i32* @var32, i32 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: and [[NEW:w[0-9]+]], w[[OLD]], w0
+; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i32 %old
+}
+
+define i64 @test_atomic_load_and_i64(i64 %offset) nounwind {
+; CHECK: test_atomic_load_and_i64:
+ %old = atomicrmw and i64* @var64, i64 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; x0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: and [[NEW:x[0-9]+]], x[[OLD]], x0
+; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i64 %old
+}
+
+define i8 @test_atomic_load_or_i8(i8 %offset) nounwind {
+; CHECK: test_atomic_load_or_i8:
+ %old = atomicrmw or i8* @var8, i8 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: orr [[NEW:w[0-9]+]], w[[OLD]], w0
+; CHECK-NEXT: stxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i8 %old
+}
+
+define i16 @test_atomic_load_or_i16(i16 %offset) nounwind {
+; CHECK: test_atomic_load_or_i16:
+ %old = atomicrmw or i16* @var16, i16 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: orr [[NEW:w[0-9]+]], w[[OLD]], w0
+; CHECK-NEXT: stxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i16 %old
+}
+
+define i32 @test_atomic_load_or_i32(i32 %offset) nounwind {
+; CHECK: test_atomic_load_or_i32:
+ %old = atomicrmw or i32* @var32, i32 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: orr [[NEW:w[0-9]+]], w[[OLD]], w0
+; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i32 %old
+}
+
+define i64 @test_atomic_load_or_i64(i64 %offset) nounwind {
+; CHECK: test_atomic_load_or_i64:
+ %old = atomicrmw or i64* @var64, i64 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; x0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: orr [[NEW:x[0-9]+]], x[[OLD]], x0
+; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i64 %old
+}
+
+define i8 @test_atomic_load_xor_i8(i8 %offset) nounwind {
+; CHECK: test_atomic_load_xor_i8:
+ %old = atomicrmw xor i8* @var8, i8 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: eor [[NEW:w[0-9]+]], w[[OLD]], w0
+; CHECK-NEXT: stxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i8 %old
+}
+
+define i16 @test_atomic_load_xor_i16(i16 %offset) nounwind {
+; CHECK: test_atomic_load_xor_i16:
+ %old = atomicrmw xor i16* @var16, i16 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: eor [[NEW:w[0-9]+]], w[[OLD]], w0
+; CHECK-NEXT: stxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i16 %old
+}
+
+define i32 @test_atomic_load_xor_i32(i32 %offset) nounwind {
+; CHECK: test_atomic_load_xor_i32:
+ %old = atomicrmw xor i32* @var32, i32 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: eor [[NEW:w[0-9]+]], w[[OLD]], w0
+; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i32 %old
+}
+
+define i64 @test_atomic_load_xor_i64(i64 %offset) nounwind {
+; CHECK: test_atomic_load_xor_i64:
+ %old = atomicrmw xor i64* @var64, i64 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; x0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: eor [[NEW:x[0-9]+]], x[[OLD]], x0
+; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i64 %old
+}
+
+define i8 @test_atomic_load_xchg_i8(i8 %offset) nounwind {
+; CHECK: test_atomic_load_xchg_i8:
+ %old = atomicrmw xchg i8* @var8, i8 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: stxrb [[STATUS:w[0-9]+]], w0, [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i8 %old
+}
+
+define i16 @test_atomic_load_xchg_i16(i16 %offset) nounwind {
+; CHECK: test_atomic_load_xchg_i16:
+ %old = atomicrmw xchg i16* @var16, i16 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: stxrh [[STATUS:w[0-9]+]], w0, [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i16 %old
+}
+
+define i32 @test_atomic_load_xchg_i32(i32 %offset) nounwind {
+; CHECK: test_atomic_load_xchg_i32:
+ %old = atomicrmw xchg i32* @var32, i32 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], w0, [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i32 %old
+}
+
+define i64 @test_atomic_load_xchg_i64(i64 %offset) nounwind {
+; CHECK: test_atomic_load_xchg_i64:
+ %old = atomicrmw xchg i64* @var64, i64 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; x0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], x0, [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i64 %old
+}
+
+
+define i8 @test_atomic_load_min_i8(i8 %offset) nounwind {
+; CHECK: test_atomic_load_min_i8:
+ %old = atomicrmw min i8* @var8, i8 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: cmp w0, w[[OLD]], sxtb
+; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, gt
+; CHECK-NEXT: stxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i8 %old
+}
+
+define i16 @test_atomic_load_min_i16(i16 %offset) nounwind {
+; CHECK: test_atomic_load_min_i16:
+ %old = atomicrmw min i16* @var16, i16 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: cmp w0, w[[OLD]], sxth
+; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, gt
+; CHECK-NEXT: stxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i16 %old
+}
+
+define i32 @test_atomic_load_min_i32(i32 %offset) nounwind {
+; CHECK: test_atomic_load_min_i32:
+ %old = atomicrmw min i32* @var32, i32 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: cmp w0, w[[OLD]]
+; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, gt
+; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i32 %old
+}
+
+define i64 @test_atomic_load_min_i64(i64 %offset) nounwind {
+; CHECK: test_atomic_load_min_i64:
+ %old = atomicrmw min i64* @var64, i64 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; x0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: cmp x0, x[[OLD]]
+; CHECK-NEXT: csel [[NEW:x[0-9]+]], x[[OLD]], x0, gt
+; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i64 %old
+}
+
+define i8 @test_atomic_load_max_i8(i8 %offset) nounwind {
+; CHECK: test_atomic_load_max_i8:
+ %old = atomicrmw max i8* @var8, i8 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: cmp w0, w[[OLD]], sxtb
+; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, lt
+; CHECK-NEXT: stxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i8 %old
+}
+
+define i16 @test_atomic_load_max_i16(i16 %offset) nounwind {
+; CHECK: test_atomic_load_max_i16:
+ %old = atomicrmw max i16* @var16, i16 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: cmp w0, w[[OLD]], sxth
+; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, lt
+; CHECK-NEXT: stxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i16 %old
+}
+
+define i32 @test_atomic_load_max_i32(i32 %offset) nounwind {
+; CHECK: test_atomic_load_max_i32:
+ %old = atomicrmw max i32* @var32, i32 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: cmp w0, w[[OLD]]
+; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, lt
+; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i32 %old
+}
+
+define i64 @test_atomic_load_max_i64(i64 %offset) nounwind {
+; CHECK: test_atomic_load_max_i64:
+ %old = atomicrmw max i64* @var64, i64 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; x0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: cmp x0, x[[OLD]]
+; CHECK-NEXT: csel [[NEW:x[0-9]+]], x[[OLD]], x0, lt
+; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i64 %old
+}
+
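+; The unsigned variants follow the same pattern but zero-extend sub-word
+; values (uxtb/uxth) and select on "hi" (umin) or "lo" (umax).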
+define i8 @test_atomic_load_umin_i8(i8 %offset) nounwind {
+; CHECK: test_atomic_load_umin_i8:
+ %old = atomicrmw umin i8* @var8, i8 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: cmp w0, w[[OLD]], uxtb
+; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, hi
+; CHECK-NEXT: stxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i8 %old
+}
+
+define i16 @test_atomic_load_umin_i16(i16 %offset) nounwind {
+; CHECK: test_atomic_load_umin_i16:
+ %old = atomicrmw umin i16* @var16, i16 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: cmp w0, w[[OLD]], uxth
+; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, hi
+; CHECK-NEXT: stxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i16 %old
+}
+
+define i32 @test_atomic_load_umin_i32(i32 %offset) nounwind {
+; CHECK: test_atomic_load_umin_i32:
+ %old = atomicrmw umin i32* @var32, i32 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: cmp w0, w[[OLD]]
+; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, hi
+; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i32 %old
+}
+
+define i64 @test_atomic_load_umin_i64(i64 %offset) nounwind {
+; CHECK: test_atomic_load_umin_i64:
+ %old = atomicrmw umin i64* @var64, i64 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; x0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: cmp x0, x[[OLD]]
+; CHECK-NEXT: csel [[NEW:x[0-9]+]], x[[OLD]], x0, hi
+; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i64 %old
+}
+
+define i8 @test_atomic_load_umax_i8(i8 %offset) nounwind {
+; CHECK: test_atomic_load_umax_i8:
+ %old = atomicrmw umax i8* @var8, i8 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: cmp w0, w[[OLD]], uxtb
+; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, lo
+; CHECK-NEXT: stxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i8 %old
+}
+
+define i16 @test_atomic_load_umax_i16(i16 %offset) nounwind {
+; CHECK: test_atomic_load_umax_i16:
+ %old = atomicrmw umax i16* @var16, i16 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: cmp w0, w[[OLD]], uxth
+; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, lo
+; CHECK-NEXT: stxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i16 %old
+}
+
+define i32 @test_atomic_load_umax_i32(i32 %offset) nounwind {
+; CHECK: test_atomic_load_umax_i32:
+ %old = atomicrmw umax i32* @var32, i32 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: cmp w0, w[[OLD]]
+; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, lo
+; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i32 %old
+}
+
+define i64 @test_atomic_load_umax_i64(i64 %offset) nounwind {
+; CHECK: test_atomic_load_umax_i64:
+ %old = atomicrmw umax i64* @var64, i64 %offset seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; x0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: cmp x0, x[[OLD]]
+; CHECK-NEXT: csel [[NEW:x[0-9]+]], x[[OLD]], x0, lo
+; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne .LBB{{[0-9]+}}_1
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i64 %old
+}
+
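+; The cmpxchg expansion is also a load-exclusive/store-exclusive loop, but it
+; compares the loaded value against the expected one and branches out of the
+; loop on a mismatch instead of computing a new value.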
+define i8 @test_atomic_cmpxchg_i8(i8 %wanted, i8 %new) nounwind {
+; CHECK: test_atomic_cmpxchg_i8:
+ %old = cmpxchg i8* @var8, i8 %wanted, i8 %new seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
+
+; CHECK: [[STARTAGAIN:.LBB[0-9]+_[0-9]+]]:
+; CHECK-NEXT: ldxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: cmp w[[OLD]], w0
+; CHECK-NEXT: b.ne [[GET_OUT:.LBB[0-9]+_[0-9]+]]
+ ; As above, w1 is a reasonable guess.
+; CHECK: stxrb [[STATUS:w[0-9]+]], w1, [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne [[STARTAGAIN]]
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i8 %old
+}
+
+define i16 @test_atomic_cmpxchg_i16(i16 %wanted, i16 %new) nounwind {
+; CHECK: test_atomic_cmpxchg_i16:
+ %old = cmpxchg i16* @var16, i16 %wanted, i16 %new seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
+
+; CHECK: [[STARTAGAIN:.LBB[0-9]+_[0-9]+]]:
+; CHECK-NEXT: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: cmp w[[OLD]], w0
+; CHECK-NEXT: b.ne [[GET_OUT:.LBB[0-9]+_[0-9]+]]
+ ; As above, w1 is a reasonable guess.
+; CHECK: stxrh [[STATUS:w[0-9]+]], w1, [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne [[STARTAGAIN]]
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i16 %old
+}
+
+define i32 @test_atomic_cmpxchg_i32(i32 %wanted, i32 %new) nounwind {
+; CHECK: test_atomic_cmpxchg_i32:
+ %old = cmpxchg i32* @var32, i32 %wanted, i32 %new seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
+
+; CHECK: [[STARTAGAIN:.LBB[0-9]+_[0-9]+]]:
+; CHECK-NEXT: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
+ ; w0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: cmp w[[OLD]], w0
+; CHECK-NEXT: b.ne [[GET_OUT:.LBB[0-9]+_[0-9]+]]
+ ; As above, w1 is a reasonable guess.
+; CHECK: stxr [[STATUS:w[0-9]+]], w1, [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne [[STARTAGAIN]]
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i32 %old
+}
+
+define i64 @test_atomic_cmpxchg_i64(i64 %wanted, i64 %new) nounwind {
+; CHECK: test_atomic_cmpxchg_i64:
+ %old = cmpxchg i64* @var64, i64 %wanted, i64 %new seq_cst
+; CHECK: dmb ish
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
+
+; CHECK: [[STARTAGAIN:.LBB[0-9]+_[0-9]+]]:
+; CHECK-NEXT: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
+  ; x0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: cmp x[[OLD]], x0
+; CHECK-NEXT: b.ne [[GET_OUT:.LBB[0-9]+_[0-9]+]]
+  ; As above, x1 is a reasonable guess.
+; CHECK: stxr [[STATUS:w[0-9]+]], x1, [x[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: b.ne [[STARTAGAIN]]
+; CHECK: dmb ish
+
+; CHECK: mov x0, x[[OLD]]
+ ret i64 %old
+}
+
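+; Plain atomic loads: monotonic loads need no barrier and can use the normal
+; addressing modes, acquire loads use ldar*, and the seq_cst loads here are a
+; plain load followed by a trailing "dmb ish".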
+define i8 @test_atomic_load_monotonic_i8() nounwind {
+; CHECK: test_atomic_load_monotonic_i8:
+ %val = load atomic i8* @var8 monotonic, align 1
+; CHECK-NOT: dmb
+; CHECK: adrp x[[HIADDR:[0-9]+]], var8
+; CHECK: ldrb w0, [x[[HIADDR]], #:lo12:var8]
+; CHECK-NOT: dmb
+
+ ret i8 %val
+}
+
+define i8 @test_atomic_load_monotonic_regoff_i8(i64 %base, i64 %off) nounwind {
+; CHECK: test_atomic_load_monotonic_regoff_i8:
+ %addr_int = add i64 %base, %off
+ %addr = inttoptr i64 %addr_int to i8*
+
+ %val = load atomic i8* %addr monotonic, align 1
+; CHECK-NOT: dmb
+; CHECK: ldrb w0, [x0, x1]
+; CHECK-NOT: dmb
+
+ ret i8 %val
+}
+
+define i8 @test_atomic_load_acquire_i8() nounwind {
+; CHECK: test_atomic_load_acquire_i8:
+ %val = load atomic i8* @var8 acquire, align 1
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
+
+; CHECK: ldarb w0, [x[[ADDR]]]
+ ret i8 %val
+}
+
+define i8 @test_atomic_load_seq_cst_i8() nounwind {
+; CHECK: test_atomic_load_seq_cst_i8:
+ %val = load atomic i8* @var8 seq_cst, align 1
+; CHECK: adrp x[[HIADDR:[0-9]+]], var8
+; CHECK: ldrb w0, [x[[HIADDR]], #:lo12:var8]
+; CHECK: dmb ish
+ ret i8 %val
+}
+
+define i16 @test_atomic_load_monotonic_i16() nounwind {
+; CHECK: test_atomic_load_monotonic_i16:
+ %val = load atomic i16* @var16 monotonic, align 2
+; CHECK-NOT: dmb
+; CHECK: adrp x[[HIADDR:[0-9]+]], var16
+; CHECK: ldrh w0, [x[[HIADDR]], #:lo12:var16]
+; CHECK-NOT: dmb
+
+ ret i16 %val
+}
+
+define i32 @test_atomic_load_monotonic_regoff_i32(i64 %base, i64 %off) nounwind {
+; CHECK: test_atomic_load_monotonic_regoff_i32:
+ %addr_int = add i64 %base, %off
+ %addr = inttoptr i64 %addr_int to i32*
+
+ %val = load atomic i32* %addr monotonic, align 4
+; CHECK-NOT: dmb
+; CHECK: ldr w0, [x0, x1]
+; CHECK-NOT: dmb
+
+ ret i32 %val
+}
+
+define i64 @test_atomic_load_seq_cst_i64() nounwind {
+; CHECK: test_atomic_load_seq_cst_i64:
+ %val = load atomic i64* @var64 seq_cst, align 8
+; CHECK: adrp x[[HIADDR:[0-9]+]], var64
+; CHECK: ldr x0, [x[[HIADDR]], #:lo12:var64]
+; CHECK: dmb ish
+ ret i64 %val
+}
+
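+; Atomic stores: monotonic stores are ordinary str* instructions, release and
+; seq_cst stores use stlr*, and seq_cst additionally emits a trailing
+; "dmb ish".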
+define void @test_atomic_store_monotonic_i8(i8 %val) nounwind {
+; CHECK: test_atomic_store_monotonic_i8:
+ store atomic i8 %val, i8* @var8 monotonic, align 1
+; CHECK: adrp x[[HIADDR:[0-9]+]], var8
+; CHECK: strb w0, [x[[HIADDR]], #:lo12:var8]
+
+ ret void
+}
+
+define void @test_atomic_store_monotonic_regoff_i8(i64 %base, i64 %off, i8 %val) nounwind {
+; CHECK: test_atomic_store_monotonic_regoff_i8:
+
+ %addr_int = add i64 %base, %off
+ %addr = inttoptr i64 %addr_int to i8*
+
+ store atomic i8 %val, i8* %addr monotonic, align 1
+; CHECK: strb w2, [x0, x1]
+
+ ret void
+}
+define void @test_atomic_store_release_i8(i8 %val) nounwind {
+; CHECK: test_atomic_store_release_i8:
+ store atomic i8 %val, i8* @var8 release, align 1
+; CHECK: adrp [[HIADDR:x[0-9]+]], var8
+; CHECK: add x[[ADDR:[0-9]+]], [[HIADDR]], #:lo12:var8
+; CHECK: stlrb w0, [x[[ADDR]]]
+
+ ret void
+}
+
+define void @test_atomic_store_seq_cst_i8(i8 %val) nounwind {
+; CHECK: test_atomic_store_seq_cst_i8:
+ store atomic i8 %val, i8* @var8 seq_cst, align 1
+; CHECK: adrp [[HIADDR:x[0-9]+]], var8
+; CHECK: add x[[ADDR:[0-9]+]], [[HIADDR]], #:lo12:var8
+; CHECK: stlrb w0, [x[[ADDR]]]
+; CHECK: dmb ish
+
+ ret void
+}
+
+define void @test_atomic_store_monotonic_i16(i16 %val) nounwind {
+; CHECK: test_atomic_store_monotonic_i16:
+ store atomic i16 %val, i16* @var16 monotonic, align 2
+; CHECK: adrp x[[HIADDR:[0-9]+]], var16
+; CHECK: strh w0, [x[[HIADDR]], #:lo12:var16]
+
+ ret void
+}
+
+define void @test_atomic_store_monotonic_regoff_i32(i64 %base, i64 %off, i32 %val) nounwind {
+; CHECK: test_atomic_store_monotonic_regoff_i32:
+
+ %addr_int = add i64 %base, %off
+ %addr = inttoptr i64 %addr_int to i32*
+
+ store atomic i32 %val, i32* %addr monotonic, align 4
+; CHECK: str w2, [x0, x1]
+
+ ret void
+}
+
+define void @test_atomic_store_release_i64(i64 %val) nounwind {
+; CHECK: test_atomic_store_release_i64:
+ store atomic i64 %val, i64* @var64 release, align 8
+; CHECK: adrp [[HIADDR:x[0-9]+]], var64
+; CHECK: add x[[ADDR:[0-9]+]], [[HIADDR]], #:lo12:var64
+; CHECK: stlr x0, [x[[ADDR]]]
+
+ ret void
+}
diff --git a/test/CodeGen/AArch64/basic-pic.ll b/test/CodeGen/AArch64/basic-pic.ll
new file mode 100644
index 0000000000..da94041c95
--- /dev/null
+++ b/test/CodeGen/AArch64/basic-pic.ll
@@ -0,0 +1,70 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs -relocation-model=pic %s -o - | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs -relocation-model=pic -filetype=obj %s -o - | llvm-objdump -r - | FileCheck --check-prefix=CHECK-ELF %s
+
+@var = global i32 0
+
+; CHECK-ELF: RELOCATION RECORDS FOR [.text]
+
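+; Under PIC, accesses to ordinary globals are expected to go through the GOT
+; (adrp of :got:var plus an ldr with #:got_lo12:), each half getting its own
+; relocation; the hidden globals further down are addressed directly via
+; adrp and :lo12: instead.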
+define i32 @get_globalvar() {
+; CHECK: get_globalvar:
+
+ %val = load i32* @var
+; CHECK: adrp x[[GOTHI:[0-9]+]], :got:var
+; CHECK: ldr x[[GOTLOC:[0-9]+]], [x[[GOTHI]], #:got_lo12:var]
+; CHECK: ldr w0, [x[[GOTLOC]]]
+
+; CHECK-ELF: R_AARCH64_ADR_GOT_PAGE var
+; CHECK-ELF: R_AARCH64_LD64_GOT_LO12_NC var
+ ret i32 %val
+}
+
+define i32* @get_globalvaraddr() {
+; CHECK: get_globalvaraddr:
+
+ %val = load i32* @var
+; CHECK: adrp x[[GOTHI:[0-9]+]], :got:var
+; CHECK: ldr x0, [x[[GOTHI]], #:got_lo12:var]
+
+; CHECK-ELF: R_AARCH64_ADR_GOT_PAGE var
+; CHECK-ELF: R_AARCH64_LD64_GOT_LO12_NC var
+ ret i32* @var
+}
+
+@hiddenvar = hidden global i32 0
+
+define i32 @get_hiddenvar() {
+; CHECK: get_hiddenvar:
+
+ %val = load i32* @hiddenvar
+; CHECK: adrp x[[HI:[0-9]+]], hiddenvar
+; CHECK: ldr w0, [x[[HI]], #:lo12:hiddenvar]
+
+; CHECK-ELF: R_AARCH64_ADR_PREL_PG_HI21 hiddenvar
+; CHECK-ELF: R_AARCH64_LDST32_ABS_LO12_NC hiddenvar
+ ret i32 %val
+}
+
+define i32* @get_hiddenvaraddr() {
+; CHECK: get_hiddenvaraddr:
+
+ %val = load i32* @hiddenvar
+; CHECK: adrp [[HI:x[0-9]+]], hiddenvar
+; CHECK: add x0, [[HI]], #:lo12:hiddenvar
+
+; CHECK-ELF: R_AARCH64_ADR_PREL_PG_HI21 hiddenvar
+; CHECK-ELF: R_AARCH64_ADD_ABS_LO12_NC hiddenvar
+ ret i32* @hiddenvar
+}
+
+define void()* @get_func() {
+; CHECK: get_func:
+
+ ret void()* bitcast(void()*()* @get_func to void()*)
+; CHECK: adrp x[[GOTHI:[0-9]+]], :got:get_func
+; CHECK: ldr x0, [x[[GOTHI]], #:got_lo12:get_func]
+
+  ; Particularly important that the ADRP gets a relocation: LLVM tends to think
+ ; it can relax it because it knows where get_func is. It can't!
+; CHECK-ELF: R_AARCH64_ADR_GOT_PAGE get_func
+; CHECK-ELF: R_AARCH64_LD64_GOT_LO12_NC get_func
+}
\ No newline at end of file
diff --git a/test/CodeGen/AArch64/bitfield-insert-0.ll b/test/CodeGen/AArch64/bitfield-insert-0.ll
new file mode 100644
index 0000000000..1343ec7449
--- /dev/null
+++ b/test/CodeGen/AArch64/bitfield-insert-0.ll
@@ -0,0 +1,19 @@
+; RUN: llc -march=aarch64 -filetype=obj < %s | llvm-objdump -disassemble - | FileCheck %s
+
+; The encoding of lsb -> immr in the CGed bitfield instructions was wrong at one
+; point, in the edge case where lsb = 0. Just make sure.
+
+define void @test_bfi0(i32* %existing, i32* %new) {
+; CHECK: bfxil {{w[0-9]+}}, {{w[0-9]+}}, #0, #18
+
+ %oldval = load volatile i32* %existing
+ %oldval_keep = and i32 %oldval, 4294705152 ; 0xfffc_0000
+
+ %newval = load volatile i32* %new
+ %newval_masked = and i32 %newval, 262143 ; = 0x0003_ffff
+
+ %combined = or i32 %newval_masked, %oldval_keep
+ store volatile i32 %combined, i32* %existing
+
+ ret void
+}
\ No newline at end of file
diff --git a/test/CodeGen/AArch64/bitfield-insert.ll b/test/CodeGen/AArch64/bitfield-insert.ll
new file mode 100644
index 0000000000..d874a12240
--- /dev/null
+++ b/test/CodeGen/AArch64/bitfield-insert.ll
@@ -0,0 +1,193 @@
+; RUN: llc -march=aarch64 < %s | FileCheck %s
+
+; First, a simple example from Clang. The registers could plausibly be
+; different, but probably won't be.
+
+%struct.foo = type { i8, [2 x i8], i8 }
+
+define [1 x i64] @from_clang([1 x i64] %f.coerce, i32 %n) nounwind readnone {
+; CHECK: from_clang:
+; CHECK: bfi w0, w1, #3, #4
+; CHECK-NEXT: ret
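+; The shifted value is masked with 0x78 (bits 3-6) and the original field with
+; 0x87, so the four new bits are inserted at bit 3, i.e. a single bfi #3, #4.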
+
+entry:
+ %f.coerce.fca.0.extract = extractvalue [1 x i64] %f.coerce, 0
+ %tmp.sroa.0.0.extract.trunc = trunc i64 %f.coerce.fca.0.extract to i32
+ %bf.value = shl i32 %n, 3
+ %0 = and i32 %bf.value, 120
+ %f.sroa.0.0.insert.ext.masked = and i32 %tmp.sroa.0.0.extract.trunc, 135
+ %1 = or i32 %f.sroa.0.0.insert.ext.masked, %0
+ %f.sroa.0.0.extract.trunc = zext i32 %1 to i64
+ %tmp1.sroa.1.1.insert.insert = and i64 %f.coerce.fca.0.extract, 4294967040
+ %tmp1.sroa.0.0.insert.insert = or i64 %f.sroa.0.0.extract.trunc, %tmp1.sroa.1.1.insert.insert
+ %.fca.0.insert = insertvalue [1 x i64] undef, i64 %tmp1.sroa.0.0.insert.insert, 0
+ ret [1 x i64] %.fca.0.insert
+}
+
+define void @test_whole32(i32* %existing, i32* %new) {
+; CHECK: test_whole32:
+; CHECK: bfi {{w[0-9]+}}, {{w[0-9]+}}, #26, #5
+
+ %oldval = load volatile i32* %existing
+ %oldval_keep = and i32 %oldval, 2214592511 ; =0x83ffffff
+
+ %newval = load volatile i32* %new
+ %newval_shifted = shl i32 %newval, 26
+ %newval_masked = and i32 %newval_shifted, 2080374784 ; = 0x7c000000
+
+ %combined = or i32 %oldval_keep, %newval_masked
+ store volatile i32 %combined, i32* %existing
+
+ ret void
+}
+
+define void @test_whole64(i64* %existing, i64* %new) {
+; CHECK: test_whole64:
+; CHECK: bfi {{x[0-9]+}}, {{x[0-9]+}}, #26, #14
+; CHECK-NOT: and
+; CHECK: ret
+
+ %oldval = load volatile i64* %existing
+ %oldval_keep = and i64 %oldval, 18446742974265032703 ; = 0xffffff0003ffffffL
+
+ %newval = load volatile i64* %new
+ %newval_shifted = shl i64 %newval, 26
+ %newval_masked = and i64 %newval_shifted, 1099444518912 ; = 0xfffc000000
+
+ %combined = or i64 %oldval_keep, %newval_masked
+ store volatile i64 %combined, i64* %existing
+
+ ret void
+}
+
+define void @test_whole32_from64(i64* %existing, i64* %new) {
+; CHECK: test_whole32_from64:
+; CHECK: bfi {{w[0-9]+}}, {{w[0-9]+}}, #{{0|16}}, #16
+; CHECK-NOT: and
+; CHECK: ret
+
+ %oldval = load volatile i64* %existing
+ %oldval_keep = and i64 %oldval, 4294901760 ; = 0xffff0000
+
+ %newval = load volatile i64* %new
+ %newval_masked = and i64 %newval, 65535 ; = 0xffff
+
+ %combined = or i64 %oldval_keep, %newval_masked
+ store volatile i64 %combined, i64* %existing
+
+ ret void
+}
+
+define void @test_32bit_masked(i32 *%existing, i32 *%new) {
+; CHECK: test_32bit_masked:
+; CHECK: bfi [[INSERT:w[0-9]+]], {{w[0-9]+}}, #3, #4
+; CHECK: and {{w[0-9]+}}, [[INSERT]], #0xff
+
+ %oldval = load volatile i32* %existing
+ %oldval_keep = and i32 %oldval, 135 ; = 0x87
+
+ %newval = load volatile i32* %new
+ %newval_shifted = shl i32 %newval, 3
+ %newval_masked = and i32 %newval_shifted, 120 ; = 0x78
+
+ %combined = or i32 %oldval_keep, %newval_masked
+ store volatile i32 %combined, i32* %existing
+
+ ret void
+}
+
+define void @test_64bit_masked(i64 *%existing, i64 *%new) {
+; CHECK: test_64bit_masked:
+; CHECK: bfi [[INSERT:x[0-9]+]], {{x[0-9]+}}, #40, #8
+; CHECK: and {{x[0-9]+}}, [[INSERT]], #0xffff00000000
+
+ %oldval = load volatile i64* %existing
+ %oldval_keep = and i64 %oldval, 1095216660480 ; = 0xff_0000_0000
+
+ %newval = load volatile i64* %new
+ %newval_shifted = shl i64 %newval, 40
+ %newval_masked = and i64 %newval_shifted, 280375465082880 ; = 0xff00_0000_0000
+
+ %combined = or i64 %newval_masked, %oldval_keep
+ store volatile i64 %combined, i64* %existing
+
+ ret void
+}
+
+; Mask is too complicated for a literal ANDwwi; make sure other avenues are tried.
+define void @test_32bit_complexmask(i32 *%existing, i32 *%new) {
+; CHECK: test_32bit_complexmask:
+; CHECK: bfi {{w[0-9]+}}, {{w[0-9]+}}, #3, #4
+; CHECK: and {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+
+ %oldval = load volatile i32* %existing
+ %oldval_keep = and i32 %oldval, 647 ; = 0x287
+
+ %newval = load volatile i32* %new
+ %newval_shifted = shl i32 %newval, 3
+  %newval_masked = and i32 %newval_shifted, 120 ; = 0x78
+
+ %combined = or i32 %oldval_keep, %newval_masked
+ store volatile i32 %combined, i32* %existing
+
+ ret void
+}
+
+; Neither mask is a contiguous set of 1s, so BFI can't be used.
+define void @test_32bit_badmask(i32 *%existing, i32 *%new) {
+; CHECK: test_32bit_badmask:
+; CHECK-NOT: bfi
+; CHECK: ret
+
+ %oldval = load volatile i32* %existing
+ %oldval_keep = and i32 %oldval, 135 ; = 0x87
+
+ %newval = load volatile i32* %new
+ %newval_shifted = shl i32 %newval, 3
+ %newval_masked = and i32 %newval_shifted, 632 ; = 0x278
+
+ %combined = or i32 %oldval_keep, %newval_masked
+ store volatile i32 %combined, i32* %existing
+
+ ret void
+}
+
+; Ditto
+define void @test_64bit_badmask(i64 *%existing, i64 *%new) {
+; CHECK: test_64bit_badmask:
+; CHECK-NOT: bfi
+; CHECK: ret
+
+ %oldval = load volatile i64* %existing
+ %oldval_keep = and i64 %oldval, 135 ; = 0x87
+
+ %newval = load volatile i64* %new
+ %newval_shifted = shl i64 %newval, 3
+  %newval_masked = and i64 %newval_shifted, 664 ; = 0x298
+
+ %combined = or i64 %oldval_keep, %newval_masked
+ store volatile i64 %combined, i64* %existing
+
+ ret void
+}
+
+; Bitfield insert where there's a left-over shr needed at the beginning
+; (e.g. result of str.bf1 = str.bf2)
+define void @test_32bit_with_shr(i32* %existing, i32* %new) {
+; CHECK: test_32bit_with_shr:
+
+ %oldval = load volatile i32* %existing
+ %oldval_keep = and i32 %oldval, 2214592511 ; =0x83ffffff
+
+ %newval = load i32* %new
+ %newval_shifted = shl i32 %newval, 12
+ %newval_masked = and i32 %newval_shifted, 2080374784 ; = 0x7c000000
+
+ %combined = or i32 %oldval_keep, %newval_masked
+ store volatile i32 %combined, i32* %existing
+; CHECK: lsr [[BIT:w[0-9]+]], {{w[0-9]+}}, #14
+; CHECK: bfi {{w[0-9]}}, [[BIT]], #26, #5
+
+ ret void
+}
+
diff --git a/test/CodeGen/AArch64/bitfield.ll b/test/CodeGen/AArch64/bitfield.ll
new file mode 100644
index 0000000000..06a296ef27
--- /dev/null
+++ b/test/CodeGen/AArch64/bitfield.ll
@@ -0,0 +1,218 @@
+
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
+@var32 = global i32 0
+@var64 = global i64 0
+
+define void @test_extendb(i8 %var) {
+; CHECK: test_extendb:
+
+ %sxt32 = sext i8 %var to i32
+ store volatile i32 %sxt32, i32* @var32
+; CHECK: sxtb {{w[0-9]+}}, {{w[0-9]+}}
+
+ %sxt64 = sext i8 %var to i64
+ store volatile i64 %sxt64, i64* @var64
+; CHECK: sxtb {{x[0-9]+}}, {{w[0-9]+}}
+
+; N.b. this doesn't actually produce a bitfield instruction at the
+; moment, but it's still a good test to have and the semantics are
+; correct.
+ %uxt32 = zext i8 %var to i32
+ store volatile i32 %uxt32, i32* @var32
+; CHECK: and {{w[0-9]+}}, {{w[0-9]+}}, #0xff
+
+ %uxt64 = zext i8 %var to i64
+ store volatile i64 %uxt64, i64* @var64
+; CHECK: uxtb {{x[0-9]+}}, {{w[0-9]+}}
+ ret void
+}
+
+define void @test_extendh(i16 %var) {
+; CHECK: test_extendh:
+
+ %sxt32 = sext i16 %var to i32
+ store volatile i32 %sxt32, i32* @var32
+; CHECK: sxth {{w[0-9]+}}, {{w[0-9]+}}
+
+ %sxt64 = sext i16 %var to i64
+ store volatile i64 %sxt64, i64* @var64
+; CHECK: sxth {{x[0-9]+}}, {{w[0-9]+}}
+
+; N.b. this doesn't actually produce a bitfield instruction at the
+; moment, but it's still a good test to have and the semantics are
+; correct.
+ %uxt32 = zext i16 %var to i32
+ store volatile i32 %uxt32, i32* @var32
+; CHECK: and {{w[0-9]+}}, {{w[0-9]+}}, #0xffff
+
+ %uxt64 = zext i16 %var to i64
+ store volatile i64 %uxt64, i64* @var64
+; CHECK: uxth {{x[0-9]+}}, {{w[0-9]+}}
+ ret void
+}
+
+define void @test_extendw(i32 %var) {
+; CHECK: test_extendw:
+
+ %sxt64 = sext i32 %var to i64
+ store volatile i64 %sxt64, i64* @var64
+; CHECK: sxtw {{x[0-9]+}}, {{w[0-9]+}}
+
+ %uxt64 = zext i32 %var to i64
+ store volatile i64 %uxt64, i64* @var64
+; CHECK: ubfx {{w[0-9]+}}, {{w[0-9]+}}, #0, #32
+ ret void
+}
+
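+; Constant shifts are expected to come out as the asr/lsr/lsl immediate forms,
+; which are aliases of the sbfm/ubfm bitfield instructions.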
+define void @test_shifts(i32 %val32, i64 %val64) {
+; CHECK: test_shifts:
+
+ %shift1 = ashr i32 %val32, 31
+ store volatile i32 %shift1, i32* @var32
+; CHECK: asr {{w[0-9]+}}, {{w[0-9]+}}, #31
+
+ %shift2 = lshr i32 %val32, 8
+ store volatile i32 %shift2, i32* @var32
+; CHECK: lsr {{w[0-9]+}}, {{w[0-9]+}}, #8
+
+ %shift3 = shl i32 %val32, 1
+ store volatile i32 %shift3, i32* @var32
+; CHECK: lsl {{w[0-9]+}}, {{w[0-9]+}}, #1
+
+ %shift4 = ashr i64 %val64, 31
+ store volatile i64 %shift4, i64* @var64
+; CHECK: asr {{x[0-9]+}}, {{x[0-9]+}}, #31
+
+ %shift5 = lshr i64 %val64, 8
+ store volatile i64 %shift5, i64* @var64
+; CHECK: lsr {{x[0-9]+}}, {{x[0-9]+}}, #8
+
+ %shift6 = shl i64 %val64, 63
+ store volatile i64 %shift6, i64* @var64
+; CHECK: lsl {{x[0-9]+}}, {{x[0-9]+}}, #63
+
+ %shift7 = ashr i64 %val64, 63
+ store volatile i64 %shift7, i64* @var64
+; CHECK: asr {{x[0-9]+}}, {{x[0-9]+}}, #63
+
+ %shift8 = lshr i64 %val64, 63
+ store volatile i64 %shift8, i64* @var64
+; CHECK: lsr {{x[0-9]+}}, {{x[0-9]+}}, #63
+
+ %shift9 = lshr i32 %val32, 31
+ store volatile i32 %shift9, i32* @var32
+; CHECK: lsr {{w[0-9]+}}, {{w[0-9]+}}, #31
+
+ %shift10 = shl i32 %val32, 31
+ store volatile i32 %shift10, i32* @var32
+; CHECK: lsl {{w[0-9]+}}, {{w[0-9]+}}, #31
+
+ ret void
+}
+
+; LLVM can produce in-register extensions taking place entirely with
+; 64-bit registers too.
+define void @test_sext_inreg_64(i64 %in) {
+; CHECK: test_sext_inreg_64:
+
+; i1 doesn't have an official alias, but crops up and is handled by
+; the bitfield ops.
+ %trunc_i1 = trunc i64 %in to i1
+ %sext_i1 = sext i1 %trunc_i1 to i64
+ store volatile i64 %sext_i1, i64* @var64
+; CHECK: sbfx {{x[0-9]+}}, {{x[0-9]+}}, #0, #1
+
+ %trunc_i8 = trunc i64 %in to i8
+ %sext_i8 = sext i8 %trunc_i8 to i64
+ store volatile i64 %sext_i8, i64* @var64
+; CHECK: sxtb {{x[0-9]+}}, {{w[0-9]+}}
+
+ %trunc_i16 = trunc i64 %in to i16
+ %sext_i16 = sext i16 %trunc_i16 to i64
+ store volatile i64 %sext_i16, i64* @var64
+; CHECK: sxth {{x[0-9]+}}, {{w[0-9]+}}
+
+ %trunc_i32 = trunc i64 %in to i32
+ %sext_i32 = sext i32 %trunc_i32 to i64
+ store volatile i64 %sext_i32, i64* @var64
+; CHECK: sxtw {{x[0-9]+}}, {{w[0-9]+}}
+ ret void
+}
+
+; These instructions don't actually select to official bitfield
+; operations, but it's important that we select them somehow:
+define void @test_zext_inreg_64(i64 %in) {
+; CHECK: test_zext_inreg_64:
+
+ %trunc_i8 = trunc i64 %in to i8
+ %zext_i8 = zext i8 %trunc_i8 to i64
+ store volatile i64 %zext_i8, i64* @var64
+; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, #0xff
+
+ %trunc_i16 = trunc i64 %in to i16
+ %zext_i16 = zext i16 %trunc_i16 to i64
+ store volatile i64 %zext_i16, i64* @var64
+; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, #0xffff
+
+ %trunc_i32 = trunc i64 %in to i32
+ %zext_i32 = zext i32 %trunc_i32 to i64
+ store volatile i64 %zext_i32, i64* @var64
+; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, #0xffffffff
+
+ ret void
+}
+
+define i64 @test_sext_inreg_from_32(i32 %in) {
+; CHECK: test_sext_inreg_from_32:
+
+ %small = trunc i32 %in to i1
+ %ext = sext i1 %small to i64
+
+  ; Different registers are, of course, possible, though suboptimal. This is
+ ; making sure that a 64-bit "(sext_inreg (anyext GPR32), i1)" uses the 64-bit
+ ; sbfx rather than just 32-bits.
+; CHECK: sbfx x0, x0, #0, #1
+ ret i64 %ext
+}
+
+
+define i32 @test_ubfx32(i32* %addr) {
+; CHECK: test_ubfx32:
+; CHECK: ubfx {{w[0-9]+}}, {{w[0-9]+}}, #23, #3
+
+ %fields = load i32* %addr
+ %shifted = lshr i32 %fields, 23
+ %masked = and i32 %shifted, 7
+ ret i32 %masked
+}
+
+define i64 @test_ubfx64(i64* %addr) {
+; CHECK: test_ubfx64:
+; CHECK: ubfx {{x[0-9]+}}, {{x[0-9]+}}, #25, #10
+
+ %fields = load i64* %addr
+ %shifted = lshr i64 %fields, 25
+ %masked = and i64 %shifted, 1023
+ ret i64 %masked
+}
+
+define i32 @test_sbfx32(i32* %addr) {
+; CHECK: test_sbfx32:
+; CHECK: sbfx {{w[0-9]+}}, {{w[0-9]+}}, #6, #3
+
+ %fields = load i32* %addr
+ %shifted = shl i32 %fields, 23
+ %extended = ashr i32 %shifted, 29
+ ret i32 %extended
+}
+
+define i64 @test_sbfx64(i64* %addr) {
+; CHECK: test_sbfx64:
+; CHECK: sbfx {{x[0-9]+}}, {{x[0-9]+}}, #0, #63
+
+ %fields = load i64* %addr
+ %shifted = shl i64 %fields, 1
+ %extended = ashr i64 %shifted, 1
+ ret i64 %extended
+}
diff --git a/test/CodeGen/AArch64/blockaddress.ll b/test/CodeGen/AArch64/blockaddress.ll
new file mode 100644
index 0000000000..a7de51d458
--- /dev/null
+++ b/test/CodeGen/AArch64/blockaddress.ll
@@ -0,0 +1,18 @@
+; RUN: llc -march=aarch64 -verify-machineinstrs < %s | FileCheck %s
+
+@addr = global i8* null
+
+define void @test_blockaddress() {
+; CHECK: test_blockaddress:
+ store volatile i8* blockaddress(@test_blockaddress, %block), i8** @addr
+ %val = load volatile i8** @addr
+ indirectbr i8* %val, [label %block]
+; CHECK: adrp [[DEST_HI:x[0-9]+]], [[DEST_LBL:.Ltmp[0-9]+]]
+; CHECK: add [[DEST:x[0-9]+]], [[DEST_HI]], #:lo12:[[DEST_LBL]]
+; CHECK: str [[DEST]],
+; CHECK: ldr [[NEWDEST:x[0-9]+]]
+; CHECK: br [[NEWDEST]]
+
+block:
+ ret void
+}
diff --git a/test/CodeGen/AArch64/bool-loads.ll b/test/CodeGen/AArch64/bool-loads.ll
new file mode 100644
index 0000000000..43d030f80d
--- /dev/null
+++ b/test/CodeGen/AArch64/bool-loads.ll
@@ -0,0 +1,55 @@
+; RUN: llc -march=aarch64 < %s | FileCheck %s
+
+@var = global i1 0
+
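+; Loads of an i1 global come out as ldrb; sign-extension then needs an sbfx of
+; the low bit, while zero-extension needs no extra instruction because only
+; 0 or 1 can have been stored.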
+define i32 @test_sextloadi32() {
+; CHECK: test_sextloadi32
+
+ %val = load i1* @var
+ %ret = sext i1 %val to i32
+; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var]
+; CHECK: sbfx {{x[0-9]+}}, {{x[0-9]+}}, #0, #1
+
+ ret i32 %ret
+; CHECK: ret
+}
+
+define i64 @test_sextloadi64() {
+; CHECK: test_sextloadi64
+
+ %val = load i1* @var
+ %ret = sext i1 %val to i64
+; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var]
+; CHECK: sbfx {{x[0-9]+}}, {{x[0-9]+}}, #0, #1
+
+ ret i64 %ret
+; CHECK: ret
+}
+
+define i32 @test_zextloadi32() {
+; CHECK: test_zextloadi32
+
+; It's not actually necessary that "ret" is next, but as far as LLVM
+; is concerned only 0 or 1 should be loadable so no extension is
+; necessary.
+ %val = load i1* @var
+ %ret = zext i1 %val to i32
+; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var]
+
+ ret i32 %ret
+; CHECK-NEXT: ret
+}
+
+define i64 @test_zextloadi64() {
+; CHECK: test_zextloadi64
+
+; It's not actually necessary that "ret" is next, but as far as LLVM
+; is concerned only 0 or 1 should be loadable so no extension is
+; necessary.
+ %val = load i1* @var
+ %ret = zext i1 %val to i64
+; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var]
+
+ ret i64 %ret
+; CHECK-NEXT: ret
+}
diff --git a/test/CodeGen/AArch64/breg.ll b/test/CodeGen/AArch64/breg.ll
new file mode 100644
index 0000000000..fc490610e0
--- /dev/null
+++ b/test/CodeGen/AArch64/breg.ll
@@ -0,0 +1,17 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
+@stored_label = global i8* null
+
+define void @foo() {
+; CHECK: foo:
+ %lab = load i8** @stored_label
+ indirectbr i8* %lab, [label %otherlab, label %retlab]
+; CHECK: adrp {{x[0-9]+}}, stored_label
+; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:stored_label]
+; CHECK: br {{x[0-9]+}}
+
+otherlab:
+ ret void
+retlab:
+ ret void
+}
diff --git a/test/CodeGen/AArch64/callee-save.ll b/test/CodeGen/AArch64/callee-save.ll
new file mode 100644
index 0000000000..953dbc433f
--- /dev/null
+++ b/test/CodeGen/AArch64/callee-save.ll
@@ -0,0 +1,86 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
+@var = global float 0.0
+
+define void @foo() {
+; CHECK: foo:
+
+; CHECK: stp d14, d15, [sp
+; CHECK: stp d12, d13, [sp
+; CHECK: stp d10, d11, [sp
+; CHECK: stp d8, d9, [sp
+
+ ; Create lots of live variables to exhaust the supply of
+ ; caller-saved registers
+ %val1 = load volatile float* @var
+ %val2 = load volatile float* @var
+ %val3 = load volatile float* @var
+ %val4 = load volatile float* @var
+ %val5 = load volatile float* @var
+ %val6 = load volatile float* @var
+ %val7 = load volatile float* @var
+ %val8 = load volatile float* @var
+ %val9 = load volatile float* @var
+ %val10 = load volatile float* @var
+ %val11 = load volatile float* @var
+ %val12 = load volatile float* @var
+ %val13 = load volatile float* @var
+ %val14 = load volatile float* @var
+ %val15 = load volatile float* @var
+ %val16 = load volatile float* @var
+ %val17 = load volatile float* @var
+ %val18 = load volatile float* @var
+ %val19 = load volatile float* @var
+ %val20 = load volatile float* @var
+ %val21 = load volatile float* @var
+ %val22 = load volatile float* @var
+ %val23 = load volatile float* @var
+ %val24 = load volatile float* @var
+ %val25 = load volatile float* @var
+ %val26 = load volatile float* @var
+ %val27 = load volatile float* @var
+ %val28 = load volatile float* @var
+ %val29 = load volatile float* @var
+ %val30 = load volatile float* @var
+ %val31 = load volatile float* @var
+ %val32 = load volatile float* @var
+
+ store volatile float %val1, float* @var
+ store volatile float %val2, float* @var
+ store volatile float %val3, float* @var
+ store volatile float %val4, float* @var
+ store volatile float %val5, float* @var
+ store volatile float %val6, float* @var
+ store volatile float %val7, float* @var
+ store volatile float %val8, float* @var
+ store volatile float %val9, float* @var
+ store volatile float %val10, float* @var
+ store volatile float %val11, float* @var
+ store volatile float %val12, float* @var
+ store volatile float %val13, float* @var
+ store volatile float %val14, float* @var
+ store volatile float %val15, float* @var
+ store volatile float %val16, float* @var
+ store volatile float %val17, float* @var
+ store volatile float %val18, float* @var
+ store volatile float %val19, float* @var
+ store volatile float %val20, float* @var
+ store volatile float %val21, float* @var
+ store volatile float %val22, float* @var
+ store volatile float %val23, float* @var
+ store volatile float %val24, float* @var
+ store volatile float %val25, float* @var
+ store volatile float %val26, float* @var
+ store volatile float %val27, float* @var
+ store volatile float %val28, float* @var
+ store volatile float %val29, float* @var
+ store volatile float %val30, float* @var
+ store volatile float %val31, float* @var
+ store volatile float %val32, float* @var
+
+; CHECK: ldp d8, d9, [sp
+; CHECK: ldp d10, d11, [sp
+; CHECK: ldp d12, d13, [sp
+; CHECK: ldp d14, d15, [sp
+ ret void
+}
diff --git a/test/CodeGen/AArch64/compare-branch.ll b/test/CodeGen/AArch64/compare-branch.ll
new file mode 100644
index 0000000000..52a0d5d92a
--- /dev/null
+++ b/test/CodeGen/AArch64/compare-branch.ll
@@ -0,0 +1,38 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
+@var32 = global i32 0
+@var64 = global i64 0
+
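+; Branches on a comparison against zero should fold into cbz/cbnz rather than
+; a separate cmp and conditional branch.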
+define void @foo() {
+; CHECK: foo:
+
+ %val1 = load volatile i32* @var32
+ %tst1 = icmp eq i32 %val1, 0
+ br i1 %tst1, label %end, label %test2
+; CHECK: cbz {{w[0-9]+}}, .LBB
+
+test2:
+ %val2 = load volatile i32* @var32
+ %tst2 = icmp ne i32 %val2, 0
+ br i1 %tst2, label %end, label %test3
+; CHECK: cbnz {{w[0-9]+}}, .LBB
+
+test3:
+ %val3 = load volatile i64* @var64
+ %tst3 = icmp eq i64 %val3, 0
+ br i1 %tst3, label %end, label %test4
+; CHECK: cbz {{x[0-9]+}}, .LBB
+
+test4:
+ %val4 = load volatile i64* @var64
+ %tst4 = icmp ne i64 %val4, 0
+ br i1 %tst4, label %end, label %test5
+; CHECK: cbnz {{x[0-9]+}}, .LBB
+
+test5:
+ store volatile i64 %val4, i64* @var64
+ ret void
+
+end:
+ ret void
+}
\ No newline at end of file
diff --git a/test/CodeGen/AArch64/cond-sel.ll b/test/CodeGen/AArch64/cond-sel.ll
new file mode 100644
index 0000000000..9ca7997a14
--- /dev/null
+++ b/test/CodeGen/AArch64/cond-sel.ll
@@ -0,0 +1,213 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
+@var32 = global i32 0
+@var64 = global i64 0
+
+define void @test_csel(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
+; CHECK: test_csel:
+
+ %tst1 = icmp ugt i32 %lhs32, %rhs32
+ %val1 = select i1 %tst1, i32 42, i32 52
+ store i32 %val1, i32* @var32
+; CHECK: movz [[W52:w[0-9]+]], #52
+; CHECK: movz [[W42:w[0-9]+]], #42
+; CHECK: csel {{w[0-9]+}}, [[W42]], [[W52]], hi
+
+ %rhs64 = sext i32 %rhs32 to i64
+ %tst2 = icmp sle i64 %lhs64, %rhs64
+ %val2 = select i1 %tst2, i64 %lhs64, i64 %rhs64
+ store i64 %val2, i64* @var64
+; CHECK: cmp [[LHS:x[0-9]+]], [[RHS:w[0-9]+]], sxtw
+; CHECK: sxtw [[EXT_RHS:x[0-9]+]], [[RHS]]
+; CHECK: csel {{x[0-9]+}}, [[LHS]], [[EXT_RHS]], le
+
+ ret void
+; CHECK: ret
+}
+
+define void @test_floatcsel(float %lhs32, float %rhs32, double %lhs64, double %rhs64) {
+; CHECK: test_floatcsel:
+
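+; "one" and "ueq" have no single AArch64 condition code, so each select is
+; expected to become an fcmp followed by two chained csel instructions.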
+ %tst1 = fcmp one float %lhs32, %rhs32
+; CHECK: fcmp {{s[0-9]+}}, {{s[0-9]+}}
+ %val1 = select i1 %tst1, i32 42, i32 52
+ store i32 %val1, i32* @var32
+; CHECK: movz [[W52:w[0-9]+]], #52
+; CHECK: movz [[W42:w[0-9]+]], #42
+; CHECK: csel [[MAYBETRUE:w[0-9]+]], [[W42]], [[W52]], mi
+; CHECK: csel {{w[0-9]+}}, [[W42]], [[MAYBETRUE]], gt
+
+
+ %tst2 = fcmp ueq double %lhs64, %rhs64
+; CHECK: fcmp {{d[0-9]+}}, {{d[0-9]+}}
+ %val2 = select i1 %tst2, i64 9, i64 15
+ store i64 %val2, i64* @var64
+; CHECK: movz [[CONST15:x[0-9]+]], #15
+; CHECK: movz [[CONST9:x[0-9]+]], #9
+; CHECK: csel [[MAYBETRUE:x[0-9]+]], [[CONST9]], [[CONST15]], eq
+; CHECK: csel {{x[0-9]+}}, [[CONST9]], [[MAYBETRUE]], vs
+
+ ret void
+; CHECK: ret
+}
+
+
+define void @test_csinc(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
+; CHECK: test_csinc:
+
+; Note that commuting rhs and lhs in the select changes ugt to ule (i.e. hi to ls).
+ %tst1 = icmp ugt i32 %lhs32, %rhs32
+ %inc1 = add i32 %rhs32, 1
+ %val1 = select i1 %tst1, i32 %inc1, i32 %lhs32
+ store volatile i32 %val1, i32* @var32
+; CHECK: cmp [[LHS:w[0-9]+]], [[RHS:w[0-9]+]]
+; CHECK: csinc {{w[0-9]+}}, [[LHS]], [[RHS]], ls
+
+ %rhs2 = add i32 %rhs32, 42
+ %tst2 = icmp sle i32 %lhs32, %rhs2
+ %inc2 = add i32 %rhs32, 1
+ %val2 = select i1 %tst2, i32 %lhs32, i32 %inc2
+ store volatile i32 %val2, i32* @var32
+; CHECK: cmp [[LHS:w[0-9]+]], {{w[0-9]+}}
+; CHECK: csinc {{w[0-9]+}}, [[LHS]], {{w[0-9]+}}, le
+
+; Note that commuting rhs and lhs in the select changes ugt to ule (i.e. hi to ls).
+ %rhs3 = sext i32 %rhs32 to i64
+ %tst3 = icmp ugt i64 %lhs64, %rhs3
+ %inc3 = add i64 %rhs3, 1
+ %val3 = select i1 %tst3, i64 %inc3, i64 %lhs64
+ store volatile i64 %val3, i64* @var64
+; CHECK: cmp [[LHS:x[0-9]+]], {{w[0-9]+}}
+; CHECK: csinc {{x[0-9]+}}, [[LHS]], {{x[0-9]+}}, ls
+
+ %rhs4 = zext i32 %rhs32 to i64
+ %tst4 = icmp sle i64 %lhs64, %rhs4
+ %inc4 = add i64 %rhs4, 1
+ %val4 = select i1 %tst4, i64 %lhs64, i64 %inc4
+ store volatile i64 %val4, i64* @var64
+; CHECK: cmp [[LHS:x[0-9]+]], {{w[0-9]+}}
+; CHECK: csinc {{x[0-9]+}}, [[LHS]], {{x[0-9]+}}, le
+
+ ret void
+; CHECK: ret
+}
+
+define void @test_csinv(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
+; CHECK: test_csinv:
+
+; Note that commuting rhs and lhs in the select changes ugt to ule (i.e. hi to ls).
+ %tst1 = icmp ugt i32 %lhs32, %rhs32
+ %inc1 = xor i32 -1, %rhs32
+ %val1 = select i1 %tst1, i32 %inc1, i32 %lhs32
+ store volatile i32 %val1, i32* @var32
+; CHECK: cmp [[LHS:w[0-9]+]], [[RHS:w[0-9]+]]
+; CHECK: csinv {{w[0-9]+}}, [[LHS]], [[RHS]], ls
+
+ %rhs2 = add i32 %rhs32, 42
+ %tst2 = icmp sle i32 %lhs32, %rhs2
+ %inc2 = xor i32 -1, %rhs32
+ %val2 = select i1 %tst2, i32 %lhs32, i32 %inc2
+ store volatile i32 %val2, i32* @var32
+; CHECK: cmp [[LHS:w[0-9]+]], {{w[0-9]+}}
+; CHECK: csinv {{w[0-9]+}}, [[LHS]], {{w[0-9]+}}, le
+
+; Note that commuting rhs and lhs in the select changes ugt to ule (i.e. hi to ls).
+ %rhs3 = sext i32 %rhs32 to i64
+ %tst3 = icmp ugt i64 %lhs64, %rhs3
+ %inc3 = xor i64 -1, %rhs3
+ %val3 = select i1 %tst3, i64 %inc3, i64 %lhs64
+ store volatile i64 %val3, i64* @var64
+; CHECK: cmp [[LHS:x[0-9]+]], {{w[0-9]+}}
+; CHECK: csinv {{x[0-9]+}}, [[LHS]], {{x[0-9]+}}, ls
+
+ %rhs4 = zext i32 %rhs32 to i64
+ %tst4 = icmp sle i64 %lhs64, %rhs4
+ %inc4 = xor i64 -1, %rhs4
+ %val4 = select i1 %tst4, i64 %lhs64, i64 %inc4
+ store volatile i64 %val4, i64* @var64
+; CHECK: cmp [[LHS:x[0-9]+]], {{w[0-9]+}}
+; CHECK: csinv {{x[0-9]+}}, [[LHS]], {{x[0-9]+}}, le
+
+ ret void
+; CHECK: ret
+}
+
+define void @test_csneg(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
+; CHECK: test_csneg:
+
+; Note that commuting rhs and lhs in the select changes ugt to ule (i.e. hi to ls).
+ %tst1 = icmp ugt i32 %lhs32, %rhs32
+ %inc1 = sub i32 0, %rhs32
+ %val1 = select i1 %tst1, i32 %inc1, i32 %lhs32
+ store volatile i32 %val1, i32* @var32
+; CHECK: cmp [[LHS:w[0-9]+]], [[RHS:w[0-9]+]]
+; CHECK: csneg {{w[0-9]+}}, [[LHS]], [[RHS]], ls
+
+ %rhs2 = add i32 %rhs32, 42
+ %tst2 = icmp sle i32 %lhs32, %rhs2
+ %inc2 = sub i32 0, %rhs32
+ %val2 = select i1 %tst2, i32 %lhs32, i32 %inc2
+ store volatile i32 %val2, i32* @var32
+; CHECK: cmp [[LHS:w[0-9]+]], {{w[0-9]+}}
+; CHECK: csneg {{w[0-9]+}}, [[LHS]], {{w[0-9]+}}, le
+
+; Note that commuting rhs and lhs in the select changes ugt to ule (i.e. hi to ls).
+ %rhs3 = sext i32 %rhs32 to i64
+ %tst3 = icmp ugt i64 %lhs64, %rhs3
+ %inc3 = sub i64 0, %rhs3
+ %val3 = select i1 %tst3, i64 %inc3, i64 %lhs64
+ store volatile i64 %val3, i64* @var64
+; CHECK: cmp [[LHS:x[0-9]+]], {{w[0-9]+}}
+; CHECK: csneg {{x[0-9]+}}, [[LHS]], {{x[0-9]+}}, ls
+
+ %rhs4 = zext i32 %rhs32 to i64
+ %tst4 = icmp sle i64 %lhs64, %rhs4
+ %inc4 = sub i64 0, %rhs4
+ %val4 = select i1 %tst4, i64 %lhs64, i64 %inc4
+ store volatile i64 %val4, i64* @var64
+; CHECK: cmp [[LHS:x[0-9]+]], {{w[0-9]+}}
+; CHECK: csneg {{x[0-9]+}}, [[LHS]], {{x[0-9]+}}, le
+
+ ret void
+; CHECK: ret
+}
+
+define void @test_cset(i32 %lhs, i32 %rhs, i64 %lhs64) {
+; CHECK: test_cset:
+
+; N.b. code is not optimal here (32-bit csinc would be better) but the
+; incoming DAG is too complex.
+ %tst1 = icmp eq i32 %lhs, %rhs
+ %val1 = zext i1 %tst1 to i32
+ store i32 %val1, i32* @var32
+; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: csinc {{w[0-9]+}}, wzr, wzr, ne
+
+ %rhs64 = sext i32 %rhs to i64
+ %tst2 = icmp ule i64 %lhs64, %rhs64
+ %val2 = zext i1 %tst2 to i64
+ store i64 %val2, i64* @var64
+; CHECK: csinc {{w[0-9]+}}, wzr, wzr, hi
+
+ ret void
+; CHECK: ret
+}
+
+define void @test_csetm(i32 %lhs, i32 %rhs, i64 %lhs64) {
+; CHECK: test_csetm:
+
+ %tst1 = icmp eq i32 %lhs, %rhs
+ %val1 = sext i1 %tst1 to i32
+ store i32 %val1, i32* @var32
+; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: csinv {{w[0-9]+}}, wzr, wzr, ne
+
+ %rhs64 = sext i32 %rhs to i64
+ %tst2 = icmp ule i64 %lhs64, %rhs64
+ %val2 = sext i1 %tst2 to i64
+ store i64 %val2, i64* @var64
+; CHECK: csinv {{x[0-9]+}}, xzr, xzr, hi
+
+ ret void
+; CHECK: ret
+}
diff --git a/test/CodeGen/AArch64/directcond.ll b/test/CodeGen/AArch64/directcond.ll
new file mode 100644
index 0000000000..3741011e38
--- /dev/null
+++ b/test/CodeGen/AArch64/directcond.ll
@@ -0,0 +1,84 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
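+; Selects on an incoming i1 are expected to materialise #1, test the low bit
+; with tst, and then use csel/fcsel on the "ne" condition.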
+define i32 @test_select_i32(i1 %bit, i32 %a, i32 %b) {
+; CHECK: test_select_i32:
+ %val = select i1 %bit, i32 %a, i32 %b
+; CHECK: movz [[ONE:w[0-9]+]], #1
+; CHECK: tst w0, [[ONE]]
+; CHECK-NEXT: csel w0, w1, w2, ne
+
+ ret i32 %val
+}
+
+define i64 @test_select_i64(i1 %bit, i64 %a, i64 %b) {
+; CHECK: test_select_i64:
+ %val = select i1 %bit, i64 %a, i64 %b
+; CHECK: movz [[ONE:w[0-9]+]], #1
+; CHECK: tst w0, [[ONE]]
+; CHECK-NEXT: csel x0, x1, x2, ne
+
+ ret i64 %val
+}
+
+define float @test_select_float(i1 %bit, float %a, float %b) {
+; CHECK: test_select_float:
+ %val = select i1 %bit, float %a, float %b
+; CHECK: movz [[ONE:w[0-9]+]], #1
+; CHECK: tst w0, [[ONE]]
+; CHECK-NEXT: fcsel s0, s0, s1, ne
+
+ ret float %val
+}
+
+define double @test_select_double(i1 %bit, double %a, double %b) {
+; CHECK: test_select_double:
+ %val = select i1 %bit, double %a, double %b
+; CHECK: movz [[ONE:w[0-9]+]], #1
+; CHECK: tst w0, [[ONE]]
+; CHECK-NEXT: fcsel d0, d0, d1, ne
+
+ ret double %val
+}
+
+define i32 @test_brcond(i1 %bit) {
+; CHECK: test_brcond:
+ br i1 %bit, label %true, label %false
+; CHECK: tbz {{w[0-9]+}}, #0, .LBB
+
+true:
+ ret i32 0
+false:
+ ret i32 42
+}
+
+define i1 @test_setcc_float(float %lhs, float %rhs) {
+; CHECK: test_setcc_float
+ %val = fcmp oeq float %lhs, %rhs
+; CHECK: fcmp s0, s1
+; CHECK: csinc w0, wzr, wzr, ne
+ ret i1 %val
+}
+
+define i1 @test_setcc_double(double %lhs, double %rhs) {
+; CHECK: test_setcc_double
+ %val = fcmp oeq double %lhs, %rhs
+; CHECK: fcmp d0, d1
+; CHECK: csinc w0, wzr, wzr, ne
+ ret i1 %val
+}
+
+define i1 @test_setcc_i32(i32 %lhs, i32 %rhs) {
+; CHECK: test_setcc_i32
+ %val = icmp ugt i32 %lhs, %rhs
+; CHECK: cmp w0, w1
+; CHECK: csinc w0, wzr, wzr, ls
+ ret i1 %val
+}
+
+define i1 @test_setcc_i64(i64 %lhs, i64 %rhs) {
+; CHECK: test_setcc_i64
+ %val = icmp ne i64 %lhs, %rhs
+; CHECK: cmp x0, x1
+; CHECK: csinc w0, wzr, wzr, eq
+ ret i1 %val
+}
diff --git a/test/CodeGen/AArch64/dp-3source.ll b/test/CodeGen/AArch64/dp-3source.ll
new file mode 100644
index 0000000000..1553cc08c5
--- /dev/null
+++ b/test/CodeGen/AArch64/dp-3source.ll
@@ -0,0 +1,163 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
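+; Each multiply-accumulate pattern below (a mul feeding an add or sub,
+; possibly with sign/zero-extended 32-bit operands) should select to a single
+; three-source instruction: madd/msub, smaddl/smsubl, umaddl/umsubl, or the
+; high-half multiplies smulh/umulh.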
+define i32 @test_madd32(i32 %val0, i32 %val1, i32 %val2) {
+; CHECK: test_madd32:
+ %mid = mul i32 %val1, %val2
+ %res = add i32 %val0, %mid
+; CHECK: madd {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ ret i32 %res
+}
+
+define i64 @test_madd64(i64 %val0, i64 %val1, i64 %val2) {
+; CHECK: test_madd64:
+ %mid = mul i64 %val1, %val2
+ %res = add i64 %val0, %mid
+; CHECK: madd {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+ ret i64 %res
+}
+
+define i32 @test_msub32(i32 %val0, i32 %val1, i32 %val2) {
+; CHECK: test_msub32:
+ %mid = mul i32 %val1, %val2
+ %res = sub i32 %val0, %mid
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ ret i32 %res
+}
+
+define i64 @test_msub64(i64 %val0, i64 %val1, i64 %val2) {
+; CHECK: test_msub64:
+ %mid = mul i64 %val1, %val2
+ %res = sub i64 %val0, %mid
+; CHECK: msub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+ ret i64 %res
+}
+
+define i64 @test_smaddl(i64 %acc, i32 %val1, i32 %val2) {
+; CHECK: test_smaddl:
+ %ext1 = sext i32 %val1 to i64
+ %ext2 = sext i32 %val2 to i64
+ %prod = mul i64 %ext1, %ext2
+ %res = add i64 %acc, %prod
+; CHECK: smaddl {{x[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{x[0-9]+}}
+ ret i64 %res
+}
+
+define i64 @test_smsubl(i64 %acc, i32 %val1, i32 %val2) {
+; CHECK: test_smsubl:
+ %ext1 = sext i32 %val1 to i64
+ %ext2 = sext i32 %val2 to i64
+ %prod = mul i64 %ext1, %ext2
+ %res = sub i64 %acc, %prod
+; CHECK: smsubl {{x[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{x[0-9]+}}
+ ret i64 %res
+}
+
+define i64 @test_umaddl(i64 %acc, i32 %val1, i32 %val2) {
+; CHECK: test_umaddl:
+ %ext1 = zext i32 %val1 to i64
+ %ext2 = zext i32 %val2 to i64
+ %prod = mul i64 %ext1, %ext2
+ %res = add i64 %acc, %prod
+; CHECK: umaddl {{x[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{x[0-9]+}}
+ ret i64 %res
+}
+
+define i64 @test_umsubl(i64 %acc, i32 %val1, i32 %val2) {
+; CHECK: test_umsubl:
+ %ext1 = zext i32 %val1 to i64
+ %ext2 = zext i32 %val2 to i64
+ %prod = mul i64 %ext1, %ext2
+ %res = sub i64 %acc, %prod
+; CHECK: umsubl {{x[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{x[0-9]+}}
+ ret i64 %res
+}
+
+define i64 @test_smulh(i64 %lhs, i64 %rhs) {
+; CHECK: test_smulh:
+ %ext1 = sext i64 %lhs to i128
+ %ext2 = sext i64 %rhs to i128
+ %res = mul i128 %ext1, %ext2
+ %high = lshr i128 %res, 64
+ %val = trunc i128 %high to i64
+; CHECK: smulh {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+ ret i64 %val
+}
+
+define i64 @test_umulh(i64 %lhs, i64 %rhs) {
+; CHECK: test_umulh:
+ %ext1 = zext i64 %lhs to i128
+ %ext2 = zext i64 %rhs to i128
+ %res = mul i128 %ext1, %ext2
+ %high = lshr i128 %res, 64
+ %val = trunc i128 %high to i64
+; CHECK: umulh {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+ ret i64 %val
+}
+
+define i32 @test_mul32(i32 %lhs, i32 %rhs) {
+; CHECK: test_mul32:
+ %res = mul i32 %lhs, %rhs
+; CHECK: mul {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ ret i32 %res
+}
+
+define i64 @test_mul64(i64 %lhs, i64 %rhs) {
+; CHECK: test_mul64:
+ %res = mul i64 %lhs, %rhs
+; CHECK: mul {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+ ret i64 %res
+}
+
+define i32 @test_mneg32(i32 %lhs, i32 %rhs) {
+; CHECK: test_mneg32:
+ %prod = mul i32 %lhs, %rhs
+ %res = sub i32 0, %prod
+; CHECK: mneg {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ ret i32 %res
+}
+
+define i64 @test_mneg64(i64 %lhs, i64 %rhs) {
+; CHECK: test_mneg64:
+ %prod = mul i64 %lhs, %rhs
+ %res = sub i64 0, %prod
+; CHECK: mneg {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+ ret i64 %res
+}
+
+define i64 @test_smull(i32 %lhs, i32 %rhs) {
+; CHECK: test_smull:
+ %ext1 = sext i32 %lhs to i64
+ %ext2 = sext i32 %rhs to i64
+ %res = mul i64 %ext1, %ext2
+; CHECK: smull {{x[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ ret i64 %res
+}
+
+define i64 @test_umull(i32 %lhs, i32 %rhs) {
+; CHECK: test_umull:
+ %ext1 = zext i32 %lhs to i64
+ %ext2 = zext i32 %rhs to i64
+ %res = mul i64 %ext1, %ext2
+; CHECK: umull {{x[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ ret i64 %res
+}
+
+define i64 @test_smnegl(i32 %lhs, i32 %rhs) {
+; CHECK: test_smnegl:
+ %ext1 = sext i32 %lhs to i64
+ %ext2 = sext i32 %rhs to i64
+ %prod = mul i64 %ext1, %ext2
+ %res = sub i64 0, %prod
+; CHECK: smnegl {{x[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ ret i64 %res
+}
+
+define i64 @test_umnegl(i32 %lhs, i32 %rhs) {
+; CHECK: test_umnegl:
+ %ext1 = zext i32 %lhs to i64
+ %ext2 = zext i32 %rhs to i64
+ %prod = mul i64 %ext1, %ext2
+ %res = sub i64 0, %prod
+; CHECK: umnegl {{x[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ ret i64 %res
+}
diff --git a/test/CodeGen/AArch64/dp1.ll b/test/CodeGen/AArch64/dp1.ll
new file mode 100644
index 0000000000..cfa82af7d4
--- /dev/null
+++ b/test/CodeGen/AArch64/dp1.ll
@@ -0,0 +1,152 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
+@var32 = global i32 0
+@var64 = global i64 0
+
+define void @rev_i32() {
+; CHECK: rev_i32:
+ %val0_tmp = load i32* @var32
+ %val1_tmp = call i32 @llvm.bswap.i32(i32 %val0_tmp)
+; CHECK: rev {{w[0-9]+}}, {{w[0-9]+}}
+ store volatile i32 %val1_tmp, i32* @var32
+ ret void
+}
+
+define void @rev_i64() {
+; CHECK: rev_i64:
+ %val0_tmp = load i64* @var64
+ %val1_tmp = call i64 @llvm.bswap.i64(i64 %val0_tmp)
+; CHECK: rev {{x[0-9]+}}, {{x[0-9]+}}
+ store volatile i64 %val1_tmp, i64* @var64
+ ret void
+}
+
+define void @rev32_i64() {
+; CHECK: rev32_i64:
+ %val0_tmp = load i64* @var64
+ %val1_tmp = shl i64 %val0_tmp, 32
+ %val5_tmp = sub i64 64, 32
+ %val2_tmp = lshr i64 %val0_tmp, %val5_tmp
+ %val3_tmp = or i64 %val1_tmp, %val2_tmp
+ %val4_tmp = call i64 @llvm.bswap.i64(i64 %val3_tmp)
+; CHECK: rev32 {{x[0-9]+}}, {{x[0-9]+}}
+ store volatile i64 %val4_tmp, i64* @var64
+ ret void
+}
+
+define void @rev16_i32() {
+; CHECK: rev16_i32:
+ %val0_tmp = load i32* @var32
+ %val1_tmp = shl i32 %val0_tmp, 16
+ %val2_tmp = lshr i32 %val0_tmp, 16
+ %val3_tmp = or i32 %val1_tmp, %val2_tmp
+ %val4_tmp = call i32 @llvm.bswap.i32(i32 %val3_tmp)
+; CHECK: rev16 {{w[0-9]+}}, {{w[0-9]+}}
+ store volatile i32 %val4_tmp, i32* @var32
+ ret void
+}
+
+define void @clz_zerodef_i32() {
+; CHECK: clz_zerodef_i32:
+ %val0_tmp = load i32* @var32
+ %val4_tmp = call i32 @llvm.ctlz.i32(i32 %val0_tmp, i1 0)
+; CHECK: clz {{w[0-9]+}}, {{w[0-9]+}}
+ store volatile i32 %val4_tmp, i32* @var32
+ ret void
+}
+
+define void @clz_zerodef_i64() {
+; CHECK: clz_zerodef_i64:
+ %val0_tmp = load i64* @var64
+ %val4_tmp = call i64 @llvm.ctlz.i64(i64 %val0_tmp, i1 0)
+; CHECK: clz {{x[0-9]+}}, {{x[0-9]+}}
+ store volatile i64 %val4_tmp, i64* @var64
+ ret void
+}
+
+define void @clz_zeroundef_i32() {
+; CHECK: clz_zeroundef_i32:
+ %val0_tmp = load i32* @var32
+ %val4_tmp = call i32 @llvm.ctlz.i32(i32 %val0_tmp, i1 1)
+; CHECK: clz {{w[0-9]+}}, {{w[0-9]+}}
+ store volatile i32 %val4_tmp, i32* @var32
+ ret void
+}
+
+define void @clz_zeroundef_i64() {
+; CHECK: clz_zeroundef_i64:
+ %val0_tmp = load i64* @var64
+ %val4_tmp = call i64 @llvm.ctlz.i64(i64 %val0_tmp, i1 1)
+; CHECK: clz {{x[0-9]+}}, {{x[0-9]+}}
+ store volatile i64 %val4_tmp, i64* @var64
+ ret void
+}
+
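+; AArch64 has no dedicated count-trailing-zeros instruction, so cttz is
+; expected to lower to rbit followed by clz.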
+define void @cttz_zerodef_i32() {
+; CHECK: cttz_zerodef_i32:
+ %val0_tmp = load i32* @var32
+ %val4_tmp = call i32 @llvm.cttz.i32(i32 %val0_tmp, i1 0)
+; CHECK: rbit [[REVERSED:w[0-9]+]], {{w[0-9]+}}
+; CHECK: clz {{w[0-9]+}}, [[REVERSED]]
+ store volatile i32 %val4_tmp, i32* @var32
+ ret void
+}
+
+define void @cttz_zerodef_i64() {
+; CHECK: cttz_zerodef_i64:
+ %val0_tmp = load i64* @var64
+ %val4_tmp = call i64 @llvm.cttz.i64(i64 %val0_tmp, i1 0)
+; CHECK: rbit [[REVERSED:x[0-9]+]], {{x[0-9]+}}
+; CHECK: clz {{x[0-9]+}}, [[REVERSED]]
+ store volatile i64 %val4_tmp, i64* @var64
+ ret void
+}
+
+define void @cttz_zeroundef_i32() {
+; CHECK: cttz_zeroundef_i32:
+ %val0_tmp = load i32* @var32
+ %val4_tmp = call i32 @llvm.cttz.i32(i32 %val0_tmp, i1 1)
+; CHECK: rbit [[REVERSED:w[0-9]+]], {{w[0-9]+}}
+; CHECK: clz {{w[0-9]+}}, [[REVERSED]]
+ store volatile i32 %val4_tmp, i32* @var32
+ ret void
+}
+
+define void @cttz_zeroundef_i64() {
+; CHECK: cttz_zeroundef_i64:
+ %val0_tmp = load i64* @var64
+ %val4_tmp = call i64 @llvm.cttz.i64(i64 %val0_tmp, i1 1)
+; CHECK: rbit [[REVERSED:x[0-9]+]], {{x[0-9]+}}
+; CHECK: clz {{x[0-9]+}}, [[REVERSED]]
+ store volatile i64 %val4_tmp, i64* @var64
+ ret void
+}
+
+; These two are just compilation tests really: the operation's set to Expand in
+; ISelLowering.
+define void @ctpop_i32() {
+; CHECK: ctpop_i32:
+ %val0_tmp = load i32* @var32
+ %val4_tmp = call i32 @llvm.ctpop.i32(i32 %val0_tmp)
+ store volatile i32 %val4_tmp, i32* @var32
+ ret void
+}
+
+define void @ctpop_i64() {
+; CHECK: ctpop_i64:
+ %val0_tmp = load i64* @var64
+ %val4_tmp = call i64 @llvm.ctpop.i64(i64 %val0_tmp)
+ store volatile i64 %val4_tmp, i64* @var64
+ ret void
+}
+
+
+declare i32 @llvm.bswap.i32(i32)
+declare i64 @llvm.bswap.i64(i64)
+declare i32 @llvm.ctlz.i32 (i32, i1)
+declare i64 @llvm.ctlz.i64 (i64, i1)
+declare i32 @llvm.cttz.i32 (i32, i1)
+declare i64 @llvm.cttz.i64 (i64, i1)
+declare i32 @llvm.ctpop.i32 (i32)
+declare i64 @llvm.ctpop.i64 (i64)
+
diff --git a/test/CodeGen/AArch64/dp2.ll b/test/CodeGen/AArch64/dp2.ll
new file mode 100644
index 0000000000..97f89be8d2
--- /dev/null
+++ b/test/CodeGen/AArch64/dp2.ll
@@ -0,0 +1,169 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
+@var32_0 = global i32 0
+@var32_1 = global i32 0
+@var64_0 = global i64 0
+@var64_1 = global i64 0
+
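+; Note: the IR below spells out a rotate-right as (x << (64 - n)) | (x >> n);
+; the expectation is that the whole idiom is matched to a single ror/rorv
+; rather than separate shift and orr instructions.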
+define void @rorv_i64() {
+; CHECK: rorv_i64:
+ %val0_tmp = load i64* @var64_0
+ %val1_tmp = load i64* @var64_1
+ %val2_tmp = sub i64 64, %val1_tmp
+ %val3_tmp = shl i64 %val0_tmp, %val2_tmp
+ %val4_tmp = lshr i64 %val0_tmp, %val1_tmp
+ %val5_tmp = or i64 %val3_tmp, %val4_tmp
+; CHECK: ror {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+ store volatile i64 %val5_tmp, i64* @var64_0
+ ret void
+}
+
+define void @asrv_i64() {
+; CHECK: asrv_i64:
+ %val0_tmp = load i64* @var64_0
+ %val1_tmp = load i64* @var64_1
+ %val4_tmp = ashr i64 %val0_tmp, %val1_tmp
+; CHECK: asr {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+ store volatile i64 %val4_tmp, i64* @var64_1
+ ret void
+}
+
+define void @lsrv_i64() {
+; CHECK: lsrv_i64:
+ %val0_tmp = load i64* @var64_0
+ %val1_tmp = load i64* @var64_1
+ %val4_tmp = lshr i64 %val0_tmp, %val1_tmp
+; CHECK: lsr {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+ store volatile i64 %val4_tmp, i64* @var64_0
+ ret void
+}
+
+define void @lslv_i64() {
+; CHECK: lslv_i64:
+ %val0_tmp = load i64* @var64_0
+ %val1_tmp = load i64* @var64_1
+ %val4_tmp = shl i64 %val0_tmp, %val1_tmp
+; CHECK: lsl {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+ store volatile i64 %val4_tmp, i64* @var64_1
+ ret void
+}
+
+define void @udiv_i64() {
+; CHECK: udiv_i64:
+ %val0_tmp = load i64* @var64_0
+ %val1_tmp = load i64* @var64_1
+ %val4_tmp = udiv i64 %val0_tmp, %val1_tmp
+; CHECK: udiv {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+ store volatile i64 %val4_tmp, i64* @var64_0
+ ret void
+}
+
+define void @sdiv_i64() {
+; CHECK: sdiv_i64:
+ %val0_tmp = load i64* @var64_0
+ %val1_tmp = load i64* @var64_1
+ %val4_tmp = sdiv i64 %val0_tmp, %val1_tmp
+; CHECK: sdiv {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+ store volatile i64 %val4_tmp, i64* @var64_1
+ ret void
+}
+
+
+define void @lsrv_i32() {
+; CHECK: lsrv_i32:
+ %val0_tmp = load i32* @var32_0
+ %val1_tmp = load i32* @var32_1
+ %val2_tmp = add i32 1, %val1_tmp
+ %val4_tmp = lshr i32 %val0_tmp, %val2_tmp
+; CHECK: lsr {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ store volatile i32 %val4_tmp, i32* @var32_0
+ ret void
+}
+
+define void @lslv_i32() {
+; CHECK: lslv_i32:
+ %val0_tmp = load i32* @var32_0
+ %val1_tmp = load i32* @var32_1
+ %val2_tmp = add i32 1, %val1_tmp
+ %val4_tmp = shl i32 %val0_tmp, %val2_tmp
+; CHECK: lsl {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ store volatile i32 %val4_tmp, i32* @var32_1
+ ret void
+}
+
+define void @rorv_i32() {
+; CHECK: rorv_i32:
+ %val0_tmp = load i32* @var32_0
+ %val6_tmp = load i32* @var32_1
+ %val1_tmp = add i32 1, %val6_tmp
+ %val2_tmp = sub i32 32, %val1_tmp
+ %val3_tmp = shl i32 %val0_tmp, %val2_tmp
+ %val4_tmp = lshr i32 %val0_tmp, %val1_tmp
+ %val5_tmp = or i32 %val3_tmp, %val4_tmp
+; CHECK: ror {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ store volatile i32 %val5_tmp, i32* @var32_0
+ ret void
+}
+
+define void @asrv_i32() {
+; CHECK: asrv_i32:
+ %val0_tmp = load i32* @var32_0
+ %val1_tmp = load i32* @var32_1
+ %val2_tmp = add i32 1, %val1_tmp
+ %val4_tmp = ashr i32 %val0_tmp, %val2_tmp
+; CHECK: asr {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ store volatile i32 %val4_tmp, i32* @var32_1
+ ret void
+}
+
+define void @sdiv_i32() {
+; CHECK: sdiv_i32:
+ %val0_tmp = load i32* @var32_0
+ %val1_tmp = load i32* @var32_1
+ %val4_tmp = sdiv i32 %val0_tmp, %val1_tmp
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ store volatile i32 %val4_tmp, i32* @var32_1
+ ret void
+}
+
+define void @udiv_i32() {
+; CHECK: udiv_i32:
+ %val0_tmp = load i32* @var32_0
+ %val1_tmp = load i32* @var32_1
+ %val4_tmp = udiv i32 %val0_tmp, %val1_tmp
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ store volatile i32 %val4_tmp, i32* @var32_0
+ ret void
+}
+
+; The point of this test is that we may not actually see (shl GPR32:$Val, (zext GPR32:$Val2))
+; in the DAG (the RHS may be natively 64-bit), but we should still use the lsl instructions.
+define i32 @test_lsl32() {
+; CHECK: test_lsl32:
+
+ %val = load i32* @var32_0
+ %ret = shl i32 1, %val
+; CHECK: lsl {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+
+ ret i32 %ret
+}
+
+define i32 @test_lsr32() {
+; CHECK: test_lsr32:
+
+ %val = load i32* @var32_0
+ %ret = lshr i32 1, %val
+; CHECK: lsr {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+
+ ret i32 %ret
+}
+
+define i32 @test_asr32(i32 %in) {
+; CHECK: test_asr32:
+
+ %val = load i32* @var32_0
+ %ret = ashr i32 %in, %val
+; CHECK: asr {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+
+ ret i32 %ret
+}
diff --git a/test/CodeGen/AArch64/elf-extern.ll b/test/CodeGen/AArch64/elf-extern.ll
new file mode 100644
index 0000000000..544b9e7a76
--- /dev/null
+++ b/test/CodeGen/AArch64/elf-extern.ll
@@ -0,0 +1,21 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 -filetype=obj | elf-dump | FileCheck %s
+
+; External symbols are a different concept to global variables but should still
+; get relocations and so on when used.
+
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1)
+
+define i32 @check_extern() {
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* undef, i8* undef, i32 undef, i32 4, i1 0)
+ ret i32 0
+}
+
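+; The memcpy call should produce a relocation against the external symbol:
+; r_type 0x11b is R_AARCH64_CALL26, and r_sym 9 matches the memcpy entry
+; checked in the symbol table below.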
+; CHECK: .rela.text
+; CHECK: ('r_sym', 0x00000009)
+; CHECK-NEXT: ('r_type', 0x0000011b)
+
+; CHECK: .symtab
+; CHECK: Symbol 9
+; CHECK-NEXT: memcpy
+
+
diff --git a/test/CodeGen/AArch64/extract.ll b/test/CodeGen/AArch64/extract.ll
new file mode 100644
index 0000000000..0efd4472e0
--- /dev/null
+++ b/test/CodeGen/AArch64/extract.ll
@@ -0,0 +1,57 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
+define i64 @ror_i64(i64 %in) {
+; CHECK: ror_i64:
+ %left = shl i64 %in, 19
+ %right = lshr i64 %in, 45
+ %val5 = or i64 %left, %right
+; CHECK: extr {{x[0-9]+}}, x0, x0, #45
+ ret i64 %val5
+}
+
+define i32 @ror_i32(i32 %in) {
+; CHECK: ror_i32:
+ %left = shl i32 %in, 9
+ %right = lshr i32 %in, 23
+ %val5 = or i32 %left, %right
+; CHECK: extr {{w[0-9]+}}, w0, w0, #23
+ ret i32 %val5
+}
+
+define i32 @extr_i32(i32 %lhs, i32 %rhs) {
+; CHECK: extr_i32:
+ %left = shl i32 %lhs, 6
+ %right = lshr i32 %rhs, 26
+ %val = or i32 %left, %right
+ ; Order of lhs and rhs matters here. Regalloc would have to be very odd to use
+ ; something other than w0 and w1.
+; CHECK: extr {{w[0-9]+}}, w0, w1, #26
+
+ ret i32 %val
+}
+
+define i64 @extr_i64(i64 %lhs, i64 %rhs) {
+; CHECK: extr_i64:
+ %right = lshr i64 %rhs, 40
+ %left = shl i64 %lhs, 24
+ %val = or i64 %right, %left
+ ; Order of lhs and rhs matters here. Regalloc would have to be very odd to use
+  ; something other than x0 and x1.
+; CHECK: extr {{x[0-9]+}}, x0, x1, #40
+
+ ret i64 %val
+}
+
+; Regression test: a bad experimental pattern crept into git which optimised
+; this pattern to a single EXTR.
+define i32 @extr_regress(i32 %a, i32 %b) {
+; CHECK: extr_regress:
+
+ %sh1 = shl i32 %a, 14
+ %sh2 = lshr i32 %b, 14
+ %val = or i32 %sh2, %sh1
+; CHECK-NOT: extr {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, #{{[0-9]+}}
+
+ ret i32 %val
+; CHECK: ret
+}
diff --git a/test/CodeGen/AArch64/fastcc-reserved.ll b/test/CodeGen/AArch64/fastcc-reserved.ll
new file mode 100644
index 0000000000..b0bc1c908d
--- /dev/null
+++ b/test/CodeGen/AArch64/fastcc-reserved.ll
@@ -0,0 +1,58 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 -tailcallopt | FileCheck %s
+
+; This test is designed to be run in the situation where the
+; call-frame is not reserved (hence disable-fp-elim), but where
+; callee-pop can occur (hence tailcallopt).
+
+declare fastcc void @will_pop([8 x i32], i32 %val)
+
+define fastcc void @foo(i32 %in) {
+; CHECK: foo:
+
+ %addr = alloca i8, i32 %in
+
+; Normal frame setup stuff:
+; CHECK: sub sp, sp,
+; CHECK: stp x29, x30
+
+; Reserve space for call-frame:
+; CHECK: sub sp, sp, #16
+
+ call fastcc void @will_pop([8 x i32] undef, i32 42)
+; CHECK: bl will_pop
+
+; Since @will_pop is fastcc with tailcallopt, it will put the stack
+; back where it needs to be, so we shouldn't duplicate that adjustment here.
+; CHECK-NOT: sub sp, sp, #16
+; CHECK-NOT: add sp, sp,
+
+; CHECK: ldp x29, x30
+; CHECK: add sp, sp,
+ ret void
+}
+
+declare void @wont_pop([8 x i32], i32 %val)
+
+define void @foo1(i32 %in) {
+; CHECK: foo1:
+
+ %addr = alloca i8, i32 %in
+; Normal frame setup again
+; CHECK: sub sp, sp,
+; CHECK: stp x29, x30
+
+; Reserve space for call-frame
+; CHECK: sub sp, sp, #16
+
+  call void @wont_pop([8 x i32] undef, i32 42)
+; CHECK: bl wont_pop
+
+; This time we *do* need to unreserve the call-frame
+; CHECK: add sp, sp, #16
+
+; Check for the epilogue (primarily to make sure the sp adjustment spotted
+; above wasn't part of it).
+; CHECK: ldp x29, x30
+; CHECK: add sp, sp,
+ ret void
+}
diff --git a/test/CodeGen/AArch64/fastcc.ll b/test/CodeGen/AArch64/fastcc.ll
new file mode 100644
index 0000000000..8f9b9feb58
--- /dev/null
+++ b/test/CodeGen/AArch64/fastcc.ll
@@ -0,0 +1,123 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 -tailcallopt | FileCheck %s -check-prefix CHECK-TAIL
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
+; Without tailcallopt fastcc still means the caller cleans up the
+; stack, so try to make sure this is respected.
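+;
+; A rough account of the CHECK-TAIL epilogue constants below: each function
+; sets up a 48-byte frame, and under -tailcallopt a fastcc callee also pops
+; its own incoming stack arguments, so func_stack0 (no stack arguments)
+; restores 48 bytes, func_stack8 restores 48 + 16 = 64 and func_stack32
+; restores 48 + 32 = 80.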
+
+define fastcc void @func_stack0() {
+; CHECK: func_stack0:
+; CHECK: sub sp, sp, #48
+
+; CHECK-TAIL: func_stack0:
+; CHECK-TAIL: sub sp, sp, #48
+
+
+ call fastcc void @func_stack8([8 x i32] undef, i32 42)
+; CHECK: bl func_stack8
+; CHECK-NOT: sub sp, sp,
+
+; CHECK-TAIL: bl func_stack8
+; CHECK-TAIL: sub sp, sp, #16
+
+
+ call fastcc void @func_stack32([8 x i32] undef, i128 0, i128 9)
+; CHECK: bl func_stack32
+; CHECK-NOT: sub sp, sp,
+
+; CHECK-TAIL: bl func_stack32
+; CHECK-TAIL: sub sp, sp, #32
+
+
+ call fastcc void @func_stack0()
+; CHECK: bl func_stack0
+; CHECK-NOT: sub sp, sp
+
+; CHECK-TAIL: bl func_stack0
+; CHECK-TAIL-NOT: sub sp, sp
+
+ ret void
+; CHECK: add sp, sp, #48
+; CHECK-NEXT: ret
+
+; CHECK-TAIL: add sp, sp, #48
+; CHECK-TAIL-NEXT: ret
+
+}
+
+define fastcc void @func_stack8([8 x i32], i32 %stacked) {
+; CHECK: func_stack8:
+; CHECK: sub sp, sp, #48
+
+; CHECK-TAIL: func_stack8:
+; CHECK-TAIL: sub sp, sp, #48
+
+
+ call fastcc void @func_stack8([8 x i32] undef, i32 42)
+; CHECK: bl func_stack8
+; CHECK-NOT: sub sp, sp,
+
+; CHECK-TAIL: bl func_stack8
+; CHECK-TAIL: sub sp, sp, #16
+
+
+ call fastcc void @func_stack32([8 x i32] undef, i128 0, i128 9)
+; CHECK: bl func_stack32
+; CHECK-NOT: sub sp, sp,
+
+; CHECK-TAIL: bl func_stack32
+; CHECK-TAIL: sub sp, sp, #32
+
+
+ call fastcc void @func_stack0()
+; CHECK: bl func_stack0
+; CHECK-NOT: sub sp, sp
+
+; CHECK-TAIL: bl func_stack0
+; CHECK-TAIL-NOT: sub sp, sp
+
+ ret void
+; CHECK: add sp, sp, #48
+; CHECK-NEXT: ret
+
+; CHECK-TAIL: add sp, sp, #64
+; CHECK-TAIL-NEXT: ret
+}
+
+define fastcc void @func_stack32([8 x i32], i128 %stacked0, i128 %stacked1) {
+; CHECK: func_stack32:
+; CHECK: sub sp, sp, #48
+
+; CHECK-TAIL: func_stack32:
+; CHECK-TAIL: sub sp, sp, #48
+
+
+ call fastcc void @func_stack8([8 x i32] undef, i32 42)
+; CHECK: bl func_stack8
+; CHECK-NOT: sub sp, sp,
+
+; CHECK-TAIL: bl func_stack8
+; CHECK-TAIL: sub sp, sp, #16
+
+
+ call fastcc void @func_stack32([8 x i32] undef, i128 0, i128 9)
+; CHECK: bl func_stack32
+; CHECK-NOT: sub sp, sp,
+
+; CHECK-TAIL: bl func_stack32
+; CHECK-TAIL: sub sp, sp, #32
+
+
+ call fastcc void @func_stack0()
+; CHECK: bl func_stack0
+; CHECK-NOT: sub sp, sp
+
+; CHECK-TAIL: bl func_stack0
+; CHECK-TAIL-NOT: sub sp, sp
+
+ ret void
+; CHECK: add sp, sp, #48
+; CHECK-NEXT: ret
+
+; CHECK-TAIL: add sp, sp, #80
+; CHECK-TAIL-NEXT: ret
+}
diff --git a/test/CodeGen/AArch64/fcmp.ll b/test/CodeGen/AArch64/fcmp.ll
new file mode 100644
index 0000000000..d254e8c051
--- /dev/null
+++ b/test/CodeGen/AArch64/fcmp.ll
@@ -0,0 +1,81 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
+declare void @bar(i32)
+
+define void @test_float(float %a, float %b) {
+; CHECK: test_float:
+
+ %tst1 = fcmp oeq float %a, %b
+ br i1 %tst1, label %end, label %t2
+; CHECK: fcmp {{s[0-9]+}}, {{s[0-9]+}}
+; CHECK: b.eq .L
+
+t2:
+ %tst2 = fcmp une float %b, 0.0
+ br i1 %tst2, label %t3, label %end
+; CHECK: fcmp {{s[0-9]+}}, #0.0
+; CHECK: b.eq .L
+
+
+t3:
+; This test can't be implemented with just one A64 conditional
+; branch. LLVM converts "ordered and not equal" to "unordered or
+; equal" before instruction selection, which is what we currently
+; test. Obviously, other sequences are valid.
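+; (Concretely: the inverted "unordered or equal" test needs two flag checks on
+; A64, Z set for equality and V set for an unordered result, hence the
+; b.eq / b.vs pair expected below.)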
+ %tst3 = fcmp one float %a, %b
+ br i1 %tst3, label %t4, label %end
+; CHECK: fcmp {{s[0-9]+}}, {{s[0-9]+}}
+; CHECK-NEXT: b.eq .[[T4:LBB[0-9]+_[0-9]+]]
+; CHECK-NEXT: b.vs .[[T4]]
+t4:
+ %tst4 = fcmp uge float %a, -0.0
+ br i1 %tst4, label %t5, label %end
+; CHECK-NOT: fcmp {{s[0-9]+}}, #0.0
+; CHECK: b.mi .LBB
+
+t5:
+ call void @bar(i32 0)
+ ret void
+end:
+ ret void
+
+}
+
+define void @test_double(double %a, double %b) {
+; CHECK: test_double:
+
+ %tst1 = fcmp oeq double %a, %b
+ br i1 %tst1, label %end, label %t2
+; CHECK: fcmp {{d[0-9]+}}, {{d[0-9]+}}
+; CHECK: b.eq .L
+
+t2:
+ %tst2 = fcmp une double %b, 0.0
+ br i1 %tst2, label %t3, label %end
+; CHECK: fcmp {{d[0-9]+}}, #0.0
+; CHECK: b.eq .L
+
+
+t3:
+; This test can't be implemented with just one A64 conditional
+; branch. LLVM converts "ordered and not equal" to "unordered or
+; equal" before instruction selection, which is what we currently
+; test. Obviously, other sequences are valid.
+ %tst3 = fcmp one double %a, %b
+ br i1 %tst3, label %t4, label %end
+; CHECK: fcmp {{d[0-9]+}}, {{d[0-9]+}}
+; CHECK-NEXT: b.eq .[[T4:LBB[0-9]+_[0-9]+]]
+; CHECK-NEXT: b.vs .[[T4]]
+t4:
+ %tst4 = fcmp uge double %a, -0.0
+ br i1 %tst4, label %t5, label %end
+; CHECK-NOT: fcmp {{d[0-9]+}}, #0.0
+; CHECK: b.mi .LBB
+
+t5:
+ call void @bar(i32 0)
+ ret void
+end:
+ ret void
+
+}
diff --git a/test/CodeGen/AArch64/fcvt-fixed.ll b/test/CodeGen/AArch64/fcvt-fixed.ll
new file mode 100644
index 0000000000..c76a84ac77
--- /dev/null
+++ b/test/CodeGen/AArch64/fcvt-fixed.ll
@@ -0,0 +1,191 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 -O0 | FileCheck %s
+
+@var32 = global i32 0
+@var64 = global i64 0
+
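+; The expected immediates follow from the scale factors in the IR: a multiply
+; by 2^n followed by a float-to-int conversion should fold into a single
+; fixed-point fcvtz* with #n fractional bits, so 128.0 (2^7) gives #7,
+; 4294967296.0 (2^32) gives #32 and 18446744073709551616.0 (2^64) gives #64.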
+define void @test_fcvtzs(float %flt, double %dbl) {
+; CHECK: test_fcvtzs:
+
+ %fix1 = fmul float %flt, 128.0
+ %cvt1 = fptosi float %fix1 to i32
+; CHECK: fcvtzs {{w[0-9]+}}, {{s[0-9]+}}, #7
+ store volatile i32 %cvt1, i32* @var32
+
+ %fix2 = fmul float %flt, 4294967296.0
+ %cvt2 = fptosi float %fix2 to i32
+; CHECK: fcvtzs {{w[0-9]+}}, {{s[0-9]+}}, #32
+ store volatile i32 %cvt2, i32* @var32
+
+ %fix3 = fmul float %flt, 128.0
+ %cvt3 = fptosi float %fix3 to i64
+; CHECK: fcvtzs {{x[0-9]+}}, {{s[0-9]+}}, #7
+ store volatile i64 %cvt3, i64* @var64
+
+ %fix4 = fmul float %flt, 18446744073709551616.0
+ %cvt4 = fptosi float %fix4 to i64
+; CHECK: fcvtzs {{x[0-9]+}}, {{s[0-9]+}}, #64
+ store volatile i64 %cvt4, i64* @var64
+
+ %fix5 = fmul double %dbl, 128.0
+ %cvt5 = fptosi double %fix5 to i32
+; CHECK: fcvtzs {{w[0-9]+}}, {{d[0-9]+}}, #7
+ store volatile i32 %cvt5, i32* @var32
+
+ %fix6 = fmul double %dbl, 4294967296.0
+ %cvt6 = fptosi double %fix6 to i32
+; CHECK: fcvtzs {{w[0-9]+}}, {{d[0-9]+}}, #32
+ store volatile i32 %cvt6, i32* @var32
+
+ %fix7 = fmul double %dbl, 128.0
+ %cvt7 = fptosi double %fix7 to i64
+; CHECK: fcvtzs {{x[0-9]+}}, {{d[0-9]+}}, #7
+ store volatile i64 %cvt7, i64* @var64
+
+ %fix8 = fmul double %dbl, 18446744073709551616.0
+ %cvt8 = fptosi double %fix8 to i64
+; CHECK: fcvtzs {{x[0-9]+}}, {{d[0-9]+}}, #64
+ store volatile i64 %cvt8, i64* @var64
+
+ ret void
+}
+
+define void @test_fcvtzu(float %flt, double %dbl) {
+; CHECK: test_fcvtzu:
+
+ %fix1 = fmul float %flt, 128.0
+ %cvt1 = fptoui float %fix1 to i32
+; CHECK: fcvtzu {{w[0-9]+}}, {{s[0-9]+}}, #7
+ store volatile i32 %cvt1, i32* @var32
+
+ %fix2 = fmul float %flt, 4294967296.0
+ %cvt2 = fptoui float %fix2 to i32
+; CHECK: fcvtzu {{w[0-9]+}}, {{s[0-9]+}}, #32
+ store volatile i32 %cvt2, i32* @var32
+
+ %fix3 = fmul float %flt, 128.0
+ %cvt3 = fptoui float %fix3 to i64
+; CHECK: fcvtzu {{x[0-9]+}}, {{s[0-9]+}}, #7
+ store volatile i64 %cvt3, i64* @var64
+
+ %fix4 = fmul float %flt, 18446744073709551616.0
+ %cvt4 = fptoui float %fix4 to i64
+; CHECK: fcvtzu {{x[0-9]+}}, {{s[0-9]+}}, #64
+ store volatile i64 %cvt4, i64* @var64
+
+ %fix5 = fmul double %dbl, 128.0
+ %cvt5 = fptoui double %fix5 to i32
+; CHECK: fcvtzu {{w[0-9]+}}, {{d[0-9]+}}, #7
+ store volatile i32 %cvt5, i32* @var32
+
+ %fix6 = fmul double %dbl, 4294967296.0
+ %cvt6 = fptoui double %fix6 to i32
+; CHECK: fcvtzu {{w[0-9]+}}, {{d[0-9]+}}, #32
+ store volatile i32 %cvt6, i32* @var32
+
+ %fix7 = fmul double %dbl, 128.0
+ %cvt7 = fptoui double %fix7 to i64
+; CHECK: fcvtzu {{x[0-9]+}}, {{d[0-9]+}}, #7
+ store volatile i64 %cvt7, i64* @var64
+
+ %fix8 = fmul double %dbl, 18446744073709551616.0
+ %cvt8 = fptoui double %fix8 to i64
+; CHECK: fcvtzu {{x[0-9]+}}, {{d[0-9]+}}, #64
+ store volatile i64 %cvt8, i64* @var64
+
+ ret void
+}
+
+@varfloat = global float 0.0
+@vardouble = global double 0.0
+
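+; The int-to-float direction is the dual pattern: a sitofp/uitofp followed by
+; a divide by 2^n should become a single scvtf/ucvtf with #n fractional bits.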
+define void @test_scvtf(i32 %int, i64 %long) {
+; CHECK: test_scvtf:
+
+ %cvt1 = sitofp i32 %int to float
+ %fix1 = fdiv float %cvt1, 128.0
+; CHECK: scvtf {{s[0-9]+}}, {{w[0-9]+}}, #7
+ store volatile float %fix1, float* @varfloat
+
+ %cvt2 = sitofp i32 %int to float
+ %fix2 = fdiv float %cvt2, 4294967296.0
+; CHECK: scvtf {{s[0-9]+}}, {{w[0-9]+}}, #32
+ store volatile float %fix2, float* @varfloat
+
+ %cvt3 = sitofp i64 %long to float
+ %fix3 = fdiv float %cvt3, 128.0
+; CHECK: scvtf {{s[0-9]+}}, {{x[0-9]+}}, #7
+ store volatile float %fix3, float* @varfloat
+
+ %cvt4 = sitofp i64 %long to float
+ %fix4 = fdiv float %cvt4, 18446744073709551616.0
+; CHECK: scvtf {{s[0-9]+}}, {{x[0-9]+}}, #64
+ store volatile float %fix4, float* @varfloat
+
+ %cvt5 = sitofp i32 %int to double
+ %fix5 = fdiv double %cvt5, 128.0
+; CHECK: scvtf {{d[0-9]+}}, {{w[0-9]+}}, #7
+ store volatile double %fix5, double* @vardouble
+
+ %cvt6 = sitofp i32 %int to double
+ %fix6 = fdiv double %cvt6, 4294967296.0
+; CHECK: scvtf {{d[0-9]+}}, {{w[0-9]+}}, #32
+ store volatile double %fix6, double* @vardouble
+
+ %cvt7 = sitofp i64 %long to double
+ %fix7 = fdiv double %cvt7, 128.0
+; CHECK: scvtf {{d[0-9]+}}, {{x[0-9]+}}, #7
+ store volatile double %fix7, double* @vardouble
+
+ %cvt8 = sitofp i64 %long to double
+ %fix8 = fdiv double %cvt8, 18446744073709551616.0
+; CHECK: scvtf {{d[0-9]+}}, {{x[0-9]+}}, #64
+ store volatile double %fix8, double* @vardouble
+
+ ret void
+}
+
+define void @test_ucvtf(i32 %int, i64 %long) {
+; CHECK: test_ucvtf:
+
+ %cvt1 = uitofp i32 %int to float
+ %fix1 = fdiv float %cvt1, 128.0
+; CHECK: ucvtf {{s[0-9]+}}, {{w[0-9]+}}, #7
+ store volatile float %fix1, float* @varfloat
+
+ %cvt2 = uitofp i32 %int to float
+ %fix2 = fdiv float %cvt2, 4294967296.0
+; CHECK: ucvtf {{s[0-9]+}}, {{w[0-9]+}}, #32
+ store volatile float %fix2, float* @varfloat
+
+ %cvt3 = uitofp i64 %long to float
+ %fix3 = fdiv float %cvt3, 128.0
+; CHECK: ucvtf {{s[0-9]+}}, {{x[0-9]+}}, #7
+ store volatile float %fix3, float* @varfloat
+
+ %cvt4 = uitofp i64 %long to float
+ %fix4 = fdiv float %cvt4, 18446744073709551616.0
+; CHECK: ucvtf {{s[0-9]+}}, {{x[0-9]+}}, #64
+ store volatile float %fix4, float* @varfloat
+
+ %cvt5 = uitofp i32 %int to double
+ %fix5 = fdiv double %cvt5, 128.0
+; CHECK: ucvtf {{d[0-9]+}}, {{w[0-9]+}}, #7
+ store volatile double %fix5, double* @vardouble
+
+ %cvt6 = uitofp i32 %int to double
+ %fix6 = fdiv double %cvt6, 4294967296.0
+; CHECK: ucvtf {{d[0-9]+}}, {{w[0-9]+}}, #32
+ store volatile double %fix6, double* @vardouble
+
+ %cvt7 = uitofp i64 %long to double
+ %fix7 = fdiv double %cvt7, 128.0
+; CHECK: ucvtf {{d[0-9]+}}, {{x[0-9]+}}, #7
+ store volatile double %fix7, double* @vardouble
+
+ %cvt8 = uitofp i64 %long to double
+ %fix8 = fdiv double %cvt8, 18446744073709551616.0
+; CHECK: ucvtf {{d[0-9]+}}, {{x[0-9]+}}, #64
+ store volatile double %fix8, double* @vardouble
+
+ ret void
+}
diff --git a/test/CodeGen/AArch64/fcvt-int.ll b/test/CodeGen/AArch64/fcvt-int.ll
new file mode 100644
index 0000000000..c4bcaac206
--- /dev/null
+++ b/test/CodeGen/AArch64/fcvt-int.ll
@@ -0,0 +1,151 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
+define i32 @test_floattoi32(float %in) {
+; CHECK: test_floattoi32:
+
+ %signed = fptosi float %in to i32
+ %unsigned = fptoui float %in to i32
+; CHECK: fcvtzu [[UNSIG:w[0-9]+]], {{s[0-9]+}}
+; CHECK: fcvtzs [[SIG:w[0-9]+]], {{s[0-9]+}}
+
+ %res = sub i32 %signed, %unsigned
+; CHECK: sub {{w[0-9]+}}, [[SIG]], [[UNSIG]]
+
+ ret i32 %res
+; CHECK: ret
+}
+
+define i32 @test_doubletoi32(double %in) {
+; CHECK: test_doubletoi32:
+
+ %signed = fptosi double %in to i32
+ %unsigned = fptoui double %in to i32
+; CHECK: fcvtzu [[UNSIG:w[0-9]+]], {{d[0-9]+}}
+; CHECK: fcvtzs [[SIG:w[0-9]+]], {{d[0-9]+}}
+
+ %res = sub i32 %signed, %unsigned
+; CHECK: sub {{w[0-9]+}}, [[SIG]], [[UNSIG]]
+
+ ret i32 %res
+; CHECK: ret
+}
+
+define i64 @test_floattoi64(float %in) {
+; CHECK: test_floattoi64:
+
+ %signed = fptosi float %in to i64
+ %unsigned = fptoui float %in to i64
+; CHECK: fcvtzu [[UNSIG:x[0-9]+]], {{s[0-9]+}}
+; CHECK: fcvtzs [[SIG:x[0-9]+]], {{s[0-9]+}}
+
+ %res = sub i64 %signed, %unsigned
+; CHECK: sub {{x[0-9]+}}, [[SIG]], [[UNSIG]]
+
+ ret i64 %res
+; CHECK: ret
+}
+
+define i64 @test_doubletoi64(double %in) {
+; CHECK: test_doubletoi64:
+
+ %signed = fptosi double %in to i64
+ %unsigned = fptoui double %in to i64
+; CHECK: fcvtzu [[UNSIG:x[0-9]+]], {{d[0-9]+}}
+; CHECK: fcvtzs [[SIG:x[0-9]+]], {{d[0-9]+}}
+
+ %res = sub i64 %signed, %unsigned
+; CHECK: sub {{x[0-9]+}}, [[SIG]], [[UNSIG]]
+
+ ret i64 %res
+; CHECK: ret
+}
+
+define float @test_i32tofloat(i32 %in) {
+; CHECK: test_i32tofloat:
+
+ %signed = sitofp i32 %in to float
+ %unsigned = uitofp i32 %in to float
+; CHECK: ucvtf [[UNSIG:s[0-9]+]], {{w[0-9]+}}
+; CHECK: scvtf [[SIG:s[0-9]+]], {{w[0-9]+}}
+
+ %res = fsub float %signed, %unsigned
+; CHECK: fsub {{s[0-9]+}}, [[SIG]], [[UNSIG]]
+ ret float %res
+; CHECK: ret
+}
+
+define double @test_i32todouble(i32 %in) {
+; CHECK: test_i32todouble:
+
+ %signed = sitofp i32 %in to double
+ %unsigned = uitofp i32 %in to double
+; CHECK: ucvtf [[UNSIG:d[0-9]+]], {{w[0-9]+}}
+; CHECK: scvtf [[SIG:d[0-9]+]], {{w[0-9]+}}
+
+ %res = fsub double %signed, %unsigned
+; CHECK: fsub {{d[0-9]+}}, [[SIG]], [[UNSIG]]
+ ret double %res
+; CHECK: ret
+}
+
+define float @test_i64tofloat(i64 %in) {
+; CHECK: test_i64tofloat:
+
+ %signed = sitofp i64 %in to float
+ %unsigned = uitofp i64 %in to float
+; CHECK: ucvtf [[UNSIG:s[0-9]+]], {{x[0-9]+}}
+; CHECK: scvtf [[SIG:s[0-9]+]], {{x[0-9]+}}
+
+ %res = fsub float %signed, %unsigned
+; CHECK: fsub {{s[0-9]+}}, [[SIG]], [[UNSIG]]
+ ret float %res
+; CHECK: ret
+}
+
+define double @test_i64todouble(i64 %in) {
+; CHECK: test_i64todouble:
+
+ %signed = sitofp i64 %in to double
+ %unsigned = uitofp i64 %in to double
+; CHECK: ucvtf [[UNSIG:d[0-9]+]], {{x[0-9]+}}
+; CHECK: scvtf [[SIG:d[0-9]+]], {{x[0-9]+}}
+
+ %res = fsub double %signed, %unsigned
+; CHECK: fsub {{d[0-9]+}}, [[SIG]], [[UNSIG]]
+ ret double %res
+; CHECK: ret
+}
+
+define i32 @test_bitcastfloattoi32(float %in) {
+; CHECK: test_bitcastfloattoi32:
+
+ %res = bitcast float %in to i32
+; CHECK: fmov {{w[0-9]+}}, {{s[0-9]+}}
+ ret i32 %res
+}
+
+define i64 @test_bitcastdoubletoi64(double %in) {
+; CHECK: test_bitcastdoubletoi64:
+
+ %res = bitcast double %in to i64
+; CHECK: fmov {{x[0-9]+}}, {{d[0-9]+}}
+ ret i64 %res
+}
+
+define float @test_bitcasti32tofloat(i32 %in) {
+; CHECK: test_bitcasti32tofloat:
+
+ %res = bitcast i32 %in to float
+; CHECK: fmov {{s[0-9]+}}, {{w[0-9]+}}
+ ret float %res
+
+}
+
+define double @test_bitcasti64todouble(i64 %in) {
+; CHECK: test_bitcasti64todouble:
+
+ %res = bitcast i64 %in to double
+; CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
+ ret double %res
+
+}
diff --git a/test/CodeGen/AArch64/flags-multiuse.ll b/test/CodeGen/AArch64/flags-multiuse.ll
new file mode 100644
index 0000000000..80be052cd9
--- /dev/null
+++ b/test/CodeGen/AArch64/flags-multiuse.ll
@@ -0,0 +1,35 @@
+; RUN: llc -march=aarch64 -verify-machineinstrs < %s | FileCheck %s
+
+; LLVM should be able to cope with multiple uses of the same flag-setting
+; instruction at different points of a routine, either by rematerializing the
+; compare or by saving and restoring the flags register.
+
+declare void @bar()
+
+@var = global i32 0
+
+define i32 @test_multiflag(i32 %n, i32 %m, i32 %o) {
+; CHECK: test_multiflag:
+
+ %test = icmp ne i32 %n, %m
+; CHECK: cmp [[LHS:w[0-9]+]], [[RHS:w[0-9]+]]
+
+ %val = zext i1 %test to i32
+; CHECK: csinc {{[xw][0-9]+}}, {{xzr|wzr}}, {{xzr|wzr}}, eq
+
+ store i32 %val, i32* @var
+
+ call void @bar()
+; CHECK: bl bar
+
+ ; Currently, the comparison is emitted again. An MSR/MRS pair would also be
+ ; acceptable, but assuming the call preserves NZCV is not.
+ br i1 %test, label %iftrue, label %iffalse
+; CHECK: cmp [[LHS]], [[RHS]]
+; CHECK: b.eq
+
+iftrue:
+ ret i32 42
+iffalse:
+ ret i32 0
+}
diff --git a/test/CodeGen/AArch64/floatdp_1source.ll b/test/CodeGen/AArch64/floatdp_1source.ll
new file mode 100644
index 0000000000..0f10b42c0b
--- /dev/null
+++ b/test/CodeGen/AArch64/floatdp_1source.ll
@@ -0,0 +1,138 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
+@varhalf = global half 0.0
+@varfloat = global float 0.0
+@vardouble = global double 0.0
+
+declare float @fabsf(float) readonly
+declare double @fabs(double) readonly
+
+declare float @llvm.sqrt.f32(float %Val)
+declare double @llvm.sqrt.f64(double %Val)
+
+declare float @ceilf(float) readonly
+declare double @ceil(double) readonly
+
+declare float @floorf(float) readonly
+declare double @floor(double) readonly
+
+declare float @truncf(float) readonly
+declare double @trunc(double) readonly
+
+declare float @rintf(float) readonly
+declare double @rint(double) readonly
+
+declare float @nearbyintf(float) readonly
+declare double @nearbyint(double) readonly
+
+define void @simple_float() {
+; CHECK: simple_float:
+ %val1 = load volatile float* @varfloat
+
+ %valabs = call float @fabsf(float %val1)
+ store volatile float %valabs, float* @varfloat
+; CHECK: fabs {{s[0-9]+}}, {{s[0-9]+}}
+
+ %valneg = fsub float -0.0, %val1
+ store volatile float %valneg, float* @varfloat
+; CHECK: fneg {{s[0-9]+}}, {{s[0-9]+}}
+
+ %valsqrt = call float @llvm.sqrt.f32(float %val1)
+ store volatile float %valsqrt, float* @varfloat
+; CHECK: fsqrt {{s[0-9]+}}, {{s[0-9]+}}
+
+ %valceil = call float @ceilf(float %val1)
+ store volatile float %valceil, float* @varfloat
+; CHECK: frintp {{s[0-9]+}}, {{s[0-9]+}}
+
+ %valfloor = call float @floorf(float %val1)
+ store volatile float %valfloor, float* @varfloat
+; CHECK: frintm {{s[0-9]+}}, {{s[0-9]+}}
+
+ %valtrunc = call float @truncf(float %val1)
+ store volatile float %valtrunc, float* @varfloat
+; CHECK: frintz {{s[0-9]+}}, {{s[0-9]+}}
+
+ %valrint = call float @rintf(float %val1)
+ store volatile float %valrint, float* @varfloat
+; CHECK: frintx {{s[0-9]+}}, {{s[0-9]+}}
+
+ %valnearbyint = call float @nearbyintf(float %val1)
+ store volatile float %valnearbyint, float* @varfloat
+; CHECK: frinti {{s[0-9]+}}, {{s[0-9]+}}
+
+ ret void
+}
+
+define void @simple_double() {
+; CHECK: simple_double:
+ %val1 = load volatile double* @vardouble
+
+ %valabs = call double @fabs(double %val1)
+ store volatile double %valabs, double* @vardouble
+; CHECK: fabs {{d[0-9]+}}, {{d[0-9]+}}
+
+ %valneg = fsub double -0.0, %val1
+ store volatile double %valneg, double* @vardouble
+; CHECK: fneg {{d[0-9]+}}, {{d[0-9]+}}
+
+ %valsqrt = call double @llvm.sqrt.f64(double %val1)
+ store volatile double %valsqrt, double* @vardouble
+; CHECK: fsqrt {{d[0-9]+}}, {{d[0-9]+}}
+
+ %valceil = call double @ceil(double %val1)
+ store volatile double %valceil, double* @vardouble
+; CHECK: frintp {{d[0-9]+}}, {{d[0-9]+}}
+
+ %valfloor = call double @floor(double %val1)
+ store volatile double %valfloor, double* @vardouble
+; CHECK: frintm {{d[0-9]+}}, {{d[0-9]+}}
+
+ %valtrunc = call double @trunc(double %val1)
+ store volatile double %valtrunc, double* @vardouble
+; CHECK: frintz {{d[0-9]+}}, {{d[0-9]+}}
+
+ %valrint = call double @rint(double %val1)
+ store volatile double %valrint, double* @vardouble
+; CHECK: frintx {{d[0-9]+}}, {{d[0-9]+}}
+
+ %valnearbyint = call double @nearbyint(double %val1)
+ store volatile double %valnearbyint, double* @vardouble
+; CHECK: frinti {{d[0-9]+}}, {{d[0-9]+}}
+
+ ret void
+}
+
+define void @converts() {
+; CHECK: converts:
+
+ %val16 = load volatile half* @varhalf
+ %val32 = load volatile float* @varfloat
+ %val64 = load volatile double* @vardouble
+
+ %val16to32 = fpext half %val16 to float
+ store volatile float %val16to32, float* @varfloat
+; CHECK: fcvt {{s[0-9]+}}, {{h[0-9]+}}
+
+ %val16to64 = fpext half %val16 to double
+ store volatile double %val16to64, double* @vardouble
+; CHECK: fcvt {{d[0-9]+}}, {{h[0-9]+}}
+
+ %val32to16 = fptrunc float %val32 to half
+ store volatile half %val32to16, half* @varhalf
+; CHECK: fcvt {{h[0-9]+}}, {{s[0-9]+}}
+
+ %val32to64 = fpext float %val32 to double
+ store volatile double %val32to64, double* @vardouble
+; CHECK: fcvt {{d[0-9]+}}, {{s[0-9]+}}
+
+ %val64to16 = fptrunc double %val64 to half
+ store volatile half %val64to16, half* @varhalf
+; CHECK: fcvt {{h[0-9]+}}, {{d[0-9]+}}
+
+ %val64to32 = fptrunc double %val64 to float
+ store volatile float %val64to32, float* @varfloat
+; CHECK: fcvt {{s[0-9]+}}, {{d[0-9]+}}
+
+ ret void
+}
diff --git a/test/CodeGen/AArch64/floatdp_2source.ll b/test/CodeGen/AArch64/floatdp_2source.ll
new file mode 100644
index 0000000000..2cbcca723a
--- /dev/null
+++ b/test/CodeGen/AArch64/floatdp_2source.ll
@@ -0,0 +1,60 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
+@varfloat = global float 0.0
+@vardouble = global double 0.0
+
+define void @testfloat() {
+; CHECK: testfloat:
+ %val1 = load float* @varfloat
+
+ %val2 = fadd float %val1, %val1
+; CHECK: fadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+
+ %val3 = fmul float %val2, %val1
+; CHECK: fmul {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+
+ %val4 = fdiv float %val3, %val1
+; CHECK: fdiv {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+
+ %val5 = fsub float %val4, %val2
+; CHECK: fsub {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+
+ store volatile float %val5, float* @varfloat
+
+; These will be enabled with the implementation of floating-point litpool entries.
+ %val6 = fmul float %val1, %val2
+ %val7 = fsub float -0.0, %val6
+; CHECK: fnmul {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+
+ store volatile float %val7, float* @varfloat
+
+ ret void
+}
+
+define void @testdouble() {
+; CHECK: testdouble:
+ %val1 = load double* @vardouble
+
+ %val2 = fadd double %val1, %val1
+; CHECK: fadd {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+
+ %val3 = fmul double %val2, %val1
+; CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+
+ %val4 = fdiv double %val3, %val1
+; CHECK: fdiv {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+
+ %val5 = fsub double %val4, %val2
+; CHECK: fsub {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+
+ store volatile double %val5, double* @vardouble
+
+; These will be enabled with the implementation of floating-point litpool entries.
+ %val6 = fmul double %val1, %val2
+ %val7 = fsub double -0.0, %val6
+; CHECK: fnmul {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+
+ store volatile double %val7, double* @vardouble
+
+ ret void
+}
diff --git a/test/CodeGen/AArch64/fp-cond-sel.ll b/test/CodeGen/AArch64/fp-cond-sel.ll
new file mode 100644
index 0000000000..c64927b34e
--- /dev/null
+++ b/test/CodeGen/AArch64/fp-cond-sel.ll
@@ -0,0 +1,26 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
+@varfloat = global float 0.0
+@vardouble = global double 0.0
+
+define void @test_csel(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
+; CHECK: test_csel:
+
+ %tst1 = icmp ugt i32 %lhs32, %rhs32
+ %val1 = select i1 %tst1, float 0.0, float 1.0
+ store float %val1, float* @varfloat
+; CHECK: fmov [[FLT1:s[0-9]+]], #1.0
+; CHECK: ldr [[FLT0:s[0-9]+]], .LCPI
+; CHECK: fcsel {{s[0-9]+}}, [[FLT0]], [[FLT1]], hi
+
+ %rhs64 = sext i32 %rhs32 to i64
+ %tst2 = icmp sle i64 %lhs64, %rhs64
+ %val2 = select i1 %tst2, double 1.0, double 0.0
+ store double %val2, double* @vardouble
+; CHECK: ldr [[FLT0:d[0-9]+]], .LCPI
+; CHECK: fmov [[FLT1:d[0-9]+]], #1.0
+; CHECK: fcsel {{d[0-9]+}}, [[FLT1]], [[FLT0]], le
+
+ ret void
+; CHECK: ret
+}
diff --git a/test/CodeGen/AArch64/fp-dp3.ll b/test/CodeGen/AArch64/fp-dp3.ll
new file mode 100644
index 0000000000..84f34567b1
--- /dev/null
+++ b/test/CodeGen/AArch64/fp-dp3.ll
@@ -0,0 +1,102 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 -fp-contract=fast | FileCheck %s
+
+declare float @llvm.fma.f32(float, float, float)
+declare double @llvm.fma.f64(double, double, double)
+
+define float @test_fmadd(float %a, float %b, float %c) {
+; CHECK: test_fmadd:
+ %val = call float @llvm.fma.f32(float %a, float %b, float %c)
+; CHECK: fmadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+ ret float %val
+}
+
+define float @test_fmsub(float %a, float %b, float %c) {
+; CHECK: test_fmsub:
+ %nega = fsub float -0.0, %a
+ %val = call float @llvm.fma.f32(float %nega, float %b, float %c)
+; CHECK: fmsub {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+ ret float %val
+}
+
+define float @test_fnmadd(float %a, float %b, float %c) {
+; CHECK: test_fnmadd:
+ %negc = fsub float -0.0, %c
+ %val = call float @llvm.fma.f32(float %a, float %b, float %negc)
+; CHECK: fnmadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+ ret float %val
+}
+
+define float @test_fnmsub(float %a, float %b, float %c) {
+; CHECK: test_fnmsub:
+ %nega = fsub float -0.0, %a
+ %negc = fsub float -0.0, %c
+ %val = call float @llvm.fma.f32(float %nega, float %b, float %negc)
+; CHECK: fnmsub {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+ ret float %val
+}
+
+define double @testd_fmadd(double %a, double %b, double %c) {
+; CHECK: testd_fmadd:
+ %val = call double @llvm.fma.f64(double %a, double %b, double %c)
+; CHECK: fmadd {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+ ret double %val
+}
+
+define double @testd_fmsub(double %a, double %b, double %c) {
+; CHECK: testd_fmsub:
+ %nega = fsub double -0.0, %a
+ %val = call double @llvm.fma.f64(double %nega, double %b, double %c)
+; CHECK: fmsub {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+ ret double %val
+}
+
+define double @testd_fnmadd(double %a, double %b, double %c) {
+; CHECK: testd_fnmadd:
+ %negc = fsub double -0.0, %c
+ %val = call double @llvm.fma.f64(double %a, double %b, double %negc)
+; CHECK: fnmadd {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+ ret double %val
+}
+
+define double @testd_fnmsub(double %a, double %b, double %c) {
+; CHECK: testd_fnmsub:
+ %nega = fsub double -0.0, %a
+ %negc = fsub double -0.0, %c
+ %val = call double @llvm.fma.f64(double %nega, double %b, double %negc)
+; CHECK: fnmsub {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+ ret double %val
+}
+
+define float @test_fmadd_unfused(float %a, float %b, float %c) {
+; CHECK: test_fmadd_unfused:
+ %prod = fmul float %b, %c
+ %sum = fadd float %a, %prod
+; CHECK: fmadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+ ret float %sum
+}
+
+define float @test_fmsub_unfused(float %a, float %b, float %c) {
+; CHECK: test_fmsub_unfused:
+ %prod = fmul float %b, %c
+ %diff = fsub float %a, %prod
+; CHECK: fmsub {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+ ret float %diff
+}
+
+define float @test_fnmadd_unfused(float %a, float %b, float %c) {
+; CHECK: test_fnmadd_unfused:
+ %nega = fsub float -0.0, %a
+ %prod = fmul float %b, %c
+ %sum = fadd float %nega, %prod
+; CHECK: fnmadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+ ret float %sum
+}
+
+define float @test_fnmsub_unfused(float %a, float %b, float %c) {
+; CHECK: test_fnmsub_unfused:
+ %nega = fsub float -0.0, %a
+ %prod = fmul float %b, %c
+ %diff = fsub float %nega, %prod
+; CHECK: fnmsub {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+ ret float %diff
+}
diff --git a/test/CodeGen/AArch64/fp128-folding.ll b/test/CodeGen/AArch64/fp128-folding.ll
new file mode 100644
index 0000000000..891755fece
--- /dev/null
+++ b/test/CodeGen/AArch64/fp128-folding.ll
@@ -0,0 +1,17 @@
+; RUN: llc -march=aarch64 -verify-machineinstrs < %s | FileCheck %s
+declare void @bar(i8*, i8*, i32*)
+
+; SelectionDAG used to try to fold some fp128 operations using the ppc128 type,
+; which is not supported.
+
+define fp128 @test_folding() {
+; CHECK: test_folding:
+ %l = alloca i32
+ store i32 42, i32* %l
+ %val = load i32* %l
+ %fpval = sitofp i32 %val to fp128
+ ; If the value is loaded from a constant pool into an fp128, it's been folded
+ ; successfully.
+; CHECK: ldr {{q[0-9]+}}, .LCPI
+ ret fp128 %fpval
+} \ No newline at end of file
diff --git a/test/CodeGen/AArch64/fp128.ll b/test/CodeGen/AArch64/fp128.ll
new file mode 100644
index 0000000000..afba292402
--- /dev/null
+++ b/test/CodeGen/AArch64/fp128.ll
@@ -0,0 +1,280 @@
+; RUN: llc -march=aarch64 -verify-machineinstrs < %s | FileCheck %s
+
+@lhs = global fp128 zeroinitializer
+@rhs = global fp128 zeroinitializer
+
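+; A64 has no fp128 arithmetic instructions, so each of these operations is
+; expected to lower to a libcall (__addtf3 and friends) with the operands
+; passed and the result returned in q registers.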
+define fp128 @test_add() {
+; CHECK: test_add:
+
+ %lhs = load fp128* @lhs
+ %rhs = load fp128* @rhs
+; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
+; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]
+
+ %val = fadd fp128 %lhs, %rhs
+; CHECK: bl __addtf3
+ ret fp128 %val
+}
+
+define fp128 @test_sub() {
+; CHECK: test_sub:
+
+ %lhs = load fp128* @lhs
+ %rhs = load fp128* @rhs
+; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
+; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]
+
+ %val = fsub fp128 %lhs, %rhs
+; CHECK: bl __subtf3
+ ret fp128 %val
+}
+
+define fp128 @test_mul() {
+; CHECK: test_mul:
+
+ %lhs = load fp128* @lhs
+ %rhs = load fp128* @rhs
+; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
+; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]
+
+ %val = fmul fp128 %lhs, %rhs
+; CHECK: bl __multf3
+ ret fp128 %val
+}
+
+define fp128 @test_div() {
+; CHECK: test_div:
+
+ %lhs = load fp128* @lhs
+ %rhs = load fp128* @rhs
+; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
+; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]
+
+ %val = fdiv fp128 %lhs, %rhs
+; CHECK: bl __divtf3
+ ret fp128 %val
+}
+
+@var32 = global i32 0
+@var64 = global i64 0
+
+define void @test_fptosi() {
+; CHECK: test_fptosi:
+ %val = load fp128* @lhs
+
+ %val32 = fptosi fp128 %val to i32
+ store i32 %val32, i32* @var32
+; CHECK: bl __fixtfsi
+
+ %val64 = fptosi fp128 %val to i64
+ store i64 %val64, i64* @var64
+; CHECK: bl __fixtfdi
+
+ ret void
+}
+
+define void @test_fptoui() {
+; CHECK: test_fptoui:
+ %val = load fp128* @lhs
+
+ %val32 = fptoui fp128 %val to i32
+ store i32 %val32, i32* @var32
+; CHECK: bl __fixunstfsi
+
+ %val64 = fptoui fp128 %val to i64
+ store i64 %val64, i64* @var64
+; CHECK: bl __fixunstfdi
+
+ ret void
+}
+
+define void @test_sitofp() {
+; CHECK: test_sitofp:
+
+ %src32 = load i32* @var32
+ %val32 = sitofp i32 %src32 to fp128
+ store volatile fp128 %val32, fp128* @lhs
+; CHECK: bl __floatsitf
+
+ %src64 = load i64* @var64
+ %val64 = sitofp i64 %src64 to fp128
+ store volatile fp128 %val64, fp128* @lhs
+; CHECK: bl __floatditf
+
+ ret void
+}
+
+define void @test_uitofp() {
+; CHECK: test_uitofp:
+
+ %src32 = load i32* @var32
+ %val32 = uitofp i32 %src32 to fp128
+ store volatile fp128 %val32, fp128* @lhs
+; CHECK: bl __floatunsitf
+
+ %src64 = load i64* @var64
+ %val64 = uitofp i64 %src64 to fp128
+ store volatile fp128 %val64, fp128* @lhs
+; CHECK: bl __floatunditf
+
+ ret void
+}
+
+define i1 @test_setcc1() {
+; CHECK: test_setcc1:
+
+ %lhs = load fp128* @lhs
+ %rhs = load fp128* @rhs
+; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
+; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]
+
+; Technically, everything after the call to __letf2 is redundant, but we'll let
+; LLVM have its fun for now.
+ %val = fcmp ole fp128 %lhs, %rhs
+; CHECK: bl __letf2
+; CHECK: cmp w0, #0
+; CHECK: csinc w0, wzr, wzr, gt
+
+ ret i1 %val
+; CHECK: ret
+}
+
+define i1 @test_setcc2() {
+; CHECK: test_setcc2:
+
+ %lhs = load fp128* @lhs
+ %rhs = load fp128* @rhs
+; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
+; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]
+
+; Technically, a single three-way comparison libcall would be enough here, but
+; we'll let LLVM have its fun for now.
+ %val = fcmp ugt fp128 %lhs, %rhs
+; CHECK: bl __unordtf2
+; CHECK: mov x[[UNORDERED:[0-9]+]], x0
+
+; CHECK: bl __gttf2
+; CHECK: cmp w0, #0
+; CHECK: csinc [[GT:w[0-9]+]], wzr, wzr, le
+; CHECK: cmp w[[UNORDERED]], #0
+; CHECK: csinc [[UNORDERED:w[0-9]+]], wzr, wzr, eq
+; CHECK: orr w0, [[UNORDERED]], [[GT]]
+
+ ret i1 %val
+; CHECK: ret
+}
+
+define i32 @test_br_cc() {
+; CHECK: test_br_cc:
+
+ %lhs = load fp128* @lhs
+ %rhs = load fp128* @rhs
+; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
+; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]
+
+  ; olt == !uge, which is what LLVM unfortunately "optimizes" this to.
+ %cond = fcmp olt fp128 %lhs, %rhs
+; CHECK: bl __unordtf2
+; CHECK: mov x[[UNORDERED:[0-9]+]], x0
+
+; CHECK: bl __getf2
+; CHECK: cmp w0, #0
+
+; CHECK: csinc [[OGE:w[0-9]+]], wzr, wzr, lt
+; CHECK: cmp w[[UNORDERED]], #0
+; CHECK: csinc [[UNORDERED:w[0-9]+]], wzr, wzr, eq
+; CHECK: orr [[UGE:w[0-9]+]], [[UNORDERED]], [[OGE]]
+; CHECK: cbnz [[UGE]], [[RET29:.LBB[0-9]+_[0-9]+]]
+ br i1 %cond, label %iftrue, label %iffalse
+
+iftrue:
+ ret i32 42
+; CHECK-NEXT: BB#
+; CHECK-NEXT: movz x0, #42
+; CHECK-NEXT: b [[REALRET:.LBB[0-9]+_[0-9]+]]
+
+iffalse:
+ ret i32 29
+; CHECK: [[RET29]]:
+; CHECK-NEXT: movz x0, #29
+; CHECK-NEXT: [[REALRET]]:
+; CHECK: ret
+}
+
+define void @test_select(i1 %cond, fp128 %lhs, fp128 %rhs) {
+; CHECK: test_select:
+
+ %val = select i1 %cond, fp128 %lhs, fp128 %rhs
+ store fp128 %val, fp128* @lhs
+; CHECK: cmp w0, #0
+; CHECK: str q1, [sp]
+; CHECK-NEXT: b.eq [[IFFALSE:.LBB[0-9]+_[0-9]+]]
+; CHECK-NEXT: BB#
+; CHECK-NEXT: str q0, [sp]
+; CHECK-NEXT: [[IFFALSE]]:
+; CHECK-NEXT: ldr q0, [sp]
+; CHECK: str q0, [{{x[0-9]+}}, #:lo12:lhs]
+ ret void
+; CHECK: ret
+}
+
+@varfloat = global float 0.0
+@vardouble = global double 0.0
+
+define void @test_round() {
+; CHECK: test_round:
+
+ %val = load fp128* @lhs
+
+ %float = fptrunc fp128 %val to float
+ store float %float, float* @varfloat
+; CHECK: bl __trunctfsf2
+; CHECK: str s0, [{{x[0-9]+}}, #:lo12:varfloat]
+
+ %double = fptrunc fp128 %val to double
+ store double %double, double* @vardouble
+; CHECK: bl __trunctfdf2
+; CHECK: str d0, [{{x[0-9]+}}, #:lo12:vardouble]
+
+ ret void
+}
+
+define void @test_extend() {
+; CHECK: test_extend:
+
+ %val = load fp128* @lhs
+
+ %float = load float* @varfloat
+ %fromfloat = fpext float %float to fp128
+ store volatile fp128 %fromfloat, fp128* @lhs
+; CHECK: bl __extendsftf2
+; CHECK: str q0, [{{x[0-9]+}}, #:lo12:lhs]
+
+ %double = load double* @vardouble
+ %fromdouble = fpext double %double to fp128
+ store volatile fp128 %fromdouble, fp128* @lhs
+; CHECK: bl __extenddftf2
+; CHECK: str q0, [{{x[0-9]+}}, #:lo12:lhs]
+
+ ret void
+; CHECK: ret
+}
+
+define fp128 @test_neg(fp128 %in) {
+; CHECK: test_neg:
+
+  ; Could in principle be optimized to an fneg, which we can't select; this
+  ; makes sure that doesn't happen.
+ %ret = fsub fp128 0xL00000000000000008000000000000000, %in
+; CHECK: str q0, [sp, #-16]
+; CHECK-NEXT: ldr q1, [sp], #16
+; CHECK: ldr q0, [[MINUS0:.LCPI[0-9]+_0]]
+; CHECK: bl __subtf3
+
+ ret fp128 %ret
+; CHECK: ret
+
+; CHECK: [[MINUS0]]:
+; Make sure the weird hex constant below *is* -0.0
+; CHECK-NEXT: fp128 -0
+}
diff --git a/test/CodeGen/AArch64/fpimm.ll b/test/CodeGen/AArch64/fpimm.ll
new file mode 100644
index 0000000000..64c512126c
--- /dev/null
+++ b/test/CodeGen/AArch64/fpimm.ll
@@ -0,0 +1,34 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
+@varf32 = global float 0.0
+@varf64 = global double 0.0
+
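+; 8.5 fits the FMOV immediate encoding (roughly n/16 * 2^r with n in [16,31]
+; and r in [-3,4]), so it should be materialized with an fmov; 128.0 does not,
+; so it has to come from a constant pool.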
+define void @check_float() {
+; CHECK: check_float:
+
+ %val = load float* @varf32
+ %newval1 = fadd float %val, 8.5
+ store volatile float %newval1, float* @varf32
+; CHECK: fmov {{s[0-9]+}}, #8.5
+
+ %newval2 = fadd float %val, 128.0
+ store volatile float %newval2, float* @varf32
+; CHECK: ldr {{s[0-9]+}}, .LCPI0_0
+
+ ret void
+}
+
+define void @check_double() {
+; CHECK: check_double:
+
+ %val = load double* @varf64
+ %newval1 = fadd double %val, 8.5
+ store volatile double %newval1, double* @varf64
+; CHECK: fmov {{d[0-9]+}}, #8.5
+
+ %newval2 = fadd double %val, 128.0
+ store volatile double %newval2, double* @varf64
+; CHECK: ldr {{d[0-9]+}}, .LCPI1_0
+
+ ret void
+}
diff --git a/test/CodeGen/AArch64/func-argpassing.ll b/test/CodeGen/AArch64/func-argpassing.ll
new file mode 100644
index 0000000000..2c4dd03911
--- /dev/null
+++ b/test/CodeGen/AArch64/func-argpassing.ll
@@ -0,0 +1,192 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
+%myStruct = type { i64 , i8, i32 }
+
+@var8 = global i8 0
+@var32 = global i32 0
+@var64 = global i64 0
+@var128 = global i128 0
+@varfloat = global float 0.0
+@vardouble = global double 0.0
+@varstruct = global %myStruct zeroinitializer
+
+define void @take_i8s(i8 %val1, i8 %val2) {
+; CHECK: take_i8s:
+ store i8 %val2, i8* @var8
+ ; Not using w1 may be technically allowed, but it would indicate a
+ ; problem in itself.
+; CHECK: strb w1, [{{x[0-9]+}}, #:lo12:var8]
+ ret void
+}
+
+define void @add_floats(float %val1, float %val2) {
+; CHECK: add_floats:
+ %newval = fadd float %val1, %val2
+; CHECK: fadd [[ADDRES:s[0-9]+]], s0, s1
+ store float %newval, float* @varfloat
+; CHECK: str [[ADDRES]], [{{x[0-9]+}}, #:lo12:varfloat]
+ ret void
+}
+
+; byval pointers should be allocated to the stack and copied as if
+; with memcpy.
+define void @take_struct(%myStruct* byval %structval) {
+; CHECK: take_struct:
+ %addr0 = getelementptr %myStruct* %structval, i64 0, i32 2
+ %addr1 = getelementptr %myStruct* %structval, i64 0, i32 0
+
+ %val0 = load i32* %addr0
+ ; Some weird move means x0 is used for one access
+; CHECK: ldr [[REG32:w[0-9]+]], [{{x[0-9]+|sp}}, #12]
+ store i32 %val0, i32* @var32
+; CHECK: str [[REG32]], [{{x[0-9]+}}, #:lo12:var32]
+
+ %val1 = load i64* %addr1
+; CHECK: ldr [[REG64:x[0-9]+]], [{{x[0-9]+|sp}}]
+ store i64 %val1, i64* @var64
+; CHECK: str [[REG64]], [{{x[0-9]+}}, #:lo12:var64]
+
+ ret void
+}
+
+; %structval should be at sp + 16
+define void @check_byval_align(i32* byval %ignore, %myStruct* byval align 16 %structval) {
+; CHECK: check_byval_align:
+
+ %addr0 = getelementptr %myStruct* %structval, i64 0, i32 2
+ %addr1 = getelementptr %myStruct* %structval, i64 0, i32 0
+
+ %val0 = load i32* %addr0
+ ; Some weird move means x0 is used for one access
+; CHECK: add x[[STRUCTVAL_ADDR:[0-9]+]], sp, #16
+; CHECK: ldr [[REG32:w[0-9]+]], [x[[STRUCTVAL_ADDR]], #12]
+ store i32 %val0, i32* @var32
+; CHECK: str [[REG32]], [{{x[0-9]+}}, #:lo12:var32]
+
+ %val1 = load i64* %addr1
+; CHECK: ldr [[REG64:x[0-9]+]], [sp, #16]
+ store i64 %val1, i64* @var64
+; CHECK: str [[REG64]], [{{x[0-9]+}}, #:lo12:var64]
+
+ ret void
+}
+
+define i32 @return_int() {
+; CHECK: return_int:
+ %val = load i32* @var32
+ ret i32 %val
+; CHECK: ldr w0, [{{x[0-9]+}}, #:lo12:var32]
+ ; Make sure epilogue follows
+; CHECK-NEXT: ret
+}
+
+define double @return_double() {
+; CHECK: return_double:
+ ret double 3.14
+; CHECK: ldr d0, .LCPI
+}
+
+; This is the kind of IR clang will produce for returning a struct
+; small enough to go into registers. Not all that pretty, but it
+; works.
+define [2 x i64] @return_struct() {
+; CHECK: return_struct:
+ %addr = bitcast %myStruct* @varstruct to [2 x i64]*
+ %val = load [2 x i64]* %addr
+ ret [2 x i64] %val
+; CHECK: ldr x0, [{{x[0-9]+}}, #:lo12:varstruct]
+ ; Odd register regex below disallows x0 which we want to be live now.
+; CHECK: add {{x[1-9][0-9]*}}, {{x[1-9][0-9]*}}, #:lo12:varstruct
+; CHECK-NEXT: ldr x1, [{{x[1-9][0-9]*}}, #8]
+ ; Make sure epilogue immediately follows
+; CHECK-NEXT: ret
+}
+
+; Large structs are passed by reference (storage allocated by caller
+; to preserve value semantics) in x8. Strictly this only applies to
+; structs larger than 16 bytes, but C semantics can still be provided
+; if LLVM does it to %myStruct too, so this is the simplest check.
+define void @return_large_struct(%myStruct* sret %retval) {
+; CHECK: return_large_struct:
+ %addr0 = getelementptr %myStruct* %retval, i64 0, i32 0
+ %addr1 = getelementptr %myStruct* %retval, i64 0, i32 1
+ %addr2 = getelementptr %myStruct* %retval, i64 0, i32 2
+
+ store i64 42, i64* %addr0
+ store i8 2, i8* %addr1
+ store i32 9, i32* %addr2
+; CHECK: str {{x[0-9]+}}, [x8]
+; CHECK: strb {{w[0-9]+}}, [x8, #8]
+; CHECK: str {{w[0-9]+}}, [x8, #12]
+
+ ret void
+}
+
+; This struct is just too far along to go into registers (only x7 is
+; available, but it needs two). Also make sure that %stacked doesn't
+; sneak into x7 behind it.
+define i32 @struct_on_stack(i8 %var0, i16 %var1, i32 %var2, i64 %var3, i128 %var45,
+ i32* %var6, %myStruct* byval %struct, i32* byval %stacked,
+ double %notstacked) {
+; CHECK: struct_on_stack:
+ %addr = getelementptr %myStruct* %struct, i64 0, i32 0
+ %val64 = load i64* %addr
+ store i64 %val64, i64* @var64
+ ; Currently nothing on local stack, so struct should be at sp
+; CHECK: ldr [[VAL64:x[0-9]+]], [sp]
+; CHECK: str [[VAL64]], [{{x[0-9]+}}, #:lo12:var64]
+
+ store double %notstacked, double* @vardouble
+; CHECK-NOT: ldr d0
+; CHECK: str d0, [{{x[0-9]+}}, #:lo12:vardouble
+
+ %retval = load i32* %stacked
+ ret i32 %retval
+; CHECK: ldr w0, [sp, #16]
+}
+
+define void @stacked_fpu(float %var0, double %var1, float %var2, float %var3,
+ float %var4, float %var5, float %var6, float %var7,
+ float %var8) {
+; CHECK: stacked_fpu:
+ store float %var8, float* @varfloat
+ ; Beware as above: the offset would be different on big-endian
+ ; machines if the first ldr were changed to use s-registers.
+; CHECK: ldr d[[VALFLOAT:[0-9]+]], [sp]
+; CHECK: str s[[VALFLOAT]], [{{x[0-9]+}}, #:lo12:varfloat]
+
+ ret void
+}
+
+; 128-bit integer types should be passed in xEVEN, xODD rather than
+; the reverse. In this case x2 and x3. Nothing should use x1.
+define i32 @check_i128_regalign(i32 %val0, i128 %val1, i32 %val2) {
+; CHECK: check_i128_regalign
+ store i128 %val1, i128* @var128
+; CHECK: str x2, [{{x[0-9]+}}, #:lo12:var128]
+; CHECK: str x3, [{{x[0-9]+}}, #8]
+
+ ret i32 %val2
+; CHECK: mov x0, x4
+}
+
+define void @check_i128_stackalign(i32 %val0, i32 %val1, i32 %val2, i32 %val3,
+ i32 %val4, i32 %val5, i32 %val6, i32 %val7,
+ i32 %stack1, i128 %stack2) {
+; CHECK: check_i128_stackalign
+ store i128 %stack2, i128* @var128
+ ; Nothing local on stack in current codegen, so first stack is 16 away
+; CHECK: ldr {{x[0-9]+}}, [sp, #16]
+ ; Important point is that we address sp+24 for second dword
+; CHECK: ldr {{x[0-9]+}}, [sp, #24]
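+  ; (The i32 %stack1 occupies [sp], then padding keeps the i128 16-byte
+  ; aligned, so its two halves end up at [sp, #16] and [sp, #24].)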
+ ret void
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1)
+
+define i32 @test_extern() {
+; CHECK: test_extern:
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* undef, i8* undef, i32 undef, i32 4, i1 0)
+; CHECK: bl memcpy
+ ret i32 0
+}
diff --git a/test/CodeGen/AArch64/func-calls.ll b/test/CodeGen/AArch64/func-calls.ll
new file mode 100644
index 0000000000..f96564d3a1
--- /dev/null
+++ b/test/CodeGen/AArch64/func-calls.ll
@@ -0,0 +1,140 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
+%myStruct = type { i64 , i8, i32 }
+
+@var8 = global i8 0
+@var8_2 = global i8 0
+@var32 = global i32 0
+@var64 = global i64 0
+@var128 = global i128 0
+@varfloat = global float 0.0
+@varfloat_2 = global float 0.0
+@vardouble = global double 0.0
+@varstruct = global %myStruct zeroinitializer
+@varsmallstruct = global [2 x i64] zeroinitializer
+
+declare void @take_i8s(i8 %val1, i8 %val2)
+declare void @take_floats(float %val1, float %val2)
+
+define void @simple_args() {
+; CHECK: simple_args:
+ %char1 = load i8* @var8
+ %char2 = load i8* @var8_2
+ call void @take_i8s(i8 %char1, i8 %char2)
+; CHECK: ldrb w0, [{{x[0-9]+}}, #:lo12:var8]
+; CHECK: ldrb w1, [{{x[0-9]+}}, #:lo12:var8_2]
+; CHECK: bl take_i8s
+
+ %float1 = load float* @varfloat
+ %float2 = load float* @varfloat_2
+ call void @take_floats(float %float1, float %float2)
+; CHECK: ldr s1, [{{x[0-9]+}}, #:lo12:varfloat_2]
+; CHECK: ldr s0, [{{x[0-9]+}}, #:lo12:varfloat]
+; CHECK: bl take_floats
+
+ ret void
+}
+
+declare i32 @return_int()
+declare double @return_double()
+declare [2 x i64] @return_smallstruct()
+declare void @return_large_struct(%myStruct* sret %retval)
+
+define void @simple_rets() {
+; CHECK: simple_rets:
+
+ %int = call i32 @return_int()
+ store i32 %int, i32* @var32
+; CHECK: bl return_int
+; CHECK: str w0, [{{x[0-9]+}}, #:lo12:var32]
+
+ %dbl = call double @return_double()
+ store double %dbl, double* @vardouble
+; CHECK: bl return_double
+; CHECK: str d0, [{{x[0-9]+}}, #:lo12:vardouble]
+
+ %arr = call [2 x i64] @return_smallstruct()
+ store [2 x i64] %arr, [2 x i64]* @varsmallstruct
+; CHECK: bl return_smallstruct
+; CHECK: str x1, [{{x[0-9]+}}, #8]
+; CHECK: str x0, [{{x[0-9]+}}, #:lo12:varsmallstruct]
+
+ call void @return_large_struct(%myStruct* sret @varstruct)
+; CHECK: add x8, {{x[0-9]+}}, #:lo12:varstruct
+; CHECK: bl return_large_struct
+
+ ret void
+}
+
+
+declare i32 @struct_on_stack(i8 %var0, i16 %var1, i32 %var2, i64 %var3, i128 %var45,
+ i32* %var6, %myStruct* byval %struct, i32 %stacked,
+ double %notstacked)
+declare void @stacked_fpu(float %var0, double %var1, float %var2, float %var3,
+ float %var4, float %var5, float %var6, float %var7,
+ float %var8)
+
+define void @check_stack_args() {
+ call i32 @struct_on_stack(i8 0, i16 12, i32 42, i64 99, i128 1,
+ i32* @var32, %myStruct* byval @varstruct,
+ i32 999, double 1.0)
+ ; Want to check that the final double is passed in registers and
+ ; that varstruct is passed on the stack. Rather dependent on how a
+ ; memcpy gets created, but the following works for now.
+; CHECK: mov x0, sp
+; CHECK: str {{w[0-9]+}}, [x0]
+; CHECK: str {{w[0-9]+}}, [x0, #12]
+; CHECK: fmov d0,
+; CHECK: bl struct_on_stack
+
+ call void @stacked_fpu(float -1.0, double 1.0, float 4.0, float 2.0,
+ float -2.0, float -8.0, float 16.0, float 1.0,
+ float 64.0)
+; CHECK: ldr s[[STACKEDREG:[0-9]+]], .LCPI
+; CHECK: mov x0, sp
+; CHECK: str d[[STACKEDREG]], [x0]
+; CHECK: bl stacked_fpu
+ ret void
+}
+
+
+declare void @check_i128_stackalign(i32 %val0, i32 %val1, i32 %val2, i32 %val3,
+ i32 %val4, i32 %val5, i32 %val6, i32 %val7,
+ i32 %stack1, i128 %stack2)
+
+declare void @check_i128_regalign(i32 %val0, i128 %val1)
+
+
+define void @check_i128_align() {
+; CHECK: check_i128_align:
+ %val = load i128* @var128
+ call void @check_i128_stackalign(i32 0, i32 1, i32 2, i32 3,
+ i32 4, i32 5, i32 6, i32 7,
+ i32 42, i128 %val)
+; CHECK: ldr [[I128LO:x[0-9]+]], [{{x[0-9]+}}, #:lo12:var128]
+; CHECK: ldr [[I128HI:x[0-9]+]], [{{x[0-9]+}}, #8]
+; CHECK: mov x[[SPREG:[0-9]+]], sp
+; CHECK: str [[I128HI]], [x[[SPREG]], #24]
+; CHECK: str [[I128LO]], [x[[SPREG]], #16]
+; CHECK: bl check_i128_stackalign
+
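+  ; The i128 argument must start at an even-numbered register, so x1 is left
+  ; unused and the value goes in the x2/x3 pair.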
+ call void @check_i128_regalign(i32 0, i128 42)
+; CHECK-NOT: mov x1
+; CHECK: movz x2, #42
+; CHECK: mov x3, xzr
+; CHECK: bl check_i128_regalign
+
+ ret void
+}
+
+@fptr = global void()* null
+
+define void @check_indirect_call() {
+; CHECK: check_indirect_call:
+ %func = load void()** @fptr
+ call void %func()
+; CHECK: ldr [[FPTR:x[0-9]+]], [{{x[0-9]+}}, #:lo12:fptr]
+; CHECK: blr [[FPTR]]
+
+ ret void
+}
diff --git a/test/CodeGen/AArch64/global-alignment.ll b/test/CodeGen/AArch64/global-alignment.ll
new file mode 100644
index 0000000000..afd70e08c7
--- /dev/null
+++ b/test/CodeGen/AArch64/global-alignment.ll
@@ -0,0 +1,69 @@
+; RUN: llc -march=aarch64 -verify-machineinstrs < %s | FileCheck %s
+
+@var32 = global [3 x i32] zeroinitializer
+@var64 = global [3 x i64] zeroinitializer
+@var32_align64 = global [3 x i32] zeroinitializer, align 8
+
+define i64 @test_align32() {
+; CHECK: test_align32:
+ %addr = bitcast [3 x i32]* @var32 to i64*
+
+ ; Since @var32 is only guaranteed to be aligned to 32-bits, it's invalid to
+  ; emit an "LDR x0, [x0, #:lo12:var32]" instruction to implement this load.
+ %val = load i64* %addr
+; CHECK: adrp [[HIBITS:x[0-9]+]], var32
+; CHECK: add x[[ADDR:[0-9]+]], [[HIBITS]], #:lo12:var32
+; CHECK: ldr x0, [x[[ADDR]]]
+
+ ret i64 %val
+}
+
+define i64 @test_align64() {
+; CHECK: test_align64:
+ %addr = bitcast [3 x i64]* @var64 to i64*
+
+ ; However, var64 *is* properly aligned and emitting an adrp/add/ldr would be
+ ; inefficient.
+ %val = load i64* %addr
+; CHECK: adrp x[[HIBITS:[0-9]+]], var64
+; CHECK-NOT: add x[[HIBITS]]
+; CHECK: ldr x0, [x[[HIBITS]], #:lo12:var64]
+
+ ret i64 %val
+}
+
+define i64 @test_var32_align64() {
+; CHECK: test_var32_align64:
+ %addr = bitcast [3 x i32]* @var32_align64 to i64*
+
+  ; Unlike @var32, @var32_align64 is explicitly given 64-bit alignment, so it
+  ; is fine to fold the offset and emit "LDR x0, [x0, #:lo12:var32_align64]"
+  ; to implement this load.
+ %val = load i64* %addr
+; CHECK: adrp x[[HIBITS:[0-9]+]], var32_align64
+; CHECK-NOT: add x[[HIBITS]]
+; CHECK: ldr x0, [x[[HIBITS]], #:lo12:var32_align64]
+
+ ret i64 %val
+}
+
+@yet_another_var = external global {i32, i32}
+
+define i64 @test_yet_another_var() {
+; CHECK: test_yet_another_var:
+
+ ; @yet_another_var has a preferred alignment of 8, but that's not enough if
+ ; we're going to be linking against other things. Its ABI alignment is only 4
+ ; so we can't fold the load.
+ %val = load i64* bitcast({i32, i32}* @yet_another_var to i64*)
+; CHECK: adrp [[HIBITS:x[0-9]+]], yet_another_var
+; CHECK: add x[[ADDR:[0-9]+]], [[HIBITS]], #:lo12:yet_another_var
+; CHECK: ldr x0, [x[[ADDR]]]
+ ret i64 %val
+}
+
+define i64()* @test_functions() {
+; CHECK: test_functions:
+ ret i64()* @test_yet_another_var
+; CHECK: adrp [[HIBITS:x[0-9]+]], test_yet_another_var
+; CHECK: add x0, [[HIBITS]], #:lo12:test_yet_another_var
+}
diff --git a/test/CodeGen/AArch64/got-abuse.ll b/test/CodeGen/AArch64/got-abuse.ll
new file mode 100644
index 0000000000..b233697aa2
--- /dev/null
+++ b/test/CodeGen/AArch64/got-abuse.ll
@@ -0,0 +1,23 @@
+; RUN: llc -march=aarch64 -relocation-model=pic < %s | FileCheck %s
+; RUN: llc -march=aarch64 -relocation-model=pic -filetype=obj < %s
+
+; LLVM gives well-defined semantics to this horrible construct (though C says
+; it's undefined). Regardless, we shouldn't crash. The important feature here is
+; that in general the only way to access a GOT symbol is via a 64-bit
+; load. Neither of these alternatives has the ELF relocations required to
+; support it:
+; + ldr wD, [xN, #:got_lo12:func]
+; + add xD, xN, #:got_lo12:func
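+; The only usable sequence is therefore the one checked below: an adrp of
+; :got:func followed by a 64-bit ldr using #:got_lo12:func.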
+
+declare void @consume(i32)
+declare void @func()
+
+define void @foo() nounwind {
+; CHECK: foo:
+entry:
+ call void @consume(i32 ptrtoint (void ()* @func to i32))
+; CHECK: adrp x[[ADDRHI:[0-9]+]], :got:func
+; CHECK: ldr {{x[0-9]+}}, [x[[ADDRHI]], #:got_lo12:func]
+ ret void
+}
+
diff --git a/test/CodeGen/AArch64/i128-align.ll b/test/CodeGen/AArch64/i128-align.ll
new file mode 100644
index 0000000000..2b6d2cdb43
--- /dev/null
+++ b/test/CodeGen/AArch64/i128-align.ll
@@ -0,0 +1,29 @@
+; RUN: llc -march=aarch64 -verify-machineinstrs < %s | FileCheck %s
+
+%struct = type { i32, i128, i8 }
+
+@var = global %struct zeroinitializer
+
+define i64 @check_size() {
+; CHECK: check_size:
+ %starti = ptrtoint %struct* @var to i64
+
+ %endp = getelementptr %struct* @var, i64 1
+ %endi = ptrtoint %struct* %endp to i64
+
+ %diff = sub i64 %endi, %starti
+ ret i64 %diff
+; CHECK: movz x0, #48
+}
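+; (Layout of %struct under the AArch64 data layout: the i32 sits at offset 0,
+; the i128 is padded up to its 16-byte alignment at offset 16, the i8 follows
+; at offset 32, and the total size is rounded up to the struct's 16-byte
+; alignment, giving 48 -- hence the #48 above and the #16 checked below.)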
+
+define i64 @check_field() {
+; CHECK: check_field:
+ %starti = ptrtoint %struct* @var to i64
+
+ %endp = getelementptr %struct* @var, i64 0, i32 1
+ %endi = ptrtoint i128* %endp to i64
+
+ %diff = sub i64 %endi, %starti
+ ret i64 %diff
+; CHECK: movz x0, #16
+}
\ No newline at end of file
diff --git a/test/CodeGen/AArch64/illegal-float-ops.ll b/test/CodeGen/AArch64/illegal-float-ops.ll
new file mode 100644
index 0000000000..8c735dda3e
--- /dev/null
+++ b/test/CodeGen/AArch64/illegal-float-ops.ll
@@ -0,0 +1,221 @@
+; RUN: llc -march=aarch64 -verify-machineinstrs < %s | FileCheck %s
+
+@varfloat = global float 0.0
+@vardouble = global double 0.0
+@varfp128 = global fp128 zeroinitializer
+
+declare float @llvm.cos.f32(float)
+declare double @llvm.cos.f64(double)
+declare fp128 @llvm.cos.f128(fp128)
+
+define void @test_cos(float %float, double %double, fp128 %fp128) {
+; CHECK: test_cos:
+
+ %cosfloat = call float @llvm.cos.f32(float %float)
+ store float %cosfloat, float* @varfloat
+; CHECK: bl cosf
+
+ %cosdouble = call double @llvm.cos.f64(double %double)
+ store double %cosdouble, double* @vardouble
+; CHECK: bl cos
+
+ %cosfp128 = call fp128 @llvm.cos.f128(fp128 %fp128)
+ store fp128 %cosfp128, fp128* @varfp128
+; CHECK: bl cosl
+
+ ret void
+}
+
+declare float @llvm.exp.f32(float)
+declare double @llvm.exp.f64(double)
+declare fp128 @llvm.exp.f128(fp128)
+
+define void @test_exp(float %float, double %double, fp128 %fp128) {
+; CHECK: test_exp:
+
+ %expfloat = call float @llvm.exp.f32(float %float)
+ store float %expfloat, float* @varfloat
+; CHECK: bl expf
+
+ %expdouble = call double @llvm.exp.f64(double %double)
+ store double %expdouble, double* @vardouble
+; CHECK: bl exp
+
+ %expfp128 = call fp128 @llvm.exp.f128(fp128 %fp128)
+ store fp128 %expfp128, fp128* @varfp128
+; CHECK: bl expl
+
+ ret void
+}
+
+declare float @llvm.exp2.f32(float)
+declare double @llvm.exp2.f64(double)
+declare fp128 @llvm.exp2.f128(fp128)
+
+define void @test_exp2(float %float, double %double, fp128 %fp128) {
+; CHECK: test_exp2:
+
+ %exp2float = call float @llvm.exp2.f32(float %float)
+ store float %exp2float, float* @varfloat
+; CHECK: bl exp2f
+
+ %exp2double = call double @llvm.exp2.f64(double %double)
+ store double %exp2double, double* @vardouble
+; CHECK: bl exp2
+
+ %exp2fp128 = call fp128 @llvm.exp2.f128(fp128 %fp128)
+ store fp128 %exp2fp128, fp128* @varfp128
+; CHECK: bl exp2l
+ ret void
+
+}
+
+declare float @llvm.log.f32(float)
+declare double @llvm.log.f64(double)
+declare fp128 @llvm.log.f128(fp128)
+
+define void @test_log(float %float, double %double, fp128 %fp128) {
+; CHECK: test_log:
+
+ %logfloat = call float @llvm.log.f32(float %float)
+ store float %logfloat, float* @varfloat
+; CHECK: bl logf
+
+ %logdouble = call double @llvm.log.f64(double %double)
+ store double %logdouble, double* @vardouble
+; CHECK: bl log
+
+ %logfp128 = call fp128 @llvm.log.f128(fp128 %fp128)
+ store fp128 %logfp128, fp128* @varfp128
+; CHECK: bl logl
+
+ ret void
+}
+
+declare float @llvm.log2.f32(float)
+declare double @llvm.log2.f64(double)
+declare fp128 @llvm.log2.f128(fp128)
+
+define void @test_log2(float %float, double %double, fp128 %fp128) {
+; CHECK: test_log2:
+
+ %log2float = call float @llvm.log2.f32(float %float)
+ store float %log2float, float* @varfloat
+; CHECK: bl log2f
+
+ %log2double = call double @llvm.log2.f64(double %double)
+ store double %log2double, double* @vardouble
+; CHECK: bl log2
+
+ %log2fp128 = call fp128 @llvm.log2.f128(fp128 %fp128)
+ store fp128 %log2fp128, fp128* @varfp128
+; CHECK: bl log2l
+ ret void
+
+}
+
+declare float @llvm.log10.f32(float)
+declare double @llvm.log10.f64(double)
+declare fp128 @llvm.log10.f128(fp128)
+
+define void @test_log10(float %float, double %double, fp128 %fp128) {
+; CHECK: test_log10:
+
+ %log10float = call float @llvm.log10.f32(float %float)
+ store float %log10float, float* @varfloat
+; CHECK: bl log10f
+
+ %log10double = call double @llvm.log10.f64(double %double)
+ store double %log10double, double* @vardouble
+; CHECK: bl log10
+
+ %log10fp128 = call fp128 @llvm.log10.f128(fp128 %fp128)
+ store fp128 %log10fp128, fp128* @varfp128
+; CHECK: bl log10l
+
+ ret void
+}
+
+declare float @llvm.sin.f32(float)
+declare double @llvm.sin.f64(double)
+declare fp128 @llvm.sin.f128(fp128)
+
+define void @test_sin(float %float, double %double, fp128 %fp128) {
+; CHECK: test_sin:
+
+ %sinfloat = call float @llvm.sin.f32(float %float)
+ store float %sinfloat, float* @varfloat
+; CHECK: bl sinf
+
+ %sindouble = call double @llvm.sin.f64(double %double)
+ store double %sindouble, double* @vardouble
+; CHECK: bl sin
+
+ %sinfp128 = call fp128 @llvm.sin.f128(fp128 %fp128)
+ store fp128 %sinfp128, fp128* @varfp128
+; CHECK: bl sinl
+ ret void
+
+}
+
+declare float @llvm.pow.f32(float, float)
+declare double @llvm.pow.f64(double, double)
+declare fp128 @llvm.pow.f128(fp128, fp128)
+
+define void @test_pow(float %float, double %double, fp128 %fp128) {
+; CHECK: test_pow:
+
+ %powfloat = call float @llvm.pow.f32(float %float, float %float)
+ store float %powfloat, float* @varfloat
+; CHECK: bl powf
+
+ %powdouble = call double @llvm.pow.f64(double %double, double %double)
+ store double %powdouble, double* @vardouble
+; CHECK: bl pow
+
+ %powfp128 = call fp128 @llvm.pow.f128(fp128 %fp128, fp128 %fp128)
+ store fp128 %powfp128, fp128* @varfp128
+; CHECK: bl powl
+
+ ret void
+}
+
+declare float @llvm.powi.f32(float, i32)
+declare double @llvm.powi.f64(double, i32)
+declare fp128 @llvm.powi.f128(fp128, i32)
+
+define void @test_powi(float %float, double %double, i32 %exponent, fp128 %fp128) {
+; CHECK: test_powi:
+
+ %powifloat = call float @llvm.powi.f32(float %float, i32 %exponent)
+ store float %powifloat, float* @varfloat
+; CHECK: bl __powisf2
+
+ %powidouble = call double @llvm.powi.f64(double %double, i32 %exponent)
+ store double %powidouble, double* @vardouble
+; CHECK: bl __powidf2
+
+ %powifp128 = call fp128 @llvm.powi.f128(fp128 %fp128, i32 %exponent)
+ store fp128 %powifp128, fp128* @varfp128
+; CHECK: bl __powitf2
+ ret void
+
+}
+
+define void @test_frem(float %float, double %double, fp128 %fp128) {
+; CHECK: test_frem:
+
+ %fremfloat = frem float %float, %float
+ store float %fremfloat, float* @varfloat
+; CHECK: bl fmodf
+
+ %fremdouble = frem double %double, %double
+ store double %fremdouble, double* @vardouble
+; CHECK: bl fmod
+
+ %fremfp128 = frem fp128 %fp128, %fp128
+ store fp128 %fremfp128, fp128* @varfp128
+; CHECK: bl fmodl
+
+ ret void
+}
diff --git a/test/CodeGen/AArch64/init-array.ll b/test/CodeGen/AArch64/init-array.ll
new file mode 100644
index 0000000000..d80be8f3a6
--- /dev/null
+++ b/test/CodeGen/AArch64/init-array.ll
@@ -0,0 +1,9 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs -use-init-array < %s | FileCheck %s
+
+define internal void @_GLOBAL__I_a() section ".text.startup" {
+ ret void
+}
+
+@llvm.global_ctors = appending global [1 x { i32, void ()* }] [{ i32, void ()* } { i32 65535, void ()* @_GLOBAL__I_a }]
+
+; CHECK: .section .init_array
\ No newline at end of file
diff --git a/test/CodeGen/AArch64/inline-asm-constraints-badI.ll b/test/CodeGen/AArch64/inline-asm-constraints-badI.ll
new file mode 100644
index 0000000000..c300482d10
--- /dev/null
+++ b/test/CodeGen/AArch64/inline-asm-constraints-badI.ll
@@ -0,0 +1,7 @@
+; RUN: not llc -march=aarch64 < %s
+
+define void @foo() {
+ ; Out of range immediate for I.
+ call void asm sideeffect "add x0, x0, $0", "I"(i32 4096)
+ ret void
+}
\ No newline at end of file
diff --git a/test/CodeGen/AArch64/inline-asm-constraints-badK.ll b/test/CodeGen/AArch64/inline-asm-constraints-badK.ll
new file mode 100644
index 0000000000..2b5229cd9c
--- /dev/null
+++ b/test/CodeGen/AArch64/inline-asm-constraints-badK.ll
@@ -0,0 +1,7 @@
+; RUN: not llc -march=aarch64 < %s
+
+define void @foo() {
+ ; 32-bit bitpattern ending in 1101 can't be produced.
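+  ; (Logical-immediate operands must be a repeating pattern of a rotated run
+  ; of contiguous ones; 13 = 0b1101 has a gap in its set bits, so no encoding
+  ; exists and selection must fail.)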
+ call void asm sideeffect "and w0, w0, $0", "K"(i32 13)
+ ret void
+}
\ No newline at end of file
diff --git a/test/CodeGen/AArch64/inline-asm-constraints-badK2.ll b/test/CodeGen/AArch64/inline-asm-constraints-badK2.ll
new file mode 100644
index 0000000000..f0ad87a39b
--- /dev/null
+++ b/test/CodeGen/AArch64/inline-asm-constraints-badK2.ll
@@ -0,0 +1,7 @@
+; RUN: not llc -march=aarch64 < %s
+
+define void @foo() {
+  ; 4294967296 (1 << 32) doesn't fit in 32 bits, so it can't be a valid
+  ; immediate for a 32-bit "and".
+ call void asm sideeffect "and w0, w0, $0", "K"(i64 4294967296)
+ ret void
+}
\ No newline at end of file
diff --git a/test/CodeGen/AArch64/inline-asm-constraints-badL.ll b/test/CodeGen/AArch64/inline-asm-constraints-badL.ll
new file mode 100644
index 0000000000..90da9ba128
--- /dev/null
+++ b/test/CodeGen/AArch64/inline-asm-constraints-badL.ll
@@ -0,0 +1,7 @@
+; RUN: not llc -march=aarch64 < %s
+
+define void @foo() {
+  ; 13 (0b1101) can't be encoded as a 64-bit logical immediate, so the L
+  ; constraint must be rejected.
+ call void asm sideeffect "and x0, x0, $0", "L"(i32 13)
+ ret void
+}
\ No newline at end of file
diff --git a/test/CodeGen/AArch64/inline-asm-constraints.ll b/test/CodeGen/AArch64/inline-asm-constraints.ll
new file mode 100644
index 0000000000..fb3e392187
--- /dev/null
+++ b/test/CodeGen/AArch64/inline-asm-constraints.ll
@@ -0,0 +1,117 @@
+; RUN: llc -march=aarch64 < %s | FileCheck %s
+
+define i64 @test_inline_constraint_r(i64 %base, i32 %offset) {
+; CHECK: test_inline_constraint_r:
+ %val = call i64 asm "add $0, $1, $2, sxtw", "=r,r,r"(i64 %base, i32 %offset)
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, sxtw
+ ret i64 %val
+}
+
+define i16 @test_small_reg(i16 %lhs, i16 %rhs) {
+; CHECK: test_small_reg:
+ %val = call i16 asm sideeffect "add $0, $1, $2, sxth", "=r,r,r"(i16 %lhs, i16 %rhs)
+; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, sxth
+ ret i16 %val
+}
+
+define i64 @test_inline_constraint_r_imm(i64 %base, i32 %offset) {
+; CHECK: test_inline_constraint_r_imm:
+ %val = call i64 asm "add $0, $1, $2, sxtw", "=r,r,r"(i64 4, i32 12)
+; CHECK: movz [[FOUR:x[0-9]+]], #4
+; CHECK: movz [[TWELVE:w[0-9]+]], #12
+; CHECK: add {{x[0-9]+}}, [[FOUR]], [[TWELVE]], sxtw
+ ret i64 %val
+}
+
+; m is permitted to have a base/offset form. We don't do that
+; currently though.
+define i32 @test_inline_constraint_m(i32 *%ptr) {
+; CHECK: test_inline_constraint_m:
+ %val = call i32 asm "ldr $0, $1", "=r,m"(i32 *%ptr)
+; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}]
+ ret i32 %val
+}
+
+@arr = global [8 x i32] zeroinitializer
+
+; Q should *never* have base/offset form even if given the chance.
+define i32 @test_inline_constraint_Q(i32 *%ptr) {
+; CHECK: test_inline_constraint_Q:
+ %val = call i32 asm "ldr $0, $1", "=r,Q"(i32* getelementptr([8 x i32]* @arr, i32 0, i32 1))
+; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}]
+ ret i32 %val
+}
+
+@dump = global fp128 zeroinitializer
+
+define void @test_inline_constraint_I() {
+; CHECK: test_inline_constraint_I:
+ call void asm sideeffect "add x0, x0, $0", "I"(i32 0)
+ call void asm sideeffect "add x0, x0, $0", "I"(i64 4095)
+; CHECK: add x0, x0, #0
+; CHECK: add x0, x0, #4095
+
+ ret void
+}
+
+; Skip J because it's useless
+
+define void @test_inline_constraint_K() {
+; CHECK: test_inline_constraint_K:
+ call void asm sideeffect "and w0, w0, $0", "K"(i32 2863311530) ; = 0xaaaaaaaa
+ call void asm sideeffect "and w0, w0, $0", "K"(i32 65535)
+; CHECK: and w0, w0, #-1431655766
+; CHECK: and w0, w0, #65535
+
+ ret void
+}
+
+define void @test_inline_constraint_L() {
+; CHECK: test_inline_constraint_L:
+ call void asm sideeffect "and x0, x0, $0", "L"(i64 4294967296) ; = 0xaaaaaaaa
+ call void asm sideeffect "and x0, x0, $0", "L"(i64 65535)
+; CHECK: and x0, x0, #4294967296
+; CHECK: and x0, x0, #65535
+
+ ret void
+}
+
+; Skip M and N because we don't support MOV pseudo-instructions yet.
+
+@var = global i32 0
+
+define void @test_inline_constraint_S() {
+; CHECK: test_inline_constraint_S:
+ call void asm sideeffect "adrp x0, $0", "S"(i32* @var)
+ call void asm sideeffect "adrp x0, ${0:A}", "S"(i32* @var)
+ call void asm sideeffect "add x0, x0, ${0:L}", "S"(i32* @var)
+; CHECK: adrp x0, var
+; CHECK: adrp x0, var
+; CHECK: add x0, x0, #:lo12:var
+ ret void
+}
+
+define i32 @test_inline_constraint_S_label(i1 %in) {
+; CHECK: test_inline_constraint_S_label:
+ call void asm sideeffect "adr x0, $0", "S"(i8* blockaddress(@test_inline_constraint_S_label, %loc))
+; CHECK: adr x0, .Ltmp{{[0-9]+}}
+ br i1 %in, label %loc, label %loc2
+loc:
+ ret i32 0
+loc2:
+ ret i32 42
+}
+
+define void @test_inline_constraint_Y() {
+; CHECK: test_inline_constraint_Y:
+ call void asm sideeffect "fcmp s0, $0", "Y"(float 0.0)
+; CHECK: fcmp s0, #0.0
+ ret void
+}
+
+define void @test_inline_constraint_Z() {
+; CHECK: test_inline_constraint_Z:
+ call void asm sideeffect "cmp w0, $0", "Z"(i32 0)
+; CHECK: cmp w0, #0
+ ret void
+}
\ No newline at end of file
diff --git a/test/CodeGen/AArch64/inline-asm-modifiers.ll b/test/CodeGen/AArch64/inline-asm-modifiers.ll
new file mode 100644
index 0000000000..5b485aac3f
--- /dev/null
+++ b/test/CodeGen/AArch64/inline-asm-modifiers.ll
@@ -0,0 +1,125 @@
+; RUN: llc -march=aarch64 -relocation-model=pic < %s | FileCheck %s
+; RUN: llc -march=aarch64 -relocation-model=pic -filetype=obj < %s | llvm-objdump -r - | FileCheck --check-prefix=CHECK-ELF %s
+
+@var_simple = hidden global i32 0
+@var_got = global i32 0
+@var_tlsgd = thread_local global i32 0
+@var_tlsld = thread_local(localdynamic) global i32 0
+@var_tlsie = thread_local(initialexec) global i32 0
+@var_tlsle = thread_local(localexec) global i32 0
+
+define void @test_inline_modifier_L() nounwind {
+; CHECK: test_inline_modifier_L:
+ call void asm sideeffect "add x0, x0, ${0:L}", "S,~{x0}"(i32* @var_simple)
+ call void asm sideeffect "ldr x0, [x0, ${0:L}]", "S,~{x0}"(i32* @var_got)
+ call void asm sideeffect "add x0, x0, ${0:L}", "S,~{x0}"(i32* @var_tlsgd)
+ call void asm sideeffect "add x0, x0, ${0:L}", "S,~{x0}"(i32* @var_tlsld)
+ call void asm sideeffect "ldr x0, [x0, ${0:L}]", "S,~{x0}"(i32* @var_tlsie)
+ call void asm sideeffect "add x0, x0, ${0:L}", "S,~{x0}"(i32* @var_tlsle)
+; CHECK: add x0, x0, #:lo12:var_simple
+; CHECK: ldr x0, [x0, #:got_lo12:var_got]
+; CHECK: add x0, x0, #:tlsdesc_lo12:var_tlsgd
+; CHECK: add x0, x0, #:dtprel_lo12:var_tlsld
+; CHECK: ldr x0, [x0, #:gottprel_lo12:var_tlsie]
+; CHECK: add x0, x0, #:tprel_lo12:var_tlsle
+
+; CHECK-ELF: R_AARCH64_ADD_ABS_LO12_NC var_simple
+; CHECK-ELF: R_AARCH64_LD64_GOT_LO12_NC var_got
+; CHECK-ELF: R_AARCH64_TLSDESC_ADD_LO12_NC var_tlsgd
+; CHECK-ELF: R_AARCH64_TLSLD_ADD_DTPREL_LO12 var_tlsld
+; CHECK-ELF: R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC var_tlsie
+; CHECK-ELF: R_AARCH64_TLSLE_ADD_TPREL_LO12 var_tlsle
+
+ ret void
+}
+
+define void @test_inline_modifier_G() nounwind {
+; CHECK: test_inline_modifier_G:
+ call void asm sideeffect "add x0, x0, ${0:G}, lsl #12", "S,~{x0}"(i32* @var_tlsld)
+ call void asm sideeffect "add x0, x0, ${0:G}, lsl #12", "S,~{x0}"(i32* @var_tlsle)
+; CHECK: add x0, x0, #:dtprel_hi12:var_tlsld, lsl #12
+; CHECK: add x0, x0, #:tprel_hi12:var_tlsle, lsl #12
+
+; CHECK-ELF: R_AARCH64_TLSLD_ADD_DTPREL_HI12 var_tlsld
+; CHECK-ELF: R_AARCH64_TLSLE_ADD_TPREL_HI12 var_tlsle
+
+ ret void
+}
+
+define void @test_inline_modifier_A() nounwind {
+; CHECK: test_inline_modifier_A:
+ call void asm sideeffect "adrp x0, ${0:A}", "S,~{x0}"(i32* @var_simple)
+ call void asm sideeffect "adrp x0, ${0:A}", "S,~{x0}"(i32* @var_got)
+ call void asm sideeffect "adrp x0, ${0:A}", "S,~{x0}"(i32* @var_tlsgd)
+ call void asm sideeffect "adrp x0, ${0:A}", "S,~{x0}"(i32* @var_tlsie)
+ ; N.b. All tprel and dtprel relocs are modified: lo12 or granules.
+; CHECK: adrp x0, var_simple
+; CHECK: adrp x0, :got:var_got
+; CHECK: adrp x0, :tlsdesc:var_tlsgd
+; CHECK: adrp x0, :gottprel:var_tlsie
+
+; CHECK-ELF: R_AARCH64_ADR_PREL_PG_HI21 var_simple
+; CHECK-ELF: R_AARCH64_ADR_GOT_PAGE var_got
+; CHECK-ELF: R_AARCH64_TLSDESC_ADR_PAGE var_tlsgd
+; CHECK-ELF: R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 var_tlsie
+
+ ret void
+}
+
+define void @test_inline_modifier_wx(i32 %small, i64 %big) nounwind {
+; CHECK: test_inline_modifier_wx:
+ call i32 asm sideeffect "add $0, $0, $0", "=r,0"(i32 %small)
+ call i32 asm sideeffect "add ${0:w}, ${0:w}, ${0:w}", "=r,0"(i32 %small)
+ call i32 asm sideeffect "add ${0:x}, ${0:x}, ${0:x}", "=r,0"(i32 %small)
+; CHECK: //APP
+; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+
+ call i64 asm sideeffect "add $0, $0, $0", "=r,0"(i64 %big)
+ call i64 asm sideeffect "add ${0:w}, ${0:w}, ${0:w}", "=r,0"(i64 %big)
+ call i64 asm sideeffect "add ${0:x}, ${0:x}, ${0:x}", "=r,0"(i64 %big)
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+
+ call i32 asm sideeffect "add ${0:w}, ${1:w}, ${1:w}", "=r,r"(i32 0)
+ call i32 asm sideeffect "add ${0:x}, ${1:x}, ${1:x}", "=r,r"(i32 0)
+; CHECK: add {{w[0-9]+}}, wzr, wzr
+; CHECK: add {{x[0-9]+}}, xzr, xzr
+ ret void
+}
+
+define void @test_inline_modifier_bhsdq() nounwind {
+; CHECK: test_inline_modifier_bhsdq:
+ call float asm sideeffect "ldr ${0:b}, [sp]", "=w"()
+ call float asm sideeffect "ldr ${0:h}, [sp]", "=w"()
+ call float asm sideeffect "ldr ${0:s}, [sp]", "=w"()
+ call float asm sideeffect "ldr ${0:d}, [sp]", "=w"()
+ call float asm sideeffect "ldr ${0:q}, [sp]", "=w"()
+; CHECK: ldr b0, [sp]
+; CHECK: ldr h0, [sp]
+; CHECK: ldr s0, [sp]
+; CHECK: ldr d0, [sp]
+; CHECK: ldr q0, [sp]
+
+ call double asm sideeffect "ldr ${0:b}, [sp]", "=w"()
+ call double asm sideeffect "ldr ${0:h}, [sp]", "=w"()
+ call double asm sideeffect "ldr ${0:s}, [sp]", "=w"()
+ call double asm sideeffect "ldr ${0:d}, [sp]", "=w"()
+ call double asm sideeffect "ldr ${0:q}, [sp]", "=w"()
+; CHECK: ldr b0, [sp]
+; CHECK: ldr h0, [sp]
+; CHECK: ldr s0, [sp]
+; CHECK: ldr d0, [sp]
+; CHECK: ldr q0, [sp]
+ ret void
+}
+
+define void @test_inline_modifier_c() nounwind {
+; CHECK: test_inline_modifier_c:
+ call void asm sideeffect "adr x0, ${0:c}", "i"(i32 3)
+; CHECK: adr x0, 3
+
+ ret void
+}
\ No newline at end of file
diff --git a/test/CodeGen/AArch64/jump-table.ll b/test/CodeGen/AArch64/jump-table.ll
new file mode 100644
index 0000000000..037813398b
--- /dev/null
+++ b/test/CodeGen/AArch64/jump-table.ll
@@ -0,0 +1,56 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 -filetype=obj | elf-dump | FileCheck %s -check-prefix=CHECK-ELF
+
+define i32 @test_jumptable(i32 %in) {
+; CHECK: test_jumptable
+
+ switch i32 %in, label %def [
+ i32 0, label %lbl1
+ i32 1, label %lbl2
+ i32 2, label %lbl3
+ i32 4, label %lbl4
+ ]
+; CHECK: adrp [[JTPAGE:x[0-9]+]], .LJTI0_0
+; CHECK: add x[[JT:[0-9]+]], [[JTPAGE]], #:lo12:.LJTI0_0
+; CHECK: ldr [[DEST:x[0-9]+]], [x[[JT]], {{x[0-9]+}}, lsl #3]
+; CHECK: br [[DEST]]
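+; (The table lives in .rodata as 8-byte .xword entries, so its address is
+; materialised with adrp/add, the entry is loaded with an "lsl #3" scaled
+; index, and br jumps to it -- exactly the sequence checked above.)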
+
+def:
+ ret i32 0
+
+lbl1:
+ ret i32 1
+
+lbl2:
+ ret i32 2
+
+lbl3:
+ ret i32 4
+
+lbl4:
+ ret i32 8
+
+}
+
+; CHECK: .rodata
+
+; CHECK: .LJTI0_0:
+; CHECK-NEXT: .xword
+; CHECK-NEXT: .xword
+; CHECK-NEXT: .xword
+; CHECK-NEXT: .xword
+; CHECK-NEXT: .xword
+
+; ELF tests:
+
+; First make sure we get a page/lo12 pair in .text to pick up the jump-table
+; CHECK-ELF: .rela.text
+; CHECK-ELF: ('r_sym', 0x00000008)
+; CHECK-ELF-NEXT: ('r_type', 0x00000113)
+; CHECK-ELF: ('r_sym', 0x00000008)
+; CHECK-ELF-NEXT: ('r_type', 0x00000115)
+
+; Also check the targets in .rodata are relocated
+; CHECK-ELF: .rela.rodata
+; CHECK-ELF: ('r_sym', 0x00000005)
+; CHECK-ELF-NEXT: ('r_type', 0x00000101)
\ No newline at end of file
diff --git a/test/CodeGen/AArch64/large-frame.ll b/test/CodeGen/AArch64/large-frame.ll
new file mode 100644
index 0000000000..e924461f87
--- /dev/null
+++ b/test/CodeGen/AArch64/large-frame.ll
@@ -0,0 +1,117 @@
+; RUN: llc -verify-machineinstrs -march=aarch64 < %s | FileCheck %s
+declare void @use_addr(i8*)
+
+@addr = global i8* null
+
+define void @test_bigframe() {
+; CHECK: test_bigframe:
+
+ %var1 = alloca i8, i32 20000000
+ %var2 = alloca i8, i32 16
+ %var3 = alloca i8, i32 20000000
+; CHECK: sub sp, sp, #496
+; CHECK: str x30, [sp, #488]
+; CHECK: ldr [[FRAMEOFFSET:x[0-9]+]], [[FRAMEOFFSET_CPI:.LCPI0_[0-9]+]]
+; CHECK: sub sp, sp, [[FRAMEOFFSET]]
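+; (A pair of sub-immediates can only reach 0xFFF plus 0xFFF shifted left by
+; 12, i.e. 16777215 bytes, so a ~40MB frame adjustment has to be loaded from
+; a constant pool; compare test_mediumframe below, which still fits in two
+; subs.)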
+
+; CHECK: ldr [[VAR1OFFSET:x[0-9]+]], [[VAR1LOC_CPI:.LCPI0_[0-9]+]]
+; CHECK: add {{x[0-9]+}}, sp, [[VAR1OFFSET]]
+ store volatile i8* %var1, i8** @addr
+
+ %var1plus2 = getelementptr i8* %var1, i32 2
+ store volatile i8* %var1plus2, i8** @addr
+
+; CHECK: ldr [[VAR2OFFSET:x[0-9]+]], [[VAR2LOC_CPI:.LCPI0_[0-9]+]]
+; CHECK: add {{x[0-9]+}}, sp, [[VAR2OFFSET]]
+ store volatile i8* %var2, i8** @addr
+
+ %var2plus2 = getelementptr i8* %var2, i32 2
+ store volatile i8* %var2plus2, i8** @addr
+
+ store volatile i8* %var3, i8** @addr
+
+ %var3plus2 = getelementptr i8* %var3, i32 2
+ store volatile i8* %var3plus2, i8** @addr
+
+; CHECK: ldr [[FRAMEOFFSET:x[0-9]+]], [[FRAMEOFFSET_CPI]]
+; CHECK: add sp, sp, [[FRAMEOFFSET]]
+ ret void
+
+; CHECK: [[FRAMEOFFSET_CPI]]:
+; CHECK-NEXT: 39999536
+
+; CHECK: [[VAR1LOC_CPI]]:
+; CHECK-NEXT: 20000024
+
+; CHECK: [[VAR2LOC_CPI]]:
+; CHECK-NEXT: 20000008
+}
+
+define void @test_mediumframe() {
+; CHECK: test_mediumframe:
+ %var1 = alloca i8, i32 1000000
+ %var2 = alloca i8, i32 16
+ %var3 = alloca i8, i32 1000000
+; CHECK: sub sp, sp, #496
+; CHECK: str x30, [sp, #488]
+; CHECK: sub sp, sp, #688
+; CHECK-NEXT: sub sp, sp, #488, lsl #12
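+; (488 << 12 is 1998848, so the two subs adjust sp by 1999536 bytes; together
+; with the initial #496 that covers the two 1000000-byte allocas, the 16-byte
+; one and alignment padding.)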
+
+ store volatile i8* %var1, i8** @addr
+; CHECK: add [[VAR1ADDR:x[0-9]+]], sp, #600
+; CHECK: add [[VAR1ADDR]], [[VAR1ADDR]], #244, lsl #12
+
+ %var1plus2 = getelementptr i8* %var1, i32 2
+ store volatile i8* %var1plus2, i8** @addr
+; CHECK: add [[VAR1PLUS2:x[0-9]+]], {{x[0-9]+}}, #2
+
+ store volatile i8* %var2, i8** @addr
+; CHECK: add [[VAR2ADDR:x[0-9]+]], sp, #584
+; CHECK: add [[VAR2ADDR]], [[VAR2ADDR]], #244, lsl #12
+
+ %var2plus2 = getelementptr i8* %var2, i32 2
+ store volatile i8* %var2plus2, i8** @addr
+; CHECK: add [[VAR2PLUS2:x[0-9]+]], {{x[0-9]+}}, #2
+
+ store volatile i8* %var3, i8** @addr
+
+ %var3plus2 = getelementptr i8* %var3, i32 2
+ store volatile i8* %var3plus2, i8** @addr
+
+; CHECK: add sp, sp, #688
+; CHECK: add sp, sp, #488, lsl #12
+; CHECK: ldr x30, [sp, #488]
+; CHECK: add sp, sp, #496
+ ret void
+}
+
+
+@bigspace = global [8 x i64] zeroinitializer
+
+; If temporary registers are allocated for adjustment, they should *not* clobber
+; argument registers.
+define void @test_tempallocation([8 x i64] %val) nounwind {
+; CHECK: test_tempallocation:
+ %var = alloca i8, i32 1000000
+; CHECK: sub sp, sp,
+
+; Make sure the prologue is reasonably efficient
+; CHECK-NEXT: stp x29, x30, [sp,
+; CHECK-NEXT: stp x25, x26, [sp,
+; CHECK-NEXT: stp x23, x24, [sp,
+; CHECK-NEXT: stp x21, x22, [sp,
+; CHECK-NEXT: stp x19, x20, [sp,
+
+; Make sure we don't trash an argument register
+; CHECK-NOT: ldr {{x[0-7]}}, .LCPI1
+; CHECK: sub sp, sp,
+
+; CHECK-NOT: ldr {{x[0-7]}}, .LCPI1
+
+; CHECK: bl use_addr
+ call void @use_addr(i8* %var)
+
+ store [8 x i64] %val, [8 x i64]* @bigspace
+ ret void
+; CHECK: ret
+}
diff --git a/test/CodeGen/AArch64/ldst-regoffset.ll b/test/CodeGen/AArch64/ldst-regoffset.ll
new file mode 100644
index 0000000000..13c682c655
--- /dev/null
+++ b/test/CodeGen/AArch64/ldst-regoffset.ll
@@ -0,0 +1,333 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
+@var_8bit = global i8 0
+@var_16bit = global i16 0
+@var_32bit = global i32 0
+@var_64bit = global i64 0
+
+@var_float = global float 0.0
+@var_double = global double 0.0
+
+define void @ldst_8bit(i8* %base, i32 %off32, i64 %off64) {
+; CHECK: ldst_8bit:
+
+ %addr8_sxtw = getelementptr i8* %base, i32 %off32
+ %val8_sxtw = load volatile i8* %addr8_sxtw
+ %val32_signed = sext i8 %val8_sxtw to i32
+ store volatile i32 %val32_signed, i32* @var_32bit
+; CHECK: ldrsb {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
+
+ %addr_lsl = getelementptr i8* %base, i64 %off64
+ %val8_lsl = load volatile i8* %addr_lsl
+ %val32_unsigned = zext i8 %val8_lsl to i32
+ store volatile i32 %val32_unsigned, i32* @var_32bit
+; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}]
+
+ %addrint_uxtw = ptrtoint i8* %base to i64
+ %offset_uxtw = zext i32 %off32 to i64
+ %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
+ %addr_uxtw = inttoptr i64 %addrint1_uxtw to i8*
+ %val8_uxtw = load volatile i8* %addr_uxtw
+ %newval8 = add i8 %val8_uxtw, 1
+ store volatile i8 %newval8, i8* @var_8bit
+; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
+
+ ret void
+}
+
+
+define void @ldst_16bit(i16* %base, i32 %off32, i64 %off64) {
+; CHECK: ldst_16bit:
+
+ %addr8_sxtwN = getelementptr i16* %base, i32 %off32
+ %val8_sxtwN = load volatile i16* %addr8_sxtwN
+ %val32_signed = sext i16 %val8_sxtwN to i32
+ store volatile i32 %val32_signed, i32* @var_32bit
+; CHECK: ldrsh {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #1]
+
+ %addr_lslN = getelementptr i16* %base, i64 %off64
+ %val8_lslN = load volatile i16* %addr_lslN
+ %val32_unsigned = zext i16 %val8_lslN to i32
+ store volatile i32 %val32_unsigned, i32* @var_32bit
+; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #1]
+
+ %addrint_uxtw = ptrtoint i16* %base to i64
+ %offset_uxtw = zext i32 %off32 to i64
+ %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
+ %addr_uxtw = inttoptr i64 %addrint1_uxtw to i16*
+ %val8_uxtw = load volatile i16* %addr_uxtw
+ %newval8 = add i16 %val8_uxtw, 1
+ store volatile i16 %newval8, i16* @var_16bit
+; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
+
+ %base_sxtw = ptrtoint i16* %base to i64
+ %offset_sxtw = sext i32 %off32 to i64
+ %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
+ %addr_sxtw = inttoptr i64 %addrint_sxtw to i16*
+ %val16_sxtw = load volatile i16* %addr_sxtw
+ %val64_signed = sext i16 %val16_sxtw to i64
+ store volatile i64 %val64_signed, i64* @var_64bit
+; CHECK: ldrsh {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
+
+
+ %base_lsl = ptrtoint i16* %base to i64
+ %addrint_lsl = add i64 %base_lsl, %off64
+ %addr_lsl = inttoptr i64 %addrint_lsl to i16*
+ %val16_lsl = load volatile i16* %addr_lsl
+ %val64_unsigned = zext i16 %val16_lsl to i64
+ store volatile i64 %val64_unsigned, i64* @var_64bit
+; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}]
+
+ %base_uxtwN = ptrtoint i16* %base to i64
+ %offset_uxtwN = zext i32 %off32 to i64
+ %offset2_uxtwN = shl i64 %offset_uxtwN, 1
+ %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
+ %addr_uxtwN = inttoptr i64 %addrint_uxtwN to i16*
+ %val32 = load volatile i32* @var_32bit
+ %val16_trunc32 = trunc i32 %val32 to i16
+ store volatile i16 %val16_trunc32, i16* %addr_uxtwN
+; CHECK: strh {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #1]
+ ret void
+}
+
+define void @ldst_32bit(i32* %base, i32 %off32, i64 %off64) {
+; CHECK: ldst_32bit:
+
+ %addr_sxtwN = getelementptr i32* %base, i32 %off32
+ %val_sxtwN = load volatile i32* %addr_sxtwN
+ store volatile i32 %val_sxtwN, i32* @var_32bit
+; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #2]
+
+ %addr_lslN = getelementptr i32* %base, i64 %off64
+ %val_lslN = load volatile i32* %addr_lslN
+ store volatile i32 %val_lslN, i32* @var_32bit
+; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #2]
+
+ %addrint_uxtw = ptrtoint i32* %base to i64
+ %offset_uxtw = zext i32 %off32 to i64
+ %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
+ %addr_uxtw = inttoptr i64 %addrint1_uxtw to i32*
+ %val_uxtw = load volatile i32* %addr_uxtw
+ %newval8 = add i32 %val_uxtw, 1
+ store volatile i32 %newval8, i32* @var_32bit
+; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
+
+
+ %base_sxtw = ptrtoint i32* %base to i64
+ %offset_sxtw = sext i32 %off32 to i64
+ %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
+ %addr_sxtw = inttoptr i64 %addrint_sxtw to i32*
+ %val16_sxtw = load volatile i32* %addr_sxtw
+ %val64_signed = sext i32 %val16_sxtw to i64
+ store volatile i64 %val64_signed, i64* @var_64bit
+; CHECK: ldrsw {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
+
+
+ %base_lsl = ptrtoint i32* %base to i64
+ %addrint_lsl = add i64 %base_lsl, %off64
+ %addr_lsl = inttoptr i64 %addrint_lsl to i32*
+ %val16_lsl = load volatile i32* %addr_lsl
+ %val64_unsigned = zext i32 %val16_lsl to i64
+ store volatile i64 %val64_unsigned, i64* @var_64bit
+; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}]
+
+ %base_uxtwN = ptrtoint i32* %base to i64
+ %offset_uxtwN = zext i32 %off32 to i64
+ %offset2_uxtwN = shl i64 %offset_uxtwN, 2
+ %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
+ %addr_uxtwN = inttoptr i64 %addrint_uxtwN to i32*
+ %val32 = load volatile i32* @var_32bit
+ store volatile i32 %val32, i32* %addr_uxtwN
+; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #2]
+ ret void
+}
+
+define void @ldst_64bit(i64* %base, i32 %off32, i64 %off64) {
+; CHECK: ldst_64bit:
+
+ %addr_sxtwN = getelementptr i64* %base, i32 %off32
+ %val_sxtwN = load volatile i64* %addr_sxtwN
+ store volatile i64 %val_sxtwN, i64* @var_64bit
+; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #3]
+
+ %addr_lslN = getelementptr i64* %base, i64 %off64
+ %val_lslN = load volatile i64* %addr_lslN
+ store volatile i64 %val_lslN, i64* @var_64bit
+; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #3]
+
+ %addrint_uxtw = ptrtoint i64* %base to i64
+ %offset_uxtw = zext i32 %off32 to i64
+ %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
+ %addr_uxtw = inttoptr i64 %addrint1_uxtw to i64*
+ %val8_uxtw = load volatile i64* %addr_uxtw
+ %newval8 = add i64 %val8_uxtw, 1
+ store volatile i64 %newval8, i64* @var_64bit
+; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
+
+ %base_sxtw = ptrtoint i64* %base to i64
+ %offset_sxtw = sext i32 %off32 to i64
+ %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
+ %addr_sxtw = inttoptr i64 %addrint_sxtw to i64*
+ %val64_sxtw = load volatile i64* %addr_sxtw
+ store volatile i64 %val64_sxtw, i64* @var_64bit
+; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
+
+ %base_lsl = ptrtoint i64* %base to i64
+ %addrint_lsl = add i64 %base_lsl, %off64
+ %addr_lsl = inttoptr i64 %addrint_lsl to i64*
+ %val64_lsl = load volatile i64* %addr_lsl
+ store volatile i64 %val64_lsl, i64* @var_64bit
+; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}]
+
+ %base_uxtwN = ptrtoint i64* %base to i64
+ %offset_uxtwN = zext i32 %off32 to i64
+ %offset2_uxtwN = shl i64 %offset_uxtwN, 3
+ %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
+ %addr_uxtwN = inttoptr i64 %addrint_uxtwN to i64*
+ %val64 = load volatile i64* @var_64bit
+ store volatile i64 %val64, i64* %addr_uxtwN
+; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #3]
+ ret void
+}
+
+define void @ldst_float(float* %base, i32 %off32, i64 %off64) {
+; CHECK: ldst_float:
+
+ %addr_sxtwN = getelementptr float* %base, i32 %off32
+ %val_sxtwN = load volatile float* %addr_sxtwN
+ store volatile float %val_sxtwN, float* @var_float
+; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #2]
+
+ %addr_lslN = getelementptr float* %base, i64 %off64
+ %val_lslN = load volatile float* %addr_lslN
+ store volatile float %val_lslN, float* @var_float
+; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #2]
+
+ %addrint_uxtw = ptrtoint float* %base to i64
+ %offset_uxtw = zext i32 %off32 to i64
+ %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
+ %addr_uxtw = inttoptr i64 %addrint1_uxtw to float*
+ %val_uxtw = load volatile float* %addr_uxtw
+ store volatile float %val_uxtw, float* @var_float
+; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
+
+ %base_sxtw = ptrtoint float* %base to i64
+ %offset_sxtw = sext i32 %off32 to i64
+ %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
+ %addr_sxtw = inttoptr i64 %addrint_sxtw to float*
+ %val64_sxtw = load volatile float* %addr_sxtw
+ store volatile float %val64_sxtw, float* @var_float
+; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
+
+ %base_lsl = ptrtoint float* %base to i64
+ %addrint_lsl = add i64 %base_lsl, %off64
+ %addr_lsl = inttoptr i64 %addrint_lsl to float*
+ %val64_lsl = load volatile float* %addr_lsl
+ store volatile float %val64_lsl, float* @var_float
+; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}]
+
+ %base_uxtwN = ptrtoint float* %base to i64
+ %offset_uxtwN = zext i32 %off32 to i64
+ %offset2_uxtwN = shl i64 %offset_uxtwN, 2
+ %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
+ %addr_uxtwN = inttoptr i64 %addrint_uxtwN to float*
+ %val64 = load volatile float* @var_float
+ store volatile float %val64, float* %addr_uxtwN
+; CHECK: str {{s[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #2]
+ ret void
+}
+
+define void @ldst_double(double* %base, i32 %off32, i64 %off64) {
+; CHECK: ldst_double:
+
+ %addr_sxtwN = getelementptr double* %base, i32 %off32
+ %val_sxtwN = load volatile double* %addr_sxtwN
+ store volatile double %val_sxtwN, double* @var_double
+; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #3]
+
+ %addr_lslN = getelementptr double* %base, i64 %off64
+ %val_lslN = load volatile double* %addr_lslN
+ store volatile double %val_lslN, double* @var_double
+; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #3]
+
+ %addrint_uxtw = ptrtoint double* %base to i64
+ %offset_uxtw = zext i32 %off32 to i64
+ %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
+ %addr_uxtw = inttoptr i64 %addrint1_uxtw to double*
+ %val_uxtw = load volatile double* %addr_uxtw
+ store volatile double %val_uxtw, double* @var_double
+; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
+
+ %base_sxtw = ptrtoint double* %base to i64
+ %offset_sxtw = sext i32 %off32 to i64
+ %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
+ %addr_sxtw = inttoptr i64 %addrint_sxtw to double*
+ %val64_sxtw = load volatile double* %addr_sxtw
+ store volatile double %val64_sxtw, double* @var_double
+; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
+
+ %base_lsl = ptrtoint double* %base to i64
+ %addrint_lsl = add i64 %base_lsl, %off64
+ %addr_lsl = inttoptr i64 %addrint_lsl to double*
+ %val64_lsl = load volatile double* %addr_lsl
+ store volatile double %val64_lsl, double* @var_double
+; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}]
+
+ %base_uxtwN = ptrtoint double* %base to i64
+ %offset_uxtwN = zext i32 %off32 to i64
+ %offset2_uxtwN = shl i64 %offset_uxtwN, 3
+ %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
+ %addr_uxtwN = inttoptr i64 %addrint_uxtwN to double*
+ %val64 = load volatile double* @var_double
+ store volatile double %val64, double* %addr_uxtwN
+; CHECK: str {{d[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #3]
+ ret void
+}
+
+
+define void @ldst_128bit(fp128* %base, i32 %off32, i64 %off64) {
+; CHECK: ldst_128bit:
+
+ %addr_sxtwN = getelementptr fp128* %base, i32 %off32
+ %val_sxtwN = load volatile fp128* %addr_sxtwN
+ store volatile fp128 %val_sxtwN, fp128* %base
+; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #4]
+
+ %addr_lslN = getelementptr fp128* %base, i64 %off64
+ %val_lslN = load volatile fp128* %addr_lslN
+ store volatile fp128 %val_lslN, fp128* %base
+; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #4]
+
+ %addrint_uxtw = ptrtoint fp128* %base to i64
+ %offset_uxtw = zext i32 %off32 to i64
+ %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
+ %addr_uxtw = inttoptr i64 %addrint1_uxtw to fp128*
+ %val_uxtw = load volatile fp128* %addr_uxtw
+ store volatile fp128 %val_uxtw, fp128* %base
+; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
+
+ %base_sxtw = ptrtoint fp128* %base to i64
+ %offset_sxtw = sext i32 %off32 to i64
+ %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
+ %addr_sxtw = inttoptr i64 %addrint_sxtw to fp128*
+ %val64_sxtw = load volatile fp128* %addr_sxtw
+ store volatile fp128 %val64_sxtw, fp128* %base
+; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
+
+ %base_lsl = ptrtoint fp128* %base to i64
+ %addrint_lsl = add i64 %base_lsl, %off64
+ %addr_lsl = inttoptr i64 %addrint_lsl to fp128*
+ %val64_lsl = load volatile fp128* %addr_lsl
+ store volatile fp128 %val64_lsl, fp128* %base
+; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}]
+
+ %base_uxtwN = ptrtoint fp128* %base to i64
+ %offset_uxtwN = zext i32 %off32 to i64
+ %offset2_uxtwN = shl i64 %offset_uxtwN, 4
+ %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
+ %addr_uxtwN = inttoptr i64 %addrint_uxtwN to fp128*
+ %val64 = load volatile fp128* %base
+ store volatile fp128 %val64, fp128* %addr_uxtwN
+; CHECK: str {{q[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #4]
+ ret void
+}
diff --git a/test/CodeGen/AArch64/ldst-unscaledimm.ll b/test/CodeGen/AArch64/ldst-unscaledimm.ll
new file mode 100644
index 0000000000..dcc50ae11f
--- /dev/null
+++ b/test/CodeGen/AArch64/ldst-unscaledimm.ll
@@ -0,0 +1,218 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
+@var_8bit = global i8 0
+@var_16bit = global i16 0
+@var_32bit = global i32 0
+@var_64bit = global i64 0
+
+@var_float = global float 0.0
+@var_double = global double 0.0
+
+@varptr = global i8* null
+
+define void @ldst_8bit() {
+; CHECK: ldst_8bit:
+
+; No architectural support for loads to 16-bit or 8-bit since we
+; promote i8 during lowering.
+ %addr_8bit = load i8** @varptr
+
+; match a sign-extending load 8-bit -> 32-bit
+ %addr_sext32 = getelementptr i8* %addr_8bit, i64 -256
+ %val8_sext32 = load volatile i8* %addr_sext32
+ %val32_signed = sext i8 %val8_sext32 to i32
+ store volatile i32 %val32_signed, i32* @var_32bit
+; CHECK: ldursb {{w[0-9]+}}, [{{x[0-9]+}}, #-256]
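+; (The scaled ldrsb/ldrb forms only accept non-negative multiples of the
+; access size, so negative or unaligned offsets within the signed 9-bit range
+; [-256, 255] should be matched to the unscaled ldur*/stur* forms instead.)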
+
+; match a zero-extending load volatile 8-bit -> 32-bit
+ %addr_zext32 = getelementptr i8* %addr_8bit, i64 -12
+ %val8_zext32 = load volatile i8* %addr_zext32
+ %val32_unsigned = zext i8 %val8_zext32 to i32
+ store volatile i32 %val32_unsigned, i32* @var_32bit
+; CHECK: ldurb {{w[0-9]+}}, [{{x[0-9]+}}, #-12]
+
+; match an any-extending load volatile 8-bit -> 32-bit
+ %addr_anyext = getelementptr i8* %addr_8bit, i64 -1
+ %val8_anyext = load volatile i8* %addr_anyext
+ %newval8 = add i8 %val8_anyext, 1
+ store volatile i8 %newval8, i8* @var_8bit
+; CHECK: ldurb {{w[0-9]+}}, [{{x[0-9]+}}, #-1]
+
+; match a sign-extending load volatile 8-bit -> 64-bit
+ %addr_sext64 = getelementptr i8* %addr_8bit, i64 -5
+ %val8_sext64 = load volatile i8* %addr_sext64
+ %val64_signed = sext i8 %val8_sext64 to i64
+ store volatile i64 %val64_signed, i64* @var_64bit
+; CHECK: ldursb {{x[0-9]+}}, [{{x[0-9]+}}, #-5]
+
+; match a zero-extending load volatile 8-bit -> 64-bit.
+; This uses the fact that ldrb w0, [x0] will zero out the high 32 bits
+; of x0, so it's identical to loading to 32 bits.
+ %addr_zext64 = getelementptr i8* %addr_8bit, i64 -9
+ %val8_zext64 = load volatile i8* %addr_zext64
+ %val64_unsigned = zext i8 %val8_zext64 to i64
+ store volatile i64 %val64_unsigned, i64* @var_64bit
+; CHECK: ldurb {{w[0-9]+}}, [{{x[0-9]+}}, #-9]
+
+; truncating store volatile 32-bits to 8-bits
+ %addr_trunc32 = getelementptr i8* %addr_8bit, i64 -256
+ %val32 = load volatile i32* @var_32bit
+ %val8_trunc32 = trunc i32 %val32 to i8
+ store volatile i8 %val8_trunc32, i8* %addr_trunc32
+; CHECK: sturb {{w[0-9]+}}, [{{x[0-9]+}}, #-256]
+
+; truncating store volatile 64-bits to 8-bits
+ %addr_trunc64 = getelementptr i8* %addr_8bit, i64 -1
+ %val64 = load volatile i64* @var_64bit
+ %val8_trunc64 = trunc i64 %val64 to i8
+ store volatile i8 %val8_trunc64, i8* %addr_trunc64
+; CHECK: sturb {{w[0-9]+}}, [{{x[0-9]+}}, #-1]
+
+ ret void
+}
+
+define void @ldst_16bit() {
+; CHECK: ldst_16bit:
+
+; No architectural support for loads to 16-bit since we
+; promote i16 during lowering.
+ %addr_8bit = load i8** @varptr
+
+; match a sign-extending load 16-bit -> 32-bit
+ %addr8_sext32 = getelementptr i8* %addr_8bit, i64 -256
+ %addr_sext32 = bitcast i8* %addr8_sext32 to i16*
+ %val16_sext32 = load volatile i16* %addr_sext32
+ %val32_signed = sext i16 %val16_sext32 to i32
+ store volatile i32 %val32_signed, i32* @var_32bit
+; CHECK: ldursh {{w[0-9]+}}, [{{x[0-9]+}}, #-256]
+
+; match a zero-extending load volatile 16-bit -> 32-bit. With offset that would be unaligned.
+ %addr8_zext32 = getelementptr i8* %addr_8bit, i64 15
+ %addr_zext32 = bitcast i8* %addr8_zext32 to i16*
+ %val16_zext32 = load volatile i16* %addr_zext32
+ %val32_unsigned = zext i16 %val16_zext32 to i32
+ store volatile i32 %val32_unsigned, i32* @var_32bit
+; CHECK: ldurh {{w[0-9]+}}, [{{x[0-9]+}}, #15]
+
+; match an any-extending load volatile 16-bit -> 32-bit
+ %addr8_anyext = getelementptr i8* %addr_8bit, i64 -1
+ %addr_anyext = bitcast i8* %addr8_anyext to i16*
+ %val16_anyext = load volatile i16* %addr_anyext
+ %newval16 = add i16 %val16_anyext, 1
+ store volatile i16 %newval16, i16* @var_16bit
+; CHECK: ldurh {{w[0-9]+}}, [{{x[0-9]+}}, #-1]
+
+; match a sign-extending load volatile 16-bit -> 64-bit
+ %addr8_sext64 = getelementptr i8* %addr_8bit, i64 -5
+ %addr_sext64 = bitcast i8* %addr8_sext64 to i16*
+ %val16_sext64 = load volatile i16* %addr_sext64
+ %val64_signed = sext i16 %val16_sext64 to i64
+ store volatile i64 %val64_signed, i64* @var_64bit
+; CHECK: ldursh {{x[0-9]+}}, [{{x[0-9]+}}, #-5]
+
+; match a zero-extending load volatile 16-bit -> 64-bit.
+; This uses the fact that ldrh w0, [x0] will zero out the high 32 bits
+; of x0, so it's identical to loading to 32 bits.
+ %addr8_zext64 = getelementptr i8* %addr_8bit, i64 9
+ %addr_zext64 = bitcast i8* %addr8_zext64 to i16*
+ %val16_zext64 = load volatile i16* %addr_zext64
+ %val64_unsigned = zext i16 %val16_zext64 to i64
+ store volatile i64 %val64_unsigned, i64* @var_64bit
+; CHECK: ldurh {{w[0-9]+}}, [{{x[0-9]+}}, #9]
+
+; truncating store volatile 32-bits to 16-bits
+ %addr8_trunc32 = getelementptr i8* %addr_8bit, i64 -256
+ %addr_trunc32 = bitcast i8* %addr8_trunc32 to i16*
+ %val32 = load volatile i32* @var_32bit
+ %val16_trunc32 = trunc i32 %val32 to i16
+ store volatile i16 %val16_trunc32, i16* %addr_trunc32
+; CHECK: sturh {{w[0-9]+}}, [{{x[0-9]+}}, #-256]
+
+; truncating store volatile 64-bits to 16-bits
+ %addr8_trunc64 = getelementptr i8* %addr_8bit, i64 -1
+ %addr_trunc64 = bitcast i8* %addr8_trunc64 to i16*
+ %val64 = load volatile i64* @var_64bit
+ %val16_trunc64 = trunc i64 %val64 to i16
+ store volatile i16 %val16_trunc64, i16* %addr_trunc64
+; CHECK: sturh {{w[0-9]+}}, [{{x[0-9]+}}, #-1]
+
+ ret void
+}
+
+define void @ldst_32bit() {
+; CHECK: ldst_32bit:
+
+ %addr_8bit = load i8** @varptr
+
+; Straight 32-bit load/store
+ %addr32_8_noext = getelementptr i8* %addr_8bit, i64 1
+ %addr32_noext = bitcast i8* %addr32_8_noext to i32*
+ %val32_noext = load volatile i32* %addr32_noext
+ store volatile i32 %val32_noext, i32* %addr32_noext
+; CHECK: ldur {{w[0-9]+}}, [{{x[0-9]+}}, #1]
+; CHECK: stur {{w[0-9]+}}, [{{x[0-9]+}}, #1]
+
+; Zero-extension to 64-bits
+ %addr32_8_zext = getelementptr i8* %addr_8bit, i64 -256
+ %addr32_zext = bitcast i8* %addr32_8_zext to i32*
+ %val32_zext = load volatile i32* %addr32_zext
+ %val64_unsigned = zext i32 %val32_zext to i64
+ store volatile i64 %val64_unsigned, i64* @var_64bit
+; CHECK: ldur {{w[0-9]+}}, [{{x[0-9]+}}, #-256]
+; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_64bit]
+
+; Sign-extension to 64-bits
+ %addr32_8_sext = getelementptr i8* %addr_8bit, i64 -12
+ %addr32_sext = bitcast i8* %addr32_8_sext to i32*
+ %val32_sext = load volatile i32* %addr32_sext
+ %val64_signed = sext i32 %val32_sext to i64
+ store volatile i64 %val64_signed, i64* @var_64bit
+; CHECK: ldursw {{x[0-9]+}}, [{{x[0-9]+}}, #-12]
+; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_64bit]
+
+; Truncation from 64-bits
+ %addr64_8_trunc = getelementptr i8* %addr_8bit, i64 255
+ %addr64_trunc = bitcast i8* %addr64_8_trunc to i64*
+ %addr32_8_trunc = getelementptr i8* %addr_8bit, i64 -20
+ %addr32_trunc = bitcast i8* %addr32_8_trunc to i32*
+
+ %val64_trunc = load volatile i64* %addr64_trunc
+ %val32_trunc = trunc i64 %val64_trunc to i32
+ store volatile i32 %val32_trunc, i32* %addr32_trunc
+; CHECK: ldur {{x[0-9]+}}, [{{x[0-9]+}}, #255]
+; CHECK: stur {{w[0-9]+}}, [{{x[0-9]+}}, #-20]
+
+ ret void
+}
+
+define void @ldst_float() {
+; CHECK: ldst_float:
+
+ %addr_8bit = load i8** @varptr
+ %addrfp_8 = getelementptr i8* %addr_8bit, i64 -5
+ %addrfp = bitcast i8* %addrfp_8 to float*
+
+ %valfp = load volatile float* %addrfp
+; CHECK: ldur {{s[0-9]+}}, [{{x[0-9]+}}, #-5]
+
+ store volatile float %valfp, float* %addrfp
+; CHECK: stur {{s[0-9]+}}, [{{x[0-9]+}}, #-5]
+
+ ret void
+}
+
+define void @ldst_double() {
+; CHECK: ldst_double:
+
+ %addr_8bit = load i8** @varptr
+ %addrfp_8 = getelementptr i8* %addr_8bit, i64 4
+ %addrfp = bitcast i8* %addrfp_8 to double*
+
+ %valfp = load volatile double* %addrfp
+; CHECK: ldur {{d[0-9]+}}, [{{x[0-9]+}}, #4]
+
+ store volatile double %valfp, double* %addrfp
+; CHECK: stur {{d[0-9]+}}, [{{x[0-9]+}}, #4]
+
+ ret void
+}
diff --git a/test/CodeGen/AArch64/ldst-unsignedimm.ll b/test/CodeGen/AArch64/ldst-unsignedimm.ll
new file mode 100644
index 0000000000..aa513f507f
--- /dev/null
+++ b/test/CodeGen/AArch64/ldst-unsignedimm.ll
@@ -0,0 +1,251 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
+@var_8bit = global i8 0
+@var_16bit = global i16 0
+@var_32bit = global i32 0
+@var_64bit = global i64 0
+
+@var_float = global float 0.0
+@var_double = global double 0.0
+
+define void @ldst_8bit() {
+; CHECK: ldst_8bit:
+
+; No architectural support for loads to 16-bit or 8-bit since we
+; promote i8 during lowering.
+
+; match a sign-extending load 8-bit -> 32-bit
+ %val8_sext32 = load volatile i8* @var_8bit
+ %val32_signed = sext i8 %val8_sext32 to i32
+ store volatile i32 %val32_signed, i32* @var_32bit
+; CHECK: adrp {{x[0-9]+}}, var_8bit
+; CHECK: ldrsb {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_8bit]
+
+; match a zero-extending load volatile 8-bit -> 32-bit
+ %val8_zext32 = load volatile i8* @var_8bit
+ %val32_unsigned = zext i8 %val8_zext32 to i32
+ store volatile i32 %val32_unsigned, i32* @var_32bit
+; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_8bit]
+
+; match an any-extending load volatile 8-bit -> 32-bit
+ %val8_anyext = load volatile i8* @var_8bit
+ %newval8 = add i8 %val8_anyext, 1
+ store volatile i8 %newval8, i8* @var_8bit
+; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_8bit]
+
+; match a sign-extending load volatile 8-bit -> 64-bit
+ %val8_sext64 = load volatile i8* @var_8bit
+ %val64_signed = sext i8 %val8_sext64 to i64
+ store volatile i64 %val64_signed, i64* @var_64bit
+; CHECK: ldrsb {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_8bit]
+
+; match a zero-extending load volatile 8-bit -> 64-bit.
+; This uses the fact that ldrb w0, [x0] will zero out the high 32-bits
+; of x0, so it's identical to loading to 32 bits.
+ %val8_zext64 = load volatile i8* @var_8bit
+ %val64_unsigned = zext i8 %val8_zext64 to i64
+ store volatile i64 %val64_unsigned, i64* @var_64bit
+; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_8bit]
+
+; truncating store volatile 32-bits to 8-bits
+ %val32 = load volatile i32* @var_32bit
+ %val8_trunc32 = trunc i32 %val32 to i8
+ store volatile i8 %val8_trunc32, i8* @var_8bit
+; CHECK: strb {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_8bit]
+
+; truncating store volatile 64-bits to 8-bits
+ %val64 = load volatile i64* @var_64bit
+ %val8_trunc64 = trunc i64 %val64 to i8
+ store volatile i8 %val8_trunc64, i8* @var_8bit
+; CHECK: strb {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_8bit]
+
+ ret void
+}
+
+define void @ldst_16bit() {
+; CHECK: ldst_16bit:
+
+; No architectural support for loads to 16-bit since we promote i16 during
+; lowering.
+
+; match a sign-extending load volatile 16-bit -> 32-bit
+ %val16_sext32 = load volatile i16* @var_16bit
+ %val32_signed = sext i16 %val16_sext32 to i32
+ store volatile i32 %val32_signed, i32* @var_32bit
+; CHECK: adrp {{x[0-9]+}}, var_16bit
+; CHECK: ldrsh {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_16bit]
+
+; match a zero-extending load volatile 16-bit -> 32-bit
+ %val16_zext32 = load volatile i16* @var_16bit
+ %val32_unsigned = zext i16 %val16_zext32 to i32
+ store volatile i32 %val32_unsigned, i32* @var_32bit
+; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_16bit]
+
+; match an any-extending load volatile 16-bit -> 32-bit
+ %val16_anyext = load volatile i16* @var_16bit
+ %newval16 = add i16 %val16_anyext, 1
+ store volatile i16 %newval16, i16* @var_16bit
+; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_16bit]
+
+; match a sign-extending load volatile 16-bit -> 64-bit
+ %val16_sext64 = load volatile i16* @var_16bit
+ %val64_signed = sext i16 %val16_sext64 to i64
+ store volatile i64 %val64_signed, i64* @var_64bit
+; CHECK: ldrsh {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_16bit]
+
+; match a zero-extending load volatile 16-bit -> 64-bit.
+; This uses the fact that ldrh w0, [x0] will zero out the high 32 bits
+; of x0, so it's identical to loading to 32 bits.
+ %val16_zext64 = load volatile i16* @var_16bit
+ %val64_unsigned = zext i16 %val16_zext64 to i64
+ store volatile i64 %val64_unsigned, i64* @var_64bit
+; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_16bit]
+
+; truncating store volatile 32-bits to 16-bits
+ %val32 = load volatile i32* @var_32bit
+ %val16_trunc32 = trunc i32 %val32 to i16
+ store volatile i16 %val16_trunc32, i16* @var_16bit
+; CHECK: strh {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_16bit]
+
+; truncating store volatile 64-bits to 16-bits
+ %val64 = load volatile i64* @var_64bit
+ %val16_trunc64 = trunc i64 %val64 to i16
+ store volatile i16 %val16_trunc64, i16* @var_16bit
+; CHECK: strh {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_16bit]
+
+ ret void
+}
+
+define void @ldst_32bit() {
+; CHECK: ldst_32bit:
+
+; Straight 32-bit load/store
+ %val32_noext = load volatile i32* @var_32bit
+ store volatile i32 %val32_noext, i32* @var_32bit
+; CHECK: adrp {{x[0-9]+}}, var_32bit
+; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_32bit]
+; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_32bit]
+
+; Zero-extension to 64-bits
+ %val32_zext = load volatile i32* @var_32bit
+ %val64_unsigned = zext i32 %val32_zext to i64
+ store volatile i64 %val64_unsigned, i64* @var_64bit
+; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_32bit]
+; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_64bit]
+
+; Sign-extension to 64-bits
+ %val32_sext = load volatile i32* @var_32bit
+ %val64_signed = sext i32 %val32_sext to i64
+ store volatile i64 %val64_signed, i64* @var_64bit
+; CHECK: ldrsw {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_32bit]
+; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_64bit]
+
+; Truncation from 64-bits
+ %val64_trunc = load volatile i64* @var_64bit
+ %val32_trunc = trunc i64 %val64_trunc to i32
+ store volatile i32 %val32_trunc, i32* @var_32bit
+; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_64bit]
+; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_32bit]
+
+ ret void
+}
+
+@arr8 = global i8* null
+@arr16 = global i16* null
+@arr32 = global i32* null
+@arr64 = global i64* null
+
+; Now check that our selection copes with accesses more complex than a
+; single symbol. Permitted offsets should be folded into the loads and
+; stores. Since all forms use the same Operand it's only necessary to
+; check the various access-sizes involved.
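+; The unsigned-immediate forms encode the offset scaled by the access size in
+; 12 bits, so the largest foldable offsets are 4095, 8190, 16380 and 32760
+; bytes for the 1-, 2-, 4- and 8-byte accesses respectively -- which is what
+; the element-4095 accesses below check.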
+
+define void @ldst_complex_offsets() {
+; CHECK: ldst_complex_offsets
+ %arr8_addr = load volatile i8** @arr8
+; CHECK: adrp {{x[0-9]+}}, arr8
+; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:arr8]
+
+ %arr8_sub1_addr = getelementptr i8* %arr8_addr, i64 1
+ %arr8_sub1 = load volatile i8* %arr8_sub1_addr
+ store volatile i8 %arr8_sub1, i8* @var_8bit
+; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, #1]
+
+ %arr8_sub4095_addr = getelementptr i8* %arr8_addr, i64 4095
+ %arr8_sub4095 = load volatile i8* %arr8_sub4095_addr
+ store volatile i8 %arr8_sub4095, i8* @var_8bit
+; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, #4095]
+
+
+ %arr16_addr = load volatile i16** @arr16
+; CHECK: adrp {{x[0-9]+}}, arr16
+; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:arr16]
+
+ %arr16_sub1_addr = getelementptr i16* %arr16_addr, i64 1
+ %arr16_sub1 = load volatile i16* %arr16_sub1_addr
+ store volatile i16 %arr16_sub1, i16* @var_16bit
+; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, #2]
+
+ %arr16_sub4095_addr = getelementptr i16* %arr16_addr, i64 4095
+ %arr16_sub4095 = load volatile i16* %arr16_sub4095_addr
+ store volatile i16 %arr16_sub4095, i16* @var_16bit
+; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, #8190]
+
+
+ %arr32_addr = load volatile i32** @arr32
+; CHECK: adrp {{x[0-9]+}}, arr32
+; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:arr32]
+
+ %arr32_sub1_addr = getelementptr i32* %arr32_addr, i64 1
+ %arr32_sub1 = load volatile i32* %arr32_sub1_addr
+ store volatile i32 %arr32_sub1, i32* @var_32bit
+; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, #4]
+
+ %arr32_sub4095_addr = getelementptr i32* %arr32_addr, i64 4095
+ %arr32_sub4095 = load volatile i32* %arr32_sub4095_addr
+ store volatile i32 %arr32_sub4095, i32* @var_32bit
+; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, #16380]
+
+
+ %arr64_addr = load volatile i64** @arr64
+; CHECK: adrp {{x[0-9]+}}, arr64
+; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:arr64]
+
+ %arr64_sub1_addr = getelementptr i64* %arr64_addr, i64 1
+ %arr64_sub1 = load volatile i64* %arr64_sub1_addr
+ store volatile i64 %arr64_sub1, i64* @var_64bit
+; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, #8]
+
+ %arr64_sub4095_addr = getelementptr i64* %arr64_addr, i64 4095
+ %arr64_sub4095 = load volatile i64* %arr64_sub4095_addr
+ store volatile i64 %arr64_sub4095, i64* @var_64bit
+; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, #32760]
+
+ ret void
+}
+
+define void @ldst_float() {
+; CHECK: ldst_float:
+
+ %valfp = load volatile float* @var_float
+; CHECK: adrp {{x[0-9]+}}, var_float
+; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_float]
+
+ store volatile float %valfp, float* @var_float
+; CHECK: str {{s[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_float]
+
+ ret void
+}
+
+define void @ldst_double() {
+; CHECK: ldst_double:
+
+ %valfp = load volatile double* @var_double
+; CHECK: adrp {{x[0-9]+}}, var_double
+; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_double]
+
+ store volatile double %valfp, double* @var_double
+; CHECK: str {{d[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_double]
+
+ ret void
+}
diff --git a/test/CodeGen/AArch64/lit.local.cfg b/test/CodeGen/AArch64/lit.local.cfg
new file mode 100644
index 0000000000..c5ce2411ed
--- /dev/null
+++ b/test/CodeGen/AArch64/lit.local.cfg
@@ -0,0 +1,6 @@
+config.suffixes = ['.ll', '.c', '.cpp']
+
+targets = set(config.root.targets_to_build.split())
+if not 'AArch64' in targets:
+ config.unsupported = True
+
diff --git a/test/CodeGen/AArch64/literal_pools.ll b/test/CodeGen/AArch64/literal_pools.ll
new file mode 100644
index 0000000000..370d65cdf6
--- /dev/null
+++ b/test/CodeGen/AArch64/literal_pools.ll
@@ -0,0 +1,49 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
+@var32 = global i32 0
+@var64 = global i64 0
+
+define void @foo() {
+; CHECK: foo:
+ %val32 = load i32* @var32
+ %val64 = load i64* @var64
+
+ %val32_lit32 = and i32 %val32, 123456785
+ store volatile i32 %val32_lit32, i32* @var32
+; CHECK: ldr {{w[0-9]+}}, .LCPI0
+
+ %val64_lit32 = and i64 %val64, 305402420
+ store volatile i64 %val64_lit32, i64* @var64
+; CHECK: ldr {{w[0-9]+}}, .LCPI0
+
+ %val64_lit32signed = and i64 %val64, -12345678
+ store volatile i64 %val64_lit32signed, i64* @var64
+; CHECK: ldrsw {{x[0-9]+}}, .LCPI0
+
+ %val64_lit64 = and i64 %val64, 1234567898765432
+ store volatile i64 %val64_lit64, i64* @var64
+; CHECK: ldr {{x[0-9]+}}, .LCPI0
+
+ ret void
+}
+
+@varfloat = global float 0.0
+@vardouble = global double 0.0
+
+define void @floating_lits() {
+; CHECK: floating_lits:
+
+ %floatval = load float* @varfloat
+ %newfloat = fadd float %floatval, 128.0
+; CHECK: ldr {{s[0-9]+}}, .LCPI1
+; CHECK: fadd
+ store float %newfloat, float* @varfloat
+
+ %doubleval = load double* @vardouble
+ %newdouble = fadd double %doubleval, 129.0
+; CHECK: ldr {{d[0-9]+}}, .LCPI1
+; CHECK: fadd
+ store double %newdouble, double* @vardouble
+
+ ret void
+}
diff --git a/test/CodeGen/AArch64/local_vars.ll b/test/CodeGen/AArch64/local_vars.ll
new file mode 100644
index 0000000000..c9826053b0
--- /dev/null
+++ b/test/CodeGen/AArch64/local_vars.ll
@@ -0,0 +1,57 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 -O0 | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 -O0 -disable-fp-elim | FileCheck -check-prefix CHECK-WITHFP %s
+
+; Make sure a reasonably sane prologue and epilogue are
+; generated. This test is not robust in the face of the frame-handling
+; code evolving, but it still has value for catching unrelated changes, I
+; believe.
+;
+; In particular, it will fail when ldp/stp are used for frame setup,
+; when FP-elim is implemented, and when addressing from FP is
+; implemented.
+
+@var = global i64 0
+@local_addr = global i64* null
+
+declare void @foo()
+
+define void @trivial_func() nounwind {
+; CHECK: trivial_func: // @trivial_func
+; CHECK-NEXT: // BB#0
+; CHECK-NEXT: ret
+
+ ret void
+}
+
+define void @trivial_fp_func() {
+; CHECK-WITHFP: trivial_fp_func:
+
+; CHECK-WITHFP: sub sp, sp, #16
+; CHECK-WITHFP: stp x29, x30, [sp]
+; CHECK-WITHFP-NEXT: mov x29, sp
+
+; Don't really care, but it would be a Bad Thing if this came after the epilogue.
+; CHECK: bl foo
+ call void @foo()
+ ret void
+
+; CHECK-WITHFP: ldp x29, x30, [sp]
+; CHECK-WITHFP: add sp, sp, #16
+
+; CHECK-WITHFP: ret
+}
+
+define void @stack_local() {
+ %local_var = alloca i64
+; CHECK: stack_local:
+; CHECK: sub sp, sp, #16
+
+ %val = load i64* @var
+ store i64 %val, i64* %local_var
+; CHECK: str {{x[0-9]+}}, [sp, #{{[0-9]+}}]
+
+ store i64* %local_var, i64** @local_addr
+; CHECK: add {{x[0-9]+}}, sp, #{{[0-9]+}}
+
+ ret void
+}
diff --git a/test/CodeGen/AArch64/logical-imm.ll b/test/CodeGen/AArch64/logical-imm.ll
new file mode 100644
index 0000000000..54c14dcd00
--- /dev/null
+++ b/test/CodeGen/AArch64/logical-imm.ll
@@ -0,0 +1,84 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
+@var32 = global i32 0
+@var64 = global i64 0
+
+define void @test_and(i32 %in32, i64 %in64) {
+; CHECK: test_and:
+
+ %val0 = and i32 %in32, 2863311530
+ store volatile i32 %val0, i32* @var32
+; CHECK: and {{w[0-9]+}}, {{w[0-9]+}}, #0xaaaaaaaa
+
+ %val1 = and i32 %in32, 4293984240
+ store volatile i32 %val1, i32* @var32
+; CHECK: and {{w[0-9]+}}, {{w[0-9]+}}, #0xfff0fff0
+
+ %val2 = and i64 %in64, 9331882296111890817
+ store volatile i64 %val2, i64* @var64
+; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, #0x8181818181818181
+
+ %val3 = and i64 %in64, 18429855317404942275
+ store volatile i64 %val3, i64* @var64
+; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, #0xffc3ffc3ffc3ffc3
+
+ ret void
+}
+
+define void @test_orr(i32 %in32, i64 %in64) {
+; CHECK: test_orr:
+
+ %val0 = or i32 %in32, 2863311530
+ store volatile i32 %val0, i32* @var32
+; CHECK: orr {{w[0-9]+}}, {{w[0-9]+}}, #0xaaaaaaaa
+
+ %val1 = or i32 %in32, 4293984240
+ store volatile i32 %val1, i32* @var32
+; CHECK: orr {{w[0-9]+}}, {{w[0-9]+}}, #0xfff0fff0
+
+ %val2 = or i64 %in64, 9331882296111890817
+ store volatile i64 %val2, i64* @var64
+; CHECK: orr {{x[0-9]+}}, {{x[0-9]+}}, #0x8181818181818181
+
+ %val3 = or i64 %in64, 18429855317404942275
+ store volatile i64 %val3, i64* @var64
+; CHECK: orr {{x[0-9]+}}, {{x[0-9]+}}, #0xffc3ffc3ffc3ffc3
+
+ ret void
+}
+
+define void @test_eor(i32 %in32, i64 %in64) {
+; CHECK: test_eor:
+
+ %val0 = xor i32 %in32, 2863311530
+ store volatile i32 %val0, i32* @var32
+; CHECK: eor {{w[0-9]+}}, {{w[0-9]+}}, #0xaaaaaaaa
+
+ %val1 = xor i32 %in32, 4293984240
+ store volatile i32 %val1, i32* @var32
+; CHECK: eor {{w[0-9]+}}, {{w[0-9]+}}, #0xfff0fff0
+
+ %val2 = xor i64 %in64, 9331882296111890817
+ store volatile i64 %val2, i64* @var64
+; CHECK: eor {{x[0-9]+}}, {{x[0-9]+}}, #0x8181818181818181
+
+ %val3 = xor i64 %in64, 18429855317404942275
+ store volatile i64 %val3, i64* @var64
+; CHECK: eor {{x[0-9]+}}, {{x[0-9]+}}, #0xffc3ffc3ffc3ffc3
+
+ ret void
+}
+
+define void @test_mov(i32 %in32, i64 %in64) {
+; CHECK: test_mov:
+ %val0 = add i32 %in32, 2863311530
+ store i32 %val0, i32* @var32
+; CHECK: orr {{w[0-9]+}}, wzr, #0xaaaaaaaa
+
+ %val1 = add i64 %in64, 11068046444225730969
+ store i64 %val1, i64* @var64
+; CHECK: orr {{x[0-9]+}}, xzr, #0x9999999999999999
+
+ ret void
+; CHECK: ret
+}
diff --git a/test/CodeGen/AArch64/logical_shifted_reg.ll b/test/CodeGen/AArch64/logical_shifted_reg.ll
new file mode 100644
index 0000000000..739381d344
--- /dev/null
+++ b/test/CodeGen/AArch64/logical_shifted_reg.ll
@@ -0,0 +1,224 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 -O0 | FileCheck %s
+
+@var1_32 = global i32 0
+@var2_32 = global i32 0
+
+@var1_64 = global i64 0
+@var2_64 = global i64 0
+
+define void @logical_32bit() {
+; CHECK: logical_32bit:
+ %val1 = load i32* @var1_32
+ %val2 = load i32* @var2_32
+
+ ; First check basic and/bic/or/orn/eor/eon patterns with no shift
+ %neg_val2 = xor i32 -1, %val2
+
+ %and_noshift = and i32 %val1, %val2
+; CHECK: and {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ store volatile i32 %and_noshift, i32* @var1_32
+ %bic_noshift = and i32 %neg_val2, %val1
+; CHECK: bic {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ store volatile i32 %bic_noshift, i32* @var1_32
+
+ %or_noshift = or i32 %val1, %val2
+; CHECK: orr {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ store volatile i32 %or_noshift, i32* @var1_32
+ %orn_noshift = or i32 %neg_val2, %val1
+; CHECK: orn {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ store volatile i32 %orn_noshift, i32* @var1_32
+
+ %xor_noshift = xor i32 %val1, %val2
+; CHECK: eor {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ store volatile i32 %xor_noshift, i32* @var1_32
+ %xorn_noshift = xor i32 %neg_val2, %val1
+; CHECK: eon {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ store volatile i32 %xorn_noshift, i32* @var1_32
+
+ ; Check the maximum shift on each
+ %operand_lsl31 = shl i32 %val2, 31
+ %neg_operand_lsl31 = xor i32 -1, %operand_lsl31
+
+ %and_lsl31 = and i32 %val1, %operand_lsl31
+; CHECK: and {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
+ store volatile i32 %and_lsl31, i32* @var1_32
+ %bic_lsl31 = and i32 %val1, %neg_operand_lsl31
+; CHECK: bic {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
+ store volatile i32 %bic_lsl31, i32* @var1_32
+
+ %or_lsl31 = or i32 %val1, %operand_lsl31
+; CHECK: orr {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
+ store volatile i32 %or_lsl31, i32* @var1_32
+ %orn_lsl31 = or i32 %val1, %neg_operand_lsl31
+; CHECK: orn {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
+ store volatile i32 %orn_lsl31, i32* @var1_32
+
+ %xor_lsl31 = xor i32 %val1, %operand_lsl31
+; CHECK: eor {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
+ store volatile i32 %xor_lsl31, i32* @var1_32
+ %xorn_lsl31 = xor i32 %val1, %neg_operand_lsl31
+; CHECK: eon {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
+ store volatile i32 %xorn_lsl31, i32* @var1_32
+
+ ; Check other shifts on a subset
+ %operand_asr10 = ashr i32 %val2, 10
+ %neg_operand_asr10 = xor i32 -1, %operand_asr10
+
+ %bic_asr10 = and i32 %val1, %neg_operand_asr10
+; CHECK: bic {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #10
+ store volatile i32 %bic_asr10, i32* @var1_32
+ %xor_asr10 = xor i32 %val1, %operand_asr10
+; CHECK: eor {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #10
+ store volatile i32 %xor_asr10, i32* @var1_32
+
+ %operand_lsr1 = lshr i32 %val2, 1
+ %neg_operand_lsr1 = xor i32 -1, %operand_lsr1
+
+ %orn_lsr1 = or i32 %val1, %neg_operand_lsr1
+; CHECK: orn {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #1
+ store volatile i32 %orn_lsr1, i32* @var1_32
+ %xor_lsr1 = xor i32 %val1, %operand_lsr1
+; CHECK: eor {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #1
+ store volatile i32 %xor_lsr1, i32* @var1_32
+
+ %operand_ror20_big = shl i32 %val2, 12
+ %operand_ror20_small = lshr i32 %val2, 20
+ %operand_ror20 = or i32 %operand_ror20_big, %operand_ror20_small
+ %neg_operand_ror20 = xor i32 -1, %operand_ror20
+
+ %xorn_ror20 = xor i32 %val1, %neg_operand_ror20
+; CHECK: eon {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, ror #20
+ store volatile i32 %xorn_ror20, i32* @var1_32
+ %and_ror20 = and i32 %val1, %operand_ror20
+; CHECK: and {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, ror #20
+ store volatile i32 %and_ror20, i32* @var1_32
+
+ ret void
+}
+
+define void @logical_64bit() {
+; CHECK: logical_64bit:
+ %val1 = load i64* @var1_64
+ %val2 = load i64* @var2_64
+
+ ; First check basic and/bic/or/orn/eor/eon patterns with no shift
+ %neg_val2 = xor i64 -1, %val2
+
+ %and_noshift = and i64 %val1, %val2
+; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+ store volatile i64 %and_noshift, i64* @var1_64
+ %bic_noshift = and i64 %neg_val2, %val1
+; CHECK: bic {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+ store volatile i64 %bic_noshift, i64* @var1_64
+
+ %or_noshift = or i64 %val1, %val2
+; CHECK: orr {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+ store volatile i64 %or_noshift, i64* @var1_64
+ %orn_noshift = or i64 %neg_val2, %val1
+; CHECK: orn {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+ store volatile i64 %orn_noshift, i64* @var1_64
+
+ %xor_noshift = xor i64 %val1, %val2
+; CHECK: eor {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+ store volatile i64 %xor_noshift, i64* @var1_64
+ %xorn_noshift = xor i64 %neg_val2, %val1
+; CHECK: eon {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+ store volatile i64 %xorn_noshift, i64* @var1_64
+
+ ; Check the maximum shift on each
+ %operand_lsl63 = shl i64 %val2, 63
+ %neg_operand_lsl63 = xor i64 -1, %operand_lsl63
+
+ %and_lsl63 = and i64 %val1, %operand_lsl63
+; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #63
+ store volatile i64 %and_lsl63, i64* @var1_64
+ %bic_lsl63 = and i64 %val1, %neg_operand_lsl63
+; CHECK: bic {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #63
+ store volatile i64 %bic_lsl63, i64* @var1_64
+
+ %or_lsl63 = or i64 %val1, %operand_lsl63
+; CHECK: orr {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #63
+ store volatile i64 %or_lsl63, i64* @var1_64
+ %orn_lsl63 = or i64 %val1, %neg_operand_lsl63
+; CHECK: orn {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #63
+ store volatile i64 %orn_lsl63, i64* @var1_64
+
+ %xor_lsl63 = xor i64 %val1, %operand_lsl63
+; CHECK: eor {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #63
+ store volatile i64 %xor_lsl63, i64* @var1_64
+ %xorn_lsl63 = xor i64 %val1, %neg_operand_lsl63
+; CHECK: eon {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #63
+ store volatile i64 %xorn_lsl63, i64* @var1_64
+
+ ; Check other shifts on a subset
+ %operand_asr10 = ashr i64 %val2, 10
+ %neg_operand_asr10 = xor i64 -1, %operand_asr10
+
+ %bic_asr10 = and i64 %val1, %neg_operand_asr10
+; CHECK: bic {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #10
+ store volatile i64 %bic_asr10, i64* @var1_64
+ %xor_asr10 = xor i64 %val1, %operand_asr10
+; CHECK: eor {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #10
+ store volatile i64 %xor_asr10, i64* @var1_64
+
+ %operand_lsr1 = lshr i64 %val2, 1
+ %neg_operand_lsr1 = xor i64 -1, %operand_lsr1
+
+ %orn_lsr1 = or i64 %val1, %neg_operand_lsr1
+; CHECK: orn {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #1
+ store volatile i64 %orn_lsr1, i64* @var1_64
+ %xor_lsr1 = xor i64 %val1, %operand_lsr1
+; CHECK: eor {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #1
+ store volatile i64 %xor_lsr1, i64* @var1_64
+
+ ; Construct a rotate-right from a bunch of other logical
+  ; operations. DAGCombiner should ensure we produce the ROTR during
+  ; selection.
+ %operand_ror20_big = shl i64 %val2, 44
+ %operand_ror20_small = lshr i64 %val2, 20
+ %operand_ror20 = or i64 %operand_ror20_big, %operand_ror20_small
+ %neg_operand_ror20 = xor i64 -1, %operand_ror20
+
+ %xorn_ror20 = xor i64 %val1, %neg_operand_ror20
+; CHECK: eon {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, ror #20
+ store volatile i64 %xorn_ror20, i64* @var1_64
+ %and_ror20 = and i64 %val1, %operand_ror20
+; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, ror #20
+ store volatile i64 %and_ror20, i64* @var1_64
+
+ ret void
+}
+
+define void @flag_setting() {
+; CHECK: flag_setting:
+ %val1 = load i64* @var1_64
+ %val2 = load i64* @var2_64
+
+; CHECK: tst {{x[0-9]+}}, {{x[0-9]+}}
+; CHECK: b.gt .L
+ %simple_and = and i64 %val1, %val2
+ %tst1 = icmp sgt i64 %simple_and, 0
+ br i1 %tst1, label %ret, label %test2
+
+test2:
+; CHECK: tst {{x[0-9]+}}, {{x[0-9]+}}, lsl #63
+; CHECK: b.lt .L
+ %shifted_op = shl i64 %val2, 63
+ %shifted_and = and i64 %val1, %shifted_op
+ %tst2 = icmp slt i64 %shifted_and, 0
+ br i1 %tst2, label %ret, label %test3
+
+test3:
+; CHECK: tst {{x[0-9]+}}, {{x[0-9]+}}, asr #12
+; CHECK: b.gt .L
+ %asr_op = ashr i64 %val2, 12
+ %asr_and = and i64 %asr_op, %val1
+ %tst3 = icmp sgt i64 %asr_and, 0
+ br i1 %tst3, label %ret, label %other_exit
+
+other_exit:
+ store volatile i64 %val1, i64* @var1_64
+ ret void
+ret:
+ ret void
+}
diff --git a/test/CodeGen/AArch64/logical_shifted_reg.s b/test/CodeGen/AArch64/logical_shifted_reg.s
new file mode 100644
index 0000000000..89aea58011
--- /dev/null
+++ b/test/CodeGen/AArch64/logical_shifted_reg.s
@@ -0,0 +1,208 @@
+ .file "/home/timnor01/a64-trunk/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll"
+ .text
+ .globl logical_32bit
+ .type logical_32bit,@function
+logical_32bit: // @logical_32bit
+ .cfi_startproc
+// BB#0:
+ adrp x0, var1_32
+ ldr w1, [x0, #:lo12:var1_32]
+ adrp x0, var2_32
+ ldr w2, [x0, #:lo12:var2_32]
+ and w3, w1, w2
+ adrp x0, var1_32
+ str w3, [x0, #:lo12:var1_32]
+ bic w3, w1, w2
+ adrp x0, var1_32
+ str w3, [x0, #:lo12:var1_32]
+ orr w3, w1, w2
+ adrp x0, var1_32
+ str w3, [x0, #:lo12:var1_32]
+ orn w3, w1, w2
+ adrp x0, var1_32
+ str w3, [x0, #:lo12:var1_32]
+ eor w3, w1, w2
+ adrp x0, var1_32
+ str w3, [x0, #:lo12:var1_32]
+ eon w3, w2, w1
+ adrp x0, var1_32
+ str w3, [x0, #:lo12:var1_32]
+ and w3, w1, w2, lsl #31
+ adrp x0, var1_32
+ str w3, [x0, #:lo12:var1_32]
+ bic w3, w1, w2, lsl #31
+ adrp x0, var1_32
+ str w3, [x0, #:lo12:var1_32]
+ orr w3, w1, w2, lsl #31
+ adrp x0, var1_32
+ str w3, [x0, #:lo12:var1_32]
+ orn w3, w1, w2, lsl #31
+ adrp x0, var1_32
+ str w3, [x0, #:lo12:var1_32]
+ eor w3, w1, w2, lsl #31
+ adrp x0, var1_32
+ str w3, [x0, #:lo12:var1_32]
+ eon w3, w1, w2, lsl #31
+ adrp x0, var1_32
+ str w3, [x0, #:lo12:var1_32]
+ bic w3, w1, w2, asr #10
+ adrp x0, var1_32
+ str w3, [x0, #:lo12:var1_32]
+ eor w3, w1, w2, asr #10
+ adrp x0, var1_32
+ str w3, [x0, #:lo12:var1_32]
+ orn w3, w1, w2, lsr #1
+ adrp x0, var1_32
+ str w3, [x0, #:lo12:var1_32]
+ eor w3, w1, w2, lsr #1
+ adrp x0, var1_32
+ str w3, [x0, #:lo12:var1_32]
+ eon w3, w1, w2, ror #20
+ adrp x0, var1_32
+ str w3, [x0, #:lo12:var1_32]
+ and w1, w1, w2, ror #20
+ adrp x0, var1_32
+ str w1, [x0, #:lo12:var1_32]
+ ret
+.Ltmp0:
+ .size logical_32bit, .Ltmp0-logical_32bit
+ .cfi_endproc
+
+ .globl logical_64bit
+ .type logical_64bit,@function
+logical_64bit: // @logical_64bit
+ .cfi_startproc
+// BB#0:
+ adrp x0, var1_64
+ ldr x0, [x0, #:lo12:var1_64]
+ adrp x1, var2_64
+ ldr x1, [x1, #:lo12:var2_64]
+ and x2, x0, x1
+ adrp x3, var1_64
+ str x2, [x3, #:lo12:var1_64]
+ bic x2, x0, x1
+ adrp x3, var1_64
+ str x2, [x3, #:lo12:var1_64]
+ orr x2, x0, x1
+ adrp x3, var1_64
+ str x2, [x3, #:lo12:var1_64]
+ orn x2, x0, x1
+ adrp x3, var1_64
+ str x2, [x3, #:lo12:var1_64]
+ eor x2, x0, x1
+ adrp x3, var1_64
+ str x2, [x3, #:lo12:var1_64]
+ eon x2, x1, x0
+ adrp x3, var1_64
+ str x2, [x3, #:lo12:var1_64]
+ and x2, x0, x1, lsl #63
+ adrp x3, var1_64
+ str x2, [x3, #:lo12:var1_64]
+ bic x2, x0, x1, lsl #63
+ adrp x3, var1_64
+ str x2, [x3, #:lo12:var1_64]
+ orr x2, x0, x1, lsl #63
+ adrp x3, var1_64
+ str x2, [x3, #:lo12:var1_64]
+ orn x2, x0, x1, lsl #63
+ adrp x3, var1_64
+ str x2, [x3, #:lo12:var1_64]
+ eor x2, x0, x1, lsl #63
+ adrp x3, var1_64
+ str x2, [x3, #:lo12:var1_64]
+ eon x2, x0, x1, lsl #63
+ adrp x3, var1_64
+ str x2, [x3, #:lo12:var1_64]
+ bic x2, x0, x1, asr #10
+ adrp x3, var1_64
+ str x2, [x3, #:lo12:var1_64]
+ eor x2, x0, x1, asr #10
+ adrp x3, var1_64
+ str x2, [x3, #:lo12:var1_64]
+ orn x2, x0, x1, lsr #1
+ adrp x3, var1_64
+ str x2, [x3, #:lo12:var1_64]
+ eor x2, x0, x1, lsr #1
+ adrp x3, var1_64
+ str x2, [x3, #:lo12:var1_64]
+ eon x2, x0, x1, ror #20
+ adrp x3, var1_64
+ str x2, [x3, #:lo12:var1_64]
+ and x0, x0, x1, ror #20
+ adrp x1, var1_64
+ str x0, [x1, #:lo12:var1_64]
+ ret
+.Ltmp1:
+ .size logical_64bit, .Ltmp1-logical_64bit
+ .cfi_endproc
+
+ .globl flag_setting
+ .type flag_setting,@function
+flag_setting: // @flag_setting
+ .cfi_startproc
+// BB#0:
+ sub sp, sp, #16
+ adrp x0, var1_64
+ ldr x0, [x0, #:lo12:var1_64]
+ adrp x1, var2_64
+ ldr x1, [x1, #:lo12:var2_64]
+ tst x0, x1
+ str x0, [sp, #8] // 8-byte Folded Spill
+ str x1, [sp] // 8-byte Folded Spill
+ b.gt .LBB2_4
+ b .LBB2_1
+.LBB2_1: // %test2
+ ldr x0, [sp, #8] // 8-byte Folded Reload
+ ldr x1, [sp] // 8-byte Folded Reload
+ tst x0, x1, lsl #63
+ b.lt .LBB2_4
+ b .LBB2_2
+.LBB2_2: // %test3
+ ldr x0, [sp, #8] // 8-byte Folded Reload
+ ldr x1, [sp] // 8-byte Folded Reload
+ tst x0, x1, asr #12
+ b.gt .LBB2_4
+ b .LBB2_3
+.LBB2_3: // %other_exit
+ adrp x0, var1_64
+ ldr x1, [sp, #8] // 8-byte Folded Reload
+ str x1, [x0, #:lo12:var1_64]
+ add sp, sp, #16
+ ret
+.LBB2_4: // %ret
+ add sp, sp, #16
+ ret
+.Ltmp2:
+ .size flag_setting, .Ltmp2-flag_setting
+ .cfi_endproc
+
+ .type var1_32,@object // @var1_32
+ .bss
+ .globl var1_32
+ .align 2
+var1_32:
+ .word 0 // 0x0
+ .size var1_32, 4
+
+ .type var2_32,@object // @var2_32
+ .globl var2_32
+ .align 2
+var2_32:
+ .word 0 // 0x0
+ .size var2_32, 4
+
+ .type var1_64,@object // @var1_64
+ .globl var1_64
+ .align 3
+var1_64:
+ .xword 0 // 0x0
+ .size var1_64, 8
+
+ .type var2_64,@object // @var2_64
+ .globl var2_64
+ .align 3
+var2_64:
+ .xword 0 // 0x0
+ .size var2_64, 8
+
+
diff --git a/test/CodeGen/AArch64/movw-consts.ll b/test/CodeGen/AArch64/movw-consts.ll
new file mode 100644
index 0000000000..421043645f
--- /dev/null
+++ b/test/CodeGen/AArch64/movw-consts.ll
@@ -0,0 +1,124 @@
+; RUN: llc -verify-machineinstrs -O0 < %s -march=aarch64 | FileCheck %s
+
+define i64 @test0() {
+; CHECK: test0:
+; Not produced by move wide instructions, but good to make sure we can return 0 anyway:
+; CHECK: mov x0, xzr
+ ret i64 0
+}
+
+define i64 @test1() {
+; CHECK: test1:
+; CHECK: movz x0, #1
+ ret i64 1
+}
+
+define i64 @test2() {
+; CHECK: test2:
+; CHECK: movz x0, #65535
+ ret i64 65535
+}
+
+define i64 @test3() {
+; CHECK: test3:
+; CHECK: movz x0, #1, lsl #16
+ ret i64 65536
+}
+
+define i64 @test4() {
+; CHECK: test4:
+; CHECK: movz x0, #65535, lsl #16
+ ret i64 4294901760
+}
+
+define i64 @test5() {
+; CHECK: test5:
+; CHECK: movz x0, #1, lsl #32
+ ret i64 4294967296
+}
+
+define i64 @test6() {
+; CHECK: test6:
+; CHECK: movz x0, #65535, lsl #32
+ ret i64 281470681743360
+}
+
+define i64 @test7() {
+; CHECK: test7:
+; CHECK: movz x0, #1, lsl #48
+ ret i64 281474976710656
+}
+
+; A 32-bit MOVN can generate some 64-bit patterns that a 64-bit one
+; can't, so it's useful even when the result is an i64.
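+;
+; For example (informal working, not part of the checks): 60875 is 0xedcb, so a
+; 32-bit "movn w0, #60875" writes NOT(0xedcb) = 0xffff1234 into w0 and zeroes
+; the upper half of x0, giving 0x00000000ffff1234 = 4294906420. A 64-bit MOVN
+; would instead set the top 32 bits to all-ones.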
+define i64 @test8() {
+; CHECK: test8:
+; CHECK: movn w0, #60875
+ ret i64 4294906420
+}
+
+define i64 @test9() {
+; CHECK: test9:
+; CHECK: movn x0, #0
+ ret i64 -1
+}
+
+define i64 @test10() {
+; CHECK: test10:
+; CHECK: movn x0, #60875, lsl #16
+ ret i64 18446744069720047615
+}
+
+; For reasonably legitimate reasons returning an i32 results in the
+; selection of an i64 constant, so we need a different idiom to test that selection.
+@var32 = global i32 0
+
+define void @test11() {
+; CHECK: test11:
+; CHECK movz {{w[0-9]+}}, #0
+ store i32 0, i32* @var32
+ ret void
+}
+
+define void @test12() {
+; CHECK: test12:
+; CHECK: movz {{w[0-9]+}}, #1
+ store i32 1, i32* @var32
+ ret void
+}
+
+define void @test13() {
+; CHECK: test13:
+; CHECK: movz {{w[0-9]+}}, #65535
+ store i32 65535, i32* @var32
+ ret void
+}
+
+define void @test14() {
+; CHECK: test14:
+; CHECK: movz {{w[0-9]+}}, #1, lsl #16
+ store i32 65536, i32* @var32
+ ret void
+}
+
+define void @test15() {
+; CHECK: test15:
+; CHECK: movz {{w[0-9]+}}, #65535, lsl #16
+ store i32 4294901760, i32* @var32
+ ret void
+}
+
+define void @test16() {
+; CHECK: test16:
+; CHECK: movn {{w[0-9]+}}, #0
+ store i32 -1, i32* @var32
+ ret void
+}
+
+define i64 @test17() {
+; CHECK: test17:
+
+ ; Mustn't MOVN w0 here.
+; CHECK: movn x0, #2
+ ret i64 -3
+}
diff --git a/test/CodeGen/AArch64/pic-eh-stubs.ll b/test/CodeGen/AArch64/pic-eh-stubs.ll
new file mode 100644
index 0000000000..77bf691cbc
--- /dev/null
+++ b/test/CodeGen/AArch64/pic-eh-stubs.ll
@@ -0,0 +1,60 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -relocation-model=pic -o - %s | FileCheck %s
+
+; Make sure exception-handling PIC code can be linked correctly. An alternative
+; to the sequence described below would have .gcc_except_table itself writable
+; and not use the indirection, but this isn't what LLVM does right now.
+
+ ; There should be a read-only .gcc_except_table section...
+; CHECK: .section .gcc_except_table,"a"
+
+ ; ... referring indirectly to stubs for its typeinfo ...
+; CHECK: // @TType Encoding = indirect pcrel sdata8
+ ; ... one of which is "int"'s typeinfo
+; CHECK: .Ltmp9:
+; CHECK-NEXT: .xword .L_ZTIi.DW.stub-.Ltmp9
+
+ ; .. and which is properly defined (in a writable section for the dynamic loader) later.
+; CHECK: .section .data.rel,"aw"
+; CHECK: .L_ZTIi.DW.stub:
+; CHECK-NEXT: .xword _ZTIi
+
+@_ZTIi = external constant i8*
+
+define i32 @_Z3barv() {
+entry:
+ invoke void @_Z3foov()
+ to label %return unwind label %lpad
+
+lpad: ; preds = %entry
+ %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ catch i8* bitcast (i8** @_ZTIi to i8*)
+ %1 = extractvalue { i8*, i32 } %0, 1
+ %2 = tail call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*)) nounwind
+ %matches = icmp eq i32 %1, %2
+ br i1 %matches, label %catch, label %eh.resume
+
+catch: ; preds = %lpad
+ %3 = extractvalue { i8*, i32 } %0, 0
+ %4 = tail call i8* @__cxa_begin_catch(i8* %3) nounwind
+ %5 = bitcast i8* %4 to i32*
+ %exn.scalar = load i32* %5, align 4
+ tail call void @__cxa_end_catch() nounwind
+ br label %return
+
+return: ; preds = %entry, %catch
+ %retval.0 = phi i32 [ %exn.scalar, %catch ], [ 42, %entry ]
+ ret i32 %retval.0
+
+eh.resume: ; preds = %lpad
+ resume { i8*, i32 } %0
+}
+
+declare void @_Z3foov()
+
+declare i32 @__gxx_personality_v0(...)
+
+declare i32 @llvm.eh.typeid.for(i8*) nounwind readnone
+
+declare i8* @__cxa_begin_catch(i8*)
+
+declare void @__cxa_end_catch() \ No newline at end of file
diff --git a/test/CodeGen/AArch64/regress-bitcast-formals.ll b/test/CodeGen/AArch64/regress-bitcast-formals.ll
new file mode 100644
index 0000000000..774b0fdee2
--- /dev/null
+++ b/test/CodeGen/AArch64/regress-bitcast-formals.ll
@@ -0,0 +1,11 @@
+; RUN: llc -march=aarch64 -verify-machineinstrs < %s | FileCheck %s
+
+; CallingConv.td requires a bitcast for vector arguments. Make sure we're
+; actually capable of that (the test was omitted from LowerFormalArguments).
+
+define void @test_bitcast_lower(<2 x i32> %a) {
+; CHECK: test_bitcast_lower:
+
+ ret void
+; CHECK: ret
+}
diff --git a/test/CodeGen/AArch64/regress-f128csel-flags.ll b/test/CodeGen/AArch64/regress-f128csel-flags.ll
new file mode 100644
index 0000000000..a1ffb09178
--- /dev/null
+++ b/test/CodeGen/AArch64/regress-f128csel-flags.ll
@@ -0,0 +1,27 @@
+; RUN: llc -march=aarch64 -verify-machineinstrs < %s | FileCheck %s
+
+; We used to not mark NZCV as being used in the continuation basic-block
+; when lowering a 128-bit "select" to branches. This meant a subsequent use
+; of the same flags gave an internal fault here.
+
+declare void @foo(fp128)
+
+define double @test_f128csel_flags(i32 %lhs, fp128 %a, fp128 %b) nounwind {
+; CHECK: test_f128csel_flags
+
+ %tst = icmp ne i32 %lhs, 42
+ %val = select i1 %tst, fp128 %a, fp128 %b
+; CHECK: cmp w0, #42
+; CHECK: b.eq .LBB0
+
+ call void @foo(fp128 %val)
+ %retval = select i1 %tst, double 4.0, double 5.0
+
+ ; It's also reasonably important that the actual fcsel comes before the
+ ; function call since bl may corrupt NZCV. We were doing the right thing anyway,
+  ; but we may as well test it while we're here.
+; CHECK: fcsel {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}, ne
+; CHECK: bl foo
+
+ ret double %retval
+}
diff --git a/test/CodeGen/AArch64/regress-tail-livereg.ll b/test/CodeGen/AArch64/regress-tail-livereg.ll
new file mode 100644
index 0000000000..0c7f8cbffa
--- /dev/null
+++ b/test/CodeGen/AArch64/regress-tail-livereg.ll
@@ -0,0 +1,19 @@
+; RUN: llc -verify-machineinstrs -march=aarch64 < %s | FileCheck %s
+@var = global void()* zeroinitializer
+
+declare void @bar()
+
+define void @foo() {
+; CHECK: foo:
+ %func = load void()** @var
+
+ ; Calling a function encourages @foo to use a callee-saved register,
+ ; which makes it a natural choice for the tail call itself. But we don't
+ ; want that: the final "br xN" has to use a temporary or argument
+ ; register.
+ call void @bar()
+
+ tail call void %func()
+; CHECK: br {{x([0-79]|1[0-8])}}
+ ret void
+} \ No newline at end of file
diff --git a/test/CodeGen/AArch64/regress-tblgen-chains.ll b/test/CodeGen/AArch64/regress-tblgen-chains.ll
new file mode 100644
index 0000000000..0c53f83b66
--- /dev/null
+++ b/test/CodeGen/AArch64/regress-tblgen-chains.ll
@@ -0,0 +1,36 @@
+; RUN: llc -verify-machineinstrs -march=aarch64 < %s | FileCheck %s
+
+; When generating DAG selection tables, TableGen used to only flag an
+; instruction as needing a chain on its own account if it had a built-in pattern
+; which used the chain. This meant that the AArch64 load/stores weren't
+; recognised and so both loads from %locvar below were coalesced into a single
+; LS8_LDR instruction (same operands other than the non-existent chain) and the
+; increment was lost at return.
+
+; This was obviously a Bad Thing.
+
+declare void @bar(i8*)
+
+define i64 @test_chains() {
+; CHECK: test_chains:
+
+ %locvar = alloca i8
+
+ call void @bar(i8* %locvar)
+; CHECK: bl bar
+
+ %inc.1 = load i8* %locvar
+ %inc.2 = zext i8 %inc.1 to i64
+ %inc.3 = add i64 %inc.2, 1
+ %inc.4 = trunc i64 %inc.3 to i8
+ store i8 %inc.4, i8* %locvar
+; CHECK: ldrb {{w[0-9]+}}, [sp, [[LOCADDR:#[0-9]+]]]
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, #1
+; CHECK: strb {{w[0-9]+}}, [sp, [[LOCADDR]]]
+; CHECK: ldrb {{w[0-9]+}}, [sp, [[LOCADDR]]]
+
+ %ret.1 = load i8* %locvar
+ %ret.2 = zext i8 %ret.1 to i64
+ ret i64 %ret.2
+; CHECK: ret
+}
diff --git a/test/CodeGen/AArch64/regress-w29-reserved-with-fp.ll b/test/CodeGen/AArch64/regress-w29-reserved-with-fp.ll
new file mode 100644
index 0000000000..98bd92b06c
--- /dev/null
+++ b/test/CodeGen/AArch64/regress-w29-reserved-with-fp.ll
@@ -0,0 +1,37 @@
+; RUN: llc -march=aarch64 -disable-fp-elim < %s | FileCheck %s
+@var = global i32 0
+
+declare void @bar()
+
+define void @test_w29_reserved() {
+; CHECK: test_w29_reserved:
+; CHECK: add x29, sp, #{{[0-9]+}}
+
+ %val1 = load volatile i32* @var
+ %val2 = load volatile i32* @var
+ %val3 = load volatile i32* @var
+ %val4 = load volatile i32* @var
+ %val5 = load volatile i32* @var
+ %val6 = load volatile i32* @var
+ %val7 = load volatile i32* @var
+ %val8 = load volatile i32* @var
+ %val9 = load volatile i32* @var
+
+; CHECK-NOT: ldr w29,
+
+ ; Call to prevent fp-elim that occurs regardless in leaf functions.
+ call void @bar()
+
+ store volatile i32 %val1, i32* @var
+ store volatile i32 %val2, i32* @var
+ store volatile i32 %val3, i32* @var
+ store volatile i32 %val4, i32* @var
+ store volatile i32 %val5, i32* @var
+ store volatile i32 %val6, i32* @var
+ store volatile i32 %val7, i32* @var
+ store volatile i32 %val8, i32* @var
+ store volatile i32 %val9, i32* @var
+
+ ret void
+; CHECK: ret
+}
diff --git a/test/CodeGen/AArch64/regress-wzr-allocatable.ll b/test/CodeGen/AArch64/regress-wzr-allocatable.ll
new file mode 100644
index 0000000000..a587d83bd8
--- /dev/null
+++ b/test/CodeGen/AArch64/regress-wzr-allocatable.ll
@@ -0,0 +1,41 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 -O0
+
+; When WZR wasn't marked as reserved, this function tried to allocate
+; it at O0 and then generated an internal fault (mostly incidentally)
+; when it discovered that it was already in use for a multiplication.
+
+; I'm not really convinced this is a good test since it could easily
+; stop testing what it does now with no-one any the wiser. However, I
+; can't think of a better way to force the allocator to use WZR
+; specifically.
+
+define void @test() nounwind {
+entry:
+ br label %for.cond
+
+for.cond: ; preds = %for.body, %entry
+ br i1 undef, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ br label %for.cond6
+
+for.cond6: ; preds = %for.body9, %for.end
+ br i1 undef, label %for.body9, label %while.cond30
+
+for.body9: ; preds = %for.cond6
+ store i16 0, i16* undef, align 2
+ %0 = load i32* undef, align 4
+ %1 = load i32* undef, align 4
+ %mul15 = mul i32 %0, %1
+ %add16 = add i32 %mul15, 32768
+ %div = udiv i32 %add16, 65535
+ %add17 = add i32 %div, 1
+ store i32 %add17, i32* undef, align 4
+ br label %for.cond6
+
+while.cond30: ; preds = %for.cond6
+ ret void
+}
diff --git a/test/CodeGen/AArch64/setcc-takes-i32.ll b/test/CodeGen/AArch64/setcc-takes-i32.ll
new file mode 100644
index 0000000000..795747af41
--- /dev/null
+++ b/test/CodeGen/AArch64/setcc-takes-i32.ll
@@ -0,0 +1,22 @@
+; RUN: llc -verify-machineinstrs -march=aarch64 < %s | FileCheck %s
+
+; Most important point here is that the promotion of the i1 works
+; correctly. Previously LLVM thought that i64 was the appropriate SetCC output,
+; which meant it proceeded in two steps and produced an i64 -> i64 any_ext which
+; couldn't be selected and faulted.
+
+; It was expecting the smallest legal promotion of i1 to be the preferred SetCC
+; type, so we'll satisfy it (this actually arguably gives better code anyway,
+; with flag-manipulation operations allowed to use W-registers).
+
+declare {i64, i1} @llvm.umul.with.overflow.i64(i64, i64)
+
+define i64 @test_select(i64 %lhs, i64 %rhs) {
+; CHECK: test_select:
+
+ %res = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %lhs, i64 %rhs)
+ %flag = extractvalue {i64, i1} %res, 1
+ %retval = select i1 %flag, i64 %lhs, i64 %rhs
+ ret i64 %retval
+; CHECK: ret
+} \ No newline at end of file
diff --git a/test/CodeGen/AArch64/sibling-call.ll b/test/CodeGen/AArch64/sibling-call.ll
new file mode 100644
index 0000000000..a4ea064d12
--- /dev/null
+++ b/test/CodeGen/AArch64/sibling-call.ll
@@ -0,0 +1,97 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
+declare void @callee_stack0()
+declare void @callee_stack8([8 x i32], i64)
+declare void @callee_stack16([8 x i32], i64, i64)
+
+define void @caller_to0_from0() nounwind {
+; CHECK: caller_to0_from0:
+; CHECK-NEXT: // BB
+ tail call void @callee_stack0()
+ ret void
+; CHECK-NEXT: b callee_stack0
+}
+
+define void @caller_to0_from8([8 x i32], i64) nounwind {
+; CHECK: caller_to0_from8:
+; CHECK-NEXT: // BB
+
+ tail call void @callee_stack0()
+ ret void
+; CHECK-NEXT: b callee_stack0
+}
+
+define void @caller_to8_from0() {
+; CHECK: caller_to8_from0:
+
+; Caller isn't going to clean up any extra stack we allocate, so it
+; can't be a tail call.
+ tail call void @callee_stack8([8 x i32] undef, i64 42)
+ ret void
+; CHECK: bl callee_stack8
+}
+
+define void @caller_to8_from8([8 x i32], i64 %a) {
+; CHECK: caller_to8_from8:
+; CHECK-NOT: sub sp, sp,
+
+; This should reuse our stack area for the 42
+ tail call void @callee_stack8([8 x i32] undef, i64 42)
+ ret void
+; CHECK: str {{x[0-9]+}}, [sp]
+; CHECK-NEXT: b callee_stack8
+}
+
+define void @caller_to16_from8([8 x i32], i64 %a) {
+; CHECK: caller_to16_from8:
+
+; Shouldn't be a tail call: we can't use SP+8 because our caller might
+; have something there. This may sound obvious, but the implementation does
+; some funky aligning.
+ tail call void @callee_stack16([8 x i32] undef, i64 undef, i64 undef)
+; CHECK: bl callee_stack16
+ ret void
+}
+
+define void @caller_to8_from24([8 x i32], i64 %a, i64 %b, i64 %c) {
+; CHECK: caller_to8_from24:
+; CHECK-NOT: sub sp, sp
+
+; Reuse our area, putting "42" at incoming sp
+ tail call void @callee_stack8([8 x i32] undef, i64 42)
+ ret void
+; CHECK: str {{x[0-9]+}}, [sp]
+; CHECK-NEXT: b callee_stack8
+}
+
+define void @caller_to16_from16([8 x i32], i64 %a, i64 %b) {
+; CHECK: caller_to16_from16:
+; CHECK-NOT: sub sp, sp,
+
+; Here we want to make sure that both loads happen before the stores:
+; otherwise either %a or %b will be wrongly clobbered.
+ tail call void @callee_stack16([8 x i32] undef, i64 %b, i64 %a)
+ ret void
+
+; CHECK: ldr x0,
+; CHECK: ldr x1,
+; CHECK: str x1,
+; CHECK: str x0,
+
+; CHECK-NOT: add sp, sp,
+; CHECK: b callee_stack16
+}
+
+@func = global void(i32)* null
+
+define void @indirect_tail() {
+; CHECK: indirect_tail:
+; CHECK-NOT: sub sp, sp
+
+ %fptr = load void(i32)** @func
+ tail call void %fptr(i32 42)
+ ret void
+; CHECK: movz w0, #42
+; CHECK: ldr [[FPTR:x[1-9]+]], [{{x[0-9]+}}, #:lo12:func]
+; CHECK: br [[FPTR]]
+} \ No newline at end of file
diff --git a/test/CodeGen/AArch64/tail-call.ll b/test/CodeGen/AArch64/tail-call.ll
new file mode 100644
index 0000000000..eed6ae5f04
--- /dev/null
+++ b/test/CodeGen/AArch64/tail-call.ll
@@ -0,0 +1,94 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 -tailcallopt | FileCheck %s
+
+declare fastcc void @callee_stack0()
+declare fastcc void @callee_stack8([8 x i32], i64)
+declare fastcc void @callee_stack16([8 x i32], i64, i64)
+
+define fastcc void @caller_to0_from0() nounwind {
+; CHECK: caller_to0_from0:
+; CHECK-NEXT: // BB
+ tail call fastcc void @callee_stack0()
+ ret void
+; CHECK-NEXT: b callee_stack0
+}
+
+define fastcc void @caller_to0_from8([8 x i32], i64) {
+; CHECK: caller_to0_from8:
+
+ tail call fastcc void @callee_stack0()
+ ret void
+; CHECK: add sp, sp, #16
+; CHECK-NEXT: b callee_stack0
+}
+
+define fastcc void @caller_to8_from0() {
+; CHECK: caller_to8_from0:
+; CHECK: sub sp, sp, #32
+
+; Key point is that the "42" should go #16 below incoming stack
+; pointer (we didn't have arg space to reuse).
+ tail call fastcc void @callee_stack8([8 x i32] undef, i64 42)
+ ret void
+; CHECK: str {{x[0-9]+}}, [sp, #16]
+; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: b callee_stack8
+}
+
+define fastcc void @caller_to8_from8([8 x i32], i64 %a) {
+; CHECK: caller_to8_from8:
+; CHECK: sub sp, sp, #16
+
+; Key point is that the "42" should go where "%a" was: at SP on entry.
+ tail call fastcc void @callee_stack8([8 x i32] undef, i64 42)
+ ret void
+; CHECK: str {{x[0-9]+}}, [sp, #16]
+; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: b callee_stack8
+}
+
+define fastcc void @caller_to16_from8([8 x i32], i64 %a) {
+; CHECK: caller_to16_from8:
+; CHECK: sub sp, sp, #16
+
+; Important point is that the call reuses the "dead" argument space
+; above %a on the stack. If it tries to go below incoming-SP then the
+; callee will not deallocate the space, even in fastcc.
+ tail call fastcc void @callee_stack16([8 x i32] undef, i64 42, i64 2)
+; CHECK: str {{x[0-9]+}}, [sp, #24]
+; CHECK: str {{x[0-9]+}}, [sp, #16]
+; CHECK: add sp, sp, #16
+; CHECK: b callee_stack16
+ ret void
+}
+
+
+define fastcc void @caller_to8_from24([8 x i32], i64 %a, i64 %b, i64 %c) {
+; CHECK: caller_to8_from24:
+; CHECK: sub sp, sp, #16
+
+; Key point is that the "42" should go #16 above where SP was on entry.
+ tail call fastcc void @callee_stack8([8 x i32] undef, i64 42)
+ ret void
+; CHECK: str {{x[0-9]+}}, [sp, #32]
+; CHECK-NEXT: add sp, sp, #32
+; CHECK-NEXT: b callee_stack8
+}
+
+
+define fastcc void @caller_to16_from16([8 x i32], i64 %a, i64 %b) {
+; CHECK: caller_to16_from16:
+; CHECK: sub sp, sp, #16
+
+; Here we want to make sure that both loads happen before the stores:
+; otherwise either %a or %b will be wrongly clobbered.
+ tail call fastcc void @callee_stack16([8 x i32] undef, i64 %b, i64 %a)
+ ret void
+
+; CHECK: ldr x0,
+; CHECK: ldr x1,
+; CHECK: str x1,
+; CHECK: str x0,
+
+; CHECK: add sp, sp, #16
+; CHECK: b callee_stack16
+}
diff --git a/test/CodeGen/AArch64/tls-dynamic-together.ll b/test/CodeGen/AArch64/tls-dynamic-together.ll
new file mode 100644
index 0000000000..bad2298c8a
--- /dev/null
+++ b/test/CodeGen/AArch64/tls-dynamic-together.ll
@@ -0,0 +1,18 @@
+; RUN: llc -O0 -mtriple=aarch64-none-linux-gnu -relocation-model=pic -verify-machineinstrs < %s | FileCheck %s
+
+; If the .tlsdesccall and blr parts are emitted completely separately (even with
+; glue) then LLVM will separate them quite happily (with a spill at O0, hence
+; the option). This is definitely wrong, so we make sure they are emitted
+; together.
+
+@general_dynamic_var = external thread_local global i32
+
+define i32 @test_generaldynamic() {
+; CHECK: test_generaldynamic:
+
+ %val = load i32* @general_dynamic_var
+ ret i32 %val
+
+; CHECK: .tlsdesccall general_dynamic_var
+; CHECK-NEXT: blr {{x[0-9]+}}
+}
diff --git a/test/CodeGen/AArch64/tls-dynamics.ll b/test/CodeGen/AArch64/tls-dynamics.ll
new file mode 100644
index 0000000000..cdfd11783c
--- /dev/null
+++ b/test/CodeGen/AArch64/tls-dynamics.ll
@@ -0,0 +1,121 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -relocation-model=pic -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -relocation-model=pic -filetype=obj < %s | llvm-objdump -r - | FileCheck --check-prefix=CHECK-RELOC %s
+
+@general_dynamic_var = external thread_local global i32
+
+define i32 @test_generaldynamic() {
+; CHECK: test_generaldynamic:
+
+ %val = load i32* @general_dynamic_var
+ ret i32 %val
+
+; CHECK: adrp x[[TLSDESC_HI:[0-9]+]], :tlsdesc:general_dynamic_var
+; CHECK: add x0, x[[TLSDESC_HI]], #:tlsdesc_lo12:general_dynamic_var
+; CHECK: ldr [[CALLEE:x[0-9]+]], [x[[TLSDESC_HI]], #:tlsdesc_lo12:general_dynamic_var]
+; CHECK: .tlsdesccall general_dynamic_var
+; CHECK-NEXT: blr [[CALLEE]]
+
+; CHECK: mrs x[[TP:[0-9]+]], tpidr_el0
+; CHECK: ldr w0, [x[[TP]], x0]
+
+; CHECK-RELOC: R_AARCH64_TLSDESC_ADR_PAGE
+; CHECK-RELOC: R_AARCH64_TLSDESC_ADD_LO12_NC
+; CHECK-RELOC: R_AARCH64_TLSDESC_LD64_LO12_NC
+; CHECK-RELOC: R_AARCH64_TLSDESC_CALL
+
+}
+
+define i32* @test_generaldynamic_addr() {
+; CHECK: test_generaldynamic_addr:
+
+ ret i32* @general_dynamic_var
+
+; CHECK: adrp x[[TLSDESC_HI:[0-9]+]], :tlsdesc:general_dynamic_var
+; CHECK: add x0, x[[TLSDESC_HI]], #:tlsdesc_lo12:general_dynamic_var
+; CHECK: ldr [[CALLEE:x[0-9]+]], [x[[TLSDESC_HI]], #:tlsdesc_lo12:general_dynamic_var]
+; CHECK: .tlsdesccall general_dynamic_var
+; CHECK-NEXT: blr [[CALLEE]]
+
+; CHECK: mrs [[TP:x[0-9]+]], tpidr_el0
+; CHECK: add x0, [[TP]], x0
+
+; CHECK-RELOC: R_AARCH64_TLSDESC_ADR_PAGE
+; CHECK-RELOC: R_AARCH64_TLSDESC_ADD_LO12_NC
+; CHECK-RELOC: R_AARCH64_TLSDESC_LD64_LO12_NC
+; CHECK-RELOC: R_AARCH64_TLSDESC_CALL
+
+}
+
+@local_dynamic_var = external thread_local(localdynamic) global i32
+
+define i32 @test_localdynamic() {
+; CHECK: test_localdynamic:
+
+ %val = load i32* @local_dynamic_var
+ ret i32 %val
+
+; CHECK: adrp x[[TLSDESC_HI:[0-9]+]], :tlsdesc:_TLS_MODULE_BASE_
+; CHECK: add x0, x[[TLSDESC_HI]], #:tlsdesc_lo12:_TLS_MODULE_BASE_
+; CHECK: ldr [[CALLEE:x[0-9]+]], [x[[TLSDESC_HI]], #:tlsdesc_lo12:_TLS_MODULE_BASE_]
+; CHECK: .tlsdesccall _TLS_MODULE_BASE_
+; CHECK-NEXT: blr [[CALLEE]]
+
+; CHECK: movz [[DTP_OFFSET:x[0-9]+]], #:dtprel_g1:local_dynamic_var
+; CHECK: movk [[DTP_OFFSET]], #:dtprel_g0_nc:local_dynamic_var
+
+; CHECK: ldr w0, [x0, [[DTP_OFFSET]]]
+
+; CHECK-RELOC: R_AARCH64_TLSDESC_ADR_PAGE
+; CHECK-RELOC: R_AARCH64_TLSDESC_ADD_LO12_NC
+; CHECK-RELOC: R_AARCH64_TLSDESC_LD64_LO12_NC
+; CHECK-RELOC: R_AARCH64_TLSDESC_CALL
+
+}
+
+define i32* @test_localdynamic_addr() {
+; CHECK: test_localdynamic_addr:
+
+ ret i32* @local_dynamic_var
+
+; CHECK: adrp x[[TLSDESC_HI:[0-9]+]], :tlsdesc:_TLS_MODULE_BASE_
+; CHECK: add x0, x[[TLSDESC_HI]], #:tlsdesc_lo12:_TLS_MODULE_BASE_
+; CHECK: ldr [[CALLEE:x[0-9]+]], [x[[TLSDESC_HI]], #:tlsdesc_lo12:_TLS_MODULE_BASE_]
+; CHECK: .tlsdesccall _TLS_MODULE_BASE_
+; CHECK-NEXT: blr [[CALLEE]]
+
+; CHECK: movz [[DTP_OFFSET:x[0-9]+]], #:dtprel_g1:local_dynamic_var
+; CHECK: movk [[DTP_OFFSET]], #:dtprel_g0_nc:local_dynamic_var
+
+; CHECK: add x0, x0, [[DTP_OFFSET]]
+
+; CHECK-RELOC: R_AARCH64_TLSDESC_ADR_PAGE
+; CHECK-RELOC: R_AARCH64_TLSDESC_ADD_LO12_NC
+; CHECK-RELOC: R_AARCH64_TLSDESC_LD64_LO12_NC
+; CHECK-RELOC: R_AARCH64_TLSDESC_CALL
+
+}
+
+; The entire point of the local-dynamic access model is to have a single call to
+; the expensive resolver. Make sure we achieve that goal.
+
+@local_dynamic_var2 = external thread_local(localdynamic) global i32
+
+define i32 @test_localdynamic_deduplicate() {
+; CHECK: test_localdynamic_deduplicate:
+
+ %val = load i32* @local_dynamic_var
+ %val2 = load i32* @local_dynamic_var2
+
+ %sum = add i32 %val, %val2
+ ret i32 %sum
+
+; CHECK: adrp x[[TLSDESC_HI:[0-9]+]], :tlsdesc:_TLS_MODULE_BASE_
+; CHECK: add x0, x[[TLSDESC_HI]], #:tlsdesc_lo12:_TLS_MODULE_BASE_
+; CHECK: ldr [[CALLEE:x[0-9]+]], [x[[TLSDESC_HI]], #:tlsdesc_lo12:_TLS_MODULE_BASE_]
+; CHECK: .tlsdesccall _TLS_MODULE_BASE_
+; CHECK-NEXT: blr [[CALLEE]]
+
+; CHECK-NOT: _TLS_MODULE_BASE_
+
+; CHECK: ret
+}
diff --git a/test/CodeGen/AArch64/tls-execs.ll b/test/CodeGen/AArch64/tls-execs.ll
new file mode 100644
index 0000000000..a665884227
--- /dev/null
+++ b/test/CodeGen/AArch64/tls-execs.ll
@@ -0,0 +1,63 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -filetype=obj < %s | llvm-objdump -r - | FileCheck --check-prefix=CHECK-RELOC %s
+
+@initial_exec_var = external thread_local(initialexec) global i32
+
+define i32 @test_initial_exec() {
+; CHECK: test_initial_exec:
+ %val = load i32* @initial_exec_var
+
+; CHECK: adrp x[[GOTADDR:[0-9]+]], :gottprel:initial_exec_var
+; CHECK: ldr x[[TP_OFFSET:[0-9]+]], [x[[GOTADDR]], #:gottprel_lo12:initial_exec_var]
+; CHECK: mrs x[[TP:[0-9]+]], tpidr_el0
+; CHECK: ldr w0, [x[[TP]], x[[TP_OFFSET]]]
+
+; CHECK-RELOC: R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
+; CHECK-RELOC: R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
+
+ ret i32 %val
+}
+
+define i32* @test_initial_exec_addr() {
+; CHECK: test_initial_exec_addr:
+ ret i32* @initial_exec_var
+
+; CHECK: adrp x[[GOTADDR:[0-9]+]], :gottprel:initial_exec_var
+; CHECK: ldr [[TP_OFFSET:x[0-9]+]], [x[[GOTADDR]], #:gottprel_lo12:initial_exec_var]
+; CHECK: mrs [[TP:x[0-9]+]], tpidr_el0
+; CHECK: add x0, [[TP]], [[TP_OFFSET]]
+
+; CHECK-RELOC: R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
+; CHECK-RELOC: R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
+
+}
+
+@local_exec_var = thread_local(initialexec) global i32 0
+
+define i32 @test_local_exec() {
+; CHECK: test_local_exec:
+ %val = load i32* @local_exec_var
+
+; CHECK: movz [[TP_OFFSET:x[0-9]+]], #:tprel_g1:local_exec_var
+; CHECK: movk [[TP_OFFSET]], #:tprel_g0_nc:local_exec_var
+; CHECK: mrs x[[TP:[0-9]+]], tpidr_el0
+; CHECK: ldr w0, [x[[TP]], [[TP_OFFSET]]]
+
+; CHECK-RELOC: R_AARCH64_TLSLE_MOVW_TPREL_G1
+; CHECK-RELOC: R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
+
+ ret i32 %val
+}
+
+define i32* @test_local_exec_addr() {
+; CHECK: test_local_exec_addr:
+ ret i32* @local_exec_var
+
+; CHECK: movz [[TP_OFFSET:x[0-9]+]], #:tprel_g1:local_exec_var
+; CHECK: movk [[TP_OFFSET]], #:tprel_g0_nc:local_exec_var
+; CHECK: mrs [[TP:x[0-9]+]], tpidr_el0
+; CHECK: add x0, [[TP]], [[TP_OFFSET]]
+
+; CHECK-RELOC: R_AARCH64_TLSLE_MOVW_TPREL_G1
+; CHECK-RELOC: R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
+}
diff --git a/test/CodeGen/AArch64/tst-br.ll b/test/CodeGen/AArch64/tst-br.ll
new file mode 100644
index 0000000000..17a328fe4d
--- /dev/null
+++ b/test/CodeGen/AArch64/tst-br.ll
@@ -0,0 +1,48 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
+; We've got the usual issues with LLVM reordering blocks here. The
+; tests are correct for the current order, but who knows when that
+; will change. Beware!
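+;
+; A note on the immediates below (informational only): tbz tests a single bit,
+; and the bit index comes straight from the AND mask, e.g. 32768 = 1 << 15
+; gives "#15" and 4096 = 1 << 12 gives "#12".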
+@var32 = global i32 0
+@var64 = global i64 0
+
+define i32 @test_tbz() {
+; CHECK: test_tbz:
+
+ %val = load i32* @var32
+ %val64 = load i64* @var64
+
+ %tbit0 = and i32 %val, 32768
+ %tst0 = icmp ne i32 %tbit0, 0
+ br i1 %tst0, label %test1, label %end1
+; CHECK: tbz {{w[0-9]+}}, #15, [[LBL_end1:.LBB0_[0-9]+]]
+
+test1:
+ %tbit1 = and i32 %val, 4096
+ %tst1 = icmp ne i32 %tbit1, 0
+ br i1 %tst1, label %test2, label %end1
+; CHECK: tbz {{w[0-9]+}}, #12, [[LBL_end1]]
+
+test2:
+ %tbit2 = and i64 %val64, 32768
+ %tst2 = icmp ne i64 %tbit2, 0
+ br i1 %tst2, label %test3, label %end1
+; CHECK: tbz {{x[0-9]+}}, #15, [[LBL_end1]]
+
+test3:
+ %tbit3 = and i64 %val64, 4096
+ %tst3 = icmp ne i64 %tbit3, 0
+ br i1 %tst3, label %end2, label %end1
+; CHECK: tbz {{x[0-9]+}}, #12, [[LBL_end1]]
+
+end2:
+; CHECK: movz x0, #1
+; CHECK-NEXT: ret
+ ret i32 1
+
+end1:
+; CHECK: [[LBL_end1]]:
+; CHECK-NEXT: mov x0, xzr
+; CHECK-NEXT: ret
+ ret i32 0
+}
diff --git a/test/CodeGen/AArch64/variadic.ll b/test/CodeGen/AArch64/variadic.ll
new file mode 100644
index 0000000000..f601d4731e
--- /dev/null
+++ b/test/CodeGen/AArch64/variadic.ll
@@ -0,0 +1,144 @@
+; RUN: llc -verify-machineinstrs -march=aarch64 < %s | FileCheck %s
+
+%va_list = type {i8*, i8*, i8*, i32, i32}
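+; This presumably mirrors the AAPCS64 va_list layout: __stack at offset 0,
+; __gr_top at 8, __vr_top at 16, __gr_offs at 24 and __vr_offs at 28, which is
+; where the #8/#16/#24/#28 offsets in the checks below come from. (Informational
+; only; nothing here depends on these names.)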
+
+@var = global %va_list zeroinitializer
+
+declare void @llvm.va_start(i8*)
+
+define void @test_simple(i32 %n, ...) {
+; CHECK: test_simple:
+; CHECK: sub sp, sp, #[[STACKSIZE:[0-9]+]]
+; CHECK: mov x[[FPRBASE:[0-9]+]], sp
+; CHECK: str q7, [x[[FPRBASE]], #112]
+; CHECK: add x[[GPRBASE:[0-9]+]], sp, #[[GPRFROMSP:[0-9]+]]
+; CHECK: str x7, [x[[GPRBASE]], #48]
+
+; Omit the middle ones
+
+; CHECK: str q0, [sp]
+; CHECK: str x1, [sp, #[[GPRFROMSP]]]
+
+ %addr = bitcast %va_list* @var to i8*
+ call void @llvm.va_start(i8* %addr)
+; CHECK: add x[[VA_LIST:[0-9]+]], {{x[0-9]+}}, #:lo12:var
+; CHECK: movn [[VR_OFFS:w[0-9]+]], #127
+; CHECK: str [[VR_OFFS]], [x[[VA_LIST]], #28]
+; CHECK: movn [[GR_OFFS:w[0-9]+]], #55
+; CHECK: str [[GR_OFFS]], [x[[VA_LIST]], #24]
+; CHECK: add [[VR_TOP:x[0-9]+]], x[[FPRBASE]], #128
+; CHECK: str [[VR_TOP]], [x[[VA_LIST]], #16]
+; CHECK: add [[GR_TOP:x[0-9]+]], x[[GPRBASE]], #56
+; CHECK: str [[GR_TOP]], [x[[VA_LIST]], #8]
+; CHECK: add [[STACK:x[0-9]+]], sp, #[[STACKSIZE]]
+; CHECK: str [[STACK]], [{{x[0-9]+}}, #:lo12:var]
+
+ ret void
+}
+
+define void @test_fewargs(i32 %n, i32 %n1, i32 %n2, float %m, ...) {
+; CHECK: test_fewargs:
+; CHECK: sub sp, sp, #[[STACKSIZE:[0-9]+]]
+; CHECK: mov x[[FPRBASE:[0-9]+]], sp
+; CHECK: str q7, [x[[FPRBASE]], #96]
+; CHECK: add x[[GPRBASE:[0-9]+]], sp, #[[GPRFROMSP:[0-9]+]]
+; CHECK: str x7, [x[[GPRBASE]], #32]
+
+; Omit the middle ones
+
+; CHECK: str q1, [sp]
+; CHECK: str x3, [sp, #[[GPRFROMSP]]]
+
+ %addr = bitcast %va_list* @var to i8*
+ call void @llvm.va_start(i8* %addr)
+; CHECK: add x[[VA_LIST:[0-9]+]], {{x[0-9]+}}, #:lo12:var
+; CHECK: movn [[VR_OFFS:w[0-9]+]], #111
+; CHECK: str [[VR_OFFS]], [x[[VA_LIST]], #28]
+; CHECK: movn [[GR_OFFS:w[0-9]+]], #39
+; CHECK: str [[GR_OFFS]], [x[[VA_LIST]], #24]
+; CHECK: add [[VR_TOP:x[0-9]+]], x[[FPRBASE]], #112
+; CHECK: str [[VR_TOP]], [x[[VA_LIST]], #16]
+; CHECK: add [[GR_TOP:x[0-9]+]], x[[GPRBASE]], #40
+; CHECK: str [[GR_TOP]], [x[[VA_LIST]], #8]
+; CHECK: add [[STACK:x[0-9]+]], sp, #[[STACKSIZE]]
+; CHECK: str [[STACK]], [{{x[0-9]+}}, #:lo12:var]
+
+ ret void
+}
+
+define void @test_nospare([8 x i64], [8 x float], ...) {
+; CHECK: test_nospare:
+
+ %addr = bitcast %va_list* @var to i8*
+ call void @llvm.va_start(i8* %addr)
+; CHECK-NOT: sub sp, sp
+; CHECK: mov [[STACK:x[0-9]+]], sp
+; CHECK: str [[STACK]], [{{x[0-9]+}}, #:lo12:var]
+
+ ret void
+}
+
+; If there are non-variadic arguments on the stack (here two i64s) then the
+; __stack field should point just past them.
+define void @test_offsetstack([10 x i64], [3 x float], ...) {
+; CHECK: test_offsetstack:
+; CHECK: sub sp, sp, #80
+; CHECK: mov x[[FPRBASE:[0-9]+]], sp
+; CHECK: str q7, [x[[FPRBASE]], #64]
+
+; CHECK-NOT: str x{{[0-9]+}},
+; Omit the middle ones
+
+; CHECK: str q3, [sp]
+
+ %addr = bitcast %va_list* @var to i8*
+ call void @llvm.va_start(i8* %addr)
+; CHECK: add x[[VA_LIST:[0-9]+]], {{x[0-9]+}}, #:lo12:var
+; CHECK: movn [[VR_OFFS:w[0-9]+]], #79
+; CHECK: str [[VR_OFFS]], [x[[VA_LIST]], #28]
+; CHECK: str wzr, [x[[VA_LIST]], #24]
+; CHECK: add [[VR_TOP:x[0-9]+]], x[[FPRBASE]], #80
+; CHECK: str [[VR_TOP]], [x[[VA_LIST]], #16]
+; CHECK: add [[STACK:x[0-9]+]], sp, #96
+; CHECK: str [[STACK]], [{{x[0-9]+}}, #:lo12:var]
+
+ ret void
+}
+
+declare void @llvm.va_end(i8*)
+
+define void @test_va_end() nounwind {
+; CHECK: test_va_end:
+; CHECK-NEXT: BB#0
+
+ %addr = bitcast %va_list* @var to i8*
+ call void @llvm.va_end(i8* %addr)
+
+ ret void
+; CHECK-NEXT: ret
+}
+
+declare void @llvm.va_copy(i8* %dest, i8* %src)
+
+@second_list = global %va_list zeroinitializer
+
+define void @test_va_copy() {
+; CHECK: test_va_copy:
+ %srcaddr = bitcast %va_list* @var to i8*
+ %dstaddr = bitcast %va_list* @second_list to i8*
+ call void @llvm.va_copy(i8* %dstaddr, i8* %srcaddr)
+
+; Check beginning and end again:
+
+; CHECK: ldr [[BLOCK:x[0-9]+]], [{{x[0-9]+}}, #:lo12:var]
+; CHECK: str [[BLOCK]], [{{x[0-9]+}}, #:lo12:second_list]
+
+; CHECK: add x[[DEST_LIST:[0-9]+]], {{x[0-9]+}}, #:lo12:second_list
+; CHECK: add x[[SRC_LIST:[0-9]+]], {{x[0-9]+}}, #:lo12:var
+
+; CHECK: ldr [[BLOCK:x[0-9]+]], [x[[SRC_LIST]], #24]
+; CHECK: str [[BLOCK]], [x[[DEST_LIST]], #24]
+
+ ret void
+; CHECK: ret
+}
diff --git a/test/CodeGen/AArch64/zero-reg.ll b/test/CodeGen/AArch64/zero-reg.ll
new file mode 100644
index 0000000000..f4f76bef21
--- /dev/null
+++ b/test/CodeGen/AArch64/zero-reg.ll
@@ -0,0 +1,31 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
+@var32 = global i32 0
+@var64 = global i64 0
+
+define void @test_zr() {
+; CHECK: test_zr:
+
+ store i32 0, i32* @var32
+; CHECK: str wzr, [{{x[0-9]+}}, #:lo12:var32]
+ store i64 0, i64* @var64
+; CHECK: str xzr, [{{x[0-9]+}}, #:lo12:var64]
+
+ ret void
+; CHECK: ret
+}
+
+define void @test_sp(i32 %val) {
+; CHECK: test_sp:
+
+; Important correctness point here is that LLVM doesn't try to use xzr
+; as an addressing register: "str w0, [xzr]" is not a valid A64
+; instruction (0b11111 in the Rn field would mean "sp").
+ %addr = getelementptr i32* null, i64 0
+ store i32 %val, i32* %addr
+; CHECK: mov x[[NULL:[0-9]+]], xzr
+; CHECK: str {{w[0-9]+}}, [x[[NULL]]]
+
+ ret void
+; CHECK: ret
+} \ No newline at end of file
diff --git a/test/DebugInfo/AArch64/cfi-frame.ll b/test/DebugInfo/AArch64/cfi-frame.ll
new file mode 100644
index 0000000000..4217925b98
--- /dev/null
+++ b/test/DebugInfo/AArch64/cfi-frame.ll
@@ -0,0 +1,58 @@
+; RUN: llc -verify-machineinstrs -march aarch64 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -march aarch64 -disable-fp-elim < %s | FileCheck %s --check-prefix=CHECK-WITH-FP
+
+@bigspace = global [8 x i64] zeroinitializer
+
+declare void @use_addr(i8*)
+
+define void @test_frame([8 x i64] %val) {
+; CHECK: test_frame:
+; CHECK: .cfi_startproc
+
+ %var = alloca i8, i32 1000000
+; CHECK: sub sp, sp, #[[SP_INIT_ADJ:[0-9]+]]
+; CHECK-NEXT: .Ltmp
+; CHECK-NEXT: .cfi_def_cfa sp, [[SP_INIT_ADJ]]
+
+; Make sure the prologue is reasonably efficient
+; CHECK-NEXT: stp x29, x30, [sp,
+; CHECK-NEXT: stp x25, x26, [sp,
+; CHECK-NEXT: stp x23, x24, [sp,
+; CHECK-NEXT: stp x21, x22, [sp,
+; CHECK-NEXT: stp x19, x20, [sp,
+; CHECK-NEXT: sub sp, sp, #160
+; CHECK-NEXT: sub sp, sp, #244, lsl #12
+; CHECK-NEXT: .Ltmp
+; CHECK-NEXT: .cfi_def_cfa sp, 1000080
+; CHECK-NEXT: .Ltmp
+; CHECK-NEXT: .cfi_offset x30, -8
+; CHECK-NEXT: .Ltmp
+; CHECK-NEXT: .cfi_offset x29, -16
+; [...]
+; CHECK: .cfi_offset x19, -80
+
+; CHECK: bl use_addr
+ call void @use_addr(i8* %var)
+
+ store [8 x i64] %val, [8 x i64]* @bigspace
+ ret void
+; CHECK: ret
+; CHECK: .cfi_endproc
+}
+
+; CHECK-WITH-FP: test_frame:
+
+; CHECK-WITH-FP: sub sp, sp, #[[SP_INIT_ADJ:[0-9]+]]
+; CHECK-WITH-FP-NEXT: .Ltmp
+; CHECK-WITH-FP-NEXT: .cfi_def_cfa sp, [[SP_INIT_ADJ]]
+
+; CHECK-WITH-FP: stp x29, x30, [sp, [[OFFSET:#[0-9]+]]]
+; CHECK-WITH-FP-NEXT: add x29, sp, [[OFFSET]]
+; CHECK-WITH-FP-NEXT: .Ltmp
+; CHECK-WITH-FP-NEXT: .cfi_def_cfa x29, 16
+
+ ; We shouldn't emit any kind of update for the second stack adjustment if the
+ ; FP is in use.
+; CHECK-WITH-FP-NOT: .cfi_def_cfa_offset
+
+; CHECK-WITH-FP: bl use_addr
diff --git a/test/DebugInfo/AArch64/eh_frame.ll b/test/DebugInfo/AArch64/eh_frame.ll
new file mode 100644
index 0000000000..13436596a5
--- /dev/null
+++ b/test/DebugInfo/AArch64/eh_frame.ll
@@ -0,0 +1,51 @@
+; RUN: llc -verify-machineinstrs -march=aarch64 %s -filetype=obj -o %t
+; RUN: llvm-objdump -s %t | FileCheck %s
+@var = global i32 0
+
+declare void @bar()
+
+define i64 @check_largest_class(i32 %in) {
+ %res = load i32* @var
+ call void @bar()
+ %ext = zext i32 %res to i64
+ ret i64 %ext
+}
+
+; The really key points we're checking here are:
+; * Return register is x30.
+; * Pointer format is 0x1b (GNU doesn't appear to understand others).
+
+; The rest is largely incidental, but not expected to change regularly.
+
+; Output is:
+
+; CHECK: Contents of section .eh_frame:
+; CHECK-NEXT: 0000 10000000 00000000 017a5200 017c1e01 .........zR..|..
+; CHECK-NEXT: 0010 1b0c1f00 18000000 18000000 00000000 ................
+
+
+; Won't check the rest, it's rather incidental.
+; 0020 24000000 00440c1f 10449e02 93040000 $....D...D......
+
+
+; The first CIE:
+; -------------------
+; 10000000: length of first CIE = 0x10
+; 00000000: This is a CIE
+; 01: version = 0x1
+; 7a 52 00: augmentation string "zR" -- pointer format is specified
+; 01: code alignment factor 1
+; 7c: data alignment factor -4
+; 1e: return address register 30 (== x30).
+; 01: 1 byte of augmentation
+; 1b: pointer format 1b: DW_EH_PE_pcrel | DW_EH_PE_sdata4
+; 0c 1f 00: initial instructions: "DW_CFA_def_cfa x31 ofs 0" in this case
+
+; Next the FDE:
+; -------------
+; 18000000: FDE length 0x18
+; 18000000: Uses CIE 0x18 backwards (only coincidentally same as above)
+; 00000000: PC begin for this FDE is at 00000000 (relocation is applied here)
+; 24000000: FDE applies up to PC begin+0x24
+; 00: Augmentation string length 0 for this FDE
+; Rest: call frame instructions
diff --git a/test/DebugInfo/AArch64/eh_frame_personality.ll b/test/DebugInfo/AArch64/eh_frame_personality.ll
new file mode 100644
index 0000000000..ab06d211fd
--- /dev/null
+++ b/test/DebugInfo/AArch64/eh_frame_personality.ll
@@ -0,0 +1,46 @@
+; RUN: llc -verify-machineinstrs -march=aarch64 %s -filetype=obj -o %t
+; RUN: llvm-objdump -s %t | FileCheck %s
+
+declare i32 @__gxx_personality_v0(...)
+
+declare void @bar()
+
+define i64 @foo(i64 %lhs, i64 %rhs) {
+ invoke void @bar() to label %end unwind label %clean
+end:
+ ret i64 0
+
+clean:
+ %tst = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) cleanup
+ ret i64 42
+}
+
+; CHECK: Contents of section .eh_frame:
+; CHECK: 0000 1c000000 00000000 017a504c 5200017c .........zPLR..|
+; CHECK: 0010 1e0b0000 00000000 00000000 1b0c1f00 ................
+
+; Don't really care about the rest:
+
+; 0020 1c000000 24000000 00000000 24000000 ....$.......$...
+; 0030 08000000 00000000 00440c1f 10449e02 .........D...D..
+
+; The key test here is that the personality routine is sanely encoded (under the
+; small memory model it must be an 8-byte value for full generality: code+data <
+; 4GB, but you might need both +4GB and -4GB depending on where things end
+; up). However, for completeness:
+
+; First CIE:
+; ----------
+; 1c000000: Length = 0x1c
+; 00000000: This is a CIE
+; 01: Version 1
+; 7a 50 4c 52 00: Augmentation string "zPLR" (personality routine, language-specific data, pointer format)
+; 01: Code alignment factor 1
+; 7c: Data alignment factor: -4
+; 1e: Return address in x30
+; 0b: Augmentation data 0xb bytes (this is key!)
+; 00: Personality encoding is DW_EH_PE_absptr
+; 00 00 00 00 00 00 00 00: First part of aug (personality routine). Relocated, obviously
+; 00: Second part of aug (language-specific data): absolute pointer format used
+; 1b: pointer format: pc-relative signed 4-byte. Just like GNU.
+; 0c 1f 00: Initial instructions ("DW_CFA_def_cfa x31 ofs 0" in this case)
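
For the "zPLR" CIE, the augmentation-length byte the comments flag as key can be reproduced with a short sketch (illustrative only, not part of the patch; the constant names are ours):

    PERSONALITY_ENC = 0x00          # DW_EH_PE_absptr: full 8-byte absolute pointer
    PERSONALITY_PTR = bytes(8)      # filled in by a relocation against __gxx_personality_v0
    LSDA_ENC        = 0x00          # language-specific data uses absolute pointers
    FDE_PTR_ENC     = 0x1b          # DW_EH_PE_pcrel | DW_EH_PE_sdata4, as GNU tools expect

    # 'z' says a length byte is present; P, L and R contribute their fields in order.
    aug_data = bytes([PERSONALITY_ENC]) + PERSONALITY_PTR + bytes([LSDA_ENC, FDE_PTR_ENC])
    assert len(aug_data) == 0x0B    # matches the augmentation length byte in the dump

The 1-byte personality encoding, the 8-byte (relocated) personality pointer, the LSDA encoding and the FDE pointer encoding add up to the 0xb bytes recorded in the dump.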
diff --git a/test/DebugInfo/AArch64/lit.local.cfg b/test/DebugInfo/AArch64/lit.local.cfg
new file mode 100644
index 0000000000..c5ce2411ed
--- /dev/null
+++ b/test/DebugInfo/AArch64/lit.local.cfg
@@ -0,0 +1,6 @@
+config.suffixes = ['.ll', '.c', '.cpp']
+
+targets = set(config.root.targets_to_build.split())
+if 'AArch64' not in targets:
+ config.unsupported = True
+
diff --git a/test/DebugInfo/AArch64/variable-loc.ll b/test/DebugInfo/AArch64/variable-loc.ll
new file mode 100644
index 0000000000..32772046ef
--- /dev/null
+++ b/test/DebugInfo/AArch64/variable-loc.ll
@@ -0,0 +1,87 @@
+; RUN: llc -march=aarch64 -disable-fp-elim < %s | FileCheck %s
+
+; This is a regression test making sure the location of variables is correct in
+; debugging information, even if they're addressed via the frame pointer.
+
+ ; First make sure main_arr is where we expect it: sp + 12 == x29 - 420:
+; CHECK: main:
+; CHECK: sub sp, sp, #448
+; CHECK: stp x29, x30, [sp, #432]
+; CHECK: add x29, sp, #432
+; CHECK: add {{x[0-9]+}}, sp, #12
+
+ ; Now check the debugging information reflects this:
+; CHECK: DW_TAG_variable
+; CHECK-NEXT: .word .Linfo_string7
+
+ ; Rather hard-coded, but 145 => DW_OP_fbreg and the .ascii is LEB128 encoded -420.
+; CHECK: DW_AT_location
+; CHECK-NEXT: .byte 145
+; CHECK-NEXT: .ascii "\334|"
+
+; CHECK: .Linfo_string7:
+; CHECK-NEXT: main_arr
+
+
+target datalayout = "e-p:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-i128:128:128-f32:32:32-f64:64:64-f128:128:128-n32:64-S128"
+target triple = "aarch64-none-linux-gnu"
+
+@.str = private unnamed_addr constant [13 x i8] c"Total is %d\0A\00", align 1
+
+declare void @populate_array(i32*, i32) nounwind
+
+declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
+
+declare i32 @sum_array(i32*, i32) nounwind
+
+define i32 @main() nounwind {
+entry:
+ %retval = alloca i32, align 4
+ %main_arr = alloca [100 x i32], align 4
+ %val = alloca i32, align 4
+ store i32 0, i32* %retval
+ call void @llvm.dbg.declare(metadata !{[100 x i32]* %main_arr}, metadata !17), !dbg !22
+ call void @llvm.dbg.declare(metadata !{i32* %val}, metadata !23), !dbg !24
+ %arraydecay = getelementptr inbounds [100 x i32]* %main_arr, i32 0, i32 0, !dbg !25
+ call void @populate_array(i32* %arraydecay, i32 100), !dbg !25
+ %arraydecay1 = getelementptr inbounds [100 x i32]* %main_arr, i32 0, i32 0, !dbg !26
+ %call = call i32 @sum_array(i32* %arraydecay1, i32 100), !dbg !26
+ store i32 %call, i32* %val, align 4, !dbg !26
+ %0 = load i32* %val, align 4, !dbg !27
+ %call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([13 x i8]* @.str, i32 0, i32 0), i32 %0), !dbg !27
+ ret i32 0, !dbg !28
+}
+
+declare i32 @printf(i8*, ...)
+
+!llvm.dbg.cu = !{!0}
+
+!0 = metadata !{i32 786449, i32 0, i32 12, metadata !"simple.c", metadata !"/home/timnor01/a64-trunk/build", metadata !"clang version 3.2 ", i1 true, i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !1} ; [ DW_TAG_compile_unit ] [/home/timnor01/a64-trunk/build/simple.c] [DW_LANG_C99]
+!1 = metadata !{metadata !2}
+!2 = metadata !{i32 0}
+!3 = metadata !{metadata !4}
+!4 = metadata !{metadata !5, metadata !11, metadata !14}
+!5 = metadata !{i32 786478, i32 0, metadata !6, metadata !"populate_array", metadata !"populate_array", metadata !"", metadata !6, i32 4, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (i32*, i32)* @populate_array, null, null, metadata !1, i32 4} ; [ DW_TAG_subprogram ] [line 4] [def] [populate_array]
+!6 = metadata !{i32 786473, metadata !"simple.c", metadata !"/home/timnor01/a64-trunk/build", null} ; [ DW_TAG_file_type ]
+!7 = metadata !{i32 786453, i32 0, metadata !"", i32 0, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !8, i32 0, i32 0} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!8 = metadata !{null, metadata !9, metadata !10}
+!9 = metadata !{i32 786447, null, metadata !"", null, i32 0, i64 64, i64 64, i64 0, i32 0, metadata !10} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from int]
+!10 = metadata !{i32 786468, null, metadata !"int", null, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!11 = metadata !{i32 786478, i32 0, metadata !6, metadata !"sum_array", metadata !"sum_array", metadata !"", metadata !6, i32 9, metadata !12, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i32*, i32)* @sum_array, null, null, metadata !1, i32 9} ; [ DW_TAG_subprogram ] [line 9] [def] [sum_array]
+!12 = metadata !{i32 786453, i32 0, metadata !"", i32 0, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !13, i32 0, i32 0} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!13 = metadata !{metadata !10, metadata !9, metadata !10}
+!14 = metadata !{i32 786478, i32 0, metadata !6, metadata !"main", metadata !"main", metadata !"", metadata !6, i32 18, metadata !15, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 ()* @main, null, null, metadata !1, i32 18} ; [ DW_TAG_subprogram ] [line 18] [def] [main]
+!15 = metadata !{i32 786453, i32 0, metadata !"", i32 0, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !16, i32 0, i32 0} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!16 = metadata !{metadata !10}
+!17 = metadata !{i32 786688, metadata !18, metadata !"main_arr", metadata !6, i32 19, metadata !19, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [main_arr] [line 19]
+!18 = metadata !{i32 786443, metadata !14, i32 18, i32 16, metadata !6, i32 4} ; [ DW_TAG_lexical_block ] [/home/timnor01/a64-trunk/build/simple.c]
+!19 = metadata !{i32 786433, null, metadata !"", null, i32 0, i64 3200, i64 32, i32 0, i32 0, metadata !10, metadata !20, i32 0, i32 0} ; [ DW_TAG_array_type ] [line 0, size 3200, align 32, offset 0] [from int]
+!20 = metadata !{metadata !21}
+!21 = metadata !{i32 786465, i64 0, i64 99} ; [ DW_TAG_subrange_type ] [0, 99]
+!22 = metadata !{i32 19, i32 7, metadata !18, null}
+!23 = metadata !{i32 786688, metadata !18, metadata !"val", metadata !6, i32 20, metadata !10, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [val] [line 20]
+!24 = metadata !{i32 20, i32 7, metadata !18, null}
+!25 = metadata !{i32 22, i32 3, metadata !18, null}
+!26 = metadata !{i32 23, i32 9, metadata !18, null}
+!27 = metadata !{i32 24, i32 3, metadata !18, null}
+!28 = metadata !{i32 26, i32 3, metadata !18, null}
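
The hard-coded DW_AT_location bytes that variable-loc.ll expects above -- DW_OP_fbreg (145) followed by "\334|" -- can be reproduced with a small SLEB128 encoder. This is an illustrative sketch only (not part of the patch; the function name is ours):

    def sleb128_encode(value):
        """Encode a signed integer as SLEB128 bytes."""
        out = bytearray()
        while True:
            byte = value & 0x7F
            value >>= 7
            done = (value == 0 and not byte & 0x40) or (value == -1 and byte & 0x40)
            if not done:
                byte |= 0x80
            out.append(byte)
            if done:
                return bytes(out)

    DW_OP_fbreg = 0x91                                  # 145, as the CHECK line expects
    assert sleb128_encode(-420) == b"\xdc\x7c"          # "\334|" in .ascii notation
    print([DW_OP_fbreg] + list(sleb128_encode(-420)))   # [145, 220, 124]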
diff --git a/test/MC/AArch64/basic-a64-diagnostics.s b/test/MC/AArch64/basic-a64-diagnostics.s
new file mode 100644
index 0000000000..eb13aa60b6
--- /dev/null
+++ b/test/MC/AArch64/basic-a64-diagnostics.s
@@ -0,0 +1,3709 @@
+// RUN: not llvm-mc -triple=aarch64 < %s 2> %t
+// RUN: FileCheck --check-prefix=CHECK-ERROR < %t %s
+
+//------------------------------------------------------------------------------
+// Add/sub (extended register)
+//------------------------------------------------------------------------------
+
+ // Mismatched final register and extend
+ add x2, x3, x5, sxtb
+ add x2, x4, w2, uxtx
+ add w5, w7, x9, sxtx
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: add x2, x3, x5, sxtb
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: add x2, x4, w2, uxtx
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: add w5, w7, x9, sxtx
+// CHECK-ERROR: ^
+
+ // Out of range extends
+ add x9, x10, w11, uxtb #-1
+ add x3, x5, w7, uxtb #5
+ sub x9, x15, x2, uxth #5
+// CHECK-ERROR: error: expected integer shift amount
+// CHECK-ERROR: add x9, x10, w11, uxtb #-1
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: add x3, x5, w7, uxtb #5
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: sub x9, x15, x2, uxth #5
+// CHECK-ERROR: ^
+
+ // Wrong registers on normal variants
+ add xzr, x3, x5, uxtx
+ sub x3, xzr, w9, sxth #1
+ add x1, x2, sp, uxtx
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: add xzr, x3, x5, uxtx
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: sub x3, xzr, w9, sxth #1
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: add x1, x2, sp, uxtx
+// CHECK-ERROR: ^
+
+ // Wrong registers on flag-setting variants
+ adds sp, x3, w2, uxtb
+ adds x3, xzr, x9, uxtx
+ subs x2, x1, sp, uxtx
+ adds x2, x1, sp, uxtb #2
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: adds sp, x3, w2, uxtb
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: adds x3, xzr, x9, uxtx
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: subs x2, x1, sp, uxtx
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: adds x2, x1, sp, uxtb #2
+// CHECK-ERROR: ^
+
+ // Amount not optional if lsl valid and used
+ add sp, x5, x7, lsl
+// CHECK-ERROR: error: expected #imm after shift specifier
+// CHECK-ERROR: add sp, x5, x7, lsl
+// CHECK-ERROR: ^
+
+//------------------------------------------------------------------------------
+// Add/sub (immediate)
+//------------------------------------------------------------------------------
+
+// Out of range immediates: < 0 or more than 12 bits
+ add w4, w5, #-1
+ add w5, w6, #0x1000
+ add w4, w5, #-1, lsl #12
+ add w5, w6, #0x1000, lsl #12
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: add w4, w5, #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: add w5, w6, #0x1000
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: add w4, w5, #-1, lsl #12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: add w5, w6, #0x1000, lsl #12
+// CHECK-ERROR-NEXT: ^
+
+// Only lsl #0 and lsl #12 are allowed
+ add w2, w3, #0x1, lsl #1
+ add w5, w17, #0xfff, lsl #13
+ add w17, w20, #0x1000, lsl #12
+ sub xsp, x34, #0x100, lsl #-1
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: add w2, w3, #0x1, lsl #1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: add w5, w17, #0xfff, lsl #13
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: add w17, w20, #0x1000, lsl #12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: only 'lsl #+N' valid after immediate
+// CHECK-ERROR-NEXT: sub xsp, x34, #0x100, lsl #-1
+// CHECK-ERROR-NEXT: ^
+
+// Incorrect registers (w31 doesn't exist at all, and 31 decodes to sp for these).
+ add w31, w20, #1234
+ add wzr, w20, #0x123
+ add w20, wzr, #0x321
+ add wzr, wzr, #0xfff
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: add w31, w20, #1234
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: add wzr, w20, #0x123
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: add w20, wzr, #0x321
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: add wzr, wzr, #0xfff
+// CHECK-ERROR-NEXT: ^
+
+// Mixed register classes
+ add xsp, w2, #123
+ sub w2, x30, #32
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: add xsp, w2, #123
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sub w2, x30, #32
+// CHECK-ERROR-NEXT: ^
+
+// Out of range immediate
+ adds w0, w5, #0x10000
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: adds w0, w5, #0x10000
+// CHECK-ERROR-NEXT: ^
+
+// Wn|WSP should be in second place
+ adds w4, wzr, #0x123
+// ...but for the destination, register 31 is wzr (so wsp is rejected)
+ subs wsp, w5, #123
+ subs x5, xzr, #0x456, lsl #12
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: adds w4, wzr, #0x123
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: subs wsp, w5, #123
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: subs x5, xzr, #0x456, lsl #12
+// CHECK-ERROR-NEXT: ^
+
+ // MOV alias should not accept any fiddling
+ mov x2, xsp, #123
+ mov wsp, w27, #0xfff, lsl #12
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: mov x2, xsp, #123
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: mov wsp, w27, #0xfff, lsl #12
+// CHECK-ERROR-NEXT: ^
+
+ // A relocation should be provided for symbols
+ add x3, x9, #variable
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: add x3, x9, #variable
+// CHECK-ERROR-NEXT: ^
+
+
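+// The add/sub (immediate) cases above all probe the same encoding rule: a
+// 12-bit unsigned immediate that may only be shifted left by 0 or 12. A tiny
+// Python sketch of that predicate (illustrative only, not part of the test;
+// the function name is ours):
+//
+//   def valid_addsub_imm(imm, shift=0):
+//       """Return True if (imm, lsl #shift) is encodable in an ADD/SUB (immediate)."""
+//       return shift in (0, 12) and 0 <= imm <= 0xFFF
+//
+//   assert valid_addsub_imm(0xFFF, 12)     # largest encodable form
+//   assert not valid_addsub_imm(-1)        # "add w4, w5, #-1" above is rejected
+//   assert not valid_addsub_imm(0x1000)    # more than 12 bits
+//   assert not valid_addsub_imm(0x1, 1)    # only lsl #0 / lsl #12 are allowed
+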
+//------------------------------------------------------------------------------
+// Add-subtract (shifted register)
+//------------------------------------------------------------------------------
+
+ add wsp, w1, w2, lsr #3
+ add x4, sp, x9, asr #5
+ add x9, x10, x5, ror #3
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: add wsp, w1, w2, lsr #3
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: add x4, sp, x9, asr #5
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: add x9, x10, x5, ror #3
+// CHECK-ERROR-NEXT: ^
+
+ add w1, w2, w3, lsl #-1
+ add w1, w2, w3, lsl #32
+ add w1, w2, w3, lsr #-1
+ add w1, w2, w3, lsr #32
+ add w1, w2, w3, asr #-1
+ add w1, w2, w3, asr #32
+ add x1, x2, x3, lsl #-1
+ add x1, x2, x3, lsl #64
+ add x1, x2, x3, lsr #-1
+ add x1, x2, x3, lsr #64
+ add x1, x2, x3, asr #-1
+ add x1, x2, x3, asr #64
+// CHECK-ERROR: error: expected integer shift amount
+// CHECK-ERROR-NEXT: add w1, w2, w3, lsl #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: add w1, w2, w3, lsl #32
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: add w1, w2, w3, lsr #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: add w1, w2, w3, lsr #32
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: add w1, w2, w3, asr #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: add w1, w2, w3, asr #32
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: add x1, x2, x3, lsl #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: add x1, x2, x3, lsl #64
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: add x1, x2, x3, lsr #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: add x1, x2, x3, lsr #64
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: add x1, x2, x3, asr #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: add x1, x2, x3, asr #64
+// CHECK-ERROR-NEXT: ^
+
+ adds w1, w2, w3, lsl #-1
+ adds w1, w2, w3, lsl #32
+ adds w1, w2, w3, lsr #-1
+ adds w1, w2, w3, lsr #32
+ adds w1, w2, w3, asr #-1
+ adds w1, w2, w3, asr #32
+ adds x1, x2, x3, lsl #-1
+ adds x1, x2, x3, lsl #64
+ adds x1, x2, x3, lsr #-1
+ adds x1, x2, x3, lsr #64
+ adds x1, x2, x3, asr #-1
+ adds x1, x2, x3, asr #64
+// CHECK-ERROR: error: expected integer shift amount
+// CHECK-ERROR-NEXT: adds w1, w2, w3, lsl #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: adds w1, w2, w3, lsl #32
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: adds w1, w2, w3, lsr #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: adds w1, w2, w3, lsr #32
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: adds w1, w2, w3, asr #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: adds w1, w2, w3, asr #32
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: adds x1, x2, x3, lsl #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: adds x1, x2, x3, lsl #64
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: adds x1, x2, x3, lsr #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: adds x1, x2, x3, lsr #64
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: adds x1, x2, x3, asr #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: adds x1, x2, x3, asr #64
+// CHECK-ERROR-NEXT: ^
+
+ sub w1, w2, w3, lsl #-1
+ sub w1, w2, w3, lsl #32
+ sub w1, w2, w3, lsr #-1
+ sub w1, w2, w3, lsr #32
+ sub w1, w2, w3, asr #-1
+ sub w1, w2, w3, asr #32
+ sub x1, x2, x3, lsl #-1
+ sub x1, x2, x3, lsl #64
+ sub x1, x2, x3, lsr #-1
+ sub x1, x2, x3, lsr #64
+ sub x1, x2, x3, asr #-1
+ sub x1, x2, x3, asr #64
+// CHECK-ERROR: error: expected integer shift amount
+// CHECK-ERROR-NEXT: sub w1, w2, w3, lsl #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sub w1, w2, w3, lsl #32
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: sub w1, w2, w3, lsr #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sub w1, w2, w3, lsr #32
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: sub w1, w2, w3, asr #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sub w1, w2, w3, asr #32
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: sub x1, x2, x3, lsl #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sub x1, x2, x3, lsl #64
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: sub x1, x2, x3, lsr #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sub x1, x2, x3, lsr #64
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: sub x1, x2, x3, asr #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sub x1, x2, x3, asr #64
+// CHECK-ERROR-NEXT: ^
+
+ subs w1, w2, w3, lsl #-1
+ subs w1, w2, w3, lsl #32
+ subs w1, w2, w3, lsr #-1
+ subs w1, w2, w3, lsr #32
+ subs w1, w2, w3, asr #-1
+ subs w1, w2, w3, asr #32
+ subs x1, x2, x3, lsl #-1
+ subs x1, x2, x3, lsl #64
+ subs x1, x2, x3, lsr #-1
+ subs x1, x2, x3, lsr #64
+ subs x1, x2, x3, asr #-1
+ subs x1, x2, x3, asr #64
+// CHECK-ERROR: error: expected integer shift amount
+// CHECK-ERROR-NEXT: subs w1, w2, w3, lsl #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: subs w1, w2, w3, lsl #32
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: subs w1, w2, w3, lsr #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: subs w1, w2, w3, lsr #32
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: subs w1, w2, w3, asr #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: subs w1, w2, w3, asr #32
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: subs x1, x2, x3, lsl #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: subs x1, x2, x3, lsl #64
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: subs x1, x2, x3, lsr #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: subs x1, x2, x3, lsr #64
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: subs x1, x2, x3, asr #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: subs x1, x2, x3, asr #64
+// CHECK-ERROR-NEXT: ^
+
+ cmn w9, w10, lsl #-1
+ cmn w9, w10, lsl #32
+ cmn w11, w12, lsr #-1
+ cmn w11, w12, lsr #32
+ cmn w19, wzr, asr #-1
+ cmn wzr, wzr, asr #32
+ cmn x9, x10, lsl #-1
+ cmn x9, x10, lsl #64
+ cmn x11, x12, lsr #-1
+ cmn x11, x12, lsr #64
+ cmn x19, xzr, asr #-1
+ cmn xzr, xzr, asr #64
+// CHECK-ERROR: error: expected integer shift amount
+// CHECK-ERROR-NEXT: cmn w9, w10, lsl #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: cmn w9, w10, lsl #32
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: cmn w11, w12, lsr #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: cmn w11, w12, lsr #32
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: cmn w19, wzr, asr #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: cmn wzr, wzr, asr #32
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: cmn x9, x10, lsl #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: cmn x9, x10, lsl #64
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: cmn x11, x12, lsr #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: cmn x11, x12, lsr #64
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: cmn x19, xzr, asr #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: cmn xzr, xzr, asr #64
+// CHECK-ERROR-NEXT: ^
+
+ cmp w9, w10, lsl #-1
+ cmp w9, w10, lsl #32
+ cmp w11, w12, lsr #-1
+ cmp w11, w12, lsr #32
+ cmp w19, wzr, asr #-1
+ cmp wzr, wzr, asr #32
+ cmp x9, x10, lsl #-1
+ cmp x9, x10, lsl #64
+ cmp x11, x12, lsr #-1
+ cmp x11, x12, lsr #64
+ cmp x19, xzr, asr #-1
+ cmp xzr, xzr, asr #64
+// CHECK-ERROR: error: expected integer shift amount
+// CHECK-ERROR-NEXT: cmp w9, w10, lsl #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: cmp w9, w10, lsl #32
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: cmp w11, w12, lsr #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: cmp w11, w12, lsr #32
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: cmp w19, wzr, asr #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: cmp wzr, wzr, asr #32
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: cmp x9, x10, lsl #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: cmp x9, x10, lsl #64
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: cmp x11, x12, lsr #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: cmp x11, x12, lsr #64
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: cmp x19, xzr, asr #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: cmp xzr, xzr, asr #64
+// CHECK-ERROR-NEXT: ^
+
+ neg w9, w10, lsl #-1
+ neg w9, w10, lsl #32
+ neg w11, w12, lsr #-1
+ neg w11, w12, lsr #32
+ neg w19, wzr, asr #-1
+ neg wzr, wzr, asr #32
+ neg x9, x10, lsl #-1
+ neg x9, x10, lsl #64
+ neg x11, x12, lsr #-1
+ neg x11, x12, lsr #64
+ neg x19, xzr, asr #-1
+ neg xzr, xzr, asr #64
+// CHECK-ERROR: error: expected integer shift amount
+// CHECK-ERROR-NEXT: neg w9, w10, lsl #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: neg w9, w10, lsl #32
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: neg w11, w12, lsr #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: neg w11, w12, lsr #32
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: neg w19, wzr, asr #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: neg wzr, wzr, asr #32
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: neg x9, x10, lsl #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: neg x9, x10, lsl #64
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: neg x11, x12, lsr #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: neg x11, x12, lsr #64
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: neg x19, xzr, asr #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: neg xzr, xzr, asr #64
+// CHECK-ERROR-NEXT: ^
+
+ negs w9, w10, lsl #-1
+ negs w9, w10, lsl #32
+ negs w11, w12, lsr #-1
+ negs w11, w12, lsr #32
+ negs w19, wzr, asr #-1
+ negs wzr, wzr, asr #32
+ negs x9, x10, lsl #-1
+ negs x9, x10, lsl #64
+ negs x11, x12, lsr #-1
+ negs x11, x12, lsr #64
+ negs x19, xzr, asr #-1
+ negs xzr, xzr, asr #64
+// CHECK-ERROR: error: expected integer shift amount
+// CHECK-ERROR-NEXT: negs w9, w10, lsl #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: negs w9, w10, lsl #32
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: negs w11, w12, lsr #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: negs w11, w12, lsr #32
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: negs w19, wzr, asr #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: negs wzr, wzr, asr #32
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: negs x9, x10, lsl #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: negs x9, x10, lsl #64
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: negs x11, x12, lsr #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: negs x11, x12, lsr #64
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: negs x19, xzr, asr #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: negs xzr, xzr, asr #64
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Add-subtract (with carry)
+//------------------------------------------------------------------------------
+
+ adc wsp, w3, w5
+ adc w1, wsp, w2
+ adc w0, w10, wsp
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: adc wsp, w3, w5
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: adc w1, wsp, w2
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: adc w0, w10, wsp
+// CHECK-ERROR-NEXT: ^
+
+ adc sp, x3, x5
+ adc x1, sp, x2
+ adc x0, x10, sp
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: adc sp, x3, x5
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: adc x1, sp, x2
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: adc x0, x10, sp
+// CHECK-ERROR-NEXT: ^
+
+ adcs wsp, w3, w5
+ adcs w1, wsp, w2
+ adcs w0, w10, wsp
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: adcs wsp, w3, w5
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: adcs w1, wsp, w2
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: adcs w0, w10, wsp
+// CHECK-ERROR-NEXT: ^
+
+ adcs sp, x3, x5
+ adcs x1, sp, x2
+ adcs x0, x10, sp
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: adcs sp, x3, x5
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: adcs x1, sp, x2
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: adcs x0, x10, sp
+// CHECK-ERROR-NEXT: ^
+
+ sbc wsp, w3, w5
+ sbc w1, wsp, w2
+ sbc w0, w10, wsp
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbc wsp, w3, w5
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbc w1, wsp, w2
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbc w0, w10, wsp
+// CHECK-ERROR-NEXT: ^
+
+ sbc sp, x3, x5
+ sbc x1, sp, x2
+ sbc x0, x10, sp
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbc sp, x3, x5
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbc x1, sp, x2
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbc x0, x10, sp
+// CHECK-ERROR-NEXT: ^
+
+ sbcs wsp, w3, w5
+ sbcs w1, wsp, w2
+ sbcs w0, w10, wsp
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbcs wsp, w3, w5
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbcs w1, wsp, w2
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbcs w0, w10, wsp
+// CHECK-ERROR-NEXT: ^
+
+ sbcs sp, x3, x5
+ sbcs x1, sp, x2
+ sbcs x0, x10, sp
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbcs sp, x3, x5
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbcs x1, sp, x2
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbcs x0, x10, sp
+// CHECK-ERROR-NEXT: ^
+
+ ngc wsp, w3
+ ngc w9, wsp
+ ngc sp, x9
+ ngc x2, sp
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ngc wsp, w3
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ngc w9, wsp
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ngc sp, x9
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ngc x2, sp
+// CHECK-ERROR-NEXT: ^
+
+ ngcs wsp, w3
+ ngcs w9, wsp
+ ngcs sp, x9
+ ngcs x2, sp
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ngcs wsp, w3
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ngcs w9, wsp
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ngcs sp, x9
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ngcs x2, sp
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Bitfield
+//------------------------------------------------------------------------------
+
+ sbfm x3, w13, #0, #0
+ sbfm w12, x9, #0, #0
+ sbfm sp, x3, #3, #5
+ sbfm w3, wsp, #1, #9
+ sbfm x9, x5, #-1, #0
+ sbfm x9, x5, #0, #-1
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbfm x3, w13, #0, #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbfm w12, x9, #0, #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbfm sp, x3, #3, #5
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbfm w3, wsp, #1, #9
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbfm x9, x5, #-1, #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbfm x9, x5, #0, #-1
+// CHECK-ERROR-NEXT: ^
+
+ sbfm w3, w5, #32, #1
+ sbfm w7, w11, #19, #32
+ sbfm x29, x30, #64, #0
+ sbfm x10, x20, #63, #64
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbfm w3, w5, #32, #1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbfm w7, w11, #19, #32
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbfm x29, x30, #64, #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbfm x10, x20, #63, #64
+// CHECK-ERROR-NEXT: ^
+
+ ubfm w3, w5, #32, #1
+ ubfm w7, w11, #19, #32
+ ubfm x29, x30, #64, #0
+ ubfm x10, x20, #63, #64
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ubfm w3, w5, #32, #1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ubfm w7, w11, #19, #32
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ubfm x29, x30, #64, #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ubfm x10, x20, #63, #64
+// CHECK-ERROR-NEXT: ^
+
+ bfm w3, w5, #32, #1
+ bfm w7, w11, #19, #32
+ bfm x29, x30, #64, #0
+ bfm x10, x20, #63, #64
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: bfm w3, w5, #32, #1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: bfm w7, w11, #19, #32
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: bfm x29, x30, #64, #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: bfm x10, x20, #63, #64
+// CHECK-ERROR-NEXT: ^
+
+ sxtb x3, x2
+ sxth xzr, xzr
+ sxtw x3, x5
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sxtb x3, x2
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sxth xzr, xzr
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sxtw x3, x5
+// CHECK-ERROR-NEXT: ^
+
+ uxtb x3, x12
+ uxth x5, x9
+ uxtw x3, x5
+ uxtb x2, sp
+ uxtb sp, xzr
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: uxtb x3, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: uxth x5, x9
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid instruction
+// CHECK-ERROR-NEXT: uxtw x3, x5
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: uxtb x2, sp
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: uxtb sp, xzr
+// CHECK-ERROR-NEXT: ^
+
+ asr x3, w2, #1
+ asr sp, x2, #1
+ asr x25, x26, #-1
+ asr x25, x26, #64
+ asr w9, w8, #32
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: asr x3, w2, #1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: asr sp, x2, #1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: asr x25, x26, #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: asr x25, x26, #64
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: asr w9, w8, #32
+// CHECK-ERROR-NEXT: ^
+
+ sbfiz w1, w2, #0, #0
+ sbfiz wsp, w9, #0, #1
+ sbfiz w9, w10, #32, #1
+ sbfiz w11, w12, #32, #0
+ sbfiz w9, w10, #10, #23
+ sbfiz x3, x5, #12, #53
+ sbfiz sp, x3, #5, #6
+ sbfiz w3, wsp, #7, #8
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbfiz w1, w2, #0, #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbfiz wsp, w9, #0, #1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbfiz w9, w10, #32, #1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbfiz w11, w12, #32, #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: requested insert overflows register
+// CHECK-ERROR-NEXT: sbfiz w9, w10, #10, #23
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: requested insert overflows register
+// CHECK-ERROR-NEXT: sbfiz x3, x5, #12, #53
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbfiz sp, x3, #5, #6
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbfiz w3, wsp, #7, #8
+// CHECK-ERROR-NEXT: ^
+
+ sbfx w1, w2, #0, #0
+ sbfx wsp, w9, #0, #1
+ sbfx w9, w10, #32, #1
+ sbfx w11, w12, #32, #0
+ sbfx w9, w10, #10, #23
+ sbfx x3, x5, #12, #53
+ sbfx sp, x3, #5, #6
+ sbfx w3, wsp, #7, #8
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbfx w1, w2, #0, #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbfx wsp, w9, #0, #1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbfx w9, w10, #32, #1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbfx w11, w12, #32, #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: requested extract overflows register
+// CHECK-ERROR-NEXT: sbfx w9, w10, #10, #23
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: requested extract overflows register
+// CHECK-ERROR-NEXT: sbfx x3, x5, #12, #53
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbfx sp, x3, #5, #6
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sbfx w3, wsp, #7, #8
+// CHECK-ERROR-NEXT: ^
+
+ bfi w1, w2, #0, #0
+ bfi wsp, w9, #0, #1
+ bfi w9, w10, #32, #1
+ bfi w11, w12, #32, #0
+ bfi w9, w10, #10, #23
+ bfi x3, x5, #12, #53
+ bfi sp, x3, #5, #6
+ bfi w3, wsp, #7, #8
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: bfi w1, w2, #0, #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: bfi wsp, w9, #0, #1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: bfi w9, w10, #32, #1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: bfi w11, w12, #32, #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: requested insert overflows register
+// CHECK-ERROR-NEXT: bfi w9, w10, #10, #23
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: requested insert overflows register
+// CHECK-ERROR-NEXT: bfi x3, x5, #12, #53
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: bfi sp, x3, #5, #6
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: bfi w3, wsp, #7, #8
+// CHECK-ERROR-NEXT: ^
+
+ bfxil w1, w2, #0, #0
+ bfxil wsp, w9, #0, #1
+ bfxil w9, w10, #32, #1
+ bfxil w11, w12, #32, #0
+ bfxil w9, w10, #10, #23
+ bfxil x3, x5, #12, #53
+ bfxil sp, x3, #5, #6
+ bfxil w3, wsp, #7, #8
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: bfxil w1, w2, #0, #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: bfxil wsp, w9, #0, #1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: bfxil w9, w10, #32, #1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: bfxil w11, w12, #32, #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: requested extract overflows register
+// CHECK-ERROR-NEXT: bfxil w9, w10, #10, #23
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: requested extract overflows register
+// CHECK-ERROR-NEXT: bfxil x3, x5, #12, #53
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: bfxil sp, x3, #5, #6
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: bfxil w3, wsp, #7, #8
+// CHECK-ERROR-NEXT: ^
+
+ ubfiz w1, w2, #0, #0
+ ubfiz wsp, w9, #0, #1
+ ubfiz w9, w10, #32, #1
+ ubfiz w11, w12, #32, #0
+ ubfiz w9, w10, #10, #23
+ ubfiz x3, x5, #12, #53
+ ubfiz sp, x3, #5, #6
+ ubfiz w3, wsp, #7, #8
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ubfiz w1, w2, #0, #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ubfiz wsp, w9, #0, #1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ubfiz w9, w10, #32, #1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ubfiz w11, w12, #32, #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: requested insert overflows register
+// CHECK-ERROR-NEXT: ubfiz w9, w10, #10, #23
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: requested insert overflows register
+// CHECK-ERROR-NEXT: ubfiz x3, x5, #12, #53
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ubfiz sp, x3, #5, #6
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ubfiz w3, wsp, #7, #8
+// CHECK-ERROR-NEXT: ^
+
+ ubfx w1, w2, #0, #0
+ ubfx wsp, w9, #0, #1
+ ubfx w9, w10, #32, #1
+ ubfx w11, w12, #32, #0
+ ubfx w9, w10, #10, #23
+ ubfx x3, x5, #12, #53
+ ubfx sp, x3, #5, #6
+ ubfx w3, wsp, #7, #8
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ubfx w1, w2, #0, #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ubfx wsp, w9, #0, #1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ubfx w9, w10, #32, #1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ubfx w11, w12, #32, #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: requested extract overflows register
+// CHECK-ERROR-NEXT: ubfx w9, w10, #10, #23
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: requested extract overflows register
+// CHECK-ERROR-NEXT: ubfx x3, x5, #12, #53
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ubfx sp, x3, #5, #6
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ubfx w3, wsp, #7, #8
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Compare & branch (immediate)
+//------------------------------------------------------------------------------
+
+ cbnz wsp, lbl
+ cbz sp, lbl
+ cbz x3, x5
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: cbnz wsp, lbl
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: cbz sp, lbl
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: cbz x3, x5
+// CHECK-ERROR-NEXT: ^
+
+ cbz w20, #1048576
+ cbnz xzr, #-1048580
+ cbz x29, #1
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: cbz w20, #1048576
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: cbnz xzr, #-1048580
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: cbz x29, #1
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Conditional branch (immediate)
+//------------------------------------------------------------------------------
+
+ b.zf lbl
+// CHECK-ERROR: error: invalid condition code
+// CHECK-ERROR-NEXT: b.zf lbl
+// CHECK-ERROR-NEXT: ^
+
+ b.eq #1048576
+ b.ge #-1048580
+ b.cc #1
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: b.eq #1048576
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: b.ge #-1048580
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: b.cc #1
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Conditional compare (immediate)
+//------------------------------------------------------------------------------
+
+ ccmp wsp, #4, #2, ne
+ ccmp w25, #-1, #15, hs
+ ccmp w3, #32, #0, ge
+ ccmp w19, #5, #-1, lt
+ ccmp w20, #7, #16, hs
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmp wsp, #4, #2, ne
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmp w25, #-1, #15, hs
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmp w3, #32, #0, ge
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmp w19, #5, #-1, lt
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmp w20, #7, #16, hs
+// CHECK-ERROR-NEXT: ^
+
+ ccmp sp, #4, #2, ne
+ ccmp x25, #-1, #15, hs
+ ccmp x3, #32, #0, ge
+ ccmp x19, #5, #-1, lt
+ ccmp x20, #7, #16, hs
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmp sp, #4, #2, ne
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmp x25, #-1, #15, hs
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmp x3, #32, #0, ge
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmp x19, #5, #-1, lt
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmp x20, #7, #16, hs
+// CHECK-ERROR-NEXT: ^
+
+ ccmn wsp, #4, #2, ne
+ ccmn w25, #-1, #15, hs
+ ccmn w3, #32, #0, ge
+ ccmn w19, #5, #-1, lt
+ ccmn w20, #7, #16, hs
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmn wsp, #4, #2, ne
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmn w25, #-1, #15, hs
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmn w3, #32, #0, ge
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmn w19, #5, #-1, lt
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmn w20, #7, #16, hs
+// CHECK-ERROR-NEXT: ^
+
+ ccmn sp, #4, #2, ne
+ ccmn x25, #-1, #15, hs
+ ccmn x3, #32, #0, ge
+ ccmn x19, #5, #-1, lt
+ ccmn x20, #7, #16, hs
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmn sp, #4, #2, ne
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmn x25, #-1, #15, hs
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmn x3, #32, #0, ge
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmn x19, #5, #-1, lt
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmn x20, #7, #16, hs
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Conditional compare (register)
+//------------------------------------------------------------------------------
+
+ ccmp wsp, w4, #2, ne
+ ccmp w3, wsp, #0, ge
+ ccmp w19, w5, #-1, lt
+ ccmp w20, w7, #16, hs
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmp wsp, w4, #2, ne
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmp w3, wsp, #0, ge
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmp w19, w5, #-1, lt
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmp w20, w7, #16, hs
+// CHECK-ERROR-NEXT: ^
+
+ ccmp sp, x4, #2, ne
+ ccmp x25, sp, #15, hs
+ ccmp x19, x5, #-1, lt
+ ccmp x20, x7, #16, hs
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmp sp, x4, #2, ne
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmp x25, sp, #15, hs
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmp x19, x5, #-1, lt
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmp x20, x7, #16, hs
+// CHECK-ERROR-NEXT: ^
+
+ ccmn wsp, w4, #2, ne
+ ccmn w25, wsp, #15, hs
+ ccmn w19, w5, #-1, lt
+ ccmn w20, w7, #16, hs
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmn wsp, w4, #2, ne
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmn w25, wsp, #15, hs
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmn w19, w5, #-1, lt
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmn w20, w7, #16, hs
+// CHECK-ERROR-NEXT: ^
+
+ ccmn sp, x4, #2, ne
+ ccmn x25, sp, #15, hs
+ ccmn x19, x5, #-1, lt
+ ccmn x20, x7, #16, hs
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmn sp, x4, #2, ne
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmn x25, sp, #15, hs
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmn x19, x5, #-1, lt
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ccmn x20, x7, #16, hs
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Conditional select
+//------------------------------------------------------------------------------
+
+ csel w4, wsp, w9, eq
+ csel wsp, w2, w3, ne
+ csel w10, w11, wsp, ge
+ csel w1, w2, w3, #3
+ csel x4, sp, x9, eq
+ csel sp, x2, x3, ne
+ csel x10, x11, sp, ge
+ csel x1, x2, x3, #3
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: csel w4, wsp, w9, eq
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: csel wsp, w2, w3, ne
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: csel w10, w11, wsp, ge
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: csel w1, w2, w3, #3
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: csel x4, sp, x9, eq
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: csel sp, x2, x3, ne
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: csel x10, x11, sp, ge
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: csel x1, x2, x3, #3
+// CHECK-ERROR-NEXT: ^
+
+ csinc w20, w21, wsp, mi
+ csinc sp, x30, x29, eq
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: csinc w20, w21, wsp, mi
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: csinc sp, x30, x29, eq
+// CHECK-ERROR-NEXT: ^
+
+ csinv w20, wsp, wsp, mi
+ csinv sp, x30, x29, le
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: csinv w20, wsp, wsp, mi
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: csinv sp, x30, x29, le
+// CHECK-ERROR-NEXT: ^
+
+ csneg w20, w21, wsp, mi
+ csneg x0, sp, x29, le
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: csneg w20, w21, wsp, mi
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: csneg x0, sp, x29, le
+// CHECK-ERROR-NEXT: ^
+
+ cset wsp, lt
+ csetm sp, ge
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: cset wsp, lt
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: csetm sp, ge
+// CHECK-ERROR-NEXT: ^
+
+ cinc w3, wsp, ne
+ cinc sp, x9, eq
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: cinc w3, wsp, ne
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: cinc sp, x9, eq
+// CHECK-ERROR-NEXT: ^
+
+ cinv w3, wsp, ne
+ cinv sp, x9, eq
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: cinv w3, wsp, ne
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: cinv sp, x9, eq
+// CHECK-ERROR-NEXT: ^
+
+ cneg w3, wsp, ne
+ cneg sp, x9, eq
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: cneg w3, wsp, ne
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: cneg sp, x9, eq
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Data Processing (1 source)
+//------------------------------------------------------------------------------
+ rbit x23, w2
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: rbit x23, w2
+
+ cls sp, x2
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: cls sp, x2
+
+ clz wsp, w3
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: clz wsp, w3
+
+//------------------------------------------------------------------------------
+// Data Processing (2 sources)
+//------------------------------------------------------------------------------
+ udiv x23, w2, x18
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: udiv x23, w2, x18
+
+ lsl sp, x2, x4
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: lsl sp, x2, x4
+
+ asr wsp, w3, w9
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: asr wsp, w3, w9
+
+//------------------------------------------------------------------------------
+// Data Processing (3 sources)
+//------------------------------------------------------------------------------
+
+ madd sp, x3, x9, x10
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: madd sp, x3, x9, x10
+
+//------------------------------------------------------------------------------
+// Exception generation
+//------------------------------------------------------------------------------
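+//// The exception-generation immediates are 16 bits wide (0-65535), and only
+//// dcps1-dcps3 exist.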
+ svc #-1
+ hlt #65536
+ dcps4 #43
+ dcps4
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: svc #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: hlt #65536
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid instruction
+// CHECK-ERROR-NEXT: dcps4 #43
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid instruction
+// CHECK-ERROR-NEXT: dcps4
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Extract (immediate)
+//------------------------------------------------------------------------------
+
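+//// The lsb immediate for extr (and its ror alias) runs from 0 to 31 for w
+//// registers and from 0 to 63 for x registers.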
+ extr w2, w20, w30, #-1
+ extr w9, w19, w20, #32
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: extr w2, w20, w30, #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: extr w9, w19, w20, #32
+// CHECK-ERROR-NEXT: ^
+
+ extr x10, x15, x20, #-1
+ extr x20, x25, x30, #64
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: extr x10, x15, x20, #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: extr x20, x25, x30, #64
+// CHECK-ERROR-NEXT: ^
+
+ ror w9, w10, #32
+ ror x10, x11, #64
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ror w9, w10, #32
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ror x10, x11, #64
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Floating-point compare
+//------------------------------------------------------------------------------
+
+ fcmp s3, d2
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fcmp s3, d2
+// CHECK-ERROR-NEXT: ^
+
+ fcmp s9, #-0.0
+ fcmp d3, #-0.0
+ fcmp s1, #1.0
+ fcmpe s30, #-0.0
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fcmp s9, #-0.0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fcmp d3, #-0.0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fcmp s1, #1.0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fcmpe s30, #-0.0
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Floating-point conditional compare
+//------------------------------------------------------------------------------
+
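+//// The NZCV immediate is a 4-bit field, so only #0-#15 are encodable.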
+ fccmp s19, s5, #-1, lt
+ fccmp s20, s7, #16, hs
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fccmp s19, s5, #-1, lt
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fccmp s20, s7, #16, hs
+// CHECK-ERROR-NEXT: ^
+
+ fccmp d19, d5, #-1, lt
+ fccmp d20, d7, #16, hs
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fccmp d19, d5, #-1, lt
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fccmp d20, d7, #16, hs
+// CHECK-ERROR-NEXT: ^
+
+ fccmpe s19, s5, #-1, lt
+ fccmpe s20, s7, #16, hs
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fccmpe s19, s5, #-1, lt
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fccmpe s20, s7, #16, hs
+// CHECK-ERROR-NEXT: ^
+
+ fccmpe d19, d5, #-1, lt
+ fccmpe d20, d7, #16, hs
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fccmpe d19, d5, #-1, lt
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fccmpe d20, d7, #16, hs
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Floating-point conditional select
+//------------------------------------------------------------------------------
+
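+//// Only 32-bit (s) and 64-bit (d) registers are valid for fcsel here.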
+ fcsel q3, q20, q9, pl
+ fcsel h9, h10, h11, mi
+ fcsel b9, b10, b11, mi
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fcsel q3, q20, q9, pl
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fcsel h9, h10, h11, mi
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fcsel b9, b10, b11, mi
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Floating-point data-processing (1 source)
+//------------------------------------------------------------------------------
+
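+//// fmov never changes precision (that is fcvt's job), and fcvt needs source
+//// and destination of different sizes.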
+ fmov d0, s3
+ fcvt d0, d1
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fmov d0, s3
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fcvt d0, d1
+// CHECK-ERROR-NEXT: ^
+
+
+//------------------------------------------------------------------------------
+// Floating-point data-processing (2 sources)
+//------------------------------------------------------------------------------
+
+ fadd s0, d3, d7
+ fmaxnm d3, s19, d12
+ fnmul d1, d9, s18
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fadd s0, d3, d7
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fmaxnm d3, s19, d12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fnmul d1, d9, s18
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Floating-point data-processing (3 sources)
+//------------------------------------------------------------------------------
+
+ fmadd b3, b4, b5, b6
+ fmsub h1, h2, h3, h4
+ fnmadd q3, q5, q6, q7
+ fnmsub s2, s4, d5, h9
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fmadd b3, b4, b5, b6
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fmsub h1, h2, h3, h4
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fnmadd q3, q5, q6, q7
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fnmsub s2, s4, d5, h9
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Floating-point <-> fixed-point conversion
+//------------------------------------------------------------------------------
+
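+//// #fbits must be in the range 1-32 for w registers and 1-64 for x registers,
+//// and the integer register can never be the stack pointer.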
+ fcvtzs w13, s31, #0
+ fcvtzs w19, s20, #33
+ fcvtzs wsp, s19, #14
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fcvtzs w13, s31, #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fcvtzs w19, s20, #33
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fcvtzs wsp, s19, #14
+// CHECK-ERROR-NEXT: ^
+
+ fcvtzs x13, s31, #0
+ fcvtzs x19, s20, #65
+ fcvtzs sp, s19, #14
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fcvtzs x13, s31, #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fcvtzs x19, s20, #65
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fcvtzs sp, s19, #14
+// CHECK-ERROR-NEXT: ^
+
+ fcvtzu w13, s31, #0
+ fcvtzu w19, s20, #33
+ fcvtzu wsp, s19, #14
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fcvtzu w13, s31, #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fcvtzu w19, s20, #33
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fcvtzu wsp, s19, #14
+// CHECK-ERROR-NEXT: ^
+
+ fcvtzu x13, s31, #0
+ fcvtzu x19, s20, #65
+ fcvtzu sp, s19, #14
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fcvtzu x13, s31, #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fcvtzu x19, s20, #65
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fcvtzu sp, s19, #14
+// CHECK-ERROR-NEXT: ^
+
+ scvtf w13, s31, #0
+ scvtf w19, s20, #33
+ scvtf wsp, s19, #14
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: scvtf w13, s31, #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: scvtf w19, s20, #33
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: scvtf wsp, s19, #14
+// CHECK-ERROR-NEXT: ^
+
+ scvtf x13, s31, #0
+ scvtf x19, s20, #65
+ scvtf sp, s19, #14
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: scvtf x13, s31, #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: scvtf x19, s20, #65
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: scvtf sp, s19, #14
+// CHECK-ERROR-NEXT: ^
+
+ ucvtf w13, s31, #0
+ ucvtf w19, s20, #33
+ ucvtf wsp, s19, #14
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ucvtf w13, s31, #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ucvtf w19, s20, #33
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ucvtf wsp, s19, #14
+// CHECK-ERROR-NEXT: ^
+
+ ucvtf x13, s31, #0
+ ucvtf x19, s20, #65
+ ucvtf sp, s19, #14
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ucvtf x13, s31, #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ucvtf x19, s20, #65
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ucvtf sp, s19, #14
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Floating-point immediate
+//------------------------------------------------------------------------------
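+//// The 8-bit immediate encodes +/-(16..31)/16 x 2^(-3..4): magnitudes from
+//// 0.125 to 31.0 with four fraction bits, which leaves no encoding for 0.0.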
+  ;; Exponent out of range
+ fmov d3, #0.0625
+ fmov s2, #32.0
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fmov d3, #0.0625
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fmov s2, #32.0
+// CHECK-ERROR-NEXT: ^
+
+ ;; Fraction too precise
+ fmov s9, #1.03125
+ fmov s28, #1.96875
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fmov s9, #1.03125
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fmov s28, #1.96875
+// CHECK-ERROR-NEXT: ^
+
+ ;; No particular reason, but a striking omission
+ fmov d0, #0.0
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fmov d0, #0.0
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Floating-point <-> integer conversion
+//------------------------------------------------------------------------------
+
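+//// fmov only addresses lane 1 of a .d vector (lane 0 uses the plain d register
+//// form), the lane index must exist in the named layout, and sp/wsp are not
+//// valid on the integer side.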
+ fmov x3, v0.d[0]
+ fmov v29.1d[1], x2
+ fmov x7, v0.d[2]
+ fcvtns sp, s5
+ scvtf s6, wsp
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fmov x3, v0.d[0]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: lane number incompatible with layout
+// CHECK-ERROR-NEXT: fmov v29.1d[1], x2
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: lane number incompatible with layout
+// CHECK-ERROR-NEXT: fmov x7, v0.d[2]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: fcvtns sp, s5
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: scvtf s6, wsp
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Load-register (literal)
+//------------------------------------------------------------------------------
+
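+//// The literal offset is a signed 19-bit word offset: a multiple of 4 between
+//// -1048576 and 1048572.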
+ ldr sp, some_label
+ ldrsw w3, somewhere
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr sp, some_label
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrsw w3, somewhere
+// CHECK-ERROR-NEXT: ^
+
+ ldrsw x2, #1048576
+ ldr q0, #-1048580
+ ldr x0, #2
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrsw x2, #1048576
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr q0, #-1048580
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr x0, #2
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Load/store exclusive
+//------------------------------------------------------------------------------
+
+ stxrb w2, x3, [x4, #20]
+ stlxrh w10, w11, [w2]
+// CHECK-ERROR: error: expected '#0'
+// CHECK-ERROR-NEXT: stxrb w2, x3, [x4, #20]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stlxrh w10, w11, [w2]
+// CHECK-ERROR-NEXT: ^
+
+ stlxr x20, w21, [sp]
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stlxr x20, w21, [sp]
+// CHECK-ERROR-NEXT: ^
+
+ ldxr sp, [sp]
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldxr sp, [sp]
+// CHECK-ERROR-NEXT: ^
+
+ stxp x1, x2, x3, [x4]
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stxp x1, x2, x3, [x4]
+// CHECK-ERROR-NEXT: ^
+
+ stlxp w5, x1, w4, [x5]
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stlxp w5, x1, w4, [x5]
+// CHECK-ERROR-NEXT: ^
+
+ stlxp w17, w6, x7, [x22]
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stlxp w17, w6, x7, [x22]
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Load/store (unscaled immediate)
+//------------------------------------------------------------------------------
+
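+//// Unscaled offsets are a signed 9-bit immediate (-256 to 255); the
+//// post-indexed, pre-indexed and unprivileged forms further down use the same
+//// range.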
+ ldurb w2, [sp, #256]
+ sturh w17, [x1, #256]
+ ldursw x20, [x1, #256]
+ ldur x12, [sp, #256]
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldurb w2, [sp, #256]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sturh w17, [x1, #256]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldursw x20, [x1, #256]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldur x12, [sp, #256]
+// CHECK-ERROR-NEXT: ^
+
+ stur h2, [x2, #-257]
+ stur b2, [x2, #-257]
+ ldursb x9, [sp, #-257]
+ ldur w2, [x30, #-257]
+ stur q9, [x20, #-257]
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stur h2, [x2, #-257]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stur b2, [x2, #-257]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldursb x9, [sp, #-257]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldur w2, [x30, #-257]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stur q9, [x20, #-257]
+// CHECK-ERROR-NEXT: ^
+
+ prfum pstl3strm, [xzr]
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: prfum pstl3strm, [xzr]
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Load-store register (immediate post-indexed)
+//------------------------------------------------------------------------------
+ ldr x3, [x4, #25], #0
+ ldr x4, [x9, #0], #4
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr x3, [x4, #25], #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr x4, [x9, #0], #4
+// CHECK-ERROR-NEXT: ^
+
+ strb w1, [x19], #256
+ strb w9, [sp], #-257
+ strh w1, [x19], #256
+ strh w9, [sp], #-257
+ str w1, [x19], #256
+ str w9, [sp], #-257
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: strb w1, [x19], #256
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: strb w9, [sp], #-257
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: strh w1, [x19], #256
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: strh w9, [sp], #-257
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: str w1, [x19], #256
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: str w9, [sp], #-257
+// CHECK-ERROR-NEXT: ^
+
+ ldrb w1, [x19], #256
+ ldrb w9, [sp], #-257
+ ldrh w1, [x19], #256
+ ldrh w9, [sp], #-257
+ ldr w1, [x19], #256
+ ldr w9, [sp], #-257
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrb w1, [x19], #256
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrb w9, [sp], #-257
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrh w1, [x19], #256
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrh w9, [sp], #-257
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr w1, [x19], #256
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr w9, [sp], #-257
+// CHECK-ERROR-NEXT: ^
+
+ ldrsb x2, [x3], #256
+ ldrsb x22, [x13], #-257
+ ldrsh x2, [x3], #256
+ ldrsh x22, [x13], #-257
+ ldrsw x2, [x3], #256
+ ldrsw x22, [x13], #-257
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrsb x2, [x3], #256
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrsb x22, [x13], #-257
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrsh x2, [x3], #256
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrsh x22, [x13], #-257
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrsw x2, [x3], #256
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrsw x22, [x13], #-257
+// CHECK-ERROR-NEXT: ^
+
+ ldrsb w2, [x3], #256
+ ldrsb w22, [x13], #-257
+ ldrsh w2, [x3], #256
+ ldrsh w22, [x13], #-257
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrsb w2, [x3], #256
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrsb w22, [x13], #-257
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrsh w2, [x3], #256
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrsh w22, [x13], #-257
+// CHECK-ERROR-NEXT: ^
+
+ str b3, [x3], #256
+ str b3, [x13], #-257
+ str h3, [x3], #256
+ str h3, [x13], #-257
+ str s3, [x3], #256
+ str s3, [x13], #-257
+ str d3, [x3], #256
+ str d3, [x13], #-257
+ str q3, [x3], #256
+ str q3, [x13], #-257
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: str b3, [x3], #256
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: str b3, [x13], #-257
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: str h3, [x3], #256
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: str h3, [x13], #-257
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: str s3, [x3], #256
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: str s3, [x13], #-257
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: str d3, [x3], #256
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: str d3, [x13], #-257
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: str q3, [x3], #256
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: str q3, [x13], #-257
+// CHECK-ERROR-NEXT: ^
+
+ ldr b3, [x3], #256
+ ldr b3, [x13], #-257
+ ldr h3, [x3], #256
+ ldr h3, [x13], #-257
+ ldr s3, [x3], #256
+ ldr s3, [x13], #-257
+ ldr d3, [x3], #256
+ ldr d3, [x13], #-257
+ ldr q3, [x3], #256
+ ldr q3, [x13], #-257
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr b3, [x3], #256
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr b3, [x13], #-257
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr h3, [x3], #256
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr h3, [x13], #-257
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr s3, [x3], #256
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr s3, [x13], #-257
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr d3, [x3], #256
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr d3, [x13], #-257
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr q3, [x3], #256
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr q3, [x13], #-257
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Load-store register (immediate pre-indexed)
+//------------------------------------------------------------------------------
+
+ ldr x3, [x4]!
+// CHECK-ERROR: error:
+// CHECK-ERROR-NEXT: ldr x3, [x4]!
+// CHECK-ERROR-NEXT: ^
+
+ strb w1, [x19, #256]!
+ strb w9, [sp, #-257]!
+ strh w1, [x19, #256]!
+ strh w9, [sp, #-257]!
+ str w1, [x19, #256]!
+ str w9, [sp, #-257]!
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: strb w1, [x19, #256]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: strb w9, [sp, #-257]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: strh w1, [x19, #256]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: strh w9, [sp, #-257]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: str w1, [x19, #256]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: str w9, [sp, #-257]!
+// CHECK-ERROR-NEXT: ^
+
+ ldrb w1, [x19, #256]!
+ ldrb w9, [sp, #-257]!
+ ldrh w1, [x19, #256]!
+ ldrh w9, [sp, #-257]!
+ ldr w1, [x19, #256]!
+ ldr w9, [sp, #-257]!
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrb w1, [x19, #256]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrb w9, [sp, #-257]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrh w1, [x19, #256]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrh w9, [sp, #-257]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr w1, [x19, #256]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr w9, [sp, #-257]!
+// CHECK-ERROR-NEXT: ^
+
+ ldrsb x2, [x3, #256]!
+ ldrsb x22, [x13, #-257]!
+ ldrsh x2, [x3, #256]!
+ ldrsh x22, [x13, #-257]!
+ ldrsw x2, [x3, #256]!
+ ldrsw x22, [x13, #-257]!
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrsb x2, [x3, #256]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrsb x22, [x13, #-257]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrsh x2, [x3, #256]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrsh x22, [x13, #-257]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrsw x2, [x3, #256]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrsw x22, [x13, #-257]!
+// CHECK-ERROR-NEXT: ^
+
+ ldrsb w2, [x3, #256]!
+ ldrsb w22, [x13, #-257]!
+ ldrsh w2, [x3, #256]!
+ ldrsh w22, [x13, #-257]!
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrsb w2, [x3, #256]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrsb w22, [x13, #-257]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrsh w2, [x3, #256]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrsh w22, [x13, #-257]!
+// CHECK-ERROR-NEXT: ^
+
+ str b3, [x3, #256]!
+ str b3, [x13, #-257]!
+ str h3, [x3, #256]!
+ str h3, [x13, #-257]!
+ str s3, [x3, #256]!
+ str s3, [x13, #-257]!
+ str d3, [x3, #256]!
+ str d3, [x13, #-257]!
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: str b3, [x3, #256]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: str b3, [x13, #-257]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: str h3, [x3, #256]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: str h3, [x13, #-257]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: str s3, [x3, #256]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: str s3, [x13, #-257]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: str d3, [x3, #256]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: str d3, [x13, #-257]!
+// CHECK-ERROR-NEXT: ^
+
+ ldr b3, [x3, #256]!
+ ldr b3, [x13, #-257]!
+ ldr h3, [x3, #256]!
+ ldr h3, [x13, #-257]!
+ ldr s3, [x3, #256]!
+ ldr s3, [x13, #-257]!
+ ldr d3, [x3, #256]!
+ ldr d3, [x13, #-257]!
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr b3, [x3, #256]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr b3, [x13, #-257]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr h3, [x3, #256]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr h3, [x13, #-257]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr s3, [x3, #256]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr s3, [x13, #-257]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr d3, [x3, #256]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr d3, [x13, #-257]!
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Load/store (unprivileged)
+//------------------------------------------------------------------------------
+
+ ldtrb w2, [sp, #256]
+ sttrh w17, [x1, #256]
+ ldtrsw x20, [x1, #256]
+ ldtr x12, [sp, #256]
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldtrb w2, [sp, #256]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sttrh w17, [x1, #256]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldtrsw x20, [x1, #256]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldtr x12, [sp, #256]
+// CHECK-ERROR-NEXT: ^
+
+ sttr h2, [x2, #-257]
+ sttr b2, [x2, #-257]
+ ldtrsb x9, [sp, #-257]
+ ldtr w2, [x30, #-257]
+ sttr q9, [x20, #-257]
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sttr h2, [x2, #-257]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sttr b2, [x2, #-257]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldtrsb x9, [sp, #-257]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldtr w2, [x30, #-257]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sttr q9, [x20, #-257]
+// CHECK-ERROR-NEXT: ^
+
+
+//------------------------------------------------------------------------------
+// Load/store (unsigned immediate)
+//------------------------------------------------------------------------------
+
+//// Out of range immediates
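+//// (the unsigned offset is a 12-bit immediate scaled by the access size, so
+//// each of these is one step past the 4095 x size limit)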
+ ldr q0, [x11, #65536]
+ ldr x0, [sp, #32768]
+ ldr w0, [x4, #16384]
+ ldrh w2, [x21, #8192]
+ ldrb w3, [x12, #4096]
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr q0, [x11, #65536]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr x0, [sp, #32768]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr w0, [x4, #16384]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrh w2, [x21, #8192]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrb w3, [x12, #4096]
+// CHECK-ERROR-NEXT: ^
+
+//// Misaligned addresses
+ ldr w0, [x0, #2]
+ ldrsh w2, [x0, #123]
+ str q0, [x0, #8]
+// CHECK-ERROR: error: too few operands for instruction
+// CHECK-ERROR-NEXT: ldr w0, [x0, #2]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: too few operands for instruction
+// CHECK-ERROR-NEXT: ldrsh w2, [x0, #123]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: too few operands for instruction
+// CHECK-ERROR-NEXT: str q0, [x0, #8]
+// CHECK-ERROR-NEXT: ^
+
+//// 32-bit addresses
+ ldr w0, [w20]
+ ldrsh x3, [wsp]
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr w0, [w20]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrsh x3, [wsp]
+// CHECK-ERROR-NEXT: ^
+
+//// Store things
+ strb w0, [wsp]
+ strh w31, [x23, #1]
+ str x5, [x22, #12]
+ str w7, [x12, #16384]
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: strb w0, [wsp]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: strh w31, [x23, #1]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: too few operands for instruction
+// CHECK-ERROR-NEXT: str x5, [x22, #12]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: str w7, [x12, #16384]
+// CHECK-ERROR-NEXT: ^
+
+//// Bad PRFMs
+ prfm #-1, [sp]
+ prfm #32, [sp, #8]
+ prfm pldl1strm, [w3, #8]
+ prfm wibble, [sp]
+// CHECK-ERROR: error: Invalid immediate for instruction
+// CHECK-ERROR-NEXT: prfm #-1, [sp]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: Invalid immediate for instruction
+// CHECK-ERROR-NEXT: prfm #32, [sp, #8]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: prfm pldl1strm, [w3, #8]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: operand specifier not recognised
+// CHECK-ERROR-NEXT: prfm wibble, [sp]
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Load/store register (register offset)
+//------------------------------------------------------------------------------
+
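+//// A w index register must be extended with uxtw or sxtw, an x index uses lsl
+//// or sxtx, and any shift amount must be 0 or log2 of the transfer size.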
+ ldr w3, [xzr, x3]
+ ldr w4, [x0, x4, lsl]
+ ldr w9, [x5, x5, uxtw]
+ ldr w10, [x6, x9, sxtw #2]
+ ldr w11, [x7, w2, lsl #2]
+ ldr w12, [x8, w1, sxtx]
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr w3, [xzr, x3]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected #imm after shift specifier
+// CHECK-ERROR-NEXT: ldr w4, [x0, x4, lsl]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr w9, [x5, x5, uxtw]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr w10, [x6, x9, sxtw #2]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr w11, [x7, w2, lsl #2]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr w12, [x8, w1, sxtx]
+// CHECK-ERROR-NEXT: ^
+
+ ldrsb w9, [x4, x2, lsl #-1]
+ strb w9, [x4, x2, lsl #1]
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: ldrsb w9, [x4, x2, lsl #-1]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: strb w9, [x4, x2, lsl #1]
+// CHECK-ERROR-NEXT: ^
+
+ ldrsh w9, [x4, x2, lsl #-1]
+ ldr h13, [x4, w2, uxtw #2]
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: ldrsh w9, [x4, x2, lsl #-1]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr h13, [x4, w2, uxtw #2]
+// CHECK-ERROR-NEXT: ^
+
+ str w9, [x5, w9, sxtw #-1]
+ str s3, [sp, w9, uxtw #1]
+ ldrsw x9, [x15, x4, sxtx #3]
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: str w9, [x5, w9, sxtw #-1]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: str s3, [sp, w9, uxtw #1]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldrsw x9, [x15, x4, sxtx #3]
+// CHECK-ERROR-NEXT: ^
+
+ str xzr, [x5, x9, sxtx #-1]
+ prfm pldl3keep, [sp, x20, lsl #2]
+ ldr d3, [x20, wzr, uxtw #4]
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: str xzr, [x5, x9, sxtx #-1]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: prfm pldl3keep, [sp, x20, lsl #2]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr d3, [x20, wzr, uxtw #4]
+// CHECK-ERROR-NEXT: ^
+
+ ldr q5, [sp, x2, lsl #-1]
+ ldr q10, [x20, w4, uxtw #2]
+ str q21, [x20, w4, uxtw #5]
+// CHECK-ERROR-NEXT: error: expected integer shift amount
+// CHECK-ERROR-NEXT: ldr q5, [sp, x2, lsl #-1]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldr q10, [x20, w4, uxtw #2]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: str q21, [x20, w4, uxtw #5]
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Load/store register pair (offset)
+//------------------------------------------------------------------------------
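+//// Pair offsets are a signed 7-bit immediate scaled by the access size:
+//// multiples of 4 in [-256, 252], of 8 in [-512, 504] and of 16 in
+//// [-1024, 1008]. The writeback and non-temporal variants below share these
+//// ranges.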
+ ldp w3, w2, [x4, #1]
+ stp w1, w2, [x3, #253]
+ stp w9, w10, [x5, #256]
+ ldp w11, w12, [x9, #-260]
+ stp wsp, w9, [sp]
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp w3, w2, [x4, #1]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stp w1, w2, [x3, #253]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stp w9, w10, [x5, #256]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp w11, w12, [x9, #-260]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stp wsp, w9, [sp]
+// CHECK-ERROR-NEXT: ^
+
+ ldpsw x9, x2, [sp, #2]
+ ldpsw x1, x2, [x10, #256]
+ ldpsw x3, x4, [x11, #-260]
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldpsw x9, x2, [sp, #2]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldpsw x1, x2, [x10, #256]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldpsw x3, x4, [x11, #-260]
+// CHECK-ERROR-NEXT: ^
+
+ ldp x2, x5, [sp, #4]
+ ldp x5, x6, [x9, #512]
+ stp x7, x8, [x10, #-520]
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp x2, x5, [sp, #4]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp x5, x6, [x9, #512]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stp x7, x8, [x10, #-520]
+// CHECK-ERROR-NEXT: ^
+
+ ldp sp, x3, [x10]
+ stp x3, sp, [x9]
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp sp, x3, [x10]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stp x3, sp, [x9]
+// CHECK-ERROR-NEXT: ^
+
+ stp s3, s5, [sp, #-2]
+ ldp s6, s26, [x4, #-260]
+ stp s13, s19, [x5, #256]
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stp s3, s5, [sp, #-2]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp s6, s26, [x4, #-260]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stp s13, s19, [x5, #256]
+// CHECK-ERROR-NEXT: ^
+
+ ldp d3, d4, [xzr]
+ ldp d5, d6, [x0, #512]
+ stp d7, d8, [x0, #-520]
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp d3, d4, [xzr]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp d5, d6, [x0, #512]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stp d7, d8, [x0, #-520]
+// CHECK-ERROR-NEXT: ^
+
+ ldp d3, q2, [sp]
+ ldp q3, q5, [sp, #8]
+ stp q20, q25, [x5, #1024]
+ ldp q30, q15, [x23, #-1040]
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp d3, q2, [sp]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp q3, q5, [sp, #8]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stp q20, q25, [x5, #1024]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp q30, q15, [x23, #-1040]
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Load/store register pair (post-indexed)
+//------------------------------------------------------------------------------
+
+ ldp w3, w2, [x4], #1
+ stp w1, w2, [x3], #253
+ stp w9, w10, [x5], #256
+ ldp w11, w12, [x9], #-260
+ stp wsp, w9, [sp], #0
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp w3, w2, [x4], #1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stp w1, w2, [x3], #253
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stp w9, w10, [x5], #256
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp w11, w12, [x9], #-260
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stp wsp, w9, [sp], #0
+// CHECK-ERROR-NEXT: ^
+
+ ldpsw x9, x2, [sp], #2
+ ldpsw x1, x2, [x10], #256
+ ldpsw x3, x4, [x11], #-260
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldpsw x9, x2, [sp], #2
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldpsw x1, x2, [x10], #256
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldpsw x3, x4, [x11], #-260
+// CHECK-ERROR-NEXT: ^
+
+ ldp x2, x5, [sp], #4
+ ldp x5, x6, [x9], #512
+ stp x7, x8, [x10], #-520
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp x2, x5, [sp], #4
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp x5, x6, [x9], #512
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stp x7, x8, [x10], #-520
+// CHECK-ERROR-NEXT: ^
+
+ ldp sp, x3, [x10], #0
+ stp x3, sp, [x9], #0
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp sp, x3, [x10], #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stp x3, sp, [x9], #0
+// CHECK-ERROR-NEXT: ^
+
+ stp s3, s5, [sp], #-2
+ ldp s6, s26, [x4], #-260
+ stp s13, s19, [x5], #256
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stp s3, s5, [sp], #-2
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp s6, s26, [x4], #-260
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stp s13, s19, [x5], #256
+// CHECK-ERROR-NEXT: ^
+
+ ldp d3, d4, [xzr], #0
+ ldp d5, d6, [x0], #512
+ stp d7, d8, [x0], #-520
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp d3, d4, [xzr], #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp d5, d6, [x0], #512
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stp d7, d8, [x0], #-520
+// CHECK-ERROR-NEXT: ^
+
+ ldp d3, q2, [sp], #0
+ ldp q3, q5, [sp], #8
+ stp q20, q25, [x5], #1024
+ ldp q30, q15, [x23], #-1040
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp d3, q2, [sp], #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp q3, q5, [sp], #8
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stp q20, q25, [x5], #1024
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp q30, q15, [x23], #-1040
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Load/store register pair (pre-indexed)
+//------------------------------------------------------------------------------
+
+ ldp w3, w2, [x4, #1]!
+ stp w1, w2, [x3, #253]!
+ stp w9, w10, [x5, #256]!
+ ldp w11, w12, [x9, #-260]!
+ stp wsp, w9, [sp, #0]!
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp w3, w2, [x4, #1]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stp w1, w2, [x3, #253]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stp w9, w10, [x5, #256]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp w11, w12, [x9, #-260]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stp wsp, w9, [sp, #0]!
+// CHECK-ERROR-NEXT: ^
+
+ ldpsw x9, x2, [sp, #2]!
+ ldpsw x1, x2, [x10, #256]!
+ ldpsw x3, x4, [x11, #-260]!
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldpsw x9, x2, [sp, #2]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldpsw x1, x2, [x10, #256]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldpsw x3, x4, [x11, #-260]!
+// CHECK-ERROR-NEXT: ^
+
+ ldp x2, x5, [sp, #4]!
+ ldp x5, x6, [x9, #512]!
+ stp x7, x8, [x10, #-520]!
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp x2, x5, [sp, #4]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp x5, x6, [x9, #512]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stp x7, x8, [x10, #-520]!
+// CHECK-ERROR-NEXT: ^
+
+ ldp sp, x3, [x10, #0]!
+ stp x3, sp, [x9, #0]!
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp sp, x3, [x10, #0]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stp x3, sp, [x9, #0]!
+// CHECK-ERROR-NEXT: ^
+
+ stp s3, s5, [sp, #-2]!
+ ldp s6, s26, [x4, #-260]!
+ stp s13, s19, [x5, #256]!
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stp s3, s5, [sp, #-2]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp s6, s26, [x4, #-260]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stp s13, s19, [x5, #256]!
+// CHECK-ERROR-NEXT: ^
+
+ ldp d3, d4, [xzr, #0]!
+ ldp d5, d6, [x0, #512]!
+ stp d7, d8, [x0, #-520]!
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp d3, d4, [xzr, #0]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp d5, d6, [x0, #512]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stp d7, d8, [x0, #-520]!
+// CHECK-ERROR-NEXT: ^
+
+ ldp d3, q2, [sp, #0]!
+ ldp q3, q5, [sp, #8]!
+ stp q20, q25, [x5, #1024]!
+ ldp q30, q15, [x23, #-1040]!
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp d3, q2, [sp, #0]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp q3, q5, [sp, #8]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stp q20, q25, [x5, #1024]!
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldp q30, q15, [x23, #-1040]!
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Load/store non-temporal register pair (offset)
+//------------------------------------------------------------------------------
+ ldnp w3, w2, [x4, #1]
+ stnp w1, w2, [x3, #253]
+ stnp w9, w10, [x5, #256]
+ ldnp w11, w12, [x9, #-260]
+ stnp wsp, w9, [sp]
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldnp w3, w2, [x4, #1]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stnp w1, w2, [x3, #253]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stnp w9, w10, [x5, #256]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldnp w11, w12, [x9, #-260]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stnp wsp, w9, [sp]
+// CHECK-ERROR-NEXT: ^
+
+ ldnp x2, x5, [sp, #4]
+ ldnp x5, x6, [x9, #512]
+ stnp x7, x8, [x10, #-520]
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldnp x2, x5, [sp, #4]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldnp x5, x6, [x9, #512]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stnp x7, x8, [x10, #-520]
+// CHECK-ERROR-NEXT: ^
+
+ ldnp sp, x3, [x10]
+ stnp x3, sp, [x9]
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldnp sp, x3, [x10]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stnp x3, sp, [x9]
+// CHECK-ERROR-NEXT: ^
+
+ stnp s3, s5, [sp, #-2]
+ ldnp s6, s26, [x4, #-260]
+ stnp s13, s19, [x5, #256]
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stnp s3, s5, [sp, #-2]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldnp s6, s26, [x4, #-260]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stnp s13, s19, [x5, #256]
+// CHECK-ERROR-NEXT: ^
+
+ ldnp d3, d4, [xzr]
+ ldnp d5, d6, [x0, #512]
+ stnp d7, d8, [x0, #-520]
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldnp d3, d4, [xzr]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldnp d5, d6, [x0, #512]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stnp d7, d8, [x0, #-520]
+// CHECK-ERROR-NEXT: ^
+
+ ldnp d3, q2, [sp]
+ ldnp q3, q5, [sp, #8]
+ stnp q20, q25, [x5, #1024]
+ ldnp q30, q15, [x23, #-1040]
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldnp d3, q2, [sp]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldnp q3, q5, [sp, #8]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: stnp q20, q25, [x5, #1024]
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ldnp q30, q15, [x23, #-1040]
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Logical (immediate)
+//------------------------------------------------------------------------------
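+//// Logical immediates must be bitmask patterns (a rotated run of contiguous
+//// ones replicated across the register), which rules out 0, all-ones and
+//// arbitrary values such as 0x83 or 0x1234; the source register can never be
+//// sp.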
+ orr w0, w1, #0xffffffff
+ and x3, x5, #0xffffffffffffffff
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: orr w0, w1, #0xffffffff
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: and x3, x5, #0xffffffffffffffff
+// CHECK-ERROR-NEXT: ^
+
+ ands w3, w9, #0x0
+ eor x2, x0, #0x0
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ands w3, w9, #0x0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: eor x2, x0, #0x0
+// CHECK-ERROR-NEXT: ^
+
+ eor w3, w5, #0x83
+ eor x9, x20, #0x1234
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: eor w3, w5, #0x83
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: eor x9, x20, #0x1234
+// CHECK-ERROR-NEXT: ^
+
+ and wzr, w4, 0xffff0000
+ eor xzr, x9, #0xffff0000ffff0000
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: and wzr, w4, 0xffff0000
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: eor xzr, x9, #0xffff0000ffff0000
+// CHECK-ERROR-NEXT: ^
+
+ orr w3, wsp, #0xf0f0f0f0
+ ands x3, sp, #0xaaaaaaaaaaaaaaaa
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: orr w3, wsp, #0xf0f0f0f0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ands x3, sp, #0xaaaaaaaaaaaaaaaa
+// CHECK-ERROR-NEXT: ^
+
+ tst sp, #0xe0e0e0e0e0e0e0e0
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: tst sp, #0xe0e0e0e0e0e0e0e0
+// CHECK-ERROR-NEXT: ^
+
+ // movi has been removed from the specification. Make sure it's really gone.
+ movi wzr, #0x44444444
+ movi w3, #0xffff
+ movi x9, #0x0000ffff00000000
+// CHECK-ERROR: error: invalid instruction
+// CHECK-ERROR-NEXT: movi wzr, #0x44444444
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR: error: invalid instruction
+// CHECK-ERROR-NEXT: movi w3, #0xffff
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR: error: invalid instruction
+// CHECK-ERROR-NEXT: movi x9, #0x0000ffff00000000
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Logical (shifted register)
+//------------------------------------------------------------------------------
+
+ //// Out of range shifts
+ and w2, w24, w6, lsl #-1
+ and w4, w6, w12, lsl #32
+ and x4, x6, x12, lsl #64
+ and x2, x5, x11, asr
+// CHECK-ERROR: error: expected integer shift amount
+// CHECK-ERROR-NEXT: and w2, w24, w6, lsl #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: and w4, w6, w12, lsl #32
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: and x4, x6, x12, lsl #64
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected #imm after shift specifier
+// CHECK-ERROR-NEXT: and x2, x5, x11, asr
+// CHECK-ERROR-NEXT: ^
+
+ //// sp not allowed
+ orn wsp, w3, w5
+ bics x20, sp, x9, lsr #0
+ orn x2, x6, sp, lsl #3
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: orn wsp, w3, w5
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: bics x20, sp, x9, lsr #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: orn x2, x6, sp, lsl #3
+// CHECK-ERROR-NEXT: ^
+
+ //// Mismatched registers
+ and x3, w2, w1
+ ands w1, x12, w2
+ and x4, x5, w6, lsl #12
+ orr w2, w5, x7, asr #0
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: and x3, w2, w1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: ands w1, x12, w2
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: and x4, x5, w6, lsl #12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: orr w2, w5, x7, asr #0
+// CHECK-ERROR-NEXT: ^
+
+ //// Shifts should not be allowed on mov
+ mov w3, w7, lsl #13
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: mov w3, w7, lsl #13
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Move wide (immediate)
+//------------------------------------------------------------------------------
+
+ movz w3, #65536, lsl #0
+ movz w4, #65536
+ movn w1, #2, lsl #1
+ movk w3, #0, lsl #-1
+ movn w2, #-1, lsl #0
+ movz x3, #-1
+ movk w3, #1, lsl #32
+ movn x2, #12, lsl #64
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movz w3, #65536, lsl #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movz w4, #65536
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movn w1, #2, lsl #1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: only 'lsl #+N' valid after immediate
+// CHECK-ERROR-NEXT: movk w3, #0, lsl #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movn w2, #-1, lsl #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movz x3, #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movk w3, #1, lsl #32
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movn x2, #12, lsl #64
+// CHECK-ERROR-NEXT: ^
+
+ movz x12, #:abs_g0:sym, lsl #16
+ movz x12, #:abs_g0:sym, lsl #0
+ movn x2, #:abs_g0:sym
+ movk w3, #:abs_g0:sym
+ movz x3, #:abs_g0_nc:sym
+ movn x4, #:abs_g0_nc:sym
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movz x12, #:abs_g0:sym, lsl #16
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movz x12, #:abs_g0:sym, lsl #0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movn x2, #:abs_g0:sym
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movk w3, #:abs_g0:sym
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movz x3, #:abs_g0_nc:sym
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movn x4, #:abs_g0_nc:sym
+// CHECK-ERROR-NEXT: ^
+
+ movn x2, #:abs_g1:sym
+ movk w3, #:abs_g1:sym
+ movz x3, #:abs_g1_nc:sym
+ movn x4, #:abs_g1_nc:sym
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movn x2, #:abs_g1:sym
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movk w3, #:abs_g1:sym
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movz x3, #:abs_g1_nc:sym
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movn x4, #:abs_g1_nc:sym
+// CHECK-ERROR-NEXT: ^
+
+ movz w12, #:abs_g2:sym
+ movn x12, #:abs_g2:sym
+ movk x13, #:abs_g2:sym
+ movk w3, #:abs_g2_nc:sym
+ movz x13, #:abs_g2_nc:sym
+ movn x24, #:abs_g2_nc:sym
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movz w12, #:abs_g2:sym
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movn x12, #:abs_g2:sym
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movk x13, #:abs_g2:sym
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movk w3, #:abs_g2_nc:sym
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movz x13, #:abs_g2_nc:sym
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movn x24, #:abs_g2_nc:sym
+// CHECK-ERROR-NEXT: ^
+
+ movn x19, #:abs_g3:sym
+ movz w20, #:abs_g3:sym
+ movk w21, #:abs_g3:sym
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movn x19, #:abs_g3:sym
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movz w20, #:abs_g3:sym
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movk w21, #:abs_g3:sym
+// CHECK-ERROR-NEXT: ^
+
+ movk x19, #:abs_g0_s:sym
+ movk w23, #:abs_g0_s:sym
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movk x19, #:abs_g0_s:sym
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movk w23, #:abs_g0_s:sym
+// CHECK-ERROR-NEXT: ^
+
+ movk x19, #:abs_g1_s:sym
+ movk w23, #:abs_g1_s:sym
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movk x19, #:abs_g1_s:sym
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movk w23, #:abs_g1_s:sym
+// CHECK-ERROR-NEXT: ^
+
+ movz w2, #:abs_g2_s:sym
+ movn w29, #:abs_g2_s:sym
+ movk x19, #:abs_g2_s:sym
+ movk w23, #:abs_g2_s:sym
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movz w2, #:abs_g2_s:sym
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movn w29, #:abs_g2_s:sym
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movk x19, #:abs_g2_s:sym
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: movk w23, #:abs_g2_s:sym
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// PC-relative addressing
+//------------------------------------------------------------------------------
+
+ adr sp, loc // expects xzr
+ adrp x3, #20 // Immediate unaligned
+ adrp w2, loc // 64-bit register needed
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: adr sp, loc
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: adrp x3, #20
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: adrp w2, loc
+// CHECK-ERROR-NEXT: ^
+
+ adr x9, #1048576
+ adr x2, #-1048577
+ adrp x9, #4294967296
+ adrp x20, #-4294971392
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: adr x9, #1048576
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: adr x2, #-1048577
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: adrp x9, #4294967296
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: adrp x20, #-4294971392
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// System
+//------------------------------------------------------------------------------
+
+ hint #-1
+ hint #128
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: hint #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: hint #128
+// CHECK-ERROR-NEXT: ^
+
+ clrex #-1
+ clrex #16
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: clrex #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: clrex #16
+// CHECK-ERROR-NEXT: ^
+
+ dsb #-1
+ dsb #16
+ dmb #-1
+ dmb #16
+// CHECK-ERROR-NEXT: error: Invalid immediate for instruction
+// CHECK-ERROR-NEXT: dsb #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: Invalid immediate for instruction
+// CHECK-ERROR-NEXT: dsb #16
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: Invalid immediate for instruction
+// CHECK-ERROR-NEXT: dmb #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: Invalid immediate for instruction
+// CHECK-ERROR-NEXT: dmb #16
+// CHECK-ERROR-NEXT: ^
+
+ isb #-1
+ isb #16
+// CHECK-ERROR-NEXT: error: Invalid immediate for instruction
+// CHECK-ERROR-NEXT: isb #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: Invalid immediate for instruction
+// CHECK-ERROR-NEXT: isb #16
+// CHECK-ERROR-NEXT: ^
+
+ msr daifset, x4
+ msr spsel #-1
+ msr daifclr, #16
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr daifset, x4
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error:
+// CHECK-ERROR-NEXT: msr spsel #-1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr daifclr, #16
+// CHECK-ERROR-NEXT: ^
+
+ sys #8, c1, c2, #7, x9
+ sys #3, c16, c2, #3, x10
+ sys #2, c11, c16, #5
+ sys #4, c9, c8, #8, xzr
+ sysl x11, #8, c1, c2, #7
+ sysl x13, #3, c16, c2, #3
+ sysl x9, #2, c11, c16, #5
+ sysl x4, #4, c9, c8, #8
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sys #8, c1, c2, #7, x9
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: Expected cN operand where 0 <= N <= 15
+// CHECK-ERROR-NEXT: sys #3, c16, c2, #3, x10
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: Expected cN operand where 0 <= N <= 15
+// CHECK-ERROR-NEXT: sys #2, c11, c16, #5
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sys #4, c9, c8, #8, xzr
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sysl x11, #8, c1, c2, #7
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: Expected cN operand where 0 <= N <= 15
+// CHECK-ERROR-NEXT: sysl x13, #3, c16, c2, #3
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: Expected cN operand where 0 <= N <= 15
+// CHECK-ERROR-NEXT: sysl x9, #2, c11, c16, #5
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: sysl x4, #4, c9, c8, #8
+// CHECK-ERROR-NEXT: ^
+
+ ic ialluis, x2
+ ic allu, x7
+ ic ivau
+// CHECK-ERROR-NEXT: error: specified IC op does not use a register
+// CHECK-ERROR-NEXT: ic ialluis, x2
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: operand specifier not recognised
+// CHECK-ERROR-NEXT: ic allu, x7
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: specified IC op requires a register
+// CHECK-ERROR-NEXT: ic ivau
+// CHECK-ERROR-NEXT: ^
+
+ tlbi IPAS2E1IS
+ tlbi IPAS2LE1IS
+ tlbi VMALLE1IS, x12
+ tlbi ALLE2IS, x11
+ tlbi ALLE3IS, x20
+ tlbi VAE1IS
+ tlbi VAE2IS
+ tlbi VAE3IS
+ tlbi ASIDE1IS
+ tlbi VAAE1IS
+ tlbi ALLE1IS, x0
+ tlbi VALE1IS
+ tlbi VALE2IS
+ tlbi VALE3IS
+ tlbi VMALLS12E1IS, xzr
+ tlbi VAALE1IS
+ tlbi IPAS2E1
+ tlbi IPAS2LE1
+ tlbi VMALLE1, x9
+ tlbi ALLE2, x10
+ tlbi ALLE3, x11
+ tlbi VAE1
+ tlbi VAE2
+ tlbi VAE3
+ tlbi ASIDE1
+ tlbi VAAE1
+ tlbi ALLE1, x25
+ tlbi VALE1
+ tlbi VALE2
+ tlbi VALE3
+ tlbi VMALLS12E1, x15
+ tlbi VAALE1
+// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: tlbi IPAS2E1IS
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: tlbi IPAS2LE1IS
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: specified TLBI op does not use a register
+// CHECK-ERROR-NEXT: tlbi VMALLE1IS, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: specified TLBI op does not use a register
+// CHECK-ERROR-NEXT: tlbi ALLE2IS, x11
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: specified TLBI op does not use a register
+// CHECK-ERROR-NEXT: tlbi ALLE3IS, x20
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: tlbi VAE1IS
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: tlbi VAE2IS
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: tlbi VAE3IS
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: tlbi ASIDE1IS
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: tlbi VAAE1IS
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: specified TLBI op does not use a register
+// CHECK-ERROR-NEXT: tlbi ALLE1IS, x0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: tlbi VALE1IS
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: tlbi VALE2IS
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: tlbi VALE3IS
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: specified TLBI op does not use a register
+// CHECK-ERROR-NEXT: tlbi VMALLS12E1IS, xzr
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: tlbi VAALE1IS
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: tlbi IPAS2E1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: tlbi IPAS2LE1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: specified TLBI op does not use a register
+// CHECK-ERROR-NEXT: tlbi VMALLE1, x9
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: specified TLBI op does not use a register
+// CHECK-ERROR-NEXT: tlbi ALLE2, x10
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: specified TLBI op does not use a register
+// CHECK-ERROR-NEXT: tlbi ALLE3, x11
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: tlbi VAE1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: tlbi VAE2
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: tlbi VAE3
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: tlbi ASIDE1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: tlbi VAAE1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: specified TLBI op does not use a register
+// CHECK-ERROR-NEXT: tlbi ALLE1, x25
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: tlbi VALE1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: tlbi VALE2
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: tlbi VALE3
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: specified TLBI op does not use a register
+// CHECK-ERROR-NEXT: tlbi VMALLS12E1, x15
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: tlbi VAALE1
+// CHECK-ERROR-NEXT: ^
+
+// For the MSR/MRS instructions, first make sure that read-only and
+// write-only registers really are treated as such.
+ msr MDCCSR_EL0, x12
+ msr DBGDTRRX_EL0, x12
+ msr MDRAR_EL1, x12
+ msr OSLSR_EL1, x12
+ msr DBGAUTHSTATUS_EL1, x12
+ msr MIDR_EL1, x12
+ msr CCSIDR_EL1, x12
+ msr CLIDR_EL1, x12
+ msr CTR_EL0, x12
+ msr MPIDR_EL1, x12
+ msr REVIDR_EL1, x12
+ msr AIDR_EL1, x12
+ msr DCZID_EL0, x12
+ msr ID_PFR0_EL1, x12
+ msr ID_PFR1_EL1, x12
+ msr ID_DFR0_EL1, x12
+ msr ID_AFR0_EL1, x12
+ msr ID_MMFR0_EL1, x12
+ msr ID_MMFR1_EL1, x12
+ msr ID_MMFR2_EL1, x12
+ msr ID_MMFR3_EL1, x12
+ msr ID_ISAR0_EL1, x12
+ msr ID_ISAR1_EL1, x12
+ msr ID_ISAR2_EL1, x12
+ msr ID_ISAR3_EL1, x12
+ msr ID_ISAR4_EL1, x12
+ msr ID_ISAR5_EL1, x12
+ msr MVFR0_EL1, x12
+ msr MVFR1_EL1, x12
+ msr MVFR2_EL1, x12
+ msr ID_AA64PFR0_EL1, x12
+ msr ID_AA64PFR1_EL1, x12
+ msr ID_AA64DFR0_EL1, x12
+ msr ID_AA64DFR1_EL1, x12
+ msr ID_AA64AFR0_EL1, x12
+ msr ID_AA64AFR1_EL1, x12
+ msr ID_AA64ISAR0_EL1, x12
+ msr ID_AA64ISAR1_EL1, x12
+ msr ID_AA64MMFR0_EL1, x12
+ msr ID_AA64MMFR1_EL1, x12
+ msr PMCEID0_EL0, x12
+ msr PMCEID1_EL0, x12
+ msr RVBAR_EL1, x12
+ msr RVBAR_EL2, x12
+ msr RVBAR_EL3, x12
+ msr ISR_EL1, x12
+ msr CNTPCT_EL0, x12
+ msr CNTVCT_EL0, x12
+ msr PMEVCNTR31_EL0, x12
+ msr PMEVTYPER31_EL0, x12
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr MDCCSR_EL0, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr DBGDTRRX_EL0, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr MDRAR_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr OSLSR_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr DBGAUTHSTATUS_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr MIDR_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr CCSIDR_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr CLIDR_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr CTR_EL0, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr MPIDR_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr REVIDR_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr AIDR_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr DCZID_EL0, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr ID_PFR0_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr ID_PFR1_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr ID_DFR0_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr ID_AFR0_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr ID_MMFR0_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr ID_MMFR1_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr ID_MMFR2_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr ID_MMFR3_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr ID_ISAR0_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr ID_ISAR1_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr ID_ISAR2_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr ID_ISAR3_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr ID_ISAR4_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr ID_ISAR5_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr MVFR0_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr MVFR1_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr MVFR2_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr ID_AA64PFR0_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr ID_AA64PFR1_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr ID_AA64DFR0_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr ID_AA64DFR1_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr ID_AA64AFR0_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr ID_AA64AFR1_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr ID_AA64ISAR0_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr ID_AA64ISAR1_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr ID_AA64MMFR0_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr ID_AA64MMFR1_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr PMCEID0_EL0, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr PMCEID1_EL0, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr RVBAR_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr RVBAR_EL2, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr RVBAR_EL3, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr ISR_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr CNTPCT_EL0, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr CNTVCT_EL0, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr PMEVCNTR31_EL0, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: msr PMEVTYPER31_EL0, x12
+// CHECK-ERROR-NEXT: ^
+
+ mrs x9, DBGDTRTX_EL0
+ mrs x9, OSLAR_EL1
+ mrs x9, PMSWINC_EL0
+ mrs x9, PMEVCNTR31_EL0
+ mrs x9, PMEVTYPER31_EL0
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: mrs x9, DBGDTRTX_EL0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: mrs x9, OSLAR_EL1
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: mrs x9, PMSWINC_EL0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: mrs x9, PMEVCNTR31_EL0
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: mrs x9, PMEVTYPER31_EL0
+// CHECK-ERROR-NEXT: ^
+
+// Now check some invalid generic names
+ mrs xzr, s2_5_c11_c13_2
+ mrs x12, s3_8_c11_c13_2
+ mrs x13, s3_3_c12_c13_2
+ mrs x19, s3_2_c15_c16_2
+ mrs x30, s3_2_c15_c1_8
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: mrs xzr, s2_5_c11_c13_2
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: mrs x12, s3_8_c11_c13_2
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: mrs x13, s3_3_c12_c13_2
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: mrs x19, s3_2_c15_c16_2
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: mrs x30, s3_2_c15_c1_8
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Test and branch (immediate)
+//------------------------------------------------------------------------------
+
+ tbz w3, #-1, addr
+ tbz w3, #32, nowhere
+ tbz x9, #-1, there
+ tbz x20, #64, dont
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: tbz w3, #-1, addr
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: tbz w3, #32, nowhere
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: tbz x9, #-1, there
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: tbz x20, #64, dont
+// CHECK-ERROR-NEXT: ^
+
+ tbnz w3, #-1, addr
+ tbnz w3, #32, nowhere
+ tbnz x9, #-1, there
+ tbnz x20, #64, dont
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: tbnz w3, #-1, addr
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: tbnz w3, #32, nowhere
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: tbnz x9, #-1, there
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: tbnz x20, #64, dont
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Unconditional branch (immediate)
+//------------------------------------------------------------------------------
+
+ b #134217728
+ b #-134217732
+ b #1
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: b #134217728
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: b #-134217732
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: b #1
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
+// Unconditional branch (register)
+//------------------------------------------------------------------------------
+
+ br w2
+ br sp
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: br w2
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: br sp
+// CHECK-ERROR-NEXT: ^
+
+ //// These ones shouldn't allow any registers
+ eret x2
+ drps x2
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: eret x2
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: drps x2
+// CHECK-ERROR-NEXT: ^
+
diff --git a/test/MC/AArch64/basic-a64-instructions.s b/test/MC/AArch64/basic-a64-instructions.s
new file mode 100644
index 0000000000..e16b2ea724
--- /dev/null
+++ b/test/MC/AArch64/basic-a64-instructions.s
@@ -0,0 +1,4790 @@
+// RUN: llvm-mc -triple=aarch64 -show-encoding < %s | FileCheck %s
+ .globl _func
+
+// Check that the assembler can handle the documented syntax from the ARM ARM.
+// For complex constructs like shifter operands, check them thoroughly once,
+// then spot-check that the following instructions accept the form generally.
+// This gives good coverage while keeping the overall size of the test
+// reasonable.
+
+
+_func:
+// CHECK: _func
+
+//------------------------------------------------------------------------------
+// Add/sub (extended register)
+//------------------------------------------------------------------------------
+ // Basic extends 64-bit ops
+ add x2, x4, w5, uxtb
+ add x20, sp, w19, uxth
+ add x12, x1, w20, uxtw
+ add x20, x3, x13, uxtx
+ add x17, x25, w20, sxtb
+ add x18, x13, w19, sxth
+ add sp, x2, w3, sxtw
+ add x3, x5, x9, sxtx
+// CHECK: add x2, x4, w5, uxtb // encoding: [0x82,0x00,0x25,0x8b]
+// CHECK: add x20, sp, w19, uxth // encoding: [0xf4,0x23,0x33,0x8b]
+// CHECK: add x12, x1, w20, uxtw // encoding: [0x2c,0x40,0x34,0x8b]
+// CHECK: add x20, x3, x13, uxtx // encoding: [0x74,0x60,0x2d,0x8b]
+// CHECK: add x17, x25, w20, sxtb // encoding: [0x31,0x83,0x34,0x8b]
+// CHECK: add x18, x13, w19, sxth // encoding: [0xb2,0xa1,0x33,0x8b]
+// CHECK: add sp, x2, w3, sxtw // encoding: [0x5f,0xc0,0x23,0x8b]
+// CHECK: add x3, x5, x9, sxtx // encoding: [0xa3,0xe0,0x29,0x8b]
+
+ // Basic extends, 32-bit ops
+ add w2, w5, w7, uxtb
+ add w21, w15, w17, uxth
+ add w30, w29, wzr, uxtw
+ add w19, w17, w1, uxtx // Goodness knows what this means
+ add w2, w5, w1, sxtb
+ add w26, w17, w19, sxth
+ add w0, w2, w3, sxtw
+ add w2, w3, w5, sxtx
+// CHECK: add w2, w5, w7, uxtb // encoding: [0xa2,0x00,0x27,0x0b]
+// CHECK: add w21, w15, w17, uxth // encoding: [0xf5,0x21,0x31,0x0b]
+// CHECK: add w30, w29, wzr, uxtw // encoding: [0xbe,0x43,0x3f,0x0b]
+// CHECK: add w19, w17, w1, uxtx // encoding: [0x33,0x62,0x21,0x0b]
+// CHECK: add w2, w5, w1, sxtb // encoding: [0xa2,0x80,0x21,0x0b]
+// CHECK: add w26, w17, w19, sxth // encoding: [0x3a,0xa2,0x33,0x0b]
+// CHECK: add w0, w2, w3, sxtw // encoding: [0x40,0xc0,0x23,0x0b]
+// CHECK: add w2, w3, w5, sxtx // encoding: [0x62,0xe0,0x25,0x0b]
+
+ // Nonzero shift amounts
+ add x2, x3, w5, sxtb #0
+ add x7, x11, w13, uxth #4
+ add w17, w19, w23, uxtw #2
+ add w29, w23, w17, uxtx #1
+// CHECK: add x2, x3, w5, sxtb // encoding: [0x62,0x80,0x25,0x8b]
+// CHECK: add x7, x11, w13, uxth #4 // encoding: [0x67,0x31,0x2d,0x8b]
+// CHECK: add w17, w19, w23, uxtw #2 // encoding: [0x71,0x4a,0x37,0x0b]
+// CHECK: add w29, w23, w17, uxtx #1 // encoding: [0xfd,0x66,0x31,0x0b]
+
+ // Sub
+ sub x2, x4, w5, uxtb #2
+ sub x20, sp, w19, uxth #4
+ sub x12, x1, w20, uxtw
+ sub x20, x3, x13, uxtx #0
+ sub x17, x25, w20, sxtb
+ sub x18, x13, w19, sxth
+ sub sp, x2, w3, sxtw
+ sub x3, x5, x9, sxtx
+// CHECK: sub x2, x4, w5, uxtb #2 // encoding: [0x82,0x08,0x25,0xcb]
+// CHECK: sub x20, sp, w19, uxth #4 // encoding: [0xf4,0x33,0x33,0xcb]
+// CHECK: sub x12, x1, w20, uxtw // encoding: [0x2c,0x40,0x34,0xcb]
+// CHECK: sub x20, x3, x13, uxtx // encoding: [0x74,0x60,0x2d,0xcb]
+// CHECK: sub x17, x25, w20, sxtb // encoding: [0x31,0x83,0x34,0xcb]
+// CHECK: sub x18, x13, w19, sxth // encoding: [0xb2,0xa1,0x33,0xcb]
+// CHECK: sub sp, x2, w3, sxtw // encoding: [0x5f,0xc0,0x23,0xcb]
+// CHECK: sub x3, x5, x9, sxtx // encoding: [0xa3,0xe0,0x29,0xcb]
+
+ sub w2, w5, w7, uxtb
+ sub w21, w15, w17, uxth
+ sub w30, w29, wzr, uxtw
+ sub w19, w17, w1, uxtx // Goodness knows what this means
+ sub w2, w5, w1, sxtb
+ sub w26, wsp, w19, sxth
+ sub wsp, w2, w3, sxtw
+ sub w2, w3, w5, sxtx
+// CHECK: sub w2, w5, w7, uxtb // encoding: [0xa2,0x00,0x27,0x4b]
+// CHECK: sub w21, w15, w17, uxth // encoding: [0xf5,0x21,0x31,0x4b]
+// CHECK: sub w30, w29, wzr, uxtw // encoding: [0xbe,0x43,0x3f,0x4b]
+// CHECK: sub w19, w17, w1, uxtx // encoding: [0x33,0x62,0x21,0x4b]
+// CHECK: sub w2, w5, w1, sxtb // encoding: [0xa2,0x80,0x21,0x4b]
+// CHECK: sub w26, wsp, w19, sxth // encoding: [0xfa,0xa3,0x33,0x4b]
+// CHECK: sub wsp, w2, w3, sxtw // encoding: [0x5f,0xc0,0x23,0x4b]
+// CHECK: sub w2, w3, w5, sxtx // encoding: [0x62,0xe0,0x25,0x4b]
+
+ // Adds
+ adds x2, x4, w5, uxtb #2
+ adds x20, sp, w19, uxth #4
+ adds x12, x1, w20, uxtw
+ adds x20, x3, x13, uxtx #0
+ adds xzr, x25, w20, sxtb #3
+ adds x18, sp, w19, sxth
+ adds xzr, x2, w3, sxtw
+ adds x3, x5, x9, sxtx #2
+// CHECK: adds x2, x4, w5, uxtb #2 // encoding: [0x82,0x08,0x25,0xab]
+// CHECK: adds x20, sp, w19, uxth #4 // encoding: [0xf4,0x33,0x33,0xab]
+// CHECK: adds x12, x1, w20, uxtw // encoding: [0x2c,0x40,0x34,0xab]
+// CHECK: adds x20, x3, x13, uxtx // encoding: [0x74,0x60,0x2d,0xab]
+// CHECK: adds xzr, x25, w20, sxtb #3 // encoding: [0x3f,0x8f,0x34,0xab]
+// CHECK: adds x18, sp, w19, sxth // encoding: [0xf2,0xa3,0x33,0xab]
+// CHECK: adds xzr, x2, w3, sxtw // encoding: [0x5f,0xc0,0x23,0xab]
+// CHECK: adds x3, x5, x9, sxtx #2 // encoding: [0xa3,0xe8,0x29,0xab]
+
+ adds w2, w5, w7, uxtb
+ adds w21, w15, w17, uxth
+ adds w30, w29, wzr, uxtw
+ adds w19, w17, w1, uxtx // Goodness knows what this means
+ adds w2, w5, w1, sxtb #1
+ adds w26, wsp, w19, sxth
+ adds wzr, w2, w3, sxtw
+ adds w2, w3, w5, sxtx
+// CHECK: adds w2, w5, w7, uxtb // encoding: [0xa2,0x00,0x27,0x2b]
+// CHECK: adds w21, w15, w17, uxth // encoding: [0xf5,0x21,0x31,0x2b]
+// CHECK: adds w30, w29, wzr, uxtw // encoding: [0xbe,0x43,0x3f,0x2b]
+// CHECK: adds w19, w17, w1, uxtx // encoding: [0x33,0x62,0x21,0x2b]
+// CHECK: adds w2, w5, w1, sxtb #1 // encoding: [0xa2,0x84,0x21,0x2b]
+// CHECK: adds w26, wsp, w19, sxth // encoding: [0xfa,0xa3,0x33,0x2b]
+// CHECK: adds wzr, w2, w3, sxtw // encoding: [0x5f,0xc0,0x23,0x2b]
+// CHECK: adds w2, w3, w5, sxtx // encoding: [0x62,0xe0,0x25,0x2b]
+
+ // subs
+ subs x2, x4, w5, uxtb #2
+ subs x20, sp, w19, uxth #4
+ subs x12, x1, w20, uxtw
+ subs x20, x3, x13, uxtx #0
+ subs xzr, x25, w20, sxtb #3
+ subs x18, sp, w19, sxth
+ subs xzr, x2, w3, sxtw
+ subs x3, x5, x9, sxtx #2
+// CHECK: subs x2, x4, w5, uxtb #2 // encoding: [0x82,0x08,0x25,0xeb]
+// CHECK: subs x20, sp, w19, uxth #4 // encoding: [0xf4,0x33,0x33,0xeb]
+// CHECK: subs x12, x1, w20, uxtw // encoding: [0x2c,0x40,0x34,0xeb]
+// CHECK: subs x20, x3, x13, uxtx // encoding: [0x74,0x60,0x2d,0xeb]
+// CHECK: subs xzr, x25, w20, sxtb #3 // encoding: [0x3f,0x8f,0x34,0xeb]
+// CHECK: subs x18, sp, w19, sxth // encoding: [0xf2,0xa3,0x33,0xeb]
+// CHECK: subs xzr, x2, w3, sxtw // encoding: [0x5f,0xc0,0x23,0xeb]
+// CHECK: subs x3, x5, x9, sxtx #2 // encoding: [0xa3,0xe8,0x29,0xeb]
+
+ subs w2, w5, w7, uxtb
+ subs w21, w15, w17, uxth
+ subs w30, w29, wzr, uxtw
+ subs w19, w17, w1, uxtx // Goodness knows what this means
+ subs w2, w5, w1, sxtb #1
+ subs w26, wsp, w19, sxth
+ subs wzr, w2, w3, sxtw
+ subs w2, w3, w5, sxtx
+// CHECK: subs w2, w5, w7, uxtb // encoding: [0xa2,0x00,0x27,0x6b]
+// CHECK: subs w21, w15, w17, uxth // encoding: [0xf5,0x21,0x31,0x6b]
+// CHECK: subs w30, w29, wzr, uxtw // encoding: [0xbe,0x43,0x3f,0x6b]
+// CHECK: subs w19, w17, w1, uxtx // encoding: [0x33,0x62,0x21,0x6b]
+// CHECK: subs w2, w5, w1, sxtb #1 // encoding: [0xa2,0x84,0x21,0x6b]
+// CHECK: subs w26, wsp, w19, sxth // encoding: [0xfa,0xa3,0x33,0x6b]
+// CHECK: subs wzr, w2, w3, sxtw // encoding: [0x5f,0xc0,0x23,0x6b]
+// CHECK: subs w2, w3, w5, sxtx // encoding: [0x62,0xe0,0x25,0x6b]
+
+ // cmp
+ cmp x4, w5, uxtb #2
+ cmp sp, w19, uxth #4
+ cmp x1, w20, uxtw
+ cmp x3, x13, uxtx #0
+ cmp x25, w20, sxtb #3
+ cmp sp, w19, sxth
+ cmp x2, w3, sxtw
+ cmp x5, x9, sxtx #2
+// CHECK: cmp x4, w5, uxtb #2 // encoding: [0x9f,0x08,0x25,0xeb]
+// CHECK: cmp sp, w19, uxth #4 // encoding: [0xff,0x33,0x33,0xeb]
+// CHECK: cmp x1, w20, uxtw // encoding: [0x3f,0x40,0x34,0xeb]
+// CHECK: cmp x3, x13, uxtx // encoding: [0x7f,0x60,0x2d,0xeb]
+// CHECK: cmp x25, w20, sxtb #3 // encoding: [0x3f,0x8f,0x34,0xeb]
+// CHECK: cmp sp, w19, sxth // encoding: [0xff,0xa3,0x33,0xeb]
+// CHECK: cmp x2, w3, sxtw // encoding: [0x5f,0xc0,0x23,0xeb]
+// CHECK: cmp x5, x9, sxtx #2 // encoding: [0xbf,0xe8,0x29,0xeb]
+
+ cmp w5, w7, uxtb
+ cmp w15, w17, uxth
+ cmp w29, wzr, uxtw
+ cmp w17, w1, uxtx // Goodness knows what this means
+ cmp w5, w1, sxtb #1
+ cmp wsp, w19, sxth
+ cmp w2, w3, sxtw
+ cmp w3, w5, sxtx
+// CHECK: cmp w5, w7, uxtb // encoding: [0xbf,0x00,0x27,0x6b]
+// CHECK: cmp w15, w17, uxth // encoding: [0xff,0x21,0x31,0x6b]
+// CHECK: cmp w29, wzr, uxtw // encoding: [0xbf,0x43,0x3f,0x6b]
+// CHECK: cmp w17, w1, uxtx // encoding: [0x3f,0x62,0x21,0x6b]
+// CHECK: cmp w5, w1, sxtb #1 // encoding: [0xbf,0x84,0x21,0x6b]
+// CHECK: cmp wsp, w19, sxth // encoding: [0xff,0xa3,0x33,0x6b]
+// CHECK: cmp w2, w3, sxtw // encoding: [0x5f,0xc0,0x23,0x6b]
+// CHECK: cmp w3, w5, sxtx // encoding: [0x7f,0xe0,0x25,0x6b]
+
+
+ // cmn
+ cmn x4, w5, uxtb #2
+ cmn sp, w19, uxth #4
+ cmn x1, w20, uxtw
+ cmn x3, x13, uxtx #0
+ cmn x25, w20, sxtb #3
+ cmn sp, w19, sxth
+ cmn x2, w3, sxtw
+ cmn x5, x9, sxtx #2
+// CHECK: cmn x4, w5, uxtb #2 // encoding: [0x9f,0x08,0x25,0xab]
+// CHECK: cmn sp, w19, uxth #4 // encoding: [0xff,0x33,0x33,0xab]
+// CHECK: cmn x1, w20, uxtw // encoding: [0x3f,0x40,0x34,0xab]
+// CHECK: cmn x3, x13, uxtx // encoding: [0x7f,0x60,0x2d,0xab]
+// CHECK: cmn x25, w20, sxtb #3 // encoding: [0x3f,0x8f,0x34,0xab]
+// CHECK: cmn sp, w19, sxth // encoding: [0xff,0xa3,0x33,0xab]
+// CHECK: cmn x2, w3, sxtw // encoding: [0x5f,0xc0,0x23,0xab]
+// CHECK: cmn x5, x9, sxtx #2 // encoding: [0xbf,0xe8,0x29,0xab]
+
+ cmn w5, w7, uxtb
+ cmn w15, w17, uxth
+ cmn w29, wzr, uxtw
+ cmn w17, w1, uxtx // Goodness knows what this means
+ cmn w5, w1, sxtb #1
+ cmn wsp, w19, sxth
+ cmn w2, w3, sxtw
+ cmn w3, w5, sxtx
+// CHECK: cmn w5, w7, uxtb // encoding: [0xbf,0x00,0x27,0x2b]
+// CHECK: cmn w15, w17, uxth // encoding: [0xff,0x21,0x31,0x2b]
+// CHECK: cmn w29, wzr, uxtw // encoding: [0xbf,0x43,0x3f,0x2b]
+// CHECK: cmn w17, w1, uxtx // encoding: [0x3f,0x62,0x21,0x2b]
+// CHECK: cmn w5, w1, sxtb #1 // encoding: [0xbf,0x84,0x21,0x2b]
+// CHECK: cmn wsp, w19, sxth // encoding: [0xff,0xa3,0x33,0x2b]
+// CHECK: cmn w2, w3, sxtw // encoding: [0x5f,0xc0,0x23,0x2b]
+// CHECK: cmn w3, w5, sxtx // encoding: [0x7f,0xe0,0x25,0x2b]
+
+ // operands for cmp
+ cmp x20, w29, uxtb #3
+ cmp x12, x13, uxtx #4
+ cmp wsp, w1, uxtb
+ cmn wsp, wzr, sxtw
+// CHECK: cmp x20, w29, uxtb #3 // encoding: [0x9f,0x0e,0x3d,0xeb]
+// CHECK: cmp x12, x13, uxtx #4 // encoding: [0x9f,0x71,0x2d,0xeb]
+// CHECK: cmp wsp, w1, uxtb // encoding: [0xff,0x03,0x21,0x6b]
+// CHECK: cmn wsp, wzr, sxtw // encoding: [0xff,0xc3,0x3f,0x2b]
+
+ // LSL variant if sp involved
+ sub sp, x3, x7, lsl #4
+ add w2, wsp, w3, lsl #1
+ cmp wsp, w9, lsl #0
+ adds wzr, wsp, w3, lsl #4
+ subs x3, sp, x9, lsl #2
+// CHECK: sub sp, x3, x7, lsl #4 // encoding: [0x7f,0x70,0x27,0xcb]
+// CHECK: add w2, wsp, w3, lsl #1 // encoding: [0xe2,0x47,0x23,0x0b]
+// CHECK: cmp wsp, w9 // encoding: [0xff,0x43,0x29,0x6b]
+// CHECK: adds wzr, wsp, w3, lsl #4 // encoding: [0xff,0x53,0x23,0x2b]
+// CHECK: subs x3, sp, x9, lsl #2 // encoding: [0xe3,0x6b,0x29,0xeb]
+
+//------------------------------------------------------------------------------
+// Add/sub (immediate)
+//------------------------------------------------------------------------------
+
+// Check basic immediate values: an unsigned 12-bit immediate, optionally
+// shifted left by 12 bits.
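+// For example, "#4095, lsl #12" below encodes 4095 * 4096 = 16773120 (0xfff000),
+// the largest value this immediate form can represent.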
+ add w4, w5, #0x0
+ add w2, w3, #4095
+ add w30, w29, #1, lsl #12
+ add w13, w5, #4095, lsl #12
+ add x5, x7, #1638
+// CHECK: add w4, w5, #0 // encoding: [0xa4,0x00,0x00,0x11]
+// CHECK: add w2, w3, #4095 // encoding: [0x62,0xfc,0x3f,0x11]
+// CHECK: add w30, w29, #1, lsl #12 // encoding: [0xbe,0x07,0x40,0x11]
+// CHECK: add w13, w5, #4095, lsl #12 // encoding: [0xad,0xfc,0x7f,0x11]
+// CHECK: add x5, x7, #1638 // encoding: [0xe5,0x98,0x19,0x91]
+
+// In the non-S variants, register number 31 encodes sp rather than zr for all operands
+ add w20, wsp, #801, lsl #0
+ add wsp, wsp, #1104
+ add wsp, w30, #4084
+// CHECK: add w20, wsp, #801 // encoding: [0xf4,0x87,0x0c,0x11]
+// CHECK: add wsp, wsp, #1104 // encoding: [0xff,0x43,0x11,0x11]
+// CHECK: add wsp, w30, #4084 // encoding: [0xdf,0xd3,0x3f,0x11]
+
+// A few sanity checks on the 64-bit versions
+ add x0, x24, #291
+ add x3, x24, #4095, lsl #12
+ add x8, sp, #1074
+ add sp, x29, #3816
+// CHECK: add x0, x24, #291 // encoding: [0x00,0x8f,0x04,0x91]
+// CHECK: add x3, x24, #4095, lsl #12 // encoding: [0x03,0xff,0x7f,0x91]
+// CHECK: add x8, sp, #1074 // encoding: [0xe8,0xcb,0x10,0x91]
+// CHECK: add sp, x29, #3816 // encoding: [0xbf,0xa3,0x3b,0x91]
+
+// And on sub
+ sub w0, wsp, #4077
+ sub w4, w20, #546, lsl #12
+ sub sp, sp, #288
+ sub wsp, w19, #16
+// CHECK: sub w0, wsp, #4077 // encoding: [0xe0,0xb7,0x3f,0x51]
+// CHECK: sub w4, w20, #546, lsl #12 // encoding: [0x84,0x8a,0x48,0x51]
+// CHECK: sub sp, sp, #288 // encoding: [0xff,0x83,0x04,0xd1]
+// CHECK: sub wsp, w19, #16 // encoding: [0x7f,0x42,0x00,0x51]
+
+// ADDS/SUBS accept zr (not sp) in the Rd position, but sp (not zr) in the Rn position
+ adds w13, w23, #291, lsl #12
+ adds wzr, w2, #4095 // FIXME: canonically should be cmn
+ adds w20, wsp, #0x0
+ adds xzr, x3, #0x1, lsl #12 // FIXME: canonically should be cmn
+// CHECK: adds w13, w23, #291, lsl #12 // encoding: [0xed,0x8e,0x44,0x31]
+// CHECK: adds wzr, w2, #4095 // encoding: [0x5f,0xfc,0x3f,0x31]
+// CHECK: adds w20, wsp, #0 // encoding: [0xf4,0x03,0x00,0x31]
+// CHECK: adds xzr, x3, #1, lsl #12 // encoding: [0x7f,0x04,0x40,0xb1]
+
+// Checks for subs
+ subs xzr, sp, #20, lsl #12 // FIXME: canonically should be cmp
+ subs xzr, x30, #4095, lsl #0 // FIXME: canonically should be cmp
+ subs x4, sp, #3822
+// CHECK: subs xzr, sp, #20, lsl #12 // encoding: [0xff,0x53,0x40,0xf1]
+// CHECK: subs xzr, x30, #4095 // encoding: [0xdf,0xff,0x3f,0xf1]
+// CHECK: subs x4, sp, #3822 // encoding: [0xe4,0xbb,0x3b,0xf1]
+
+// cmn is an alias for adds zr, ...
+ cmn w3, #291, lsl #12
+ cmn wsp, #1365, lsl #0
+ cmn sp, #1092, lsl #12
+// CHECK: cmn w3, #291, lsl #12 // encoding: [0x7f,0x8c,0x44,0x31]
+// CHECK: cmn wsp, #1365 // encoding: [0xff,0x57,0x15,0x31]
+// CHECK: cmn sp, #1092, lsl #12 // encoding: [0xff,0x13,0x51,0xb1]
+
+// cmp is an alias for subs zr, ... (FIXME: should always disassemble as such too).
+ cmp x4, #300, lsl #12
+ cmp wsp, #500
+ cmp sp, #200, lsl #0
+// CHECK: cmp x4, #300, lsl #12 // encoding: [0x9f,0xb0,0x44,0xf1]
+// CHECK: cmp wsp, #500 // encoding: [0xff,0xd3,0x07,0x71]
+// CHECK: cmp sp, #200 // encoding: [0xff,0x23,0x03,0xf1]
+
+// A "MOV" involving sp is encoded in this manner: add Reg, Reg, #0
+ mov sp, x30
+ mov wsp, w20
+ mov x11, sp
+ mov w24, wsp
+// CHECK: mov sp, x30 // encoding: [0xdf,0x03,0x00,0x91]
+// CHECK: mov wsp, w20 // encoding: [0x9f,0x02,0x00,0x11]
+// CHECK: mov x11, sp // encoding: [0xeb,0x03,0x00,0x91]
+// CHECK: mov w24, wsp // encoding: [0xf8,0x03,0x00,0x11]
+
+// A relocation check (the default is lo12, which is the only sane relocation here anyway)
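+// (The :lo12: modifier requests the low 12 bits of var's address, which is
+// exactly what the 12-bit immediate field of ADD can hold.)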
+ add x0, x4, #:lo12:var
+// CHECK: add x0, x4, #:lo12:var // encoding: [0x80'A',A,A,0x91'A']
+// CHECK: // fixup A - offset: 0, value: :lo12:var, kind: fixup_a64_add_lo12
+
+//------------------------------------------------------------------------------
+// Add-sub (shifted register)
+//------------------------------------------------------------------------------
+
+// As usual, many of the forms written here are not canonical; the assembler
+// prints the canonical form instead (e.g. a redundant "lsl #0" is dropped).
+
+ add w3, w5, w7
+ add wzr, w3, w5
+ add w20, wzr, w4
+ add w4, w6, wzr
+// CHECK: add w3, w5, w7 // encoding: [0xa3,0x00,0x07,0x0b]
+// CHECK: add wzr, w3, w5 // encoding: [0x7f,0x00,0x05,0x0b]
+// CHECK: add w20, wzr, w4 // encoding: [0xf4,0x03,0x04,0x0b]
+// CHECK: add w4, w6, wzr // encoding: [0xc4,0x00,0x1f,0x0b]
+
+ add w11, w13, w15, lsl #0
+ add w9, w3, wzr, lsl #10
+ add w17, w29, w20, lsl #31
+// CHECK: add w11, w13, w15 // encoding: [0xab,0x01,0x0f,0x0b]
+// CHECK: add w9, w3, wzr, lsl #10 // encoding: [0x69,0x28,0x1f,0x0b]
+// CHECK: add w17, w29, w20, lsl #31 // encoding: [0xb1,0x7f,0x14,0x0b]
+
+ add w21, w22, w23, lsr #0
+ add w24, w25, w26, lsr #18
+ add w27, w28, w29, lsr #31
+// CHECK: add w21, w22, w23, lsr #0 // encoding: [0xd5,0x02,0x57,0x0b]
+// CHECK: add w24, w25, w26, lsr #18 // encoding: [0x38,0x4b,0x5a,0x0b]
+// CHECK: add w27, w28, w29, lsr #31 // encoding: [0x9b,0x7f,0x5d,0x0b]
+
+ add w2, w3, w4, asr #0
+ add w5, w6, w7, asr #21
+ add w8, w9, w10, asr #31
+// CHECK: add w2, w3, w4, asr #0 // encoding: [0x62,0x00,0x84,0x0b]
+// CHECK: add w5, w6, w7, asr #21 // encoding: [0xc5,0x54,0x87,0x0b]
+// CHECK: add w8, w9, w10, asr #31 // encoding: [0x28,0x7d,0x8a,0x0b]
+
+ add x3, x5, x7
+ add xzr, x3, x5
+ add x20, xzr, x4
+ add x4, x6, xzr
+// CHECK: add x3, x5, x7 // encoding: [0xa3,0x00,0x07,0x8b]
+// CHECK: add xzr, x3, x5 // encoding: [0x7f,0x00,0x05,0x8b]
+// CHECK: add x20, xzr, x4 // encoding: [0xf4,0x03,0x04,0x8b]
+// CHECK: add x4, x6, xzr // encoding: [0xc4,0x00,0x1f,0x8b]
+
+ add x11, x13, x15, lsl #0
+ add x9, x3, xzr, lsl #10
+ add x17, x29, x20, lsl #63
+// CHECK: add x11, x13, x15 // encoding: [0xab,0x01,0x0f,0x8b]
+// CHECK: add x9, x3, xzr, lsl #10 // encoding: [0x69,0x28,0x1f,0x8b]
+// CHECK: add x17, x29, x20, lsl #63 // encoding: [0xb1,0xff,0x14,0x8b]
+
+ add x21, x22, x23, lsr #0
+ add x24, x25, x26, lsr #18
+ add x27, x28, x29, lsr #63
+// CHECK: add x21, x22, x23, lsr #0 // encoding: [0xd5,0x02,0x57,0x8b]
+// CHECK: add x24, x25, x26, lsr #18 // encoding: [0x38,0x4b,0x5a,0x8b]
+// CHECK: add x27, x28, x29, lsr #63 // encoding: [0x9b,0xff,0x5d,0x8b]
+
+ add x2, x3, x4, asr #0
+ add x5, x6, x7, asr #21
+ add x8, x9, x10, asr #63
+// CHECK: add x2, x3, x4, asr #0 // encoding: [0x62,0x00,0x84,0x8b]
+// CHECK: add x5, x6, x7, asr #21 // encoding: [0xc5,0x54,0x87,0x8b]
+// CHECK: add x8, x9, x10, asr #63 // encoding: [0x28,0xfd,0x8a,0x8b]
+
+ adds w3, w5, w7
+ adds wzr, w3, w5
+ adds w20, wzr, w4
+ adds w4, w6, wzr
+// CHECK: adds w3, w5, w7 // encoding: [0xa3,0x00,0x07,0x2b]
+// CHECK: adds wzr, w3, w5 // encoding: [0x7f,0x00,0x05,0x2b]
+// CHECK: adds w20, wzr, w4 // encoding: [0xf4,0x03,0x04,0x2b]
+// CHECK: adds w4, w6, wzr // encoding: [0xc4,0x00,0x1f,0x2b]
+
+ adds w11, w13, w15, lsl #0
+ adds w9, w3, wzr, lsl #10
+ adds w17, w29, w20, lsl #31
+// CHECK: adds w11, w13, w15 // encoding: [0xab,0x01,0x0f,0x2b]
+// CHECK: adds w9, w3, wzr, lsl #10 // encoding: [0x69,0x28,0x1f,0x2b]
+// CHECK: adds w17, w29, w20, lsl #31 // encoding: [0xb1,0x7f,0x14,0x2b]
+
+ adds w21, w22, w23, lsr #0
+ adds w24, w25, w26, lsr #18
+ adds w27, w28, w29, lsr #31
+// CHECK: adds w21, w22, w23, lsr #0 // encoding: [0xd5,0x02,0x57,0x2b]
+// CHECK: adds w24, w25, w26, lsr #18 // encoding: [0x38,0x4b,0x5a,0x2b]
+// CHECK: adds w27, w28, w29, lsr #31 // encoding: [0x9b,0x7f,0x5d,0x2b]
+
+ adds w2, w3, w4, asr #0
+ adds w5, w6, w7, asr #21
+ adds w8, w9, w10, asr #31
+// CHECK: adds w2, w3, w4, asr #0 // encoding: [0x62,0x00,0x84,0x2b]
+// CHECK: adds w5, w6, w7, asr #21 // encoding: [0xc5,0x54,0x87,0x2b]
+// CHECK: adds w8, w9, w10, asr #31 // encoding: [0x28,0x7d,0x8a,0x2b]
+
+ adds x3, x5, x7
+ adds xzr, x3, x5
+ adds x20, xzr, x4
+ adds x4, x6, xzr
+// CHECK: adds x3, x5, x7 // encoding: [0xa3,0x00,0x07,0xab]
+// CHECK: adds xzr, x3, x5 // encoding: [0x7f,0x00,0x05,0xab]
+// CHECK: adds x20, xzr, x4 // encoding: [0xf4,0x03,0x04,0xab]
+// CHECK: adds x4, x6, xzr // encoding: [0xc4,0x00,0x1f,0xab]
+
+ adds x11, x13, x15, lsl #0
+ adds x9, x3, xzr, lsl #10
+ adds x17, x29, x20, lsl #63
+// CHECK: adds x11, x13, x15 // encoding: [0xab,0x01,0x0f,0xab]
+// CHECK: adds x9, x3, xzr, lsl #10 // encoding: [0x69,0x28,0x1f,0xab]
+// CHECK: adds x17, x29, x20, lsl #63 // encoding: [0xb1,0xff,0x14,0xab]
+
+ adds x21, x22, x23, lsr #0
+ adds x24, x25, x26, lsr #18
+ adds x27, x28, x29, lsr #63
+// CHECK: adds x21, x22, x23, lsr #0 // encoding: [0xd5,0x02,0x57,0xab]
+// CHECK: adds x24, x25, x26, lsr #18 // encoding: [0x38,0x4b,0x5a,0xab]
+// CHECK: adds x27, x28, x29, lsr #63 // encoding: [0x9b,0xff,0x5d,0xab]
+
+ adds x2, x3, x4, asr #0
+ adds x5, x6, x7, asr #21
+ adds x8, x9, x10, asr #63
+// CHECK: adds x2, x3, x4, asr #0 // encoding: [0x62,0x00,0x84,0xab]
+// CHECK: adds x5, x6, x7, asr #21 // encoding: [0xc5,0x54,0x87,0xab]
+// CHECK: adds x8, x9, x10, asr #63 // encoding: [0x28,0xfd,0x8a,0xab]
+
+ sub w3, w5, w7
+ sub wzr, w3, w5
+ sub w20, wzr, w4
+ sub w4, w6, wzr
+// CHECK: sub w3, w5, w7 // encoding: [0xa3,0x00,0x07,0x4b]
+// CHECK: sub wzr, w3, w5 // encoding: [0x7f,0x00,0x05,0x4b]
+// CHECK: sub w20, wzr, w4 // encoding: [0xf4,0x03,0x04,0x4b]
+// CHECK: sub w4, w6, wzr // encoding: [0xc4,0x00,0x1f,0x4b]
+
+ sub w11, w13, w15, lsl #0
+ sub w9, w3, wzr, lsl #10
+ sub w17, w29, w20, lsl #31
+// CHECK: sub w11, w13, w15 // encoding: [0xab,0x01,0x0f,0x4b]
+// CHECK: sub w9, w3, wzr, lsl #10 // encoding: [0x69,0x28,0x1f,0x4b]
+// CHECK: sub w17, w29, w20, lsl #31 // encoding: [0xb1,0x7f,0x14,0x4b]
+
+ sub w21, w22, w23, lsr #0
+ sub w24, w25, w26, lsr #18
+ sub w27, w28, w29, lsr #31
+// CHECK: sub w21, w22, w23, lsr #0 // encoding: [0xd5,0x02,0x57,0x4b]
+// CHECK: sub w24, w25, w26, lsr #18 // encoding: [0x38,0x4b,0x5a,0x4b]
+// CHECK: sub w27, w28, w29, lsr #31 // encoding: [0x9b,0x7f,0x5d,0x4b]
+
+ sub w2, w3, w4, asr #0
+ sub w5, w6, w7, asr #21
+ sub w8, w9, w10, asr #31
+// CHECK: sub w2, w3, w4, asr #0 // encoding: [0x62,0x00,0x84,0x4b]
+// CHECK: sub w5, w6, w7, asr #21 // encoding: [0xc5,0x54,0x87,0x4b]
+// CHECK: sub w8, w9, w10, asr #31 // encoding: [0x28,0x7d,0x8a,0x4b]
+
+ sub x3, x5, x7
+ sub xzr, x3, x5
+ sub x20, xzr, x4
+ sub x4, x6, xzr
+// CHECK: sub x3, x5, x7 // encoding: [0xa3,0x00,0x07,0xcb]
+// CHECK: sub xzr, x3, x5 // encoding: [0x7f,0x00,0x05,0xcb]
+// CHECK: sub x20, xzr, x4 // encoding: [0xf4,0x03,0x04,0xcb]
+// CHECK: sub x4, x6, xzr // encoding: [0xc4,0x00,0x1f,0xcb]
+
+ sub x11, x13, x15, lsl #0
+ sub x9, x3, xzr, lsl #10
+ sub x17, x29, x20, lsl #63
+// CHECK: sub x11, x13, x15 // encoding: [0xab,0x01,0x0f,0xcb]
+// CHECK: sub x9, x3, xzr, lsl #10 // encoding: [0x69,0x28,0x1f,0xcb]
+// CHECK: sub x17, x29, x20, lsl #63 // encoding: [0xb1,0xff,0x14,0xcb]
+
+ sub x21, x22, x23, lsr #0
+ sub x24, x25, x26, lsr #18
+ sub x27, x28, x29, lsr #63
+// CHECK: sub x21, x22, x23, lsr #0 // encoding: [0xd5,0x02,0x57,0xcb]
+// CHECK: sub x24, x25, x26, lsr #18 // encoding: [0x38,0x4b,0x5a,0xcb]
+// CHECK: sub x27, x28, x29, lsr #63 // encoding: [0x9b,0xff,0x5d,0xcb]
+
+ sub x2, x3, x4, asr #0
+ sub x5, x6, x7, asr #21
+ sub x8, x9, x10, asr #63
+// CHECK: sub x2, x3, x4, asr #0 // encoding: [0x62,0x00,0x84,0xcb]
+// CHECK: sub x5, x6, x7, asr #21 // encoding: [0xc5,0x54,0x87,0xcb]
+// CHECK: sub x8, x9, x10, asr #63 // encoding: [0x28,0xfd,0x8a,0xcb]
+
+ subs w3, w5, w7
+ subs wzr, w3, w5
+ subs w20, wzr, w4
+ subs w4, w6, wzr
+// CHECK: subs w3, w5, w7 // encoding: [0xa3,0x00,0x07,0x6b]
+// CHECK: subs wzr, w3, w5 // encoding: [0x7f,0x00,0x05,0x6b]
+// CHECK: subs w20, wzr, w4 // encoding: [0xf4,0x03,0x04,0x6b]
+// CHECK: subs w4, w6, wzr // encoding: [0xc4,0x00,0x1f,0x6b]
+
+ subs w11, w13, w15, lsl #0
+ subs w9, w3, wzr, lsl #10
+ subs w17, w29, w20, lsl #31
+// CHECK: subs w11, w13, w15 // encoding: [0xab,0x01,0x0f,0x6b]
+// CHECK: subs w9, w3, wzr, lsl #10 // encoding: [0x69,0x28,0x1f,0x6b]
+// CHECK: subs w17, w29, w20, lsl #31 // encoding: [0xb1,0x7f,0x14,0x6b]
+
+ subs w21, w22, w23, lsr #0
+ subs w24, w25, w26, lsr #18
+ subs w27, w28, w29, lsr #31
+// CHECK: subs w21, w22, w23, lsr #0 // encoding: [0xd5,0x02,0x57,0x6b]
+// CHECK: subs w24, w25, w26, lsr #18 // encoding: [0x38,0x4b,0x5a,0x6b]
+// CHECK: subs w27, w28, w29, lsr #31 // encoding: [0x9b,0x7f,0x5d,0x6b]
+
+ subs w2, w3, w4, asr #0
+ subs w5, w6, w7, asr #21
+ subs w8, w9, w10, asr #31
+// CHECK: subs w2, w3, w4, asr #0 // encoding: [0x62,0x00,0x84,0x6b]
+// CHECK: subs w5, w6, w7, asr #21 // encoding: [0xc5,0x54,0x87,0x6b]
+// CHECK: subs w8, w9, w10, asr #31 // encoding: [0x28,0x7d,0x8a,0x6b]
+
+ subs x3, x5, x7
+ subs xzr, x3, x5
+ subs x20, xzr, x4
+ subs x4, x6, xzr
+// CHECK: subs x3, x5, x7 // encoding: [0xa3,0x00,0x07,0xeb]
+// CHECK: subs xzr, x3, x5 // encoding: [0x7f,0x00,0x05,0xeb]
+// CHECK: subs x20, xzr, x4 // encoding: [0xf4,0x03,0x04,0xeb]
+// CHECK: subs x4, x6, xzr // encoding: [0xc4,0x00,0x1f,0xeb]
+
+ subs x11, x13, x15, lsl #0
+ subs x9, x3, xzr, lsl #10
+ subs x17, x29, x20, lsl #63
+// CHECK: subs x11, x13, x15 // encoding: [0xab,0x01,0x0f,0xeb]
+// CHECK: subs x9, x3, xzr, lsl #10 // encoding: [0x69,0x28,0x1f,0xeb]
+// CHECK: subs x17, x29, x20, lsl #63 // encoding: [0xb1,0xff,0x14,0xeb]
+
+ subs x21, x22, x23, lsr #0
+ subs x24, x25, x26, lsr #18
+ subs x27, x28, x29, lsr #63
+// CHECK: subs x21, x22, x23, lsr #0 // encoding: [0xd5,0x02,0x57,0xeb]
+// CHECK: subs x24, x25, x26, lsr #18 // encoding: [0x38,0x4b,0x5a,0xeb]
+// CHECK: subs x27, x28, x29, lsr #63 // encoding: [0x9b,0xff,0x5d,0xeb]
+
+ subs x2, x3, x4, asr #0
+ subs x5, x6, x7, asr #21
+ subs x8, x9, x10, asr #63
+// CHECK: subs x2, x3, x4, asr #0 // encoding: [0x62,0x00,0x84,0xeb]
+// CHECK: subs x5, x6, x7, asr #21 // encoding: [0xc5,0x54,0x87,0xeb]
+// CHECK: subs x8, x9, x10, asr #63 // encoding: [0x28,0xfd,0x8a,0xeb]
+
+ cmn w0, w3
+ cmn wzr, w4
+ cmn w5, wzr
+// CHECK: cmn w0, w3 // encoding: [0x1f,0x00,0x03,0x2b]
+// CHECK: cmn wzr, w4 // encoding: [0xff,0x03,0x04,0x2b]
+// CHECK: cmn w5, wzr // encoding: [0xbf,0x00,0x1f,0x2b]
+
+ cmn w6, w7, lsl #0
+ cmn w8, w9, lsl #15
+ cmn w10, w11, lsl #31
+// CHECK: cmn w6, w7 // encoding: [0xdf,0x00,0x07,0x2b]
+// CHECK: cmn w8, w9, lsl #15 // encoding: [0x1f,0x3d,0x09,0x2b]
+// CHECK: cmn w10, w11, lsl #31 // encoding: [0x5f,0x7d,0x0b,0x2b]
+
+ cmn w12, w13, lsr #0
+ cmn w14, w15, lsr #21
+ cmn w16, w17, lsr #31
+// CHECK: cmn w12, w13, lsr #0 // encoding: [0x9f,0x01,0x4d,0x2b]
+// CHECK: cmn w14, w15, lsr #21 // encoding: [0xdf,0x55,0x4f,0x2b]
+// CHECK: cmn w16, w17, lsr #31 // encoding: [0x1f,0x7e,0x51,0x2b]
+
+ cmn w18, w19, asr #0
+ cmn w20, w21, asr #22
+ cmn w22, w23, asr #31
+// CHECK: cmn w18, w19, asr #0 // encoding: [0x5f,0x02,0x93,0x2b]
+// CHECK: cmn w20, w21, asr #22 // encoding: [0x9f,0x5a,0x95,0x2b]
+// CHECK: cmn w22, w23, asr #31 // encoding: [0xdf,0x7e,0x97,0x2b]
+
+ cmn x0, x3
+ cmn xzr, x4
+ cmn x5, xzr
+// CHECK: cmn x0, x3 // encoding: [0x1f,0x00,0x03,0xab]
+// CHECK: cmn xzr, x4 // encoding: [0xff,0x03,0x04,0xab]
+// CHECK: cmn x5, xzr // encoding: [0xbf,0x00,0x1f,0xab]
+
+ cmn x6, x7, lsl #0
+ cmn x8, x9, lsl #15
+ cmn x10, x11, lsl #63
+// CHECK: cmn x6, x7 // encoding: [0xdf,0x00,0x07,0xab]
+// CHECK: cmn x8, x9, lsl #15 // encoding: [0x1f,0x3d,0x09,0xab]
+// CHECK: cmn x10, x11, lsl #63 // encoding: [0x5f,0xfd,0x0b,0xab]
+
+ cmn x12, x13, lsr #0
+ cmn x14, x15, lsr #41
+ cmn x16, x17, lsr #63
+// CHECK: cmn x12, x13, lsr #0 // encoding: [0x9f,0x01,0x4d,0xab]
+// CHECK: cmn x14, x15, lsr #41 // encoding: [0xdf,0xa5,0x4f,0xab]
+// CHECK: cmn x16, x17, lsr #63 // encoding: [0x1f,0xfe,0x51,0xab]
+
+ cmn x18, x19, asr #0
+ cmn x20, x21, asr #55
+ cmn x22, x23, asr #63
+// CHECK: cmn x18, x19, asr #0 // encoding: [0x5f,0x02,0x93,0xab]
+// CHECK: cmn x20, x21, asr #55 // encoding: [0x9f,0xde,0x95,0xab]
+// CHECK: cmn x22, x23, asr #63 // encoding: [0xdf,0xfe,0x97,0xab]
+
+ cmp w0, w3
+ cmp wzr, w4
+ cmp w5, wzr
+// CHECK: cmp w0, w3 // encoding: [0x1f,0x00,0x03,0x6b]
+// CHECK: cmp wzr, w4 // encoding: [0xff,0x03,0x04,0x6b]
+// CHECK: cmp w5, wzr // encoding: [0xbf,0x00,0x1f,0x6b]
+
+ cmp w6, w7, lsl #0
+ cmp w8, w9, lsl #15
+ cmp w10, w11, lsl #31
+// CHECK: cmp w6, w7 // encoding: [0xdf,0x00,0x07,0x6b]
+// CHECK: cmp w8, w9, lsl #15 // encoding: [0x1f,0x3d,0x09,0x6b]
+// CHECK: cmp w10, w11, lsl #31 // encoding: [0x5f,0x7d,0x0b,0x6b]
+
+ cmp w12, w13, lsr #0
+ cmp w14, w15, lsr #21
+ cmp w16, w17, lsr #31
+// CHECK: cmp w12, w13, lsr #0 // encoding: [0x9f,0x01,0x4d,0x6b]
+// CHECK: cmp w14, w15, lsr #21 // encoding: [0xdf,0x55,0x4f,0x6b]
+// CHECK: cmp w16, w17, lsr #31 // encoding: [0x1f,0x7e,0x51,0x6b]
+
+ cmp w18, w19, asr #0
+ cmp w20, w21, asr #22
+ cmp w22, w23, asr #31
+// CHECK: cmp w18, w19, asr #0 // encoding: [0x5f,0x02,0x93,0x6b]
+// CHECK: cmp w20, w21, asr #22 // encoding: [0x9f,0x5a,0x95,0x6b]
+// CHECK: cmp w22, w23, asr #31 // encoding: [0xdf,0x7e,0x97,0x6b]
+
+ cmp x0, x3
+ cmp xzr, x4
+ cmp x5, xzr
+// CHECK: cmp x0, x3 // encoding: [0x1f,0x00,0x03,0xeb]
+// CHECK: cmp xzr, x4 // encoding: [0xff,0x03,0x04,0xeb]
+// CHECK: cmp x5, xzr // encoding: [0xbf,0x00,0x1f,0xeb]
+
+ cmp x6, x7, lsl #0
+ cmp x8, x9, lsl #15
+ cmp x10, x11, lsl #63
+// CHECK: cmp x6, x7 // encoding: [0xdf,0x00,0x07,0xeb]
+// CHECK: cmp x8, x9, lsl #15 // encoding: [0x1f,0x3d,0x09,0xeb]
+// CHECK: cmp x10, x11, lsl #63 // encoding: [0x5f,0xfd,0x0b,0xeb]
+
+ cmp x12, x13, lsr #0
+ cmp x14, x15, lsr #41
+ cmp x16, x17, lsr #63
+// CHECK: cmp x12, x13, lsr #0 // encoding: [0x9f,0x01,0x4d,0xeb]
+// CHECK: cmp x14, x15, lsr #41 // encoding: [0xdf,0xa5,0x4f,0xeb]
+// CHECK: cmp x16, x17, lsr #63 // encoding: [0x1f,0xfe,0x51,0xeb]
+
+ cmp x18, x19, asr #0
+ cmp x20, x21, asr #55
+ cmp x22, x23, asr #63
+// CHECK: cmp x18, x19, asr #0 // encoding: [0x5f,0x02,0x93,0xeb]
+// CHECK: cmp x20, x21, asr #55 // encoding: [0x9f,0xde,0x95,0xeb]
+// CHECK: cmp x22, x23, asr #63 // encoding: [0xdf,0xfe,0x97,0xeb]
+
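+// neg and negs are aliases for sub/subs from the zero register; for example,
+// neg w29, w30 is printed as sub w29, wzr, w30 in the CHECK lines below.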
+ neg w29, w30
+ neg w30, wzr
+ neg wzr, w0
+// CHECK: sub w29, wzr, w30 // encoding: [0xfd,0x03,0x1e,0x4b]
+// CHECK: sub w30, wzr, wzr // encoding: [0xfe,0x03,0x1f,0x4b]
+// CHECK: sub wzr, wzr, w0 // encoding: [0xff,0x03,0x00,0x4b]
+
+ neg w28, w27, lsl #0
+ neg w26, w25, lsl #29
+ neg w24, w23, lsl #31
+// CHECK: sub w28, wzr, w27 // encoding: [0xfc,0x03,0x1b,0x4b]
+// CHECK: sub w26, wzr, w25, lsl #29 // encoding: [0xfa,0x77,0x19,0x4b]
+// CHECK: sub w24, wzr, w23, lsl #31 // encoding: [0xf8,0x7f,0x17,0x4b]
+
+ neg w22, w21, lsr #0
+ neg w20, w19, lsr #1
+ neg w18, w17, lsr #31
+// CHECK: sub w22, wzr, w21, lsr #0 // encoding: [0xf6,0x03,0x55,0x4b]
+// CHECK: sub w20, wzr, w19, lsr #1 // encoding: [0xf4,0x07,0x53,0x4b]
+// CHECK: sub w18, wzr, w17, lsr #31 // encoding: [0xf2,0x7f,0x51,0x4b]
+
+ neg w16, w15, asr #0
+ neg w14, w13, asr #12
+ neg w12, w11, asr #31
+// CHECK: sub w16, wzr, w15, asr #0 // encoding: [0xf0,0x03,0x8f,0x4b]
+// CHECK: sub w14, wzr, w13, asr #12 // encoding: [0xee,0x33,0x8d,0x4b]
+// CHECK: sub w12, wzr, w11, asr #31 // encoding: [0xec,0x7f,0x8b,0x4b]
+
+ neg x29, x30
+ neg x30, xzr
+ neg xzr, x0
+// CHECK: sub x29, xzr, x30 // encoding: [0xfd,0x03,0x1e,0xcb]
+// CHECK: sub x30, xzr, xzr // encoding: [0xfe,0x03,0x1f,0xcb]
+// CHECK: sub xzr, xzr, x0 // encoding: [0xff,0x03,0x00,0xcb]
+
+ neg x28, x27, lsl #0
+ neg x26, x25, lsl #29
+ neg x24, x23, lsl #31
+// CHECK: sub x28, xzr, x27 // encoding: [0xfc,0x03,0x1b,0xcb]
+// CHECK: sub x26, xzr, x25, lsl #29 // encoding: [0xfa,0x77,0x19,0xcb]
+// CHECK: sub x24, xzr, x23, lsl #31 // encoding: [0xf8,0x7f,0x17,0xcb]
+
+ neg x22, x21, lsr #0
+ neg x20, x19, lsr #1
+ neg x18, x17, lsr #31
+// CHECK: sub x22, xzr, x21, lsr #0 // encoding: [0xf6,0x03,0x55,0xcb]
+// CHECK: sub x20, xzr, x19, lsr #1 // encoding: [0xf4,0x07,0x53,0xcb]
+// CHECK: sub x18, xzr, x17, lsr #31 // encoding: [0xf2,0x7f,0x51,0xcb]
+
+ neg x16, x15, asr #0
+ neg x14, x13, asr #12
+ neg x12, x11, asr #31
+// CHECK: sub x16, xzr, x15, asr #0 // encoding: [0xf0,0x03,0x8f,0xcb]
+// CHECK: sub x14, xzr, x13, asr #12 // encoding: [0xee,0x33,0x8d,0xcb]
+// CHECK: sub x12, xzr, x11, asr #31 // encoding: [0xec,0x7f,0x8b,0xcb]
+
+ negs w29, w30
+ negs w30, wzr
+ negs wzr, w0
+// CHECK: subs w29, wzr, w30 // encoding: [0xfd,0x03,0x1e,0x6b]
+// CHECK: subs w30, wzr, wzr // encoding: [0xfe,0x03,0x1f,0x6b]
+// CHECK: subs wzr, wzr, w0 // encoding: [0xff,0x03,0x00,0x6b]
+
+ negs w28, w27, lsl #0
+ negs w26, w25, lsl #29
+ negs w24, w23, lsl #31
+// CHECK: subs w28, wzr, w27 // encoding: [0xfc,0x03,0x1b,0x6b]
+// CHECK: subs w26, wzr, w25, lsl #29 // encoding: [0xfa,0x77,0x19,0x6b]
+// CHECK: subs w24, wzr, w23, lsl #31 // encoding: [0xf8,0x7f,0x17,0x6b]
+
+ negs w22, w21, lsr #0
+ negs w20, w19, lsr #1
+ negs w18, w17, lsr #31
+// CHECK: subs w22, wzr, w21, lsr #0 // encoding: [0xf6,0x03,0x55,0x6b]
+// CHECK: subs w20, wzr, w19, lsr #1 // encoding: [0xf4,0x07,0x53,0x6b]
+// CHECK: subs w18, wzr, w17, lsr #31 // encoding: [0xf2,0x7f,0x51,0x6b]
+
+ negs w16, w15, asr #0
+ negs w14, w13, asr #12
+ negs w12, w11, asr #31
+// CHECK: subs w16, wzr, w15, asr #0 // encoding: [0xf0,0x03,0x8f,0x6b]
+// CHECK: subs w14, wzr, w13, asr #12 // encoding: [0xee,0x33,0x8d,0x6b]
+// CHECK: subs w12, wzr, w11, asr #31 // encoding: [0xec,0x7f,0x8b,0x6b]
+
+ negs x29, x30
+ negs x30, xzr
+ negs xzr, x0
+// CHECK: subs x29, xzr, x30 // encoding: [0xfd,0x03,0x1e,0xeb]
+// CHECK: subs x30, xzr, xzr // encoding: [0xfe,0x03,0x1f,0xeb]
+// CHECK: subs xzr, xzr, x0 // encoding: [0xff,0x03,0x00,0xeb]
+
+ negs x28, x27, lsl #0
+ negs x26, x25, lsl #29
+ negs x24, x23, lsl #31
+// CHECK: subs x28, xzr, x27 // encoding: [0xfc,0x03,0x1b,0xeb]
+// CHECK: subs x26, xzr, x25, lsl #29 // encoding: [0xfa,0x77,0x19,0xeb]
+// CHECK: subs x24, xzr, x23, lsl #31 // encoding: [0xf8,0x7f,0x17,0xeb]
+
+ negs x22, x21, lsr #0
+ negs x20, x19, lsr #1
+ negs x18, x17, lsr #31
+// CHECK: subs x22, xzr, x21, lsr #0 // encoding: [0xf6,0x03,0x55,0xeb]
+// CHECK: subs x20, xzr, x19, lsr #1 // encoding: [0xf4,0x07,0x53,0xeb]
+// CHECK: subs x18, xzr, x17, lsr #31 // encoding: [0xf2,0x7f,0x51,0xeb]
+
+ negs x16, x15, asr #0
+ negs x14, x13, asr #12
+ negs x12, x11, asr #31
+// CHECK: subs x16, xzr, x15, asr #0 // encoding: [0xf0,0x03,0x8f,0xeb]
+// CHECK: subs x14, xzr, x13, asr #12 // encoding: [0xee,0x33,0x8d,0xeb]
+// CHECK: subs x12, xzr, x11, asr #31 // encoding: [0xec,0x7f,0x8b,0xeb]
+
+//------------------------------------------------------------------------------
+// Add-sub (with carry)
+//------------------------------------------------------------------------------
+ adc w29, w27, w25
+ adc wzr, w3, w4
+ adc w9, wzr, w10
+ adc w20, w0, wzr
+// CHECK: adc w29, w27, w25 // encoding: [0x7d,0x03,0x19,0x1a]
+// CHECK: adc wzr, w3, w4 // encoding: [0x7f,0x00,0x04,0x1a]
+// CHECK: adc w9, wzr, w10 // encoding: [0xe9,0x03,0x0a,0x1a]
+// CHECK: adc w20, w0, wzr // encoding: [0x14,0x00,0x1f,0x1a]
+
+ adc x29, x27, x25
+ adc xzr, x3, x4
+ adc x9, xzr, x10
+ adc x20, x0, xzr
+// CHECK: adc x29, x27, x25 // encoding: [0x7d,0x03,0x19,0x9a]
+// CHECK: adc xzr, x3, x4 // encoding: [0x7f,0x00,0x04,0x9a]
+// CHECK: adc x9, xzr, x10 // encoding: [0xe9,0x03,0x0a,0x9a]
+// CHECK: adc x20, x0, xzr // encoding: [0x14,0x00,0x1f,0x9a]
+
+ adcs w29, w27, w25
+ adcs wzr, w3, w4
+ adcs w9, wzr, w10
+ adcs w20, w0, wzr
+// CHECK: adcs w29, w27, w25 // encoding: [0x7d,0x03,0x19,0x3a]
+// CHECK: adcs wzr, w3, w4 // encoding: [0x7f,0x00,0x04,0x3a]
+// CHECK: adcs w9, wzr, w10 // encoding: [0xe9,0x03,0x0a,0x3a]
+// CHECK: adcs w20, w0, wzr // encoding: [0x14,0x00,0x1f,0x3a]
+
+ adcs x29, x27, x25
+ adcs xzr, x3, x4
+ adcs x9, xzr, x10
+ adcs x20, x0, xzr
+// CHECK: adcs x29, x27, x25 // encoding: [0x7d,0x03,0x19,0xba]
+// CHECK: adcs xzr, x3, x4 // encoding: [0x7f,0x00,0x04,0xba]
+// CHECK: adcs x9, xzr, x10 // encoding: [0xe9,0x03,0x0a,0xba]
+// CHECK: adcs x20, x0, xzr // encoding: [0x14,0x00,0x1f,0xba]
+
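+// When the first source is the zero register, sbc and sbcs print as their ngc
+// and ngcs aliases, which is why some CHECK lines below differ from the input.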
+ sbc w29, w27, w25
+ sbc wzr, w3, w4
+ sbc w9, wzr, w10
+ sbc w20, w0, wzr
+// CHECK: sbc w29, w27, w25 // encoding: [0x7d,0x03,0x19,0x5a]
+// CHECK: sbc wzr, w3, w4 // encoding: [0x7f,0x00,0x04,0x5a]
+// CHECK: ngc w9, w10 // encoding: [0xe9,0x03,0x0a,0x5a]
+// CHECK: sbc w20, w0, wzr // encoding: [0x14,0x00,0x1f,0x5a]
+
+ sbc x29, x27, x25
+ sbc xzr, x3, x4
+ sbc x9, xzr, x10
+ sbc x20, x0, xzr
+// CHECK: sbc x29, x27, x25 // encoding: [0x7d,0x03,0x19,0xda]
+// CHECK: sbc xzr, x3, x4 // encoding: [0x7f,0x00,0x04,0xda]
+// CHECK: ngc x9, x10 // encoding: [0xe9,0x03,0x0a,0xda]
+// CHECK: sbc x20, x0, xzr // encoding: [0x14,0x00,0x1f,0xda]
+
+ sbcs w29, w27, w25
+ sbcs wzr, w3, w4
+ sbcs w9, wzr, w10
+ sbcs w20, w0, wzr
+// CHECK: sbcs w29, w27, w25 // encoding: [0x7d,0x03,0x19,0x7a]
+// CHECK: sbcs wzr, w3, w4 // encoding: [0x7f,0x00,0x04,0x7a]
+// CHECK: ngcs w9, w10 // encoding: [0xe9,0x03,0x0a,0x7a]
+// CHECK: sbcs w20, w0, wzr // encoding: [0x14,0x00,0x1f,0x7a]
+
+ sbcs x29, x27, x25
+ sbcs xzr, x3, x4
+ sbcs x9, xzr, x10
+ sbcs x20, x0, xzr
+// CHECK: sbcs x29, x27, x25 // encoding: [0x7d,0x03,0x19,0xfa]
+// CHECK: sbcs xzr, x3, x4 // encoding: [0x7f,0x00,0x04,0xfa]
+// CHECK: ngcs x9, x10 // encoding: [0xe9,0x03,0x0a,0xfa]
+// CHECK: sbcs x20, x0, xzr // encoding: [0x14,0x00,0x1f,0xfa]
+
+ ngc w3, w12
+ ngc wzr, w9
+ ngc w23, wzr
+// CHECK: ngc w3, w12 // encoding: [0xe3,0x03,0x0c,0x5a]
+// CHECK: ngc wzr, w9 // encoding: [0xff,0x03,0x09,0x5a]
+// CHECK: ngc w23, wzr // encoding: [0xf7,0x03,0x1f,0x5a]
+
+ ngc x29, x30
+ ngc xzr, x0
+ ngc x0, xzr
+// CHECK: ngc x29, x30 // encoding: [0xfd,0x03,0x1e,0xda]
+// CHECK: ngc xzr, x0 // encoding: [0xff,0x03,0x00,0xda]
+// CHECK: ngc x0, xzr // encoding: [0xe0,0x03,0x1f,0xda]
+
+ ngcs w3, w12
+ ngcs wzr, w9
+ ngcs w23, wzr
+// CHECK: ngcs w3, w12 // encoding: [0xe3,0x03,0x0c,0x7a]
+// CHECK: ngcs wzr, w9 // encoding: [0xff,0x03,0x09,0x7a]
+// CHECK: ngcs w23, wzr // encoding: [0xf7,0x03,0x1f,0x7a]
+
+ ngcs x29, x30
+ ngcs xzr, x0
+ ngcs x0, xzr
+// CHECK: ngcs x29, x30 // encoding: [0xfd,0x03,0x1e,0xfa]
+// CHECK: ngcs xzr, x0 // encoding: [0xff,0x03,0x00,0xfa]
+// CHECK: ngcs x0, xzr // encoding: [0xe0,0x03,0x1f,0xfa]
+
+//------------------------------------------------------------------------------
+// Bitfield
+//------------------------------------------------------------------------------
+
+ sbfm x1, x2, #3, #4
+ sbfm x3, x4, #63, #63
+ sbfm wzr, wzr, #31, #31
+ sbfm w12, w9, #0, #0
+// CHECK: sbfm x1, x2, #3, #4 // encoding: [0x41,0x10,0x43,0x93]
+// CHECK: sbfm x3, x4, #63, #63 // encoding: [0x83,0xfc,0x7f,0x93]
+// CHECK: sbfm wzr, wzr, #31, #31 // encoding: [0xff,0x7f,0x1f,0x13]
+// CHECK: sbfm w12, w9, #0, #0 // encoding: [0x2c,0x01,0x00,0x13]
+
+ ubfm x4, x5, #12, #10
+ ubfm xzr, x4, #0, #0
+ ubfm x4, xzr, #63, #5
+ ubfm x5, x6, #12, #63
+// CHECK: ubfm x4, x5, #12, #10 // encoding: [0xa4,0x28,0x4c,0xd3]
+// CHECK: ubfm xzr, x4, #0, #0 // encoding: [0x9f,0x00,0x40,0xd3]
+// CHECK: ubfm x4, xzr, #63, #5 // encoding: [0xe4,0x17,0x7f,0xd3]
+// CHECK: ubfm x5, x6, #12, #63 // encoding: [0xc5,0xfc,0x4c,0xd3]
+
+ bfm x4, x5, #12, #10
+ bfm xzr, x4, #0, #0
+ bfm x4, xzr, #63, #5
+ bfm x5, x6, #12, #63
+// CHECK: bfm x4, x5, #12, #10 // encoding: [0xa4,0x28,0x4c,0xb3]
+// CHECK: bfm xzr, x4, #0, #0 // encoding: [0x9f,0x00,0x40,0xb3]
+// CHECK: bfm x4, xzr, #63, #5 // encoding: [0xe4,0x17,0x7f,0xb3]
+// CHECK: bfm x5, x6, #12, #63 // encoding: [0xc5,0xfc,0x4c,0xb3]
+
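+// The extend, shift-immediate and bitfield-alias mnemonics that follow (sxtb,
+// asr, sbfiz, bfi, ubfx, ...) are all aliases of sbfm, bfm or ubfm.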
+ sxtb w1, w2
+ sxtb xzr, w3
+ sxth w9, w10
+ sxth x0, w1
+ sxtw x3, w30
+// CHECK: sxtb w1, w2 // encoding: [0x41,0x1c,0x00,0x13]
+// CHECK: sxtb xzr, w3 // encoding: [0x7f,0x1c,0x40,0x93]
+// CHECK: sxth w9, w10 // encoding: [0x49,0x3d,0x00,0x13]
+// CHECK: sxth x0, w1 // encoding: [0x20,0x3c,0x40,0x93]
+// CHECK: sxtw x3, w30 // encoding: [0xc3,0x7f,0x40,0x93]
+
+ uxtb w1, w2
+ uxtb xzr, w3
+ uxth w9, w10
+ uxth x0, w1
+// CHECK: uxtb w1, w2 // encoding: [0x41,0x1c,0x00,0x53]
+// CHECK: uxtb xzr, w3 // encoding: [0x7f,0x1c,0x00,0x53]
+// CHECK: uxth w9, w10 // encoding: [0x49,0x3d,0x00,0x53]
+// CHECK: uxth x0, w1 // encoding: [0x20,0x3c,0x00,0x53]
+
+ asr w3, w2, #0
+ asr w9, w10, #31
+ asr x20, x21, #63
+ asr w1, wzr, #3
+// CHECK: asr w3, w2, #0 // encoding: [0x43,0x7c,0x00,0x13]
+// CHECK: asr w9, w10, #31 // encoding: [0x49,0x7d,0x1f,0x13]
+// CHECK: asr x20, x21, #63 // encoding: [0xb4,0xfe,0x7f,0x93]
+// CHECK: asr w1, wzr, #3 // encoding: [0xe1,0x7f,0x03,0x13]
+
+ lsr w3, w2, #0
+ lsr w9, w10, #31
+ lsr x20, x21, #63
+ lsr wzr, wzr, #3
+// CHECK: lsr w3, w2, #0 // encoding: [0x43,0x7c,0x00,0x53]
+// CHECK: lsr w9, w10, #31 // encoding: [0x49,0x7d,0x1f,0x53]
+// CHECK: lsr x20, x21, #63 // encoding: [0xb4,0xfe,0x7f,0xd3]
+// CHECK: lsr wzr, wzr, #3 // encoding: [0xff,0x7f,0x03,0x53]
+
+ lsl w3, w2, #0
+ lsl w9, w10, #31
+ lsl x20, x21, #63
+ lsl w1, wzr, #3
+// CHECK: lsl w3, w2, #0 // encoding: [0x43,0x7c,0x00,0x53]
+// CHECK: lsl w9, w10, #31 // encoding: [0x49,0x01,0x01,0x53]
+// CHECK: lsl x20, x21, #63 // encoding: [0xb4,0x02,0x41,0xd3]
+// CHECK: lsl w1, wzr, #3 // encoding: [0xe1,0x73,0x1d,0x53]
+
+ sbfiz w9, w10, #0, #1
+ sbfiz x2, x3, #63, #1
+ sbfiz x19, x20, #0, #64
+ sbfiz x9, x10, #5, #59
+ sbfiz w9, w10, #0, #32
+ sbfiz w11, w12, #31, #1
+ sbfiz w13, w14, #29, #3
+ sbfiz xzr, xzr, #10, #11
+// CHECK: sbfiz w9, w10, #0, #1 // encoding: [0x49,0x01,0x00,0x13]
+// CHECK: sbfiz x2, x3, #63, #1 // encoding: [0x62,0x00,0x41,0x93]
+// CHECK: sbfiz x19, x20, #0, #64 // encoding: [0x93,0xfe,0x40,0x93]
+// CHECK: sbfiz x9, x10, #5, #59 // encoding: [0x49,0xe9,0x7b,0x93]
+// CHECK: sbfiz w9, w10, #0, #32 // encoding: [0x49,0x7d,0x00,0x13]
+// CHECK: sbfiz w11, w12, #31, #1 // encoding: [0x8b,0x01,0x01,0x13]
+// CHECK: sbfiz w13, w14, #29, #3 // encoding: [0xcd,0x09,0x03,0x13]
+// CHECK: sbfiz xzr, xzr, #10, #11 // encoding: [0xff,0x2b,0x76,0x93]
+
+ sbfx w9, w10, #0, #1
+ sbfx x2, x3, #63, #1
+ sbfx x19, x20, #0, #64
+ sbfx x9, x10, #5, #59
+ sbfx w9, w10, #0, #32
+ sbfx w11, w12, #31, #1
+ sbfx w13, w14, #29, #3
+ sbfx xzr, xzr, #10, #11
+// CHECK: sbfx w9, w10, #0, #1 // encoding: [0x49,0x01,0x00,0x13]
+// CHECK: sbfx x2, x3, #63, #1 // encoding: [0x62,0xfc,0x7f,0x93]
+// CHECK: sbfx x19, x20, #0, #64 // encoding: [0x93,0xfe,0x40,0x93]
+// CHECK: sbfx x9, x10, #5, #59 // encoding: [0x49,0xfd,0x45,0x93]
+// CHECK: sbfx w9, w10, #0, #32 // encoding: [0x49,0x7d,0x00,0x13]
+// CHECK: sbfx w11, w12, #31, #1 // encoding: [0x8b,0x7d,0x1f,0x13]
+// CHECK: sbfx w13, w14, #29, #3 // encoding: [0xcd,0x7d,0x1d,0x13]
+// CHECK: sbfx xzr, xzr, #10, #11 // encoding: [0xff,0x53,0x4a,0x93]
+
+ bfi w9, w10, #0, #1
+ bfi x2, x3, #63, #1
+ bfi x19, x20, #0, #64
+ bfi x9, x10, #5, #59
+ bfi w9, w10, #0, #32
+ bfi w11, w12, #31, #1
+ bfi w13, w14, #29, #3
+ bfi xzr, xzr, #10, #11
+// CHECK: bfi w9, w10, #0, #1 // encoding: [0x49,0x01,0x00,0x33]
+// CHECK: bfi x2, x3, #63, #1 // encoding: [0x62,0x00,0x41,0xb3]
+// CHECK: bfi x19, x20, #0, #64 // encoding: [0x93,0xfe,0x40,0xb3]
+// CHECK: bfi x9, x10, #5, #59 // encoding: [0x49,0xe9,0x7b,0xb3]
+// CHECK: bfi w9, w10, #0, #32 // encoding: [0x49,0x7d,0x00,0x33]
+// CHECK: bfi w11, w12, #31, #1 // encoding: [0x8b,0x01,0x01,0x33]
+// CHECK: bfi w13, w14, #29, #3 // encoding: [0xcd,0x09,0x03,0x33]
+// CHECK: bfi xzr, xzr, #10, #11 // encoding: [0xff,0x2b,0x76,0xb3]
+
+ bfxil w9, w10, #0, #1
+ bfxil x2, x3, #63, #1
+ bfxil x19, x20, #0, #64
+ bfxil x9, x10, #5, #59
+ bfxil w9, w10, #0, #32
+ bfxil w11, w12, #31, #1
+ bfxil w13, w14, #29, #3
+ bfxil xzr, xzr, #10, #11
+// CHECK: bfxil w9, w10, #0, #1 // encoding: [0x49,0x01,0x00,0x33]
+// CHECK: bfxil x2, x3, #63, #1 // encoding: [0x62,0xfc,0x7f,0xb3]
+// CHECK: bfxil x19, x20, #0, #64 // encoding: [0x93,0xfe,0x40,0xb3]
+// CHECK: bfxil x9, x10, #5, #59 // encoding: [0x49,0xfd,0x45,0xb3]
+// CHECK: bfxil w9, w10, #0, #32 // encoding: [0x49,0x7d,0x00,0x33]
+// CHECK: bfxil w11, w12, #31, #1 // encoding: [0x8b,0x7d,0x1f,0x33]
+// CHECK: bfxil w13, w14, #29, #3 // encoding: [0xcd,0x7d,0x1d,0x33]
+// CHECK: bfxil xzr, xzr, #10, #11 // encoding: [0xff,0x53,0x4a,0xb3]
+
+ ubfiz w9, w10, #0, #1
+ ubfiz x2, x3, #63, #1
+ ubfiz x19, x20, #0, #64
+ ubfiz x9, x10, #5, #59
+ ubfiz w9, w10, #0, #32
+ ubfiz w11, w12, #31, #1
+ ubfiz w13, w14, #29, #3
+ ubfiz xzr, xzr, #10, #11
+// CHECK: ubfiz w9, w10, #0, #1 // encoding: [0x49,0x01,0x00,0x53]
+// CHECK: ubfiz x2, x3, #63, #1 // encoding: [0x62,0x00,0x41,0xd3]
+// CHECK: ubfiz x19, x20, #0, #64 // encoding: [0x93,0xfe,0x40,0xd3]
+// CHECK: ubfiz x9, x10, #5, #59 // encoding: [0x49,0xe9,0x7b,0xd3]
+// CHECK: ubfiz w9, w10, #0, #32 // encoding: [0x49,0x7d,0x00,0x53]
+// CHECK: ubfiz w11, w12, #31, #1 // encoding: [0x8b,0x01,0x01,0x53]
+// CHECK: ubfiz w13, w14, #29, #3 // encoding: [0xcd,0x09,0x03,0x53]
+// CHECK: ubfiz xzr, xzr, #10, #11 // encoding: [0xff,0x2b,0x76,0xd3]
+
+ ubfx w9, w10, #0, #1
+ ubfx x2, x3, #63, #1
+ ubfx x19, x20, #0, #64
+ ubfx x9, x10, #5, #59
+ ubfx w9, w10, #0, #32
+ ubfx w11, w12, #31, #1
+ ubfx w13, w14, #29, #3
+ ubfx xzr, xzr, #10, #11
+// CHECK: ubfx w9, w10, #0, #1 // encoding: [0x49,0x01,0x00,0x53]
+// CHECK: ubfx x2, x3, #63, #1 // encoding: [0x62,0xfc,0x7f,0xd3]
+// CHECK: ubfx x19, x20, #0, #64 // encoding: [0x93,0xfe,0x40,0xd3]
+// CHECK: ubfx x9, x10, #5, #59 // encoding: [0x49,0xfd,0x45,0xd3]
+// CHECK: ubfx w9, w10, #0, #32 // encoding: [0x49,0x7d,0x00,0x53]
+// CHECK: ubfx w11, w12, #31, #1 // encoding: [0x8b,0x7d,0x1f,0x53]
+// CHECK: ubfx w13, w14, #29, #3 // encoding: [0xcd,0x7d,0x1d,0x53]
+// CHECK: ubfx xzr, xzr, #10, #11 // encoding: [0xff,0x53,0x4a,0xd3]
+
+//------------------------------------------------------------------------------
+// Compare & branch (immediate)
+//------------------------------------------------------------------------------
+
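+// Branches to an as-yet-undefined label cannot be encoded directly; the CHECK
+// lines expect 'A' placeholder bytes in the encoding plus a fixup record.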
+ cbz w5, lbl
+ cbz x5, lbl
+ cbnz x2, lbl
+ cbnz x26, lbl
+// CHECK: cbz w5, lbl // encoding: [0x05'A',A,A,0x34'A']
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
+// CHECK: cbz x5, lbl // encoding: [0x05'A',A,A,0xb4'A']
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
+// CHECK: cbnz x2, lbl // encoding: [0x02'A',A,A,0xb5'A']
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
+// CHECK: cbnz x26, lbl // encoding: [0x1a'A',A,A,0xb5'A']
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
+
+ cbz wzr, lbl
+ cbnz xzr, lbl
+// CHECK: cbz wzr, lbl // encoding: [0x1f'A',A,A,0x34'A']
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
+// CHECK: cbnz xzr, lbl // encoding: [0x1f'A',A,A,0xb5'A']
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
+
+ cbz w5, #0
+ cbnz x3, #-4
+ cbz w20, #1048572
+ cbnz xzr, #-1048576
+// CHECK: cbz w5, #0 // encoding: [0x05,0x00,0x00,0x34]
+// CHECK: cbnz x3, #-4 // encoding: [0xe3,0xff,0xff,0xb5]
+// CHECK: cbz w20, #1048572 // encoding: [0xf4,0xff,0x7f,0x34]
+// CHECK: cbnz xzr, #-1048576 // encoding: [0x1f,0x00,0x80,0xb5]
+
+//------------------------------------------------------------------------------
+// Conditional branch (immediate)
+//------------------------------------------------------------------------------
+
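+// cs/hs and cc/lo name the same conditions; the printer prefers hs and lo, so
+// both spellings produce identical CHECK lines here and in later sections.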
+ b.eq lbl
+ b.ne lbl
+ b.cs lbl
+ b.hs lbl
+ b.lo lbl
+ b.cc lbl
+ b.mi lbl
+ b.pl lbl
+ b.vs lbl
+ b.vc lbl
+ b.hi lbl
+ b.ls lbl
+ b.ge lbl
+ b.lt lbl
+ b.gt lbl
+ b.le lbl
+ b.al lbl
+// CHECK: b.eq lbl // encoding: [A,A,A,0x54'A']
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
+// CHECK: b.ne lbl // encoding: [0x01'A',A,A,0x54'A']
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
+// CHECK: b.hs lbl // encoding: [0x02'A',A,A,0x54'A']
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
+// CHECK: b.hs lbl // encoding: [0x02'A',A,A,0x54'A']
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
+// CHECK: b.lo lbl // encoding: [0x03'A',A,A,0x54'A']
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
+// CHECK: b.lo lbl // encoding: [0x03'A',A,A,0x54'A']
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
+// CHECK: b.mi lbl // encoding: [0x04'A',A,A,0x54'A']
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
+// CHECK: b.pl lbl // encoding: [0x05'A',A,A,0x54'A']
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
+// CHECK: b.vs lbl // encoding: [0x06'A',A,A,0x54'A']
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
+// CHECK: b.vc lbl // encoding: [0x07'A',A,A,0x54'A']
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
+// CHECK: b.hi lbl // encoding: [0x08'A',A,A,0x54'A']
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
+// CHECK: b.ls lbl // encoding: [0x09'A',A,A,0x54'A']
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
+// CHECK: b.ge lbl // encoding: [0x0a'A',A,A,0x54'A']
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
+// CHECK: b.lt lbl // encoding: [0x0b'A',A,A,0x54'A']
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
+// CHECK: b.gt lbl // encoding: [0x0c'A',A,A,0x54'A']
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
+// CHECK: b.le lbl // encoding: [0x0d'A',A,A,0x54'A']
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
+// CHECK: b.al lbl // encoding: [0x0e'A',A,A,0x54'A']
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
+
+ b.eq #0
+ b.lt #-4
+ b.cc #1048572
+// CHECK: b.eq #0 // encoding: [0x00,0x00,0x00,0x54]
+// CHECK: b.lt #-4 // encoding: [0xeb,0xff,0xff,0x54]
+// CHECK: b.lo #1048572 // encoding: [0xe3,0xff,0x7f,0x54]
+
+//------------------------------------------------------------------------------
+// Conditional compare (immediate)
+//------------------------------------------------------------------------------
+
+ ccmp w1, #31, #0, eq
+ ccmp w3, #0, #15, hs
+ ccmp wzr, #15, #13, cs
+// CHECK: ccmp w1, #31, #0, eq // encoding: [0x20,0x08,0x5f,0x7a]
+// CHECK: ccmp w3, #0, #15, hs // encoding: [0x6f,0x28,0x40,0x7a]
+// CHECK: ccmp wzr, #15, #13, hs // encoding: [0xed,0x2b,0x4f,0x7a]
+
+ ccmp x9, #31, #0, le
+ ccmp x3, #0, #15, gt
+ ccmp xzr, #5, #7, ne
+// CHECK: ccmp x9, #31, #0, le // encoding: [0x20,0xd9,0x5f,0xfa]
+// CHECK: ccmp x3, #0, #15, gt // encoding: [0x6f,0xc8,0x40,0xfa]
+// CHECK: ccmp xzr, #5, #7, ne // encoding: [0xe7,0x1b,0x45,0xfa]
+
+ ccmn w1, #31, #0, eq
+ ccmn w3, #0, #15, hs
+ ccmn wzr, #15, #13, cs
+// CHECK: ccmn w1, #31, #0, eq // encoding: [0x20,0x08,0x5f,0x3a]
+// CHECK: ccmn w3, #0, #15, hs // encoding: [0x6f,0x28,0x40,0x3a]
+// CHECK: ccmn wzr, #15, #13, hs // encoding: [0xed,0x2b,0x4f,0x3a]
+
+ ccmn x9, #31, #0, le
+ ccmn x3, #0, #15, gt
+ ccmn xzr, #5, #7, ne
+// CHECK: ccmn x9, #31, #0, le // encoding: [0x20,0xd9,0x5f,0xba]
+// CHECK: ccmn x3, #0, #15, gt // encoding: [0x6f,0xc8,0x40,0xba]
+// CHECK: ccmn xzr, #5, #7, ne // encoding: [0xe7,0x1b,0x45,0xba]
+
+//------------------------------------------------------------------------------
+// Conditional compare (register)
+//------------------------------------------------------------------------------
+
+ ccmp w1, wzr, #0, eq
+ ccmp w3, w0, #15, hs
+ ccmp wzr, w15, #13, cs
+// CHECK: ccmp w1, wzr, #0, eq // encoding: [0x20,0x00,0x5f,0x7a]
+// CHECK: ccmp w3, w0, #15, hs // encoding: [0x6f,0x20,0x40,0x7a]
+// CHECK: ccmp wzr, w15, #13, hs // encoding: [0xed,0x23,0x4f,0x7a]
+
+ ccmp x9, xzr, #0, le
+ ccmp x3, x0, #15, gt
+ ccmp xzr, x5, #7, ne
+// CHECK: ccmp x9, xzr, #0, le // encoding: [0x20,0xd1,0x5f,0xfa]
+// CHECK: ccmp x3, x0, #15, gt // encoding: [0x6f,0xc0,0x40,0xfa]
+// CHECK: ccmp xzr, x5, #7, ne // encoding: [0xe7,0x13,0x45,0xfa]
+
+ ccmn w1, wzr, #0, eq
+ ccmn w3, w0, #15, hs
+ ccmn wzr, w15, #13, cs
+// CHECK: ccmn w1, wzr, #0, eq // encoding: [0x20,0x00,0x5f,0x3a]
+// CHECK: ccmn w3, w0, #15, hs // encoding: [0x6f,0x20,0x40,0x3a]
+// CHECK: ccmn wzr, w15, #13, hs // encoding: [0xed,0x23,0x4f,0x3a]
+
+ ccmn x9, xzr, #0, le
+ ccmn x3, x0, #15, gt
+ ccmn xzr, x5, #7, ne
+// CHECK: ccmn x9, xzr, #0, le // encoding: [0x20,0xd1,0x5f,0xba]
+// CHECK: ccmn x3, x0, #15, gt // encoding: [0x6f,0xc0,0x40,0xba]
+// CHECK: ccmn xzr, x5, #7, ne // encoding: [0xe7,0x13,0x45,0xba]
+
+//------------------------------------------------------------------------------
+// Conditional select
+//------------------------------------------------------------------------------
+ csel w1, w0, w19, ne
+ csel wzr, w5, w9, eq
+ csel w9, wzr, w30, gt
+ csel w1, w28, wzr, mi
+// CHECK: csel w1, w0, w19, ne // encoding: [0x01,0x10,0x93,0x1a]
+// CHECK: csel wzr, w5, w9, eq // encoding: [0xbf,0x00,0x89,0x1a]
+// CHECK: csel w9, wzr, w30, gt // encoding: [0xe9,0xc3,0x9e,0x1a]
+// CHECK: csel w1, w28, wzr, mi // encoding: [0x81,0x43,0x9f,0x1a]
+
+ csel x19, x23, x29, lt
+ csel xzr, x3, x4, ge
+ csel x5, xzr, x6, cs
+ csel x7, x8, xzr, cc
+// CHECK: csel x19, x23, x29, lt // encoding: [0xf3,0xb2,0x9d,0x9a]
+// CHECK: csel xzr, x3, x4, ge // encoding: [0x7f,0xa0,0x84,0x9a]
+// CHECK: csel x5, xzr, x6, hs // encoding: [0xe5,0x23,0x86,0x9a]
+// CHECK: csel x7, x8, xzr, lo // encoding: [0x07,0x31,0x9f,0x9a]
+
+ csinc w1, w0, w19, ne
+ csinc wzr, w5, w9, eq
+ csinc w9, wzr, w30, gt
+ csinc w1, w28, wzr, mi
+// CHECK: csinc w1, w0, w19, ne // encoding: [0x01,0x14,0x93,0x1a]
+// CHECK: csinc wzr, w5, w9, eq // encoding: [0xbf,0x04,0x89,0x1a]
+// CHECK: csinc w9, wzr, w30, gt // encoding: [0xe9,0xc7,0x9e,0x1a]
+// CHECK: csinc w1, w28, wzr, mi // encoding: [0x81,0x47,0x9f,0x1a]
+
+ csinc x19, x23, x29, lt
+ csinc xzr, x3, x4, ge
+ csinc x5, xzr, x6, cs
+ csinc x7, x8, xzr, cc
+// CHECK: csinc x19, x23, x29, lt // encoding: [0xf3,0xb6,0x9d,0x9a]
+// CHECK: csinc xzr, x3, x4, ge // encoding: [0x7f,0xa4,0x84,0x9a]
+// CHECK: csinc x5, xzr, x6, hs // encoding: [0xe5,0x27,0x86,0x9a]
+// CHECK: csinc x7, x8, xzr, lo // encoding: [0x07,0x35,0x9f,0x9a]
+
+ csinv w1, w0, w19, ne
+ csinv wzr, w5, w9, eq
+ csinv w9, wzr, w30, gt
+ csinv w1, w28, wzr, mi
+// CHECK: csinv w1, w0, w19, ne // encoding: [0x01,0x10,0x93,0x5a]
+// CHECK: csinv wzr, w5, w9, eq // encoding: [0xbf,0x00,0x89,0x5a]
+// CHECK: csinv w9, wzr, w30, gt // encoding: [0xe9,0xc3,0x9e,0x5a]
+// CHECK: csinv w1, w28, wzr, mi // encoding: [0x81,0x43,0x9f,0x5a]
+
+ csinv x19, x23, x29, lt
+ csinv xzr, x3, x4, ge
+ csinv x5, xzr, x6, cs
+ csinv x7, x8, xzr, cc
+// CHECK: csinv x19, x23, x29, lt // encoding: [0xf3,0xb2,0x9d,0xda]
+// CHECK: csinv xzr, x3, x4, ge // encoding: [0x7f,0xa0,0x84,0xda]
+// CHECK: csinv x5, xzr, x6, hs // encoding: [0xe5,0x23,0x86,0xda]
+// CHECK: csinv x7, x8, xzr, lo // encoding: [0x07,0x31,0x9f,0xda]
+
+ csneg w1, w0, w19, ne
+ csneg wzr, w5, w9, eq
+ csneg w9, wzr, w30, gt
+ csneg w1, w28, wzr, mi
+// CHECK: csneg w1, w0, w19, ne // encoding: [0x01,0x14,0x93,0x5a]
+// CHECK: csneg wzr, w5, w9, eq // encoding: [0xbf,0x04,0x89,0x5a]
+// CHECK: csneg w9, wzr, w30, gt // encoding: [0xe9,0xc7,0x9e,0x5a]
+// CHECK: csneg w1, w28, wzr, mi // encoding: [0x81,0x47,0x9f,0x5a]
+
+ csneg x19, x23, x29, lt
+ csneg xzr, x3, x4, ge
+ csneg x5, xzr, x6, cs
+ csneg x7, x8, xzr, cc
+// CHECK: csneg x19, x23, x29, lt // encoding: [0xf3,0xb6,0x9d,0xda]
+// CHECK: csneg xzr, x3, x4, ge // encoding: [0x7f,0xa4,0x84,0xda]
+// CHECK: csneg x5, xzr, x6, hs // encoding: [0xe5,0x27,0x86,0xda]
+// CHECK: csneg x7, x8, xzr, lo // encoding: [0x07,0x35,0x9f,0xda]
+
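+// cset, csetm, cinc, cinv and cneg are aliases of csinc, csinv and csneg with
+// repeated operands and the condition inverted, hence the flipped conditions
+// in the CHECK lines: e.g. cset w3, eq is csinc w3, wzr, wzr, ne.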
+ cset w3, eq
+ cset x9, pl
+// CHECK: csinc w3, wzr, wzr, ne // encoding: [0xe3,0x17,0x9f,0x1a]
+// CHECK: csinc x9, xzr, xzr, mi // encoding: [0xe9,0x47,0x9f,0x9a]
+
+ csetm w20, ne
+ csetm x30, ge
+// CHECK: csinv w20, wzr, wzr, eq // encoding: [0xf4,0x03,0x9f,0x5a]
+// CHECK: csinv x30, xzr, xzr, lt // encoding: [0xfe,0xb3,0x9f,0xda]
+
+ cinc w3, w5, gt
+ cinc wzr, w4, le
+ cinc w9, wzr, lt
+// CHECK: csinc w3, w5, w5, le // encoding: [0xa3,0xd4,0x85,0x1a]
+// CHECK: csinc wzr, w4, w4, gt // encoding: [0x9f,0xc4,0x84,0x1a]
+// CHECK: csinc w9, wzr, wzr, ge // encoding: [0xe9,0xa7,0x9f,0x1a]
+
+ cinc x3, x5, gt
+ cinc xzr, x4, le
+ cinc x9, xzr, lt
+// CHECK: csinc x3, x5, x5, le // encoding: [0xa3,0xd4,0x85,0x9a]
+// CHECK: csinc xzr, x4, x4, gt // encoding: [0x9f,0xc4,0x84,0x9a]
+// CHECK: csinc x9, xzr, xzr, ge // encoding: [0xe9,0xa7,0x9f,0x9a]
+
+ cinv w3, w5, gt
+ cinv wzr, w4, le
+ cinv w9, wzr, lt
+// CHECK: csinv w3, w5, w5, le // encoding: [0xa3,0xd0,0x85,0x5a]
+// CHECK: csinv wzr, w4, w4, gt // encoding: [0x9f,0xc0,0x84,0x5a]
+// CHECK: csinv w9, wzr, wzr, ge // encoding: [0xe9,0xa3,0x9f,0x5a]
+
+ cinv x3, x5, gt
+ cinv xzr, x4, le
+ cinv x9, xzr, lt
+// CHECK: csinv x3, x5, x5, le // encoding: [0xa3,0xd0,0x85,0xda]
+// CHECK: csinv xzr, x4, x4, gt // encoding: [0x9f,0xc0,0x84,0xda]
+// CHECK: csinv x9, xzr, xzr, ge // encoding: [0xe9,0xa3,0x9f,0xda]
+
+ cneg w3, w5, gt
+ cneg wzr, w4, le
+ cneg w9, wzr, lt
+// CHECK: csneg w3, w5, w5, le // encoding: [0xa3,0xd4,0x85,0x5a]
+// CHECK: csneg wzr, w4, w4, gt // encoding: [0x9f,0xc4,0x84,0x5a]
+// CHECK: csneg w9, wzr, wzr, ge // encoding: [0xe9,0xa7,0x9f,0x5a]
+
+ cneg x3, x5, gt
+ cneg xzr, x4, le
+ cneg x9, xzr, lt
+// CHECK: csneg x3, x5, x5, le // encoding: [0xa3,0xd4,0x85,0xda]
+// CHECK: csneg xzr, x4, x4, gt // encoding: [0x9f,0xc4,0x84,0xda]
+// CHECK: csneg x9, xzr, xzr, ge // encoding: [0xe9,0xa7,0x9f,0xda]
+
+//------------------------------------------------------------------------------
+// Data-processing (1 source)
+//------------------------------------------------------------------------------
+
+ rbit w0, w7
+ rbit x18, x3
+ rev16 w17, w1
+ rev16 x5, x2
+ rev w18, w0
+ rev32 x20, x1
+ rev32 x20, xzr
+// CHECK: rbit w0, w7 // encoding: [0xe0,0x00,0xc0,0x5a]
+// CHECK: rbit x18, x3 // encoding: [0x72,0x00,0xc0,0xda]
+// CHECK: rev16 w17, w1 // encoding: [0x31,0x04,0xc0,0x5a]
+// CHECK: rev16 x5, x2 // encoding: [0x45,0x04,0xc0,0xda]
+// CHECK: rev w18, w0 // encoding: [0x12,0x08,0xc0,0x5a]
+// CHECK: rev32 x20, x1 // encoding: [0x34,0x08,0xc0,0xda]
+// CHECK: rev32 x20, xzr // encoding: [0xf4,0x0b,0xc0,0xda]
+
+ rev x22, x2
+ rev x18, xzr
+ rev w7, wzr
+ clz w24, w3
+ clz x26, x4
+ cls w3, w5
+ cls x20, x5
+// CHECK: rev x22, x2 // encoding: [0x56,0x0c,0xc0,0xda]
+// CHECK: rev x18, xzr // encoding: [0xf2,0x0f,0xc0,0xda]
+// CHECK: rev w7, wzr // encoding: [0xe7,0x0b,0xc0,0x5a]
+// CHECK: clz w24, w3 // encoding: [0x78,0x10,0xc0,0x5a]
+// CHECK: clz x26, x4 // encoding: [0x9a,0x10,0xc0,0xda]
+// CHECK: cls w3, w5 // encoding: [0xa3,0x14,0xc0,0x5a]
+// CHECK: cls x20, x5 // encoding: [0xb4,0x14,0xc0,0xda]
+
+ clz w24, wzr
+ rev x22, xzr
+// CHECK: clz w24, wzr // encoding: [0xf8,0x13,0xc0,0x5a]
+// CHECK: rev x22, xzr // encoding: [0xf6,0x0f,0xc0,0xda]
+
+//------------------------------------------------------------------------------
+// Data-processing (2 source)
+//------------------------------------------------------------------------------
+
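+// The variable-shift instructions lslv, lsrv, asrv and rorv print under their
+// preferred lsl, lsr, asr and ror aliases.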
+ udiv w0, w7, w10
+ udiv x9, x22, x4
+ sdiv w12, w21, w0
+ sdiv x13, x2, x1
+ lslv w11, w12, w13
+ lslv x14, x15, x16
+ lsrv w17, w18, w19
+ lsrv x20, x21, x22
+ asrv w23, w24, w25
+ asrv x26, x27, x28
+ rorv w0, w1, w2
+ rorv x3, x4, x5
+
+// CHECK: udiv w0, w7, w10 // encoding: [0xe0,0x08,0xca,0x1a]
+// CHECK: udiv x9, x22, x4 // encoding: [0xc9,0x0a,0xc4,0x9a]
+// CHECK: sdiv w12, w21, w0 // encoding: [0xac,0x0e,0xc0,0x1a]
+// CHECK: sdiv x13, x2, x1 // encoding: [0x4d,0x0c,0xc1,0x9a]
+// CHECK: lsl w11, w12, w13 // encoding: [0x8b,0x21,0xcd,0x1a]
+// CHECK: lsl x14, x15, x16 // encoding: [0xee,0x21,0xd0,0x9a]
+// CHECK: lsr w17, w18, w19 // encoding: [0x51,0x26,0xd3,0x1a]
+// CHECK: lsr x20, x21, x22 // encoding: [0xb4,0x26,0xd6,0x9a]
+// CHECK: asr w23, w24, w25 // encoding: [0x17,0x2b,0xd9,0x1a]
+// CHECK: asr x26, x27, x28 // encoding: [0x7a,0x2b,0xdc,0x9a]
+// CHECK: ror w0, w1, w2 // encoding: [0x20,0x2c,0xc2,0x1a]
+// CHECK: ror x3, x4, x5 // encoding: [0x83,0x2c,0xc5,0x9a]
+
+ lsl w6, w7, w8
+ lsl x9, x10, x11
+ lsr w12, w13, w14
+ lsr x15, x16, x17
+ asr w18, w19, w20
+ asr x21, x22, x23
+ ror w24, w25, w26
+ ror x27, x28, x29
+// CHECK: lsl w6, w7, w8 // encoding: [0xe6,0x20,0xc8,0x1a]
+// CHECK: lsl x9, x10, x11 // encoding: [0x49,0x21,0xcb,0x9a]
+// CHECK: lsr w12, w13, w14 // encoding: [0xac,0x25,0xce,0x1a]
+// CHECK: lsr x15, x16, x17 // encoding: [0x0f,0x26,0xd1,0x9a]
+// CHECK: asr w18, w19, w20 // encoding: [0x72,0x2a,0xd4,0x1a]
+// CHECK: asr x21, x22, x23 // encoding: [0xd5,0x2a,0xd7,0x9a]
+// CHECK: ror w24, w25, w26 // encoding: [0x38,0x2f,0xda,0x1a]
+// CHECK: ror x27, x28, x29 // encoding: [0x9b,0x2f,0xdd,0x9a]
+
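+// With the zero register as the addend, madd/msub and the widening multiplies
+// print as mul, mneg, smull, smnegl, umull and umnegl.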
+ madd w1, w3, w7, w4
+ madd wzr, w0, w9, w11
+ madd w13, wzr, w4, w4
+ madd w19, w30, wzr, w29
+ madd w4, w5, w6, wzr
+// CHECK: madd w1, w3, w7, w4 // encoding: [0x61,0x10,0x07,0x1b]
+// CHECK: madd wzr, w0, w9, w11 // encoding: [0x1f,0x2c,0x09,0x1b]
+// CHECK: madd w13, wzr, w4, w4 // encoding: [0xed,0x13,0x04,0x1b]
+// CHECK: madd w19, w30, wzr, w29 // encoding: [0xd3,0x77,0x1f,0x1b]
+// CHECK: mul w4, w5, w6 // encoding: [0xa4,0x7c,0x06,0x1b]
+
+ madd x1, x3, x7, x4
+ madd xzr, x0, x9, x11
+ madd x13, xzr, x4, x4
+ madd x19, x30, xzr, x29
+ madd x4, x5, x6, xzr
+// CHECK: madd x1, x3, x7, x4 // encoding: [0x61,0x10,0x07,0x9b]
+// CHECK: madd xzr, x0, x9, x11 // encoding: [0x1f,0x2c,0x09,0x9b]
+// CHECK: madd x13, xzr, x4, x4 // encoding: [0xed,0x13,0x04,0x9b]
+// CHECK: madd x19, x30, xzr, x29 // encoding: [0xd3,0x77,0x1f,0x9b]
+// CHECK: mul x4, x5, x6 // encoding: [0xa4,0x7c,0x06,0x9b]
+
+ msub w1, w3, w7, w4
+ msub wzr, w0, w9, w11
+ msub w13, wzr, w4, w4
+ msub w19, w30, wzr, w29
+ msub w4, w5, w6, wzr
+// CHECK: msub w1, w3, w7, w4 // encoding: [0x61,0x90,0x07,0x1b]
+// CHECK: msub wzr, w0, w9, w11 // encoding: [0x1f,0xac,0x09,0x1b]
+// CHECK: msub w13, wzr, w4, w4 // encoding: [0xed,0x93,0x04,0x1b]
+// CHECK: msub w19, w30, wzr, w29 // encoding: [0xd3,0xf7,0x1f,0x1b]
+// CHECK: mneg w4, w5, w6 // encoding: [0xa4,0xfc,0x06,0x1b]
+
+ msub x1, x3, x7, x4
+ msub xzr, x0, x9, x11
+ msub x13, xzr, x4, x4
+ msub x19, x30, xzr, x29
+ msub x4, x5, x6, xzr
+// CHECK: msub x1, x3, x7, x4 // encoding: [0x61,0x90,0x07,0x9b]
+// CHECK: msub xzr, x0, x9, x11 // encoding: [0x1f,0xac,0x09,0x9b]
+// CHECK: msub x13, xzr, x4, x4 // encoding: [0xed,0x93,0x04,0x9b]
+// CHECK: msub x19, x30, xzr, x29 // encoding: [0xd3,0xf7,0x1f,0x9b]
+// CHECK: mneg x4, x5, x6 // encoding: [0xa4,0xfc,0x06,0x9b]
+
+ smaddl x3, w5, w2, x9
+ smaddl xzr, w10, w11, x12
+ smaddl x13, wzr, w14, x15
+ smaddl x16, w17, wzr, x18
+ smaddl x19, w20, w21, xzr
+// CHECK: smaddl x3, w5, w2, x9 // encoding: [0xa3,0x24,0x22,0x9b]
+// CHECK: smaddl xzr, w10, w11, x12 // encoding: [0x5f,0x31,0x2b,0x9b]
+// CHECK: smaddl x13, wzr, w14, x15 // encoding: [0xed,0x3f,0x2e,0x9b]
+// CHECK: smaddl x16, w17, wzr, x18 // encoding: [0x30,0x4a,0x3f,0x9b]
+// CHECK: smull x19, w20, w21 // encoding: [0x93,0x7e,0x35,0x9b]
+
+ smsubl x3, w5, w2, x9
+ smsubl xzr, w10, w11, x12
+ smsubl x13, wzr, w14, x15
+ smsubl x16, w17, wzr, x18
+ smsubl x19, w20, w21, xzr
+// CHECK: smsubl x3, w5, w2, x9 // encoding: [0xa3,0xa4,0x22,0x9b]
+// CHECK: smsubl xzr, w10, w11, x12 // encoding: [0x5f,0xb1,0x2b,0x9b]
+// CHECK: smsubl x13, wzr, w14, x15 // encoding: [0xed,0xbf,0x2e,0x9b]
+// CHECK: smsubl x16, w17, wzr, x18 // encoding: [0x30,0xca,0x3f,0x9b]
+// CHECK: smnegl x19, w20, w21 // encoding: [0x93,0xfe,0x35,0x9b]
+
+ umaddl x3, w5, w2, x9
+ umaddl xzr, w10, w11, x12
+ umaddl x13, wzr, w14, x15
+ umaddl x16, w17, wzr, x18
+ umaddl x19, w20, w21, xzr
+// CHECK: umaddl x3, w5, w2, x9 // encoding: [0xa3,0x24,0xa2,0x9b]
+// CHECK: umaddl xzr, w10, w11, x12 // encoding: [0x5f,0x31,0xab,0x9b]
+// CHECK: umaddl x13, wzr, w14, x15 // encoding: [0xed,0x3f,0xae,0x9b]
+// CHECK: umaddl x16, w17, wzr, x18 // encoding: [0x30,0x4a,0xbf,0x9b]
+// CHECK: umull x19, w20, w21 // encoding: [0x93,0x7e,0xb5,0x9b]
+
+ umsubl x3, w5, w2, x9
+ umsubl xzr, w10, w11, x12
+ umsubl x13, wzr, w14, x15
+ umsubl x16, w17, wzr, x18
+ umsubl x19, w20, w21, xzr
+// CHECK: umsubl x3, w5, w2, x9 // encoding: [0xa3,0xa4,0xa2,0x9b]
+// CHECK: umsubl xzr, w10, w11, x12 // encoding: [0x5f,0xb1,0xab,0x9b]
+// CHECK: umsubl x13, wzr, w14, x15 // encoding: [0xed,0xbf,0xae,0x9b]
+// CHECK: umsubl x16, w17, wzr, x18 // encoding: [0x30,0xca,0xbf,0x9b]
+// CHECK: umnegl x19, w20, w21 // encoding: [0x93,0xfe,0xb5,0x9b]
+
+ smulh x30, x29, x28
+ smulh xzr, x27, x26
+ smulh x25, xzr, x24
+ smulh x23, x22, xzr
+// CHECK: smulh x30, x29, x28 // encoding: [0xbe,0x7f,0x5c,0x9b]
+// CHECK: smulh xzr, x27, x26 // encoding: [0x7f,0x7f,0x5a,0x9b]
+// CHECK: smulh x25, xzr, x24 // encoding: [0xf9,0x7f,0x58,0x9b]
+// CHECK: smulh x23, x22, xzr // encoding: [0xd7,0x7e,0x5f,0x9b]
+
+ umulh x30, x29, x28
+ umulh xzr, x27, x26
+ umulh x25, xzr, x24
+ umulh x23, x22, xzr
+// CHECK: umulh x30, x29, x28 // encoding: [0xbe,0x7f,0xdc,0x9b]
+// CHECK: umulh xzr, x27, x26 // encoding: [0x7f,0x7f,0xda,0x9b]
+// CHECK: umulh x25, xzr, x24 // encoding: [0xf9,0x7f,0xd8,0x9b]
+// CHECK: umulh x23, x22, xzr // encoding: [0xd7,0x7e,0xdf,0x9b]
+
+ mul w3, w4, w5
+ mul wzr, w6, w7
+ mul w8, wzr, w9
+ mul w10, w11, wzr
+
+ mul x12, x13, x14
+ mul xzr, x15, x16
+ mul x17, xzr, x18
+ mul x19, x20, xzr
+
+ mneg w21, w22, w23
+ mneg wzr, w24, w25
+ mneg w26, wzr, w27
+ mneg w28, w29, wzr
+
+ smull x11, w13, w17
+ umull x11, w13, w17
+ smnegl x11, w13, w17
+ umnegl x11, w13, w17
+// CHECK: mul w3, w4, w5 // encoding: [0x83,0x7c,0x05,0x1b]
+// CHECK: mul wzr, w6, w7 // encoding: [0xdf,0x7c,0x07,0x1b]
+// CHECK: mul w8, wzr, w9 // encoding: [0xe8,0x7f,0x09,0x1b]
+// CHECK: mul w10, w11, wzr // encoding: [0x6a,0x7d,0x1f,0x1b]
+// CHECK: mul x12, x13, x14 // encoding: [0xac,0x7d,0x0e,0x9b]
+// CHECK: mul xzr, x15, x16 // encoding: [0xff,0x7d,0x10,0x9b]
+// CHECK: mul x17, xzr, x18 // encoding: [0xf1,0x7f,0x12,0x9b]
+// CHECK: mul x19, x20, xzr // encoding: [0x93,0x7e,0x1f,0x9b]
+// CHECK: mneg w21, w22, w23 // encoding: [0xd5,0xfe,0x17,0x1b]
+// CHECK: mneg wzr, w24, w25 // encoding: [0x1f,0xff,0x19,0x1b]
+// CHECK: mneg w26, wzr, w27 // encoding: [0xfa,0xff,0x1b,0x1b]
+// CHECK: mneg w28, w29, wzr // encoding: [0xbc,0xff,0x1f,0x1b]
+// CHECK: smull x11, w13, w17 // encoding: [0xab,0x7d,0x31,0x9b]
+// CHECK: umull x11, w13, w17 // encoding: [0xab,0x7d,0xb1,0x9b]
+// CHECK: smnegl x11, w13, w17 // encoding: [0xab,0xfd,0x31,0x9b]
+// CHECK: umnegl x11, w13, w17 // encoding: [0xab,0xfd,0xb1,0x9b]
+
+//------------------------------------------------------------------------------
+// Exception generation
+//------------------------------------------------------------------------------
+ svc #0
+ svc #65535
+// CHECK: svc #0 // encoding: [0x01,0x00,0x00,0xd4]
+// CHECK: svc #65535 // encoding: [0xe1,0xff,0x1f,0xd4]
+
+ hvc #1
+ smc #12000
+ brk #12
+ hlt #123
+// CHECK: hvc #1 // encoding: [0x22,0x00,0x00,0xd4]
+// CHECK: smc #12000 // encoding: [0x03,0xdc,0x05,0xd4]
+// CHECK: brk #12 // encoding: [0x80,0x01,0x20,0xd4]
+// CHECK: hlt #123 // encoding: [0x60,0x0f,0x40,0xd4]
+
+ dcps1 #42
+ dcps2 #9
+ dcps3 #1000
+// CHECK: dcps1 #42 // encoding: [0x41,0x05,0xa0,0xd4]
+// CHECK: dcps2 #9 // encoding: [0x22,0x01,0xa0,0xd4]
+// CHECK: dcps3 #1000 // encoding: [0x03,0x7d,0xa0,0xd4]
+
+ dcps1
+ dcps2
+ dcps3
+// CHECK: dcps1 // encoding: [0x01,0x00,0xa0,0xd4]
+// CHECK: dcps2 // encoding: [0x02,0x00,0xa0,0xd4]
+// CHECK: dcps3 // encoding: [0x03,0x00,0xa0,0xd4]
+
+//------------------------------------------------------------------------------
+// Extract (immediate)
+//------------------------------------------------------------------------------
+
+ extr w3, w5, w7, #0
+ extr w11, w13, w17, #31
+// CHECK: extr w3, w5, w7, #0 // encoding: [0xa3,0x00,0x87,0x13]
+// CHECK: extr w11, w13, w17, #31 // encoding: [0xab,0x7d,0x91,0x13]
+
+ extr x3, x5, x7, #15
+ extr x11, x13, x17, #63
+// CHECK: extr x3, x5, x7, #15 // encoding: [0xa3,0x3c,0xc7,0x93]
+// CHECK: extr x11, x13, x17, #63 // encoding: [0xab,0xfd,0xd1,0x93]
+
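+// ror with an immediate is an alias of extr with both source registers equal,
+// e.g. ror x19, x23, #24 is extr x19, x23, x23, #24.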
+ ror x19, x23, #24
+ ror x29, xzr, #63
+// CHECK: extr x19, x23, x23, #24 // encoding: [0xf3,0x62,0xd7,0x93]
+// CHECK: extr x29, xzr, xzr, #63 // encoding: [0xfd,0xff,0xdf,0x93]
+
+ ror w9, w13, #31
+// CHECK: extr w9, w13, w13, #31 // encoding: [0xa9,0x7d,0x8d,0x13]
+
+//------------------------------------------------------------------------------
+// Floating-point compare
+//------------------------------------------------------------------------------
+
+ fcmp s3, s5
+ fcmp s31, #0.0
+// CHECK: fcmp s3, s5 // encoding: [0x60,0x20,0x25,0x1e]
+// CHECK: fcmp s31, #0.0 // encoding: [0xe8,0x23,0x20,0x1e]
+
+ fcmpe s29, s30
+ fcmpe s15, #0.0
+// CHECK: fcmpe s29, s30 // encoding: [0xb0,0x23,0x3e,0x1e]
+// CHECK: fcmpe s15, #0.0 // encoding: [0xf8,0x21,0x20,0x1e]
+
+ fcmp d4, d12
+ fcmp d23, #0.0
+// CHECK: fcmp d4, d12 // encoding: [0x80,0x20,0x6c,0x1e]
+// CHECK: fcmp d23, #0.0 // encoding: [0xe8,0x22,0x60,0x1e]
+
+ fcmpe d26, d22
+ fcmpe d29, #0.0
+// CHECK: fcmpe d26, d22 // encoding: [0x50,0x23,0x76,0x1e]
+// CHECK: fcmpe d29, #0.0 // encoding: [0xb8,0x23,0x60,0x1e]
+
+//------------------------------------------------------------------------------
+// Floating-point conditional compare
+//------------------------------------------------------------------------------
+
+ fccmp s1, s31, #0, eq
+ fccmp s3, s0, #15, hs
+ fccmp s31, s15, #13, cs
+// CHECK: fccmp s1, s31, #0, eq // encoding: [0x20,0x04,0x3f,0x1e]
+// CHECK: fccmp s3, s0, #15, hs // encoding: [0x6f,0x24,0x20,0x1e]
+// CHECK: fccmp s31, s15, #13, hs // encoding: [0xed,0x27,0x2f,0x1e]
+
+ fccmp d9, d31, #0, le
+ fccmp d3, d0, #15, gt
+ fccmp d31, d5, #7, ne
+// CHECK: fccmp d9, d31, #0, le // encoding: [0x20,0xd5,0x7f,0x1e]
+// CHECK: fccmp d3, d0, #15, gt // encoding: [0x6f,0xc4,0x60,0x1e]
+// CHECK: fccmp d31, d5, #7, ne // encoding: [0xe7,0x17,0x65,0x1e]
+
+ fccmpe s1, s31, #0, eq
+ fccmpe s3, s0, #15, hs
+ fccmpe s31, s15, #13, cs
+// CHECK: fccmpe s1, s31, #0, eq // encoding: [0x30,0x04,0x3f,0x1e]
+// CHECK: fccmpe s3, s0, #15, hs // encoding: [0x7f,0x24,0x20,0x1e]
+// CHECK: fccmpe s31, s15, #13, hs // encoding: [0xfd,0x27,0x2f,0x1e]
+
+ fccmpe d9, d31, #0, le
+ fccmpe d3, d0, #15, gt
+ fccmpe d31, d5, #7, ne
+// CHECK: fccmpe d9, d31, #0, le // encoding: [0x30,0xd5,0x7f,0x1e]
+// CHECK: fccmpe d3, d0, #15, gt // encoding: [0x7f,0xc4,0x60,0x1e]
+// CHECK: fccmpe d31, d5, #7, ne // encoding: [0xf7,0x17,0x65,0x1e]
+
+//------------------------------------------------------------------------------
+// Floating-point conditional select
+//------------------------------------------------------------------------------
+
+ fcsel s3, s20, s9, pl
+ fcsel d9, d10, d11, mi
+// CHECK: fcsel s3, s20, s9, pl // encoding: [0x83,0x5e,0x29,0x1e]
+// CHECK: fcsel d9, d10, d11, mi // encoding: [0x49,0x4d,0x6b,0x1e]
+
+//------------------------------------------------------------------------------
+// Floating-point data-processing (1 source)
+//------------------------------------------------------------------------------
+
+ fmov s0, s1
+ fabs s2, s3
+ fneg s4, s5
+ fsqrt s6, s7
+ fcvt d8, s9
+ fcvt h10, s11
+ frintn s12, s13
+ frintp s14, s15
+ frintm s16, s17
+ frintz s18, s19
+ frinta s20, s21
+ frintx s22, s23
+ frinti s24, s25
+// CHECK: fmov s0, s1 // encoding: [0x20,0x40,0x20,0x1e]
+// CHECK: fabs s2, s3 // encoding: [0x62,0xc0,0x20,0x1e]
+// CHECK: fneg s4, s5 // encoding: [0xa4,0x40,0x21,0x1e]
+// CHECK: fsqrt s6, s7 // encoding: [0xe6,0xc0,0x21,0x1e]
+// CHECK: fcvt d8, s9 // encoding: [0x28,0xc1,0x22,0x1e]
+// CHECK: fcvt h10, s11 // encoding: [0x6a,0xc1,0x23,0x1e]
+// CHECK: frintn s12, s13 // encoding: [0xac,0x41,0x24,0x1e]
+// CHECK: frintp s14, s15 // encoding: [0xee,0xc1,0x24,0x1e]
+// CHECK: frintm s16, s17 // encoding: [0x30,0x42,0x25,0x1e]
+// CHECK: frintz s18, s19 // encoding: [0x72,0xc2,0x25,0x1e]
+// CHECK: frinta s20, s21 // encoding: [0xb4,0x42,0x26,0x1e]
+// CHECK: frintx s22, s23 // encoding: [0xf6,0x42,0x27,0x1e]
+// CHECK: frinti s24, s25 // encoding: [0x38,0xc3,0x27,0x1e]
+
+ fmov d0, d1
+ fabs d2, d3
+ fneg d4, d5
+ fsqrt d6, d7
+ fcvt s8, d9
+ fcvt h10, d11
+ frintn d12, d13
+ frintp d14, d15
+ frintm d16, d17
+ frintz d18, d19
+ frinta d20, d21
+ frintx d22, d23
+ frinti d24, d25
+// CHECK: fmov d0, d1 // encoding: [0x20,0x40,0x60,0x1e]
+// CHECK: fabs d2, d3 // encoding: [0x62,0xc0,0x60,0x1e]
+// CHECK: fneg d4, d5 // encoding: [0xa4,0x40,0x61,0x1e]
+// CHECK: fsqrt d6, d7 // encoding: [0xe6,0xc0,0x61,0x1e]
+// CHECK: fcvt s8, d9 // encoding: [0x28,0x41,0x62,0x1e]
+// CHECK: fcvt h10, d11 // encoding: [0x6a,0xc1,0x63,0x1e]
+// CHECK: frintn d12, d13 // encoding: [0xac,0x41,0x64,0x1e]
+// CHECK: frintp d14, d15 // encoding: [0xee,0xc1,0x64,0x1e]
+// CHECK: frintm d16, d17 // encoding: [0x30,0x42,0x65,0x1e]
+// CHECK: frintz d18, d19 // encoding: [0x72,0xc2,0x65,0x1e]
+// CHECK: frinta d20, d21 // encoding: [0xb4,0x42,0x66,0x1e]
+// CHECK: frintx d22, d23 // encoding: [0xf6,0x42,0x67,0x1e]
+// CHECK: frinti d24, d25 // encoding: [0x38,0xc3,0x67,0x1e]
+
+ fcvt s26, h27
+ fcvt d28, h29
+// CHECK: fcvt s26, h27 // encoding: [0x7a,0x43,0xe2,0x1e]
+// CHECK: fcvt d28, h29 // encoding: [0xbc,0xc3,0xe2,0x1e]
+
+//------------------------------------------------------------------------------
+// Floating-point data-processing (2 sources)
+//------------------------------------------------------------------------------
+
+ fmul s20, s19, s17
+ fdiv s1, s2, s3
+ fadd s4, s5, s6
+ fsub s7, s8, s9
+ fmax s10, s11, s12
+ fmin s13, s14, s15
+ fmaxnm s16, s17, s18
+ fminnm s19, s20, s21
+ fnmul s22, s23, s24
+// CHECK: fmul s20, s19, s17 // encoding: [0x74,0x0a,0x31,0x1e]
+// CHECK: fdiv s1, s2, s3 // encoding: [0x41,0x18,0x23,0x1e]
+// CHECK: fadd s4, s5, s6 // encoding: [0xa4,0x28,0x26,0x1e]
+// CHECK: fsub s7, s8, s9 // encoding: [0x07,0x39,0x29,0x1e]
+// CHECK: fmax s10, s11, s12 // encoding: [0x6a,0x49,0x2c,0x1e]
+// CHECK: fmin s13, s14, s15 // encoding: [0xcd,0x59,0x2f,0x1e]
+// CHECK: fmaxnm s16, s17, s18 // encoding: [0x30,0x6a,0x32,0x1e]
+// CHECK: fminnm s19, s20, s21 // encoding: [0x93,0x7a,0x35,0x1e]
+// CHECK: fnmul s22, s23, s24 // encoding: [0xf6,0x8a,0x38,0x1e]
+
+ fmul d20, d19, d17
+ fdiv d1, d2, d3
+ fadd d4, d5, d6
+ fsub d7, d8, d9
+ fmax d10, d11, d12
+ fmin d13, d14, d15
+ fmaxnm d16, d17, d18
+ fminnm d19, d20, d21
+ fnmul d22, d23, d24
+// CHECK: fmul d20, d19, d17 // encoding: [0x74,0x0a,0x71,0x1e]
+// CHECK: fdiv d1, d2, d3 // encoding: [0x41,0x18,0x63,0x1e]
+// CHECK: fadd d4, d5, d6 // encoding: [0xa4,0x28,0x66,0x1e]
+// CHECK: fsub d7, d8, d9 // encoding: [0x07,0x39,0x69,0x1e]
+// CHECK: fmax d10, d11, d12 // encoding: [0x6a,0x49,0x6c,0x1e]
+// CHECK: fmin d13, d14, d15 // encoding: [0xcd,0x59,0x6f,0x1e]
+// CHECK: fmaxnm d16, d17, d18 // encoding: [0x30,0x6a,0x72,0x1e]
+// CHECK: fminnm d19, d20, d21 // encoding: [0x93,0x7a,0x75,0x1e]
+// CHECK: fnmul d22, d23, d24 // encoding: [0xf6,0x8a,0x78,0x1e]
+
+//------------------------------------------------------------------------------
+// Floating-point data-processing (3 sources)
+//------------------------------------------------------------------------------
+
+ fmadd s3, s5, s6, s31
+ fmadd d3, d13, d0, d23
+ fmsub s3, s5, s6, s31
+ fmsub d3, d13, d0, d23
+ fnmadd s3, s5, s6, s31
+ fnmadd d3, d13, d0, d23
+ fnmsub s3, s5, s6, s31
+ fnmsub d3, d13, d0, d23
+// CHECK: fmadd s3, s5, s6, s31 // encoding: [0xa3,0x7c,0x06,0x1f]
+// CHECK: fmadd d3, d13, d0, d23 // encoding: [0xa3,0x5d,0x40,0x1f]
+// CHECK: fmsub s3, s5, s6, s31 // encoding: [0xa3,0xfc,0x06,0x1f]
+// CHECK: fmsub d3, d13, d0, d23 // encoding: [0xa3,0xdd,0x40,0x1f]
+// CHECK: fnmadd s3, s5, s6, s31 // encoding: [0xa3,0x7c,0x26,0x1f]
+// CHECK: fnmadd d3, d13, d0, d23 // encoding: [0xa3,0x5d,0x60,0x1f]
+// CHECK: fnmsub s3, s5, s6, s31 // encoding: [0xa3,0xfc,0x26,0x1f]
+// CHECK: fnmsub d3, d13, d0, d23 // encoding: [0xa3,0xdd,0x60,0x1f]
+
+//------------------------------------------------------------------------------
+// Floating-point <-> fixed-point conversion
+//------------------------------------------------------------------------------
+
+ fcvtzs w3, s5, #1
+ fcvtzs wzr, s20, #13
+ fcvtzs w19, s0, #32
+// CHECK: fcvtzs w3, s5, #1 // encoding: [0xa3,0xfc,0x18,0x1e]
+// CHECK: fcvtzs wzr, s20, #13 // encoding: [0x9f,0xce,0x18,0x1e]
+// CHECK: fcvtzs w19, s0, #32 // encoding: [0x13,0x80,0x18,0x1e]
+
+ fcvtzs x3, s5, #1
+ fcvtzs x12, s30, #45
+ fcvtzs x19, s0, #64
+// CHECK: fcvtzs x3, s5, #1 // encoding: [0xa3,0xfc,0x18,0x9e]
+// CHECK: fcvtzs x12, s30, #45 // encoding: [0xcc,0x4f,0x18,0x9e]
+// CHECK: fcvtzs x19, s0, #64 // encoding: [0x13,0x00,0x18,0x9e]
+
+ fcvtzs w3, d5, #1
+ fcvtzs wzr, d20, #13
+ fcvtzs w19, d0, #32
+// CHECK: fcvtzs w3, d5, #1 // encoding: [0xa3,0xfc,0x58,0x1e]
+// CHECK: fcvtzs wzr, d20, #13 // encoding: [0x9f,0xce,0x58,0x1e]
+// CHECK: fcvtzs w19, d0, #32 // encoding: [0x13,0x80,0x58,0x1e]
+
+ fcvtzs x3, d5, #1
+ fcvtzs x12, d30, #45
+ fcvtzs x19, d0, #64
+// CHECK: fcvtzs x3, d5, #1 // encoding: [0xa3,0xfc,0x58,0x9e]
+// CHECK: fcvtzs x12, d30, #45 // encoding: [0xcc,0x4f,0x58,0x9e]
+// CHECK: fcvtzs x19, d0, #64 // encoding: [0x13,0x00,0x58,0x9e]
+
+ fcvtzu w3, s5, #1
+ fcvtzu wzr, s20, #13
+ fcvtzu w19, s0, #32
+// CHECK: fcvtzu w3, s5, #1 // encoding: [0xa3,0xfc,0x19,0x1e]
+// CHECK: fcvtzu wzr, s20, #13 // encoding: [0x9f,0xce,0x19,0x1e]
+// CHECK: fcvtzu w19, s0, #32 // encoding: [0x13,0x80,0x19,0x1e]
+
+ fcvtzu x3, s5, #1
+ fcvtzu x12, s30, #45
+ fcvtzu x19, s0, #64
+// CHECK: fcvtzu x3, s5, #1 // encoding: [0xa3,0xfc,0x19,0x9e]
+// CHECK: fcvtzu x12, s30, #45 // encoding: [0xcc,0x4f,0x19,0x9e]
+// CHECK: fcvtzu x19, s0, #64 // encoding: [0x13,0x00,0x19,0x9e]
+
+ fcvtzu w3, d5, #1
+ fcvtzu wzr, d20, #13
+ fcvtzu w19, d0, #32
+// CHECK: fcvtzu w3, d5, #1 // encoding: [0xa3,0xfc,0x59,0x1e]
+// CHECK: fcvtzu wzr, d20, #13 // encoding: [0x9f,0xce,0x59,0x1e]
+// CHECK: fcvtzu w19, d0, #32 // encoding: [0x13,0x80,0x59,0x1e]
+
+ fcvtzu x3, d5, #1
+ fcvtzu x12, d30, #45
+ fcvtzu x19, d0, #64
+// CHECK: fcvtzu x3, d5, #1 // encoding: [0xa3,0xfc,0x59,0x9e]
+// CHECK: fcvtzu x12, d30, #45 // encoding: [0xcc,0x4f,0x59,0x9e]
+// CHECK: fcvtzu x19, d0, #64 // encoding: [0x13,0x00,0x59,0x9e]
+
+ scvtf s23, w19, #1
+ scvtf s31, wzr, #20
+ scvtf s14, w0, #32
+// CHECK: scvtf s23, w19, #1 // encoding: [0x77,0xfe,0x02,0x1e]
+// CHECK: scvtf s31, wzr, #20 // encoding: [0xff,0xb3,0x02,0x1e]
+// CHECK: scvtf s14, w0, #32 // encoding: [0x0e,0x80,0x02,0x1e]
+
+ scvtf s23, x19, #1
+ scvtf s31, xzr, #20
+ scvtf s14, x0, #64
+// CHECK: scvtf s23, x19, #1 // encoding: [0x77,0xfe,0x02,0x9e]
+// CHECK: scvtf s31, xzr, #20 // encoding: [0xff,0xb3,0x02,0x9e]
+// CHECK: scvtf s14, x0, #64 // encoding: [0x0e,0x00,0x02,0x9e]
+
+ scvtf d23, w19, #1
+ scvtf d31, wzr, #20
+ scvtf d14, w0, #32
+// CHECK: scvtf d23, w19, #1 // encoding: [0x77,0xfe,0x42,0x1e]
+// CHECK: scvtf d31, wzr, #20 // encoding: [0xff,0xb3,0x42,0x1e]
+// CHECK: scvtf d14, w0, #32 // encoding: [0x0e,0x80,0x42,0x1e]
+
+ scvtf d23, x19, #1
+ scvtf d31, xzr, #20
+ scvtf d14, x0, #64
+// CHECK: scvtf d23, x19, #1 // encoding: [0x77,0xfe,0x42,0x9e]
+// CHECK: scvtf d31, xzr, #20 // encoding: [0xff,0xb3,0x42,0x9e]
+// CHECK: scvtf d14, x0, #64 // encoding: [0x0e,0x00,0x42,0x9e]
+
+ ucvtf s23, w19, #1
+ ucvtf s31, wzr, #20
+ ucvtf s14, w0, #32
+// CHECK: ucvtf s23, w19, #1 // encoding: [0x77,0xfe,0x03,0x1e]
+// CHECK: ucvtf s31, wzr, #20 // encoding: [0xff,0xb3,0x03,0x1e]
+// CHECK: ucvtf s14, w0, #32 // encoding: [0x0e,0x80,0x03,0x1e]
+
+ ucvtf s23, x19, #1
+ ucvtf s31, xzr, #20
+ ucvtf s14, x0, #64
+// CHECK: ucvtf s23, x19, #1 // encoding: [0x77,0xfe,0x03,0x9e]
+// CHECK: ucvtf s31, xzr, #20 // encoding: [0xff,0xb3,0x03,0x9e]
+// CHECK: ucvtf s14, x0, #64 // encoding: [0x0e,0x00,0x03,0x9e]
+
+ ucvtf d23, w19, #1
+ ucvtf d31, wzr, #20
+ ucvtf d14, w0, #32
+// CHECK: ucvtf d23, w19, #1 // encoding: [0x77,0xfe,0x43,0x1e]
+// CHECK: ucvtf d31, wzr, #20 // encoding: [0xff,0xb3,0x43,0x1e]
+// CHECK: ucvtf d14, w0, #32 // encoding: [0x0e,0x80,0x43,0x1e]
+
+ ucvtf d23, x19, #1
+ ucvtf d31, xzr, #20
+ ucvtf d14, x0, #64
+// CHECK: ucvtf d23, x19, #1 // encoding: [0x77,0xfe,0x43,0x9e]
+// CHECK: ucvtf d31, xzr, #20 // encoding: [0xff,0xb3,0x43,0x9e]
+// CHECK: ucvtf d14, x0, #64 // encoding: [0x0e,0x00,0x43,0x9e]
+
+//------------------------------------------------------------------------------
+// Floating-point <-> integer conversion
+//------------------------------------------------------------------------------
+ fcvtns w3, s31
+ fcvtns xzr, s12
+ fcvtnu wzr, s12
+ fcvtnu x0, s0
+// CHECK: fcvtns w3, s31 // encoding: [0xe3,0x03,0x20,0x1e]
+// CHECK: fcvtns xzr, s12 // encoding: [0x9f,0x01,0x20,0x9e]
+// CHECK: fcvtnu wzr, s12 // encoding: [0x9f,0x01,0x21,0x1e]
+// CHECK: fcvtnu x0, s0 // encoding: [0x00,0x00,0x21,0x9e]
+
+ fcvtps wzr, s9
+ fcvtps x12, s20
+ fcvtpu w30, s23
+ fcvtpu x29, s3
+// CHECK: fcvtps wzr, s9 // encoding: [0x3f,0x01,0x28,0x1e]
+// CHECK: fcvtps x12, s20 // encoding: [0x8c,0x02,0x28,0x9e]
+// CHECK: fcvtpu w30, s23 // encoding: [0xfe,0x02,0x29,0x1e]
+// CHECK: fcvtpu x29, s3 // encoding: [0x7d,0x00,0x29,0x9e]
+
+ fcvtms w2, s3
+ fcvtms x4, s5
+ fcvtmu w6, s7
+ fcvtmu x8, s9
+// CHECK: fcvtms w2, s3 // encoding: [0x62,0x00,0x30,0x1e]
+// CHECK: fcvtms x4, s5 // encoding: [0xa4,0x00,0x30,0x9e]
+// CHECK: fcvtmu w6, s7 // encoding: [0xe6,0x00,0x31,0x1e]
+// CHECK: fcvtmu x8, s9 // encoding: [0x28,0x01,0x31,0x9e]
+
+ fcvtzs w10, s11
+ fcvtzs x12, s13
+ fcvtzu w14, s15
+ fcvtzu x15, s16
+// CHECK: fcvtzs w10, s11 // encoding: [0x6a,0x01,0x38,0x1e]
+// CHECK: fcvtzs x12, s13 // encoding: [0xac,0x01,0x38,0x9e]
+// CHECK: fcvtzu w14, s15 // encoding: [0xee,0x01,0x39,0x1e]
+// CHECK: fcvtzu x15, s16 // encoding: [0x0f,0x02,0x39,0x9e]
+
+ scvtf s17, w18
+ scvtf s19, x20
+ ucvtf s21, w22
+ scvtf s23, x24
+// CHECK: scvtf s17, w18 // encoding: [0x51,0x02,0x22,0x1e]
+// CHECK: scvtf s19, x20 // encoding: [0x93,0x02,0x22,0x9e]
+// CHECK: ucvtf s21, w22 // encoding: [0xd5,0x02,0x23,0x1e]
+// CHECK: scvtf s23, x24 // encoding: [0x17,0x03,0x22,0x9e]
+
+ fcvtas w25, s26
+ fcvtas x27, s28
+ fcvtau w29, s30
+ fcvtau xzr, s0
+// CHECK: fcvtas w25, s26 // encoding: [0x59,0x03,0x24,0x1e]
+// CHECK: fcvtas x27, s28 // encoding: [0x9b,0x03,0x24,0x9e]
+// CHECK: fcvtau w29, s30 // encoding: [0xdd,0x03,0x25,0x1e]
+// CHECK: fcvtau xzr, s0 // encoding: [0x1f,0x00,0x25,0x9e]
+
+ fcvtns w3, d31
+ fcvtns xzr, d12
+ fcvtnu wzr, d12
+ fcvtnu x0, d0
+// CHECK: fcvtns w3, d31 // encoding: [0xe3,0x03,0x60,0x1e]
+// CHECK: fcvtns xzr, d12 // encoding: [0x9f,0x01,0x60,0x9e]
+// CHECK: fcvtnu wzr, d12 // encoding: [0x9f,0x01,0x61,0x1e]
+// CHECK: fcvtnu x0, d0 // encoding: [0x00,0x00,0x61,0x9e]
+
+ fcvtps wzr, d9
+ fcvtps x12, d20
+ fcvtpu w30, d23
+ fcvtpu x29, d3
+// CHECK: fcvtps wzr, d9 // encoding: [0x3f,0x01,0x68,0x1e]
+// CHECK: fcvtps x12, d20 // encoding: [0x8c,0x02,0x68,0x9e]
+// CHECK: fcvtpu w30, d23 // encoding: [0xfe,0x02,0x69,0x1e]
+// CHECK: fcvtpu x29, d3 // encoding: [0x7d,0x00,0x69,0x9e]
+
+ fcvtms w2, d3
+ fcvtms x4, d5
+ fcvtmu w6, d7
+ fcvtmu x8, d9
+// CHECK: fcvtms w2, d3 // encoding: [0x62,0x00,0x70,0x1e]
+// CHECK: fcvtms x4, d5 // encoding: [0xa4,0x00,0x70,0x9e]
+// CHECK: fcvtmu w6, d7 // encoding: [0xe6,0x00,0x71,0x1e]
+// CHECK: fcvtmu x8, d9 // encoding: [0x28,0x01,0x71,0x9e]
+
+ fcvtzs w10, d11
+ fcvtzs x12, d13
+ fcvtzu w14, d15
+ fcvtzu x15, d16
+// CHECK: fcvtzs w10, d11 // encoding: [0x6a,0x01,0x78,0x1e]
+// CHECK: fcvtzs x12, d13 // encoding: [0xac,0x01,0x78,0x9e]
+// CHECK: fcvtzu w14, d15 // encoding: [0xee,0x01,0x79,0x1e]
+// CHECK: fcvtzu x15, d16 // encoding: [0x0f,0x02,0x79,0x9e]
+
+ scvtf d17, w18
+ scvtf d19, x20
+ ucvtf d21, w22
+ ucvtf d23, x24
+// CHECK: scvtf d17, w18 // encoding: [0x51,0x02,0x62,0x1e]
+// CHECK: scvtf d19, x20 // encoding: [0x93,0x02,0x62,0x9e]
+// CHECK: ucvtf d21, w22 // encoding: [0xd5,0x02,0x63,0x1e]
+// CHECK: ucvtf d23, x24 // encoding: [0x17,0x03,0x63,0x9e]
+
+ fcvtas w25, d26
+ fcvtas x27, d28
+ fcvtau w29, d30
+ fcvtau xzr, d0
+// CHECK: fcvtas w25, d26 // encoding: [0x59,0x03,0x64,0x1e]
+// CHECK: fcvtas x27, d28 // encoding: [0x9b,0x03,0x64,0x9e]
+// CHECK: fcvtau w29, d30 // encoding: [0xdd,0x03,0x65,0x1e]
+// CHECK: fcvtau xzr, d0 // encoding: [0x1f,0x00,0x65,0x9e]
+
+ fmov w3, s9
+ fmov s9, w3
+// CHECK: fmov w3, s9 // encoding: [0x23,0x01,0x26,0x1e]
+// CHECK: fmov s9, w3 // encoding: [0x69,0x00,0x27,0x1e]
+
+ fmov x20, d31
+ fmov d1, x15
+// CHECK: fmov x20, d31 // encoding: [0xf4,0x03,0x66,0x9e]
+// CHECK: fmov d1, x15 // encoding: [0xe1,0x01,0x67,0x9e]
+
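+// Moves to/from the upper half of a V register: the v3.2d[1] spelling is
+// accepted but normalized to v3.d[1].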
+ fmov x3, v12.d[1]
+ fmov v1.d[1], x19
+ fmov v3.2d[1], xzr
+// CHECK: fmov x3, v12.d[1] // encoding: [0x83,0x01,0xae,0x9e]
+// CHECK: fmov v1.d[1], x19 // encoding: [0x61,0x02,0xaf,0x9e]
+// CHECK: fmov v3.d[1], xzr // encoding: [0xe3,0x03,0xaf,0x9e]
+
+//------------------------------------------------------------------------------
+// Floating-point immediate
+//------------------------------------------------------------------------------
+
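+// Floating-point immediates are printed with eight decimal places, so #0.125
+// appears as #0.12500000 in the CHECK lines.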
+ fmov s2, #0.125
+ fmov s3, #1.0
+ fmov d30, #16.0
+// CHECK: fmov s2, #0.12500000 // encoding: [0x02,0x10,0x28,0x1e]
+// CHECK: fmov s3, #1.00000000 // encoding: [0x03,0x10,0x2e,0x1e]
+// CHECK: fmov d30, #16.00000000 // encoding: [0x1e,0x10,0x66,0x1e]
+
+ fmov s4, #1.0625
+ fmov d10, #1.9375
+// CHECK: fmov s4, #1.06250000 // encoding: [0x04,0x30,0x2e,0x1e]
+// CHECK: fmov d10, #1.93750000 // encoding: [0x0a,0xf0,0x6f,0x1e]
+
+ fmov s12, #-1.0
+// CHECK: fmov s12, #-1.00000000 // encoding: [0x0c,0x10,0x3e,0x1e]
+
+ fmov d16, #8.5
+// CHECK: fmov d16, #8.50000000 // encoding: [0x10,0x30,0x64,0x1e]
+
+//------------------------------------------------------------------------------
+// Load-register (literal)
+//------------------------------------------------------------------------------
+ ldr w3, here
+ ldr x29, there
+ ldrsw xzr, everywhere
+// CHECK: ldr w3, here // encoding: [0x03'A',A,A,0x18'A']
+// CHECK: // fixup A - offset: 0, value: here, kind: fixup_a64_ld_prel
+// CHECK: ldr x29, there // encoding: [0x1d'A',A,A,0x58'A']
+// CHECK: // fixup A - offset: 0, value: there, kind: fixup_a64_ld_prel
+// CHECK: ldrsw xzr, everywhere // encoding: [0x1f'A',A,A,0x98'A']
+// CHECK: // fixup A - offset: 0, value: everywhere, kind: fixup_a64_ld_prel
+
+ ldr s0, who_knows
+ ldr d0, i_dont
+ ldr q0, there_must_be_a_better_way
+// CHECK: ldr s0, who_knows // encoding: [A,A,A,0x1c'A']
+// CHECK: // fixup A - offset: 0, value: who_knows, kind: fixup_a64_ld_prel
+// CHECK: ldr d0, i_dont // encoding: [A,A,A,0x5c'A']
+// CHECK: // fixup A - offset: 0, value: i_dont, kind: fixup_a64_ld_prel
+// CHECK: ldr q0, there_must_be_a_better_way // encoding: [A,A,A,0x9c'A']
+// CHECK: // fixup A - offset: 0, value: there_must_be_a_better_way, kind: fixup_a64_ld_prel
+
+ ldr w0, #1048572
+ ldr x10, #-1048576
+// CHECK: ldr w0, #1048572 // encoding: [0xe0,0xff,0x7f,0x18]
+// CHECK: ldr x10, #-1048576 // encoding: [0x0a,0x00,0x80,0x58]
+
+ prfm pldl1strm, nowhere
+ prfm #22, somewhere
+// CHECK: prfm pldl1strm, nowhere // encoding: [0x01'A',A,A,0xd8'A']
+// CHECK: // fixup A - offset: 0, value: nowhere, kind: fixup_a64_ld_prel
+// CHECK: prfm #22, somewhere // encoding: [0x16'A',A,A,0xd8'A']
+// CHECK: // fixup A - offset: 0, value: somewhere, kind: fixup_a64_ld_prel
+
+//------------------------------------------------------------------------------
+// Load/store exclusive
+//------------------------------------------------------------------------------
+
+ stxrb w1, w2, [x3, #0]
+ stxrh w2, w3, [x4]
+ stxr wzr, w4, [sp]
+ stxr w5, x6, [x7]
+// CHECK: stxrb w1, w2, [x3] // encoding: [0x62,0x7c,0x01,0x08]
+// CHECK: stxrh w2, w3, [x4] // encoding: [0x83,0x7c,0x02,0x48]
+// CHECK: stxr wzr, w4, [sp] // encoding: [0xe4,0x7f,0x1f,0x88]
+// CHECK: stxr w5, x6, [x7] // encoding: [0xe6,0x7c,0x05,0xc8]
+
+ ldxrb w7, [x9]
+ ldxrh wzr, [x10]
+ ldxr w9, [sp]
+ ldxr x10, [x11]
+// CHECK: ldxrb w7, [x9] // encoding: [0x27,0x7d,0x5f,0x08]
+// CHECK: ldxrh wzr, [x10] // encoding: [0x5f,0x7d,0x5f,0x48]
+// CHECK: ldxr w9, [sp] // encoding: [0xe9,0x7f,0x5f,0x88]
+// CHECK: ldxr x10, [x11] // encoding: [0x6a,0x7d,0x5f,0xc8]
+
+ stxp w11, w12, w13, [x14]
+ stxp wzr, x23, x14, [x15]
+// CHECK: stxp w11, w12, w13, [x14] // encoding: [0xcc,0x35,0x2b,0x88]
+// CHECK: stxp wzr, x23, x14, [x15] // encoding: [0xf7,0x39,0x3f,0xc8]
+
+ ldxp w12, wzr, [sp]
+ ldxp x13, x14, [x15]
+// CHECK: ldxp w12, wzr, [sp] // encoding: [0xec,0x7f,0x7f,0x88]
+// CHECK: ldxp x13, x14, [x15] // encoding: [0xed,0x39,0x7f,0xc8]
+
+ stlxrb w14, w15, [x16]
+ stlxrh w15, w16, [x17,#0]
+ stlxr wzr, w17, [sp]
+ stlxr w18, x19, [x20]
+// CHECK: stlxrb w14, w15, [x16] // encoding: [0x0f,0xfe,0x0e,0x08]
+// CHECK: stlxrh w15, w16, [x17] // encoding: [0x30,0xfe,0x0f,0x48]
+// CHECK: stlxr wzr, w17, [sp] // encoding: [0xf1,0xff,0x1f,0x88]
+// CHECK: stlxr w18, x19, [x20] // encoding: [0x93,0xfe,0x12,0xc8]
+
+ ldaxrb w19, [x21]
+ ldaxrh w20, [sp]
+ ldaxr wzr, [x22]
+ ldaxr x21, [x23]
+// CHECK: ldaxrb w19, [x21] // encoding: [0xb3,0xfe,0x5f,0x08]
+// CHECK: ldaxrh w20, [sp] // encoding: [0xf4,0xff,0x5f,0x48]
+// CHECK: ldaxr wzr, [x22] // encoding: [0xdf,0xfe,0x5f,0x88]
+// CHECK: ldaxr x21, [x23] // encoding: [0xf5,0xfe,0x5f,0xc8]
+
+ stlxp wzr, w22, w23, [x24]
+ stlxp w25, x26, x27, [sp]
+// CHECK: stlxp wzr, w22, w23, [x24] // encoding: [0x16,0xdf,0x3f,0x88]
+// CHECK: stlxp w25, x26, x27, [sp] // encoding: [0xfa,0xef,0x39,0xc8]
+
+ ldaxp w26, wzr, [sp]
+ ldaxp x27, x28, [x30]
+// CHECK: ldaxp w26, wzr, [sp] // encoding: [0xfa,0xff,0x7f,0x88]
+// CHECK: ldaxp x27, x28, [x30] // encoding: [0xdb,0xf3,0x7f,0xc8]
+
+ stlrb w27, [sp]
+ stlrh w28, [x0]
+ stlr wzr, [x1]
+ stlr x30, [x2]
+// CHECK: stlrb w27, [sp] // encoding: [0xfb,0xff,0x9f,0x08]
+// CHECK: stlrh w28, [x0] // encoding: [0x1c,0xfc,0x9f,0x48]
+// CHECK: stlr wzr, [x1] // encoding: [0x3f,0xfc,0x9f,0x88]
+// CHECK: stlr x30, [x2] // encoding: [0x5e,0xfc,0x9f,0xc8]
+
+ ldarb w29, [sp]
+ ldarh w30, [x0]
+ ldar wzr, [x1]
+ ldar x1, [x2]
+// CHECK: ldarb w29, [sp] // encoding: [0xfd,0xff,0xdf,0x08]
+// CHECK: ldarh w30, [x0] // encoding: [0x1e,0xfc,0xdf,0x48]
+// CHECK: ldar wzr, [x1] // encoding: [0x3f,0xfc,0xdf,0x88]
+// CHECK: ldar x1, [x2] // encoding: [0x41,0xfc,0xdf,0xc8]
+
+ stlxp wzr, w22, w23, [x24,#0]
+// CHECK: stlxp wzr, w22, w23, [x24] // encoding: [0x16,0xdf,0x3f,0x88]
+
+//------------------------------------------------------------------------------
+// Load/store (unscaled immediate)
+//------------------------------------------------------------------------------
+
+ sturb w9, [sp, #0]
+ sturh wzr, [x12, #255]
+ stur w16, [x0, #-256]
+ stur x28, [x14, #1]
+// CHECK: sturb w9, [sp] // encoding: [0xe9,0x03,0x00,0x38]
+// CHECK: sturh wzr, [x12, #255] // encoding: [0x9f,0xf1,0x0f,0x78]
+// CHECK: stur w16, [x0, #-256] // encoding: [0x10,0x00,0x10,0xb8]
+// CHECK: stur x28, [x14, #1] // encoding: [0xdc,0x11,0x00,0xf8]
+
+ ldurb w1, [x20, #255]
+ ldurh w20, [x1, #255]
+ ldur w12, [sp, #255]
+ ldur xzr, [x12, #255]
+// CHECK: ldurb w1, [x20, #255] // encoding: [0x81,0xf2,0x4f,0x38]
+// CHECK: ldurh w20, [x1, #255] // encoding: [0x34,0xf0,0x4f,0x78]
+// CHECK: ldur w12, [sp, #255] // encoding: [0xec,0xf3,0x4f,0xb8]
+// CHECK: ldur xzr, [x12, #255] // encoding: [0x9f,0xf1,0x4f,0xf8]
+
+ ldursb x9, [x7, #-256]
+ ldursh x17, [x19, #-256]
+ ldursw x20, [x15, #-256]
+ ldursw x13, [x2]
+ prfum pldl2keep, [sp, #-256]
+ ldursb w19, [x1, #-256]
+ ldursh w15, [x21, #-256]
+// CHECK: ldursb x9, [x7, #-256] // encoding: [0xe9,0x00,0x90,0x38]
+// CHECK: ldursh x17, [x19, #-256] // encoding: [0x71,0x02,0x90,0x78]
+// CHECK: ldursw x20, [x15, #-256] // encoding: [0xf4,0x01,0x90,0xb8]
+// CHECK: ldursw x13, [x2] // encoding: [0x4d,0x00,0x80,0xb8]
+// CHECK: prfum pldl2keep, [sp, #-256] // encoding: [0xe2,0x03,0x90,0xf8]
+// CHECK: ldursb w19, [x1, #-256] // encoding: [0x33,0x00,0xd0,0x38]
+// CHECK: ldursh w15, [x21, #-256] // encoding: [0xaf,0x02,0xd0,0x78]
+
+ stur b0, [sp, #1]
+ stur h12, [x12, #-1]
+ stur s15, [x0, #255]
+ stur d31, [x5, #25]
+ stur q9, [x5]
+// CHECK: stur b0, [sp, #1] // encoding: [0xe0,0x13,0x00,0x3c]
+// CHECK: stur h12, [x12, #-1] // encoding: [0x8c,0xf1,0x1f,0x7c]
+// CHECK: stur s15, [x0, #255] // encoding: [0x0f,0xf0,0x0f,0xbc]
+// CHECK: stur d31, [x5, #25] // encoding: [0xbf,0x90,0x01,0xfc]
+// CHECK: stur q9, [x5] // encoding: [0xa9,0x00,0x80,0x3c]
+
+ ldur b3, [sp]
+ ldur h5, [x4, #-256]
+ ldur s7, [x12, #-1]
+ ldur d11, [x19, #4]
+ ldur q13, [x1, #2]
+// CHECK: ldur b3, [sp] // encoding: [0xe3,0x03,0x40,0x3c]
+// CHECK: ldur h5, [x4, #-256] // encoding: [0x85,0x00,0x50,0x7c]
+// CHECK: ldur s7, [x12, #-1] // encoding: [0x87,0xf1,0x5f,0xbc]
+// CHECK: ldur d11, [x19, #4] // encoding: [0x6b,0x42,0x40,0xfc]
+// CHECK: ldur q13, [x1, #2] // encoding: [0x2d,0x20,0xc0,0x3c]
+
+//------------------------------------------------------------------------------
+// Load/store (unsigned immediate)
+//------------------------------------------------------------------------------
+
+//// Basic addressing mode limits: 8-byte access
+ ldr x0, [x0]
+ ldr x4, [x29, #0]
+ ldr x30, [x12, #32760]
+ ldr x20, [sp, #8]
+// CHECK: ldr x0, [x0] // encoding: [0x00,0x00,0x40,0xf9]
+// CHECK: ldr x4, [x29] // encoding: [0xa4,0x03,0x40,0xf9]
+// CHECK: ldr x30, [x12, #32760] // encoding: [0x9e,0xfd,0x7f,0xf9]
+// CHECK: ldr x20, [sp, #8] // encoding: [0xf4,0x07,0x40,0xf9]
+
+//// An Rt value of 31 selects the zero register
+ ldr xzr, [sp]
+// CHECK: ldr xzr, [sp] // encoding: [0xff,0x03,0x40,0xf9]
+
+//// 4-byte load: base address register is still 64-bit; check limits
+ ldr w2, [sp]
+ ldr w17, [sp, #16380]
+ ldr w13, [x2, #4]
+// CHECK: ldr w2, [sp] // encoding: [0xe2,0x03,0x40,0xb9]
+// CHECK: ldr w17, [sp, #16380] // encoding: [0xf1,0xff,0x7f,0xb9]
+// CHECK: ldr w13, [x2, #4] // encoding: [0x4d,0x04,0x40,0xb9]
+
+//// Signed 4-byte load. Limits.
+ ldrsw x2, [x5,#4]
+ ldrsw x23, [sp, #16380]
+// CHECK: ldrsw x2, [x5, #4] // encoding: [0xa2,0x04,0x80,0xb9]
+// CHECK: ldrsw x23, [sp, #16380] // encoding: [0xf7,0xff,0xbf,0xb9]
+
+//// 2-byte loads
+ ldrh w2, [x4]
+ ldrsh w23, [x6, #8190]
+ ldrsh wzr, [sp, #2]
+ ldrsh x29, [x2, #2]
+// CHECK: ldrh w2, [x4] // encoding: [0x82,0x00,0x40,0x79]
+// CHECK: ldrsh w23, [x6, #8190] // encoding: [0xd7,0xfc,0xff,0x79]
+// CHECK: ldrsh wzr, [sp, #2] // encoding: [0xff,0x07,0xc0,0x79]
+// CHECK: ldrsh x29, [x2, #2] // encoding: [0x5d,0x04,0x80,0x79]
+
+//// 1-byte loads
+ ldrb w26, [x3, #121]
+ ldrb w12, [x2, #0]
+ ldrsb w27, [sp, #4095]
+ ldrsb xzr, [x15]
+// CHECK: ldrb w26, [x3, #121] // encoding: [0x7a,0xe4,0x41,0x39]
+// CHECK: ldrb w12, [x2] // encoding: [0x4c,0x00,0x40,0x39]
+// CHECK: ldrsb w27, [sp, #4095] // encoding: [0xfb,0xff,0xff,0x39]
+// CHECK: ldrsb xzr, [x15] // encoding: [0xff,0x01,0x80,0x39]
+
+//// Stores
+ str x30, [sp]
+ str w20, [x4, #16380]
+ strh w20, [x10, #14]
+ strh w17, [sp, #8190]
+ strb w23, [x3, #4095]
+ strb wzr, [x2]
+// CHECK: str x30, [sp] // encoding: [0xfe,0x03,0x00,0xf9]
+// CHECK: str w20, [x4, #16380] // encoding: [0x94,0xfc,0x3f,0xb9]
+// CHECK: strh w20, [x10, #14] // encoding: [0x54,0x1d,0x00,0x79]
+// CHECK: strh w17, [sp, #8190] // encoding: [0xf1,0xff,0x3f,0x79]
+// CHECK: strb w23, [x3, #4095] // encoding: [0x77,0xfc,0x3f,0x39]
+// CHECK: strb wzr, [x2] // encoding: [0x5f,0x00,0x00,0x39]
+
+//// Relocations
+ str x15, [x5, #:lo12:sym]
+ ldrb w15, [x5, #:lo12:sym]
+ ldrsh x15, [x5, #:lo12:sym]
+ ldrsw x15, [x5, #:lo12:sym]
+ ldr x15, [x5, #:lo12:sym]
+ ldr q3, [x2, #:lo12:sym]
+// CHECK: str x15, [x5, #:lo12:sym] // encoding: [0xaf'A',A,A,0xf9'A']
+// CHECK: // fixup A - offset: 0, value: :lo12:sym, kind: fixup_a64_ldst64_lo12
+// CHECK: ldrb w15, [x5, #:lo12:sym] // encoding: [0xaf'A',A,0x40'A',0x39'A']
+// CHECK: // fixup A - offset: 0, value: :lo12:sym, kind: fixup_a64_ldst8_lo12
+// CHECK: ldrsh x15, [x5, #:lo12:sym] // encoding: [0xaf'A',A,0x80'A',0x79'A']
+// CHECK: // fixup A - offset: 0, value: :lo12:sym, kind: fixup_a64_ldst16_lo12
+// CHECK: ldrsw x15, [x5, #:lo12:sym] // encoding: [0xaf'A',A,0x80'A',0xb9'A']
+// CHECK: // fixup A - offset: 0, value: :lo12:sym, kind: fixup_a64_ldst32_lo12
+// CHECK: ldr x15, [x5, #:lo12:sym] // encoding: [0xaf'A',A,0x40'A',0xf9'A']
+// CHECK: // fixup A - offset: 0, value: :lo12:sym, kind: fixup_a64_ldst64_lo12
+// CHECK: ldr q3, [x2, #:lo12:sym] // encoding: [0x43'A',A,0xc0'A',0x3d'A']
+// CHECK: // fixup A - offset: 0, value: :lo12:sym, kind: fixup_a64_ldst128_lo12
+
+ prfm pldl1keep, [sp, #8]
+ prfm pldl1strm, [x3]
+ prfm pldl2keep, [x5,#16]
+ prfm pldl2strm, [x2]
+ prfm pldl3keep, [x5]
+ prfm pldl3strm, [x6]
+ prfm pstl1keep, [sp, #8]
+ prfm pstl1strm, [x3]
+ prfm pstl2keep, [x5,#16]
+ prfm pstl2strm, [x2]
+ prfm pstl3keep, [x5]
+ prfm pstl3strm, [x6]
+ prfm #15, [sp]
+// CHECK: prfm pldl1keep, [sp, #8] // encoding: [0xe0,0x07,0x80,0xf9]
+// CHECK: prfm pldl1strm, [x3, #0] // encoding: [0x61,0x00,0x80,0xf9]
+// CHECK: prfm pldl2keep, [x5, #16] // encoding: [0xa2,0x08,0x80,0xf9]
+// CHECK: prfm pldl2strm, [x2, #0] // encoding: [0x43,0x00,0x80,0xf9]
+// CHECK: prfm pldl3keep, [x5, #0] // encoding: [0xa4,0x00,0x80,0xf9]
+// CHECK: prfm pldl3strm, [x6, #0] // encoding: [0xc5,0x00,0x80,0xf9]
+// CHECK: prfm pstl1keep, [sp, #8] // encoding: [0xf0,0x07,0x80,0xf9]
+// CHECK: prfm pstl1strm, [x3, #0] // encoding: [0x71,0x00,0x80,0xf9]
+// CHECK: prfm pstl2keep, [x5, #16] // encoding: [0xb2,0x08,0x80,0xf9]
+// CHECK: prfm pstl2strm, [x2, #0] // encoding: [0x53,0x00,0x80,0xf9]
+// CHECK: prfm pstl3keep, [x5, #0] // encoding: [0xb4,0x00,0x80,0xf9]
+// CHECK: prfm pstl3strm, [x6, #0] // encoding: [0xd5,0x00,0x80,0xf9]
+// CHECK: prfm #15, [sp, #0] // encoding: [0xef,0x03,0x80,0xf9]
+
+//// Floating-point versions
+
+ ldr b31, [sp, #4095]
+ ldr h20, [x2, #8190]
+ ldr s10, [x19, #16380]
+ ldr d3, [x10, #32760]
+ str q12, [sp, #65520]
+// CHECK: ldr b31, [sp, #4095] // encoding: [0xff,0xff,0x7f,0x3d]
+// CHECK: ldr h20, [x2, #8190] // encoding: [0x54,0xfc,0x7f,0x7d]
+// CHECK: ldr s10, [x19, #16380] // encoding: [0x6a,0xfe,0x7f,0xbd]
+// CHECK: ldr d3, [x10, #32760] // encoding: [0x43,0xfd,0x7f,0xfd]
+// CHECK: str q12, [sp, #65520] // encoding: [0xec,0xff,0xbf,0x3d]
+
+//------------------------------------------------------------------------------
+// Load/store register (register offset)
+//------------------------------------------------------------------------------
+
+ ldrb w3, [sp, x5]
+ ldrb w9, [x27, x6, lsl #0]
+ ldrsb w10, [x30, x7]
+ ldrb w11, [x29, x3, sxtx]
+ strb w12, [x28, xzr, sxtx #0]
+ ldrb w14, [x26, w6, uxtw]
+ ldrsb w15, [x25, w7, uxtw #0]
+ ldrb w17, [x23, w9, sxtw]
+ ldrsb x18, [x22, w10, sxtw #0]
+// CHECK: ldrb w3, [sp, x5] // encoding: [0xe3,0x6b,0x65,0x38]
+// CHECK: ldrb w9, [x27, x6, lsl #0] // encoding: [0x69,0x7b,0x66,0x38]
+// CHECK: ldrsb w10, [x30, x7] // encoding: [0xca,0x6b,0xe7,0x38]
+// CHECK: ldrb w11, [x29, x3, sxtx] // encoding: [0xab,0xeb,0x63,0x38]
+// CHECK: strb w12, [x28, xzr, sxtx #0] // encoding: [0x8c,0xfb,0x3f,0x38]
+// CHECK: ldrb w14, [x26, w6, uxtw] // encoding: [0x4e,0x4b,0x66,0x38]
+// CHECK: ldrsb w15, [x25, w7, uxtw #0] // encoding: [0x2f,0x5b,0xe7,0x38]
+// CHECK: ldrb w17, [x23, w9, sxtw] // encoding: [0xf1,0xca,0x69,0x38]
+// CHECK: ldrsb x18, [x22, w10, sxtw #0] // encoding: [0xd2,0xda,0xaa,0x38]
+
+ ldrsh w3, [sp, x5]
+ ldrsh w9, [x27, x6, lsl #0]
+ ldrh w10, [x30, x7, lsl #1]
+ strh w11, [x29, x3, sxtx]
+ ldrh w12, [x28, xzr, sxtx #0]
+ ldrsh x13, [x27, x5, sxtx #1]
+ ldrh w14, [x26, w6, uxtw]
+ ldrh w15, [x25, w7, uxtw #0]
+ ldrsh w16, [x24, w8, uxtw #1]
+ ldrh w17, [x23, w9, sxtw]
+ ldrh w18, [x22, w10, sxtw #0]
+ strh w19, [x21, wzr, sxtw #1]
+// CHECK: ldrsh w3, [sp, x5] // encoding: [0xe3,0x6b,0xe5,0x78]
+// CHECK: ldrsh w9, [x27, x6] // encoding: [0x69,0x6b,0xe6,0x78]
+// CHECK: ldrh w10, [x30, x7, lsl #1] // encoding: [0xca,0x7b,0x67,0x78]
+// CHECK: strh w11, [x29, x3, sxtx] // encoding: [0xab,0xeb,0x23,0x78]
+// CHECK: ldrh w12, [x28, xzr, sxtx] // encoding: [0x8c,0xeb,0x7f,0x78]
+// CHECK: ldrsh x13, [x27, x5, sxtx #1] // encoding: [0x6d,0xfb,0xa5,0x78]
+// CHECK: ldrh w14, [x26, w6, uxtw] // encoding: [0x4e,0x4b,0x66,0x78]
+// CHECK: ldrh w15, [x25, w7, uxtw] // encoding: [0x2f,0x4b,0x67,0x78]
+// CHECK: ldrsh w16, [x24, w8, uxtw #1] // encoding: [0x10,0x5b,0xe8,0x78]
+// CHECK: ldrh w17, [x23, w9, sxtw] // encoding: [0xf1,0xca,0x69,0x78]
+// CHECK: ldrh w18, [x22, w10, sxtw] // encoding: [0xd2,0xca,0x6a,0x78]
+// CHECK: strh w19, [x21, wzr, sxtw #1] // encoding: [0xb3,0xda,0x3f,0x78]
+
+ ldr w3, [sp, x5]
+ ldr s9, [x27, x6, lsl #0]
+ ldr w10, [x30, x7, lsl #2]
+ ldr w11, [x29, x3, sxtx]
+ str s12, [x28, xzr, sxtx #0]
+ str w13, [x27, x5, sxtx #2]
+ str w14, [x26, w6, uxtw]
+ ldr w15, [x25, w7, uxtw #0]
+ ldr w16, [x24, w8, uxtw #2]
+ ldrsw x17, [x23, w9, sxtw]
+ ldr w18, [x22, w10, sxtw #0]
+ ldrsw x19, [x21, wzr, sxtw #2]
+// CHECK: ldr w3, [sp, x5] // encoding: [0xe3,0x6b,0x65,0xb8]
+// CHECK: ldr s9, [x27, x6] // encoding: [0x69,0x6b,0x66,0xbc]
+// CHECK: ldr w10, [x30, x7, lsl #2] // encoding: [0xca,0x7b,0x67,0xb8]
+// CHECK: ldr w11, [x29, x3, sxtx] // encoding: [0xab,0xeb,0x63,0xb8]
+// CHECK: str s12, [x28, xzr, sxtx] // encoding: [0x8c,0xeb,0x3f,0xbc]
+// CHECK: str w13, [x27, x5, sxtx #2] // encoding: [0x6d,0xfb,0x25,0xb8]
+// CHECK: str w14, [x26, w6, uxtw] // encoding: [0x4e,0x4b,0x26,0xb8]
+// CHECK: ldr w15, [x25, w7, uxtw] // encoding: [0x2f,0x4b,0x67,0xb8]
+// CHECK: ldr w16, [x24, w8, uxtw #2] // encoding: [0x10,0x5b,0x68,0xb8]
+// CHECK: ldrsw x17, [x23, w9, sxtw] // encoding: [0xf1,0xca,0xa9,0xb8]
+// CHECK: ldr w18, [x22, w10, sxtw] // encoding: [0xd2,0xca,0x6a,0xb8]
+// CHECK: ldrsw x19, [x21, wzr, sxtw #2] // encoding: [0xb3,0xda,0xbf,0xb8]
+
+ ldr x3, [sp, x5]
+ str x9, [x27, x6, lsl #0]
+ ldr d10, [x30, x7, lsl #3]
+ str x11, [x29, x3, sxtx]
+ ldr x12, [x28, xzr, sxtx #0]
+ ldr x13, [x27, x5, sxtx #3]
+ prfm pldl1keep, [x26, w6, uxtw]
+ ldr x15, [x25, w7, uxtw #0]
+ ldr x16, [x24, w8, uxtw #3]
+ ldr x17, [x23, w9, sxtw]
+ ldr x18, [x22, w10, sxtw #0]
+ str d19, [x21, wzr, sxtw #3]
+ prfm #6, [x0, x5]
+// CHECK: ldr x3, [sp, x5] // encoding: [0xe3,0x6b,0x65,0xf8]
+// CHECK: str x9, [x27, x6] // encoding: [0x69,0x6b,0x26,0xf8]
+// CHECK: ldr d10, [x30, x7, lsl #3] // encoding: [0xca,0x7b,0x67,0xfc]
+// CHECK: str x11, [x29, x3, sxtx] // encoding: [0xab,0xeb,0x23,0xf8]
+// CHECK: ldr x12, [x28, xzr, sxtx] // encoding: [0x8c,0xeb,0x7f,0xf8]
+// CHECK: ldr x13, [x27, x5, sxtx #3] // encoding: [0x6d,0xfb,0x65,0xf8]
+// CHECK: prfm pldl1keep, [x26, w6, uxtw] // encoding: [0x40,0x4b,0xa6,0xf8]
+// CHECK: ldr x15, [x25, w7, uxtw] // encoding: [0x2f,0x4b,0x67,0xf8]
+// CHECK: ldr x16, [x24, w8, uxtw #3] // encoding: [0x10,0x5b,0x68,0xf8]
+// CHECK: ldr x17, [x23, w9, sxtw] // encoding: [0xf1,0xca,0x69,0xf8]
+// CHECK: ldr x18, [x22, w10, sxtw] // encoding: [0xd2,0xca,0x6a,0xf8]
+// CHECK: str d19, [x21, wzr, sxtw #3] // encoding: [0xb3,0xda,0x3f,0xfc]
+// CHECK: prfm #6, [x0, x5, lsl #0] // encoding: [0x06,0x68,0xa5,0xf8]
+
+ ldr q3, [sp, x5]
+ ldr q9, [x27, x6, lsl #0]
+ ldr q10, [x30, x7, lsl #4]
+ str q11, [x29, x3, sxtx]
+ str q12, [x28, xzr, sxtx #0]
+ str q13, [x27, x5, sxtx #4]
+ ldr q14, [x26, w6, uxtw]
+ ldr q15, [x25, w7, uxtw #0]
+ ldr q16, [x24, w8, uxtw #4]
+ ldr q17, [x23, w9, sxtw]
+ str q18, [x22, w10, sxtw #0]
+ ldr q19, [x21, wzr, sxtw #4]
+// CHECK: ldr q3, [sp, x5] // encoding: [0xe3,0x6b,0xe5,0x3c]
+// CHECK: ldr q9, [x27, x6] // encoding: [0x69,0x6b,0xe6,0x3c]
+// CHECK: ldr q10, [x30, x7, lsl #4] // encoding: [0xca,0x7b,0xe7,0x3c]
+// CHECK: str q11, [x29, x3, sxtx] // encoding: [0xab,0xeb,0xa3,0x3c]
+// CHECK: str q12, [x28, xzr, sxtx] // encoding: [0x8c,0xeb,0xbf,0x3c]
+// CHECK: str q13, [x27, x5, sxtx #4] // encoding: [0x6d,0xfb,0xa5,0x3c]
+// CHECK: ldr q14, [x26, w6, uxtw] // encoding: [0x4e,0x4b,0xe6,0x3c]
+// CHECK: ldr q15, [x25, w7, uxtw] // encoding: [0x2f,0x4b,0xe7,0x3c]
+// CHECK: ldr q16, [x24, w8, uxtw #4] // encoding: [0x10,0x5b,0xe8,0x3c]
+// CHECK: ldr q17, [x23, w9, sxtw] // encoding: [0xf1,0xca,0xe9,0x3c]
+// CHECK: str q18, [x22, w10, sxtw] // encoding: [0xd2,0xca,0xaa,0x3c]
+// CHECK: ldr q19, [x21, wzr, sxtw #4] // encoding: [0xb3,0xda,0xff,0x3c]
+
+//------------------------------------------------------------------------------
+// Load/store register (immediate post-indexed)
+//------------------------------------------------------------------------------
+
+ strb w9, [x2], #255
+ strb w10, [x3], #1
+ strb w10, [x3], #-256
+ strh w9, [x2], #255
+ strh w9, [x2], #1
+ strh w10, [x3], #-256
+// CHECK: strb w9, [x2], #255 // encoding: [0x49,0xf4,0x0f,0x38]
+// CHECK: strb w10, [x3], #1 // encoding: [0x6a,0x14,0x00,0x38]
+// CHECK: strb w10, [x3], #-256 // encoding: [0x6a,0x04,0x10,0x38]
+// CHECK: strh w9, [x2], #255 // encoding: [0x49,0xf4,0x0f,0x78]
+// CHECK: strh w9, [x2], #1 // encoding: [0x49,0x14,0x00,0x78]
+// CHECK: strh w10, [x3], #-256 // encoding: [0x6a,0x04,0x10,0x78]
+
+ str w19, [sp], #255
+ str w20, [x30], #1
+ str w21, [x12], #-256
+ str xzr, [x9], #255
+ str x2, [x3], #1
+ str x19, [x12], #-256
+// CHECK: str w19, [sp], #255 // encoding: [0xf3,0xf7,0x0f,0xb8]
+// CHECK: str w20, [x30], #1 // encoding: [0xd4,0x17,0x00,0xb8]
+// CHECK: str w21, [x12], #-256 // encoding: [0x95,0x05,0x10,0xb8]
+// CHECK: str xzr, [x9], #255 // encoding: [0x3f,0xf5,0x0f,0xf8]
+// CHECK: str x2, [x3], #1 // encoding: [0x62,0x14,0x00,0xf8]
+// CHECK: str x19, [x12], #-256 // encoding: [0x93,0x05,0x10,0xf8]
+
+ ldrb w9, [x2], #255
+ ldrb w10, [x3], #1
+ ldrb w10, [x3], #-256
+ ldrh w9, [x2], #255
+ ldrh w9, [x2], #1
+ ldrh w10, [x3], #-256
+// CHECK: ldrb w9, [x2], #255 // encoding: [0x49,0xf4,0x4f,0x38]
+// CHECK: ldrb w10, [x3], #1 // encoding: [0x6a,0x14,0x40,0x38]
+// CHECK: ldrb w10, [x3], #-256 // encoding: [0x6a,0x04,0x50,0x38]
+// CHECK: ldrh w9, [x2], #255 // encoding: [0x49,0xf4,0x4f,0x78]
+// CHECK: ldrh w9, [x2], #1 // encoding: [0x49,0x14,0x40,0x78]
+// CHECK: ldrh w10, [x3], #-256 // encoding: [0x6a,0x04,0x50,0x78]
+
+ ldr w19, [sp], #255
+ ldr w20, [x30], #1
+ ldr w21, [x12], #-256
+ ldr xzr, [x9], #255
+ ldr x2, [x3], #1
+ ldr x19, [x12], #-256
+// CHECK: ldr w19, [sp], #255 // encoding: [0xf3,0xf7,0x4f,0xb8]
+// CHECK: ldr w20, [x30], #1 // encoding: [0xd4,0x17,0x40,0xb8]
+// CHECK: ldr w21, [x12], #-256 // encoding: [0x95,0x05,0x50,0xb8]
+// CHECK: ldr xzr, [x9], #255 // encoding: [0x3f,0xf5,0x4f,0xf8]
+// CHECK: ldr x2, [x3], #1 // encoding: [0x62,0x14,0x40,0xf8]
+// CHECK: ldr x19, [x12], #-256 // encoding: [0x93,0x05,0x50,0xf8]
+
+ ldrsb xzr, [x9], #255
+ ldrsb x2, [x3], #1
+ ldrsb x19, [x12], #-256
+ ldrsh xzr, [x9], #255
+ ldrsh x2, [x3], #1
+ ldrsh x19, [x12], #-256
+ ldrsw xzr, [x9], #255
+ ldrsw x2, [x3], #1
+ ldrsw x19, [x12], #-256
+// CHECK: ldrsb xzr, [x9], #255 // encoding: [0x3f,0xf5,0x8f,0x38]
+// CHECK: ldrsb x2, [x3], #1 // encoding: [0x62,0x14,0x80,0x38]
+// CHECK: ldrsb x19, [x12], #-256 // encoding: [0x93,0x05,0x90,0x38]
+// CHECK: ldrsh xzr, [x9], #255 // encoding: [0x3f,0xf5,0x8f,0x78]
+// CHECK: ldrsh x2, [x3], #1 // encoding: [0x62,0x14,0x80,0x78]
+// CHECK: ldrsh x19, [x12], #-256 // encoding: [0x93,0x05,0x90,0x78]
+// CHECK: ldrsw xzr, [x9], #255 // encoding: [0x3f,0xf5,0x8f,0xb8]
+// CHECK: ldrsw x2, [x3], #1 // encoding: [0x62,0x14,0x80,0xb8]
+// CHECK: ldrsw x19, [x12], #-256 // encoding: [0x93,0x05,0x90,0xb8]
+
+ ldrsb wzr, [x9], #255
+ ldrsb w2, [x3], #1
+ ldrsb w19, [x12], #-256
+ ldrsh wzr, [x9], #255
+ ldrsh w2, [x3], #1
+ ldrsh w19, [x12], #-256
+// CHECK: ldrsb wzr, [x9], #255 // encoding: [0x3f,0xf5,0xcf,0x38]
+// CHECK: ldrsb w2, [x3], #1 // encoding: [0x62,0x14,0xc0,0x38]
+// CHECK: ldrsb w19, [x12], #-256 // encoding: [0x93,0x05,0xd0,0x38]
+// CHECK: ldrsh wzr, [x9], #255 // encoding: [0x3f,0xf5,0xcf,0x78]
+// CHECK: ldrsh w2, [x3], #1 // encoding: [0x62,0x14,0xc0,0x78]
+// CHECK: ldrsh w19, [x12], #-256 // encoding: [0x93,0x05,0xd0,0x78]
+
+ str b0, [x0], #255
+ str b3, [x3], #1
+ str b5, [sp], #-256
+ str h10, [x10], #255
+ str h13, [x23], #1
+ str h15, [sp], #-256
+ str s20, [x20], #255
+ str s23, [x23], #1
+ str s25, [x0], #-256
+ str d20, [x20], #255
+ str d23, [x23], #1
+ str d25, [x0], #-256
+// CHECK: str b0, [x0], #255 // encoding: [0x00,0xf4,0x0f,0x3c]
+// CHECK: str b3, [x3], #1 // encoding: [0x63,0x14,0x00,0x3c]
+// CHECK: str b5, [sp], #-256 // encoding: [0xe5,0x07,0x10,0x3c]
+// CHECK: str h10, [x10], #255 // encoding: [0x4a,0xf5,0x0f,0x7c]
+// CHECK: str h13, [x23], #1 // encoding: [0xed,0x16,0x00,0x7c]
+// CHECK: str h15, [sp], #-256 // encoding: [0xef,0x07,0x10,0x7c]
+// CHECK: str s20, [x20], #255 // encoding: [0x94,0xf6,0x0f,0xbc]
+// CHECK: str s23, [x23], #1 // encoding: [0xf7,0x16,0x00,0xbc]
+// CHECK: str s25, [x0], #-256 // encoding: [0x19,0x04,0x10,0xbc]
+// CHECK: str d20, [x20], #255 // encoding: [0x94,0xf6,0x0f,0xfc]
+// CHECK: str d23, [x23], #1 // encoding: [0xf7,0x16,0x00,0xfc]
+// CHECK: str d25, [x0], #-256 // encoding: [0x19,0x04,0x10,0xfc]
+
+ ldr b0, [x0], #255
+ ldr b3, [x3], #1
+ ldr b5, [sp], #-256
+ ldr h10, [x10], #255
+ ldr h13, [x23], #1
+ ldr h15, [sp], #-256
+ ldr s20, [x20], #255
+ ldr s23, [x23], #1
+ ldr s25, [x0], #-256
+ ldr d20, [x20], #255
+ ldr d23, [x23], #1
+ ldr d25, [x0], #-256
+// CHECK: ldr b0, [x0], #255 // encoding: [0x00,0xf4,0x4f,0x3c]
+// CHECK: ldr b3, [x3], #1 // encoding: [0x63,0x14,0x40,0x3c]
+// CHECK: ldr b5, [sp], #-256 // encoding: [0xe5,0x07,0x50,0x3c]
+// CHECK: ldr h10, [x10], #255 // encoding: [0x4a,0xf5,0x4f,0x7c]
+// CHECK: ldr h13, [x23], #1 // encoding: [0xed,0x16,0x40,0x7c]
+// CHECK: ldr h15, [sp], #-256 // encoding: [0xef,0x07,0x50,0x7c]
+// CHECK: ldr s20, [x20], #255 // encoding: [0x94,0xf6,0x4f,0xbc]
+// CHECK: ldr s23, [x23], #1 // encoding: [0xf7,0x16,0x40,0xbc]
+// CHECK: ldr s25, [x0], #-256 // encoding: [0x19,0x04,0x50,0xbc]
+// CHECK: ldr d20, [x20], #255 // encoding: [0x94,0xf6,0x4f,0xfc]
+// CHECK: ldr d23, [x23], #1 // encoding: [0xf7,0x16,0x40,0xfc]
+// CHECK: ldr d25, [x0], #-256 // encoding: [0x19,0x04,0x50,0xfc]
+
+ ldr q20, [x1], #255
+ ldr q23, [x9], #1
+ ldr q25, [x20], #-256
+ str q10, [x1], #255
+ str q22, [sp], #1
+ str q21, [x20], #-256
+// CHECK: ldr q20, [x1], #255 // encoding: [0x34,0xf4,0xcf,0x3c]
+// CHECK: ldr q23, [x9], #1 // encoding: [0x37,0x15,0xc0,0x3c]
+// CHECK: ldr q25, [x20], #-256 // encoding: [0x99,0x06,0xd0,0x3c]
+// CHECK: str q10, [x1], #255 // encoding: [0x2a,0xf4,0x8f,0x3c]
+// CHECK: str q22, [sp], #1 // encoding: [0xf6,0x17,0x80,0x3c]
+// CHECK: str q21, [x20], #-256 // encoding: [0x95,0x06,0x90,0x3c]
+
+//------------------------------------------------------------------------------
+// Load/store register (immediate pre-indexed)
+//------------------------------------------------------------------------------
+
+ ldr x3, [x4, #0]!
+ ldr xzr, [sp, #0]!
+// CHECK: ldr x3, [x4, #0]! // encoding: [0x83,0x0c,0x40,0xf8]
+// CHECK: ldr xzr, [sp, #0]! // encoding: [0xff,0x0f,0x40,0xf8]
+
+ strb w9, [x2, #255]!
+ strb w10, [x3, #1]!
+ strb w10, [x3, #-256]!
+ strh w9, [x2, #255]!
+ strh w9, [x2, #1]!
+ strh w10, [x3, #-256]!
+// CHECK: strb w9, [x2, #255]! // encoding: [0x49,0xfc,0x0f,0x38]
+// CHECK: strb w10, [x3, #1]! // encoding: [0x6a,0x1c,0x00,0x38]
+// CHECK: strb w10, [x3, #-256]! // encoding: [0x6a,0x0c,0x10,0x38]
+// CHECK: strh w9, [x2, #255]! // encoding: [0x49,0xfc,0x0f,0x78]
+// CHECK: strh w9, [x2, #1]! // encoding: [0x49,0x1c,0x00,0x78]
+// CHECK: strh w10, [x3, #-256]! // encoding: [0x6a,0x0c,0x10,0x78]
+
+ str w19, [sp, #255]!
+ str w20, [x30, #1]!
+ str w21, [x12, #-256]!
+ str xzr, [x9, #255]!
+ str x2, [x3, #1]!
+ str x19, [x12, #-256]!
+// CHECK: str w19, [sp, #255]! // encoding: [0xf3,0xff,0x0f,0xb8]
+// CHECK: str w20, [x30, #1]! // encoding: [0xd4,0x1f,0x00,0xb8]
+// CHECK: str w21, [x12, #-256]! // encoding: [0x95,0x0d,0x10,0xb8]
+// CHECK: str xzr, [x9, #255]! // encoding: [0x3f,0xfd,0x0f,0xf8]
+// CHECK: str x2, [x3, #1]! // encoding: [0x62,0x1c,0x00,0xf8]
+// CHECK: str x19, [x12, #-256]! // encoding: [0x93,0x0d,0x10,0xf8]
+
+ ldrb w9, [x2, #255]!
+ ldrb w10, [x3, #1]!
+ ldrb w10, [x3, #-256]!
+ ldrh w9, [x2, #255]!
+ ldrh w9, [x2, #1]!
+ ldrh w10, [x3, #-256]!
+// CHECK: ldrb w9, [x2, #255]! // encoding: [0x49,0xfc,0x4f,0x38]
+// CHECK: ldrb w10, [x3, #1]! // encoding: [0x6a,0x1c,0x40,0x38]
+// CHECK: ldrb w10, [x3, #-256]! // encoding: [0x6a,0x0c,0x50,0x38]
+// CHECK: ldrh w9, [x2, #255]! // encoding: [0x49,0xfc,0x4f,0x78]
+// CHECK: ldrh w9, [x2, #1]! // encoding: [0x49,0x1c,0x40,0x78]
+// CHECK: ldrh w10, [x3, #-256]! // encoding: [0x6a,0x0c,0x50,0x78]
+
+ ldr w19, [sp, #255]!
+ ldr w20, [x30, #1]!
+ ldr w21, [x12, #-256]!
+ ldr xzr, [x9, #255]!
+ ldr x2, [x3, #1]!
+ ldr x19, [x12, #-256]!
+// CHECK: ldr w19, [sp, #255]! // encoding: [0xf3,0xff,0x4f,0xb8]
+// CHECK: ldr w20, [x30, #1]! // encoding: [0xd4,0x1f,0x40,0xb8]
+// CHECK: ldr w21, [x12, #-256]! // encoding: [0x95,0x0d,0x50,0xb8]
+// CHECK: ldr xzr, [x9, #255]! // encoding: [0x3f,0xfd,0x4f,0xf8]
+// CHECK: ldr x2, [x3, #1]! // encoding: [0x62,0x1c,0x40,0xf8]
+// CHECK: ldr x19, [x12, #-256]! // encoding: [0x93,0x0d,0x50,0xf8]
+
+ ldrsb xzr, [x9, #255]!
+ ldrsb x2, [x3, #1]!
+ ldrsb x19, [x12, #-256]!
+ ldrsh xzr, [x9, #255]!
+ ldrsh x2, [x3, #1]!
+ ldrsh x19, [x12, #-256]!
+ ldrsw xzr, [x9, #255]!
+ ldrsw x2, [x3, #1]!
+ ldrsw x19, [x12, #-256]!
+// CHECK: ldrsb xzr, [x9, #255]! // encoding: [0x3f,0xfd,0x8f,0x38]
+// CHECK: ldrsb x2, [x3, #1]! // encoding: [0x62,0x1c,0x80,0x38]
+// CHECK: ldrsb x19, [x12, #-256]! // encoding: [0x93,0x0d,0x90,0x38]
+// CHECK: ldrsh xzr, [x9, #255]! // encoding: [0x3f,0xfd,0x8f,0x78]
+// CHECK: ldrsh x2, [x3, #1]! // encoding: [0x62,0x1c,0x80,0x78]
+// CHECK: ldrsh x19, [x12, #-256]! // encoding: [0x93,0x0d,0x90,0x78]
+// CHECK: ldrsw xzr, [x9, #255]! // encoding: [0x3f,0xfd,0x8f,0xb8]
+// CHECK: ldrsw x2, [x3, #1]! // encoding: [0x62,0x1c,0x80,0xb8]
+// CHECK: ldrsw x19, [x12, #-256]! // encoding: [0x93,0x0d,0x90,0xb8]
+
+ ldrsb wzr, [x9, #255]!
+ ldrsb w2, [x3, #1]!
+ ldrsb w19, [x12, #-256]!
+ ldrsh wzr, [x9, #255]!
+ ldrsh w2, [x3, #1]!
+ ldrsh w19, [x12, #-256]!
+// CHECK: ldrsb wzr, [x9, #255]! // encoding: [0x3f,0xfd,0xcf,0x38]
+// CHECK: ldrsb w2, [x3, #1]! // encoding: [0x62,0x1c,0xc0,0x38]
+// CHECK: ldrsb w19, [x12, #-256]! // encoding: [0x93,0x0d,0xd0,0x38]
+// CHECK: ldrsh wzr, [x9, #255]! // encoding: [0x3f,0xfd,0xcf,0x78]
+// CHECK: ldrsh w2, [x3, #1]! // encoding: [0x62,0x1c,0xc0,0x78]
+// CHECK: ldrsh w19, [x12, #-256]! // encoding: [0x93,0x0d,0xd0,0x78]
+
+ str b0, [x0, #255]!
+ str b3, [x3, #1]!
+ str b5, [sp, #-256]!
+ str h10, [x10, #255]!
+ str h13, [x23, #1]!
+ str h15, [sp, #-256]!
+ str s20, [x20, #255]!
+ str s23, [x23, #1]!
+ str s25, [x0, #-256]!
+ str d20, [x20, #255]!
+ str d23, [x23, #1]!
+ str d25, [x0, #-256]!
+// CHECK: str b0, [x0, #255]! // encoding: [0x00,0xfc,0x0f,0x3c]
+// CHECK: str b3, [x3, #1]! // encoding: [0x63,0x1c,0x00,0x3c]
+// CHECK: str b5, [sp, #-256]! // encoding: [0xe5,0x0f,0x10,0x3c]
+// CHECK: str h10, [x10, #255]! // encoding: [0x4a,0xfd,0x0f,0x7c]
+// CHECK: str h13, [x23, #1]! // encoding: [0xed,0x1e,0x00,0x7c]
+// CHECK: str h15, [sp, #-256]! // encoding: [0xef,0x0f,0x10,0x7c]
+// CHECK: str s20, [x20, #255]! // encoding: [0x94,0xfe,0x0f,0xbc]
+// CHECK: str s23, [x23, #1]! // encoding: [0xf7,0x1e,0x00,0xbc]
+// CHECK: str s25, [x0, #-256]! // encoding: [0x19,0x0c,0x10,0xbc]
+// CHECK: str d20, [x20, #255]! // encoding: [0x94,0xfe,0x0f,0xfc]
+// CHECK: str d23, [x23, #1]! // encoding: [0xf7,0x1e,0x00,0xfc]
+// CHECK: str d25, [x0, #-256]! // encoding: [0x19,0x0c,0x10,0xfc]
+
+ ldr b0, [x0, #255]!
+ ldr b3, [x3, #1]!
+ ldr b5, [sp, #-256]!
+ ldr h10, [x10, #255]!
+ ldr h13, [x23, #1]!
+ ldr h15, [sp, #-256]!
+ ldr s20, [x20, #255]!
+ ldr s23, [x23, #1]!
+ ldr s25, [x0, #-256]!
+ ldr d20, [x20, #255]!
+ ldr d23, [x23, #1]!
+ ldr d25, [x0, #-256]!
+// CHECK: ldr b0, [x0, #255]! // encoding: [0x00,0xfc,0x4f,0x3c]
+// CHECK: ldr b3, [x3, #1]! // encoding: [0x63,0x1c,0x40,0x3c]
+// CHECK: ldr b5, [sp, #-256]! // encoding: [0xe5,0x0f,0x50,0x3c]
+// CHECK: ldr h10, [x10, #255]! // encoding: [0x4a,0xfd,0x4f,0x7c]
+// CHECK: ldr h13, [x23, #1]! // encoding: [0xed,0x1e,0x40,0x7c]
+// CHECK: ldr h15, [sp, #-256]! // encoding: [0xef,0x0f,0x50,0x7c]
+// CHECK: ldr s20, [x20, #255]! // encoding: [0x94,0xfe,0x4f,0xbc]
+// CHECK: ldr s23, [x23, #1]! // encoding: [0xf7,0x1e,0x40,0xbc]
+// CHECK: ldr s25, [x0, #-256]! // encoding: [0x19,0x0c,0x50,0xbc]
+// CHECK: ldr d20, [x20, #255]! // encoding: [0x94,0xfe,0x4f,0xfc]
+// CHECK: ldr d23, [x23, #1]! // encoding: [0xf7,0x1e,0x40,0xfc]
+// CHECK: ldr d25, [x0, #-256]! // encoding: [0x19,0x0c,0x50,0xfc]
+
+ ldr q20, [x1, #255]!
+ ldr q23, [x9, #1]!
+ ldr q25, [x20, #-256]!
+ str q10, [x1, #255]!
+ str q22, [sp, #1]!
+ str q21, [x20, #-256]!
+// CHECK: ldr q20, [x1, #255]! // encoding: [0x34,0xfc,0xcf,0x3c]
+// CHECK: ldr q23, [x9, #1]! // encoding: [0x37,0x1d,0xc0,0x3c]
+// CHECK: ldr q25, [x20, #-256]! // encoding: [0x99,0x0e,0xd0,0x3c]
+// CHECK: str q10, [x1, #255]! // encoding: [0x2a,0xfc,0x8f,0x3c]
+// CHECK: str q22, [sp, #1]! // encoding: [0xf6,0x1f,0x80,0x3c]
+// CHECK: str q21, [x20, #-256]! // encoding: [0x95,0x0e,0x90,0x3c]
+
+//------------------------------------------------------------------------------
+// Load/store (unprivileged)
+//------------------------------------------------------------------------------
+
+ sttrb w9, [sp, #0]
+ sttrh wzr, [x12, #255]
+ sttr w16, [x0, #-256]
+ sttr x28, [x14, #1]
+// CHECK: sttrb w9, [sp] // encoding: [0xe9,0x0b,0x00,0x38]
+// CHECK: sttrh wzr, [x12, #255] // encoding: [0x9f,0xf9,0x0f,0x78]
+// CHECK: sttr w16, [x0, #-256] // encoding: [0x10,0x08,0x10,0xb8]
+// CHECK: sttr x28, [x14, #1] // encoding: [0xdc,0x19,0x00,0xf8]
+
+ ldtrb w1, [x20, #255]
+ ldtrh w20, [x1, #255]
+ ldtr w12, [sp, #255]
+ ldtr xzr, [x12, #255]
+// CHECK: ldtrb w1, [x20, #255] // encoding: [0x81,0xfa,0x4f,0x38]
+// CHECK: ldtrh w20, [x1, #255] // encoding: [0x34,0xf8,0x4f,0x78]
+// CHECK: ldtr w12, [sp, #255] // encoding: [0xec,0xfb,0x4f,0xb8]
+// CHECK: ldtr xzr, [x12, #255] // encoding: [0x9f,0xf9,0x4f,0xf8]
+
+ ldtrsb x9, [x7, #-256]
+ ldtrsh x17, [x19, #-256]
+ ldtrsw x20, [x15, #-256]
+ ldtrsb w19, [x1, #-256]
+ ldtrsh w15, [x21, #-256]
+// CHECK: ldtrsb x9, [x7, #-256] // encoding: [0xe9,0x08,0x90,0x38]
+// CHECK: ldtrsh x17, [x19, #-256] // encoding: [0x71,0x0a,0x90,0x78]
+// CHECK: ldtrsw x20, [x15, #-256] // encoding: [0xf4,0x09,0x90,0xb8]
+// CHECK: ldtrsb w19, [x1, #-256] // encoding: [0x33,0x08,0xd0,0x38]
+// CHECK: ldtrsh w15, [x21, #-256] // encoding: [0xaf,0x0a,0xd0,0x78]
+
+//------------------------------------------------------------------------------
+// Load/store register pair (offset)
+//------------------------------------------------------------------------------
+
+ ldp w3, w5, [sp]
+ stp wzr, w9, [sp, #252]
+ ldp w2, wzr, [sp, #-256]
+ ldp w9, w10, [sp, #4]
+// CHECK: ldp w3, w5, [sp] // encoding: [0xe3,0x17,0x40,0x29]
+// CHECK: stp wzr, w9, [sp, #252] // encoding: [0xff,0xa7,0x1f,0x29]
+// CHECK: ldp w2, wzr, [sp, #-256] // encoding: [0xe2,0x7f,0x60,0x29]
+// CHECK: ldp w9, w10, [sp, #4] // encoding: [0xe9,0xab,0x40,0x29]
+
+ ldpsw x9, x10, [sp, #4]
+ ldpsw x9, x10, [x2, #-256]
+ ldpsw x20, x30, [sp, #252]
+// CHECK: ldpsw x9, x10, [sp, #4] // encoding: [0xe9,0xab,0x40,0x69]
+// CHECK: ldpsw x9, x10, [x2, #-256] // encoding: [0x49,0x28,0x60,0x69]
+// CHECK: ldpsw x20, x30, [sp, #252] // encoding: [0xf4,0xfb,0x5f,0x69]
+
+ ldp x21, x29, [x2, #504]
+ ldp x22, x23, [x3, #-512]
+ ldp x24, x25, [x4, #8]
+// CHECK: ldp x21, x29, [x2, #504] // encoding: [0x55,0xf4,0x5f,0xa9]
+// CHECK: ldp x22, x23, [x3, #-512] // encoding: [0x76,0x5c,0x60,0xa9]
+// CHECK: ldp x24, x25, [x4, #8] // encoding: [0x98,0xe4,0x40,0xa9]
+
+ ldp s29, s28, [sp, #252]
+ stp s27, s26, [sp, #-256]
+ ldp s1, s2, [x3, #44]
+// CHECK: ldp s29, s28, [sp, #252] // encoding: [0xfd,0xf3,0x5f,0x2d]
+// CHECK: stp s27, s26, [sp, #-256] // encoding: [0xfb,0x6b,0x20,0x2d]
+// CHECK: ldp s1, s2, [x3, #44] // encoding: [0x61,0x88,0x45,0x2d]
+
+ stp d3, d5, [x9, #504]
+ stp d7, d11, [x10, #-512]
+ ldp d2, d3, [x30, #-8]
+// CHECK: stp d3, d5, [x9, #504] // encoding: [0x23,0x95,0x1f,0x6d]
+// CHECK: stp d7, d11, [x10, #-512] // encoding: [0x47,0x2d,0x20,0x6d]
+// CHECK: ldp d2, d3, [x30, #-8] // encoding: [0xc2,0x8f,0x7f,0x6d]
+
+ stp q3, q5, [sp]
+ stp q17, q19, [sp, #1008]
+ ldp q23, q29, [x1, #-1024]
+// CHECK: stp q3, q5, [sp] // encoding: [0xe3,0x17,0x00,0xad]
+// CHECK: stp q17, q19, [sp, #1008] // encoding: [0xf1,0xcf,0x1f,0xad]
+// CHECK: ldp q23, q29, [x1, #-1024] // encoding: [0x37,0x74,0x60,0xad]
+
+//------------------------------------------------------------------------------
+// Load/store register pair (post-indexed)
+//------------------------------------------------------------------------------
+
+ ldp w3, w5, [sp], #0
+ stp wzr, w9, [sp], #252
+ ldp w2, wzr, [sp], #-256
+ ldp w9, w10, [sp], #4
+// CHECK: ldp w3, w5, [sp], #0 // encoding: [0xe3,0x17,0xc0,0x28]
+// CHECK: stp wzr, w9, [sp], #252 // encoding: [0xff,0xa7,0x9f,0x28]
+// CHECK: ldp w2, wzr, [sp], #-256 // encoding: [0xe2,0x7f,0xe0,0x28]
+// CHECK: ldp w9, w10, [sp], #4 // encoding: [0xe9,0xab,0xc0,0x28]
+
+ ldpsw x9, x10, [sp], #4
+ ldpsw x9, x10, [x2], #-256
+ ldpsw x20, x30, [sp], #252
+// CHECK: ldpsw x9, x10, [sp], #4 // encoding: [0xe9,0xab,0xc0,0x68]
+// CHECK: ldpsw x9, x10, [x2], #-256 // encoding: [0x49,0x28,0xe0,0x68]
+// CHECK: ldpsw x20, x30, [sp], #252 // encoding: [0xf4,0xfb,0xdf,0x68]
+
+ ldp x21, x29, [x2], #504
+ ldp x22, x23, [x3], #-512
+ ldp x24, x25, [x4], #8
+// CHECK: ldp x21, x29, [x2], #504 // encoding: [0x55,0xf4,0xdf,0xa8]
+// CHECK: ldp x22, x23, [x3], #-512 // encoding: [0x76,0x5c,0xe0,0xa8]
+// CHECK: ldp x24, x25, [x4], #8 // encoding: [0x98,0xe4,0xc0,0xa8]
+
+ ldp s29, s28, [sp], #252
+ stp s27, s26, [sp], #-256
+ ldp s1, s2, [x3], #44
+// CHECK: ldp s29, s28, [sp], #252 // encoding: [0xfd,0xf3,0xdf,0x2c]
+// CHECK: stp s27, s26, [sp], #-256 // encoding: [0xfb,0x6b,0xa0,0x2c]
+// CHECK: ldp s1, s2, [x3], #44 // encoding: [0x61,0x88,0xc5,0x2c]
+
+ stp d3, d5, [x9], #504
+ stp d7, d11, [x10], #-512
+ ldp d2, d3, [x30], #-8
+// CHECK: stp d3, d5, [x9], #504 // encoding: [0x23,0x95,0x9f,0x6c]
+// CHECK: stp d7, d11, [x10], #-512 // encoding: [0x47,0x2d,0xa0,0x6c]
+// CHECK: ldp d2, d3, [x30], #-8 // encoding: [0xc2,0x8f,0xff,0x6c]
+
+ stp q3, q5, [sp], #0
+ stp q17, q19, [sp], #1008
+ ldp q23, q29, [x1], #-1024
+// CHECK: stp q3, q5, [sp], #0 // encoding: [0xe3,0x17,0x80,0xac]
+// CHECK: stp q17, q19, [sp], #1008 // encoding: [0xf1,0xcf,0x9f,0xac]
+// CHECK: ldp q23, q29, [x1], #-1024 // encoding: [0x37,0x74,0xe0,0xac]
+
+//------------------------------------------------------------------------------
+// Load/store register pair (pre-indexed)
+//------------------------------------------------------------------------------
+
+ ldp w3, w5, [sp, #0]!
+ stp wzr, w9, [sp, #252]!
+ ldp w2, wzr, [sp, #-256]!
+ ldp w9, w10, [sp, #4]!
+// CHECK: ldp w3, w5, [sp, #0]! // encoding: [0xe3,0x17,0xc0,0x29]
+// CHECK: stp wzr, w9, [sp, #252]! // encoding: [0xff,0xa7,0x9f,0x29]
+// CHECK: ldp w2, wzr, [sp, #-256]! // encoding: [0xe2,0x7f,0xe0,0x29]
+// CHECK: ldp w9, w10, [sp, #4]! // encoding: [0xe9,0xab,0xc0,0x29]
+
+ ldpsw x9, x10, [sp, #4]!
+ ldpsw x9, x10, [x2, #-256]!
+ ldpsw x20, x30, [sp, #252]!
+// CHECK: ldpsw x9, x10, [sp, #4]! // encoding: [0xe9,0xab,0xc0,0x69]
+// CHECK: ldpsw x9, x10, [x2, #-256]! // encoding: [0x49,0x28,0xe0,0x69]
+// CHECK: ldpsw x20, x30, [sp, #252]! // encoding: [0xf4,0xfb,0xdf,0x69]
+
+ ldp x21, x29, [x2, #504]!
+ ldp x22, x23, [x3, #-512]!
+ ldp x24, x25, [x4, #8]!
+// CHECK: ldp x21, x29, [x2, #504]! // encoding: [0x55,0xf4,0xdf,0xa9]
+// CHECK: ldp x22, x23, [x3, #-512]! // encoding: [0x76,0x5c,0xe0,0xa9]
+// CHECK: ldp x24, x25, [x4, #8]! // encoding: [0x98,0xe4,0xc0,0xa9]
+
+ ldp s29, s28, [sp, #252]!
+ stp s27, s26, [sp, #-256]!
+ ldp s1, s2, [x3, #44]!
+// CHECK: ldp s29, s28, [sp, #252]! // encoding: [0xfd,0xf3,0xdf,0x2d]
+// CHECK: stp s27, s26, [sp, #-256]! // encoding: [0xfb,0x6b,0xa0,0x2d]
+// CHECK: ldp s1, s2, [x3, #44]! // encoding: [0x61,0x88,0xc5,0x2d]
+
+ stp d3, d5, [x9, #504]!
+ stp d7, d11, [x10, #-512]!
+ ldp d2, d3, [x30, #-8]!
+// CHECK: stp d3, d5, [x9, #504]! // encoding: [0x23,0x95,0x9f,0x6d]
+// CHECK: stp d7, d11, [x10, #-512]! // encoding: [0x47,0x2d,0xa0,0x6d]
+// CHECK: ldp d2, d3, [x30, #-8]! // encoding: [0xc2,0x8f,0xff,0x6d]
+
+ stp q3, q5, [sp, #0]!
+ stp q17, q19, [sp, #1008]!
+ ldp q23, q29, [x1, #-1024]!
+// CHECK: stp q3, q5, [sp, #0]! // encoding: [0xe3,0x17,0x80,0xad]
+// CHECK: stp q17, q19, [sp, #1008]! // encoding: [0xf1,0xcf,0x9f,0xad]
+// CHECK: ldp q23, q29, [x1, #-1024]! // encoding: [0x37,0x74,0xe0,0xad]
+
+//------------------------------------------------------------------------------
+// Load/store non-temporal register pair (offset)
+//------------------------------------------------------------------------------
+
+ ldnp w3, w5, [sp]
+ stnp wzr, w9, [sp, #252]
+ ldnp w2, wzr, [sp, #-256]
+ ldnp w9, w10, [sp, #4]
+// CHECK: ldnp w3, w5, [sp] // encoding: [0xe3,0x17,0x40,0x28]
+// CHECK: stnp wzr, w9, [sp, #252] // encoding: [0xff,0xa7,0x1f,0x28]
+// CHECK: ldnp w2, wzr, [sp, #-256] // encoding: [0xe2,0x7f,0x60,0x28]
+// CHECK: ldnp w9, w10, [sp, #4] // encoding: [0xe9,0xab,0x40,0x28]
+
+ ldnp x21, x29, [x2, #504]
+ ldnp x22, x23, [x3, #-512]
+ ldnp x24, x25, [x4, #8]
+// CHECK: ldnp x21, x29, [x2, #504] // encoding: [0x55,0xf4,0x5f,0xa8]
+// CHECK: ldnp x22, x23, [x3, #-512] // encoding: [0x76,0x5c,0x60,0xa8]
+// CHECK: ldnp x24, x25, [x4, #8] // encoding: [0x98,0xe4,0x40,0xa8]
+
+ ldnp s29, s28, [sp, #252]
+ stnp s27, s26, [sp, #-256]
+ ldnp s1, s2, [x3, #44]
+// CHECK: ldnp s29, s28, [sp, #252] // encoding: [0xfd,0xf3,0x5f,0x2c]
+// CHECK: stnp s27, s26, [sp, #-256] // encoding: [0xfb,0x6b,0x20,0x2c]
+// CHECK: ldnp s1, s2, [x3, #44] // encoding: [0x61,0x88,0x45,0x2c]
+
+ stnp d3, d5, [x9, #504]
+ stnp d7, d11, [x10, #-512]
+ ldnp d2, d3, [x30, #-8]
+// CHECK: stnp d3, d5, [x9, #504] // encoding: [0x23,0x95,0x1f,0x6c]
+// CHECK: stnp d7, d11, [x10, #-512] // encoding: [0x47,0x2d,0x20,0x6c]
+// CHECK: ldnp d2, d3, [x30, #-8] // encoding: [0xc2,0x8f,0x7f,0x6c]
+
+ stnp q3, q5, [sp]
+ stnp q17, q19, [sp, #1008]
+ ldnp q23, q29, [x1, #-1024]
+// CHECK: stnp q3, q5, [sp] // encoding: [0xe3,0x17,0x00,0xac]
+// CHECK: stnp q17, q19, [sp, #1008] // encoding: [0xf1,0xcf,0x1f,0xac]
+// CHECK: ldnp q23, q29, [x1, #-1024] // encoding: [0x37,0x74,0x60,0xac]
+
+//------------------------------------------------------------------------------
+// Logical (immediate)
+//------------------------------------------------------------------------------
+ // 32 bit replication-width
+ orr w3, w9, #0xffff0000
+ orr wsp, w10, #0xe00000ff
+ orr w9, w10, #0x000003ff
+// CHECK: orr w3, w9, #0xffff0000 // encoding: [0x23,0x3d,0x10,0x32]
+// CHECK: orr wsp, w10, #0xe00000ff // encoding: [0x5f,0x29,0x03,0x32]
+// CHECK: orr w9, w10, #0x3ff // encoding: [0x49,0x25,0x00,0x32]
+
+ // 16 bit replication width
+ and w14, w15, #0x80008000
+ and w12, w13, #0xffc3ffc3
+ and w11, wzr, #0x00030003
+// CHECK: and w14, w15, #0x80008000 // encoding: [0xee,0x81,0x01,0x12]
+// CHECK: and w12, w13, #0xffc3ffc3 // encoding: [0xac,0xad,0x0a,0x12]
+// CHECK: and w11, wzr, #0x30003 // encoding: [0xeb,0x87,0x00,0x12]
+
+ // 8 bit replication width
+ eor w3, w6, #0xe0e0e0e0
+ eor wsp, wzr, #0x03030303
+ eor w16, w17, #0x81818181
+// CHECK: eor w3, w6, #0xe0e0e0e0 // encoding: [0xc3,0xc8,0x03,0x52]
+// CHECK: eor wsp, wzr, #0x3030303 // encoding: [0xff,0xc7,0x00,0x52]
+// CHECK: eor w16, w17, #0x81818181 // encoding: [0x30,0xc6,0x01,0x52]
+
+ // 4 bit replication width
+ ands wzr, w18, #0xcccccccc
+ ands w19, w20, #0x33333333
+ ands w21, w22, #0x99999999
+// CHECK: ands wzr, w18, #0xcccccccc // encoding: [0x5f,0xe6,0x02,0x72]
+// CHECK: ands w19, w20, #0x33333333 // encoding: [0x93,0xe6,0x00,0x72]
+// CHECK: ands w21, w22, #0x99999999 // encoding: [0xd5,0xe6,0x01,0x72]
+
+ // 2 bit replication width
+ tst w3, #0xaaaaaaaa
+ tst wzr, #0x55555555
+// CHECK: ands wzr, w3, #0xaaaaaaaa // encoding: [0x7f,0xf0,0x01,0x72]
+// CHECK: ands wzr, wzr, #0x55555555 // encoding: [0xff,0xf3,0x00,0x72]
+
+ // 64 bit replication-width
+ eor x3, x5, #0xffffffffc000000
+ and x9, x10, #0x00007fffffffffff
+ orr x11, x12, #0x8000000000000fff
+// CHECK: eor x3, x5, #0xffffffffc000000 // encoding: [0xa3,0x84,0x66,0xd2]
+// CHECK: and x9, x10, #0x7fffffffffff // encoding: [0x49,0xb9,0x40,0x92]
+// CHECK: orr x11, x12, #0x8000000000000fff // encoding: [0x8b,0x31,0x41,0xb2]
+
+ // 32 bit replication-width
+ orr x3, x9, #0xffff0000ffff0000
+ orr sp, x10, #0xe00000ffe00000ff
+ orr x9, x10, #0x000003ff000003ff
+// CHECK: orr x3, x9, #0xffff0000ffff0000 // encoding: [0x23,0x3d,0x10,0xb2]
+// CHECK: orr sp, x10, #0xe00000ffe00000ff // encoding: [0x5f,0x29,0x03,0xb2]
+// CHECK: orr x9, x10, #0x3ff000003ff // encoding: [0x49,0x25,0x00,0xb2]
+
+ // 16 bit replication-width
+ and x14, x15, #0x8000800080008000
+ and x12, x13, #0xffc3ffc3ffc3ffc3
+ and x11, xzr, #0x0003000300030003
+// CHECK: and x14, x15, #0x8000800080008000 // encoding: [0xee,0x81,0x01,0x92]
+// CHECK: and x12, x13, #0xffc3ffc3ffc3ffc3 // encoding: [0xac,0xad,0x0a,0x92]
+// CHECK: and x11, xzr, #0x3000300030003 // encoding: [0xeb,0x87,0x00,0x92]
+
+ // 8 bit replication-width
+ eor x3, x6, #0xe0e0e0e0e0e0e0e0
+ eor sp, xzr, #0x0303030303030303
+ eor x16, x17, #0x8181818181818181
+// CHECK: eor x3, x6, #0xe0e0e0e0e0e0e0e0 // encoding: [0xc3,0xc8,0x03,0xd2]
+// CHECK: eor sp, xzr, #0x303030303030303 // encoding: [0xff,0xc7,0x00,0xd2]
+// CHECK: eor x16, x17, #0x8181818181818181 // encoding: [0x30,0xc6,0x01,0xd2]
+
+ // 4 bit replication-width
+ ands xzr, x18, #0xcccccccccccccccc
+ ands x19, x20, #0x3333333333333333
+ ands x21, x22, #0x9999999999999999
+// CHECK: ands xzr, x18, #0xcccccccccccccccc // encoding: [0x5f,0xe6,0x02,0xf2]
+// CHECK: ands x19, x20, #0x3333333333333333 // encoding: [0x93,0xe6,0x00,0xf2]
+// CHECK: ands x21, x22, #0x9999999999999999 // encoding: [0xd5,0xe6,0x01,0xf2]
+
+ // 2 bit replication-width
+ tst x3, #0xaaaaaaaaaaaaaaaa
+ tst xzr, #0x5555555555555555
+// CHECK: ands xzr, x3, #0xaaaaaaaaaaaaaaaa // encoding: [0x7f,0xf0,0x01,0xf2]
+// CHECK: ands xzr, xzr, #0x5555555555555555 // encoding: [0xff,0xf3,0x00,0xf2]
+
+ mov w3, #0xf000f
+ mov x10, #0xaaaaaaaaaaaaaaaa
+// CHECK: orr w3, wzr, #0xf000f // encoding: [0xe3,0x8f,0x00,0x32]
+// CHECK: orr x10, xzr, #0xaaaaaaaaaaaaaaaa // encoding: [0xea,0xf3,0x01,0xb2]
+
+//------------------------------------------------------------------------------
+// Logical (shifted register)
+//------------------------------------------------------------------------------
+
+ and w12, w23, w21
+ and w16, w15, w1, lsl #1
+ and w9, w4, w10, lsl #31
+ and w3, w30, w11, lsl #0
+ and x3, x5, x7, lsl #63
+// CHECK: and w12, w23, w21 // encoding: [0xec,0x02,0x15,0x0a]
+// CHECK: and w16, w15, w1, lsl #1 // encoding: [0xf0,0x05,0x01,0x0a]
+// CHECK: and w9, w4, w10, lsl #31 // encoding: [0x89,0x7c,0x0a,0x0a]
+// CHECK: and w3, w30, w11 // encoding: [0xc3,0x03,0x0b,0x0a]
+// CHECK: and x3, x5, x7, lsl #63 // encoding: [0xa3,0xfc,0x07,0x8a]
+
+ and x5, x14, x19, asr #4
+ and w3, w17, w19, ror #31
+ and w0, w2, wzr, lsr #17
+ and w3, w30, w11, asr #0
+// CHECK: and x5, x14, x19, asr #4 // encoding: [0xc5,0x11,0x93,0x8a]
+// CHECK: and w3, w17, w19, ror #31 // encoding: [0x23,0x7e,0xd3,0x0a]
+// CHECK: and w0, w2, wzr, lsr #17 // encoding: [0x40,0x44,0x5f,0x0a]
+// CHECK: and w3, w30, w11, asr #0 // encoding: [0xc3,0x03,0x8b,0x0a]
+
+ and xzr, x4, x26, lsl #0
+ and w3, wzr, w20, ror #0
+ and x7, x20, xzr, asr #63
+// CHECK: and xzr, x4, x26 // encoding: [0x9f,0x00,0x1a,0x8a]
+// CHECK: and w3, wzr, w20, ror #0 // encoding: [0xe3,0x03,0xd4,0x0a]
+// CHECK: and x7, x20, xzr, asr #63 // encoding: [0x87,0xfe,0x9f,0x8a]
+
+ bic x13, x20, x14, lsl #47
+ bic w2, w7, w9
+ orr w2, w7, w0, asr #31
+ orr x8, x9, x10, lsl #12
+ orn x3, x5, x7, asr #0
+ orn w2, w5, w29
+// CHECK: bic x13, x20, x14, lsl #47 // encoding: [0x8d,0xbe,0x2e,0x8a]
+// CHECK: bic w2, w7, w9 // encoding: [0xe2,0x00,0x29,0x0a]
+// CHECK: orr w2, w7, w0, asr #31 // encoding: [0xe2,0x7c,0x80,0x2a]
+// CHECK: orr x8, x9, x10, lsl #12 // encoding: [0x28,0x31,0x0a,0xaa]
+// CHECK: orn x3, x5, x7, asr #0 // encoding: [0xa3,0x00,0xa7,0xaa]
+// CHECK: orn w2, w5, w29 // encoding: [0xa2,0x00,0x3d,0x2a]
+
+ ands w7, wzr, w9, lsl #1
+ ands x3, x5, x20, ror #63
+ bics w3, w5, w7, lsl #0
+ bics x3, xzr, x3, lsl #1
+// CHECK: ands w7, wzr, w9, lsl #1 // encoding: [0xe7,0x07,0x09,0x6a]
+// CHECK: ands x3, x5, x20, ror #63 // encoding: [0xa3,0xfc,0xd4,0xea]
+// CHECK: bics w3, w5, w7 // encoding: [0xa3,0x00,0x27,0x6a]
+// CHECK: bics x3, xzr, x3, lsl #1 // encoding: [0xe3,0x07,0x23,0xea]
+
+ tst w3, w7, lsl #31
+ tst x2, x20, asr #0
+// CHECK: tst w3, w7, lsl #31 // encoding: [0x7f,0x7c,0x07,0x6a]
+// CHECK: tst x2, x20, asr #0 // encoding: [0x5f,0x00,0x94,0xea]
+
+ mov x3, x6
+ mov x3, xzr
+ mov wzr, w2
+ mov w3, w5
+// CHECK: mov x3, x6 // encoding: [0xe3,0x03,0x06,0xaa]
+// CHECK: mov x3, xzr // encoding: [0xe3,0x03,0x1f,0xaa]
+// CHECK: mov wzr, w2 // encoding: [0xff,0x03,0x02,0x2a]
+// CHECK: mov w3, w5 // encoding: [0xe3,0x03,0x05,0x2a]
+
+//------------------------------------------------------------------------------
+// Move wide (immediate)
+//------------------------------------------------------------------------------
+
+ movz w1, #65535, lsl #0
+ movz w2, #0, lsl #16
+ movn w2, #1234, lsl #0
+// CHECK: movz w1, #65535 // encoding: [0xe1,0xff,0x9f,0x52]
+// CHECK: movz w2, #0, lsl #16 // encoding: [0x02,0x00,0xa0,0x52]
+// CHECK: movn w2, #1234 // encoding: [0x42,0x9a,0x80,0x12]
+
+ movz x2, #1234, lsl #32
+ movk xzr, #4321, lsl #48
+// CHECK: movz x2, #1234, lsl #32 // encoding: [0x42,0x9a,0xc0,0xd2]
+// CHECK: movk xzr, #4321, lsl #48 // encoding: [0x3f,0x1c,0xe2,0xf2]
+
+ movz x2, #:abs_g0:sym
+ movk w3, #:abs_g0_nc:sym
+// CHECK: movz x2, #:abs_g0:sym // encoding: [0x02'A',A,0x80'A',0xd2'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g0:sym, kind: fixup_a64_movw_uabs_g0
+// CHECK: movk w3, #:abs_g0_nc:sym // encoding: [0x03'A',A,0x80'A',0x72'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g0_nc:sym, kind: fixup_a64_movw_uabs_g0_nc
+
+ movz x4, #:abs_g1:sym
+ movk w5, #:abs_g1_nc:sym
+// CHECK: movz x4, #:abs_g1:sym // encoding: [0x04'A',A,0xa0'A',0xd2'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g1:sym, kind: fixup_a64_movw_uabs_g1
+// CHECK: movk w5, #:abs_g1_nc:sym // encoding: [0x05'A',A,0xa0'A',0x72'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g1_nc:sym, kind: fixup_a64_movw_uabs_g1_nc
+
+ movz x6, #:abs_g2:sym
+ movk x7, #:abs_g2_nc:sym
+// CHECK: movz x6, #:abs_g2:sym // encoding: [0x06'A',A,0xc0'A',0xd2'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g2:sym, kind: fixup_a64_movw_uabs_g2
+// CHECK: movk x7, #:abs_g2_nc:sym // encoding: [0x07'A',A,0xc0'A',0xf2'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g2_nc:sym, kind: fixup_a64_movw_uabs_g2_nc
+
+ movz x8, #:abs_g3:sym
+ movk x9, #:abs_g3:sym
+// CHECK: movz x8, #:abs_g3:sym // encoding: [0x08'A',A,0xe0'A',0xd2'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g3:sym, kind: fixup_a64_movw_uabs_g3
+// CHECK: movk x9, #:abs_g3:sym // encoding: [0x09'A',A,0xe0'A',0xf2'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g3:sym, kind: fixup_a64_movw_uabs_g3
+
+ movn x30, #:abs_g0_s:sym
+ movz x19, #:abs_g0_s:sym
+ movn w10, #:abs_g0_s:sym
+ movz w25, #:abs_g0_s:sym
+// CHECK: movn x30, #:abs_g0_s:sym // encoding: [0x1e'A',A,0x80'A',0x92'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g0_s:sym, kind: fixup_a64_movw_sabs_g0
+// CHECK: movz x19, #:abs_g0_s:sym // encoding: [0x13'A',A,0x80'A',0x92'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g0_s:sym, kind: fixup_a64_movw_sabs_g0
+// CHECK: movn w10, #:abs_g0_s:sym // encoding: [0x0a'A',A,0x80'A',0x12'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g0_s:sym, kind: fixup_a64_movw_sabs_g0
+// CHECK: movz w25, #:abs_g0_s:sym // encoding: [0x19'A',A,0x80'A',0x12'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g0_s:sym, kind: fixup_a64_movw_sabs_g0
+
+ movn x30, #:abs_g1_s:sym
+ movz x19, #:abs_g1_s:sym
+ movn w10, #:abs_g1_s:sym
+ movz w25, #:abs_g1_s:sym
+// CHECK: movn x30, #:abs_g1_s:sym // encoding: [0x1e'A',A,0xa0'A',0x92'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g1_s:sym, kind: fixup_a64_movw_sabs_g1
+// CHECK: movz x19, #:abs_g1_s:sym // encoding: [0x13'A',A,0xa0'A',0x92'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g1_s:sym, kind: fixup_a64_movw_sabs_g1
+// CHECK: movn w10, #:abs_g1_s:sym // encoding: [0x0a'A',A,0xa0'A',0x12'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g1_s:sym, kind: fixup_a64_movw_sabs_g1
+// CHECK: movz w25, #:abs_g1_s:sym // encoding: [0x19'A',A,0xa0'A',0x12'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g1_s:sym, kind: fixup_a64_movw_sabs_g1
+
+ movn x30, #:abs_g2_s:sym
+ movz x19, #:abs_g2_s:sym
+// CHECK: movn x30, #:abs_g2_s:sym // encoding: [0x1e'A',A,0xc0'A',0x92'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g2_s:sym, kind: fixup_a64_movw_sabs_g2
+// CHECK: movz x19, #:abs_g2_s:sym // encoding: [0x13'A',A,0xc0'A',0x92'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g2_s:sym, kind: fixup_a64_movw_sabs_g2
+
+//------------------------------------------------------------------------------
+// PC-relative addressing
+//------------------------------------------------------------------------------
+
+ adr x2, loc
+ adr xzr, loc
+ // CHECK: adr x2, loc // encoding: [0x02'A',A,A,0x10'A']
+ // CHECK: // fixup A - offset: 0, value: loc, kind: fixup_a64_adr_prel
+ // CHECK: adr xzr, loc // encoding: [0x1f'A',A,A,0x10'A']
+ // CHECK: // fixup A - offset: 0, value: loc, kind: fixup_a64_adr_prel
+
+ adrp x29, loc
+ // CHECK: adrp x29, loc // encoding: [0x1d'A',A,A,0x90'A']
+ // CHECK: // fixup A - offset: 0, value: loc, kind: fixup_a64_adr_prel_page
+
+ adrp x30, #4096
+ adr x20, #0
+ adr x9, #-1
+ adr x5, #1048575
+// CHECK: adrp x30, #4096 // encoding: [0x1e,0x00,0x00,0xb0]
+// CHECK: adr x20, #0 // encoding: [0x14,0x00,0x00,0x10]
+// CHECK: adr x9, #-1 // encoding: [0xe9,0xff,0xff,0x70]
+// CHECK: adr x5, #1048575 // encoding: [0xe5,0xff,0x7f,0x70]
+
+ adr x9, #1048575
+ adr x2, #-1048576
+ adrp x9, #4294963200
+ adrp x20, #-4294967296
+// CHECK: adr x9, #1048575 // encoding: [0xe9,0xff,0x7f,0x70]
+// CHECK: adr x2, #-1048576 // encoding: [0x02,0x00,0x80,0x10]
+// CHECK: adrp x9, #4294963200 // encoding: [0xe9,0xff,0x7f,0xf0]
+// CHECK: adrp x20, #-4294967296 // encoding: [0x14,0x00,0x80,0x90]
+
+//------------------------------------------------------------------------------
+// System
+//------------------------------------------------------------------------------
+
+ hint #0
+ hint #127
+// CHECK: nop // encoding: [0x1f,0x20,0x03,0xd5]
+// CHECK: hint #127 // encoding: [0xff,0x2f,0x03,0xd5]
+
+ nop
+ yield
+ wfe
+ wfi
+ sev
+ sevl
+// CHECK: nop // encoding: [0x1f,0x20,0x03,0xd5]
+// CHECK: yield // encoding: [0x3f,0x20,0x03,0xd5]
+// CHECK: wfe // encoding: [0x5f,0x20,0x03,0xd5]
+// CHECK: wfi // encoding: [0x7f,0x20,0x03,0xd5]
+// CHECK: sev // encoding: [0x9f,0x20,0x03,0xd5]
+// CHECK: sevl // encoding: [0xbf,0x20,0x03,0xd5]
+
+ clrex
+ clrex #0
+ clrex #7
+ clrex #15
+// CHECK: clrex // encoding: [0x5f,0x3f,0x03,0xd5]
+// CHECK: clrex #0 // encoding: [0x5f,0x30,0x03,0xd5]
+// CHECK: clrex #7 // encoding: [0x5f,0x37,0x03,0xd5]
+// CHECK: clrex // encoding: [0x5f,0x3f,0x03,0xd5]
+
+ dsb #0
+ dsb #12
+ dsb #15
+ dsb oshld
+ dsb oshst
+ dsb osh
+ dsb nshld
+ dsb nshst
+ dsb nsh
+ dsb ishld
+ dsb ishst
+ dsb ish
+ dsb ld
+ dsb st
+ dsb sy
+// CHECK: dsb #0 // encoding: [0x9f,0x30,0x03,0xd5]
+// CHECK: dsb #12 // encoding: [0x9f,0x3c,0x03,0xd5]
+// CHECK: dsb sy // encoding: [0x9f,0x3f,0x03,0xd5]
+// CHECK: dsb oshld // encoding: [0x9f,0x31,0x03,0xd5]
+// CHECK: dsb oshst // encoding: [0x9f,0x32,0x03,0xd5]
+// CHECK: dsb osh // encoding: [0x9f,0x33,0x03,0xd5]
+// CHECK: dsb nshld // encoding: [0x9f,0x35,0x03,0xd5]
+// CHECK: dsb nshst // encoding: [0x9f,0x36,0x03,0xd5]
+// CHECK: dsb nsh // encoding: [0x9f,0x37,0x03,0xd5]
+// CHECK: dsb ishld // encoding: [0x9f,0x39,0x03,0xd5]
+// CHECK: dsb ishst // encoding: [0x9f,0x3a,0x03,0xd5]
+// CHECK: dsb ish // encoding: [0x9f,0x3b,0x03,0xd5]
+// CHECK: dsb ld // encoding: [0x9f,0x3d,0x03,0xd5]
+// CHECK: dsb st // encoding: [0x9f,0x3e,0x03,0xd5]
+// CHECK: dsb sy // encoding: [0x9f,0x3f,0x03,0xd5]
+
+ dmb #0
+ dmb #12
+ dmb #15
+ dmb oshld
+ dmb oshst
+ dmb osh
+ dmb nshld
+ dmb nshst
+ dmb nsh
+ dmb ishld
+ dmb ishst
+ dmb ish
+ dmb ld
+ dmb st
+ dmb sy
+// CHECK: dmb #0 // encoding: [0xbf,0x30,0x03,0xd5]
+// CHECK: dmb #12 // encoding: [0xbf,0x3c,0x03,0xd5]
+// CHECK: dmb sy // encoding: [0xbf,0x3f,0x03,0xd5]
+// CHECK: dmb oshld // encoding: [0xbf,0x31,0x03,0xd5]
+// CHECK: dmb oshst // encoding: [0xbf,0x32,0x03,0xd5]
+// CHECK: dmb osh // encoding: [0xbf,0x33,0x03,0xd5]
+// CHECK: dmb nshld // encoding: [0xbf,0x35,0x03,0xd5]
+// CHECK: dmb nshst // encoding: [0xbf,0x36,0x03,0xd5]
+// CHECK: dmb nsh // encoding: [0xbf,0x37,0x03,0xd5]
+// CHECK: dmb ishld // encoding: [0xbf,0x39,0x03,0xd5]
+// CHECK: dmb ishst // encoding: [0xbf,0x3a,0x03,0xd5]
+// CHECK: dmb ish // encoding: [0xbf,0x3b,0x03,0xd5]
+// CHECK: dmb ld // encoding: [0xbf,0x3d,0x03,0xd5]
+// CHECK: dmb st // encoding: [0xbf,0x3e,0x03,0xd5]
+// CHECK: dmb sy // encoding: [0xbf,0x3f,0x03,0xd5]
+
+ isb sy
+ isb
+ isb #12
+// CHECK: isb // encoding: [0xdf,0x3f,0x03,0xd5]
+// CHECK: isb // encoding: [0xdf,0x3f,0x03,0xd5]
+// CHECK: isb #12 // encoding: [0xdf,0x3c,0x03,0xd5]
+
+
+ msr spsel, #0
+ msr daifset, #15
+ msr daifclr, #12
+// CHECK: msr spsel, #0 // encoding: [0xbf,0x40,0x00,0xd5]
+// CHECK: msr daifset, #15 // encoding: [0xdf,0x4f,0x03,0xd5]
+// CHECK: msr daifclr, #12 // encoding: [0xff,0x4c,0x03,0xd5]
+
+ sys #7, c5, c9, #7, x5
+ sys #0, c15, c15, #2
+// CHECK: sys #7, c5, c9, #7, x5 // encoding: [0xe5,0x59,0x0f,0xd5]
+// CHECK: sys #0, c15, c15, #2, xzr // encoding: [0x5f,0xff,0x08,0xd5]
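+// When the Xt operand of sys is omitted, xzr is implied, as the second CHECK line shows.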
+
+ sysl x9, #7, c5, c9, #7
+ sysl x1, #0, c15, c15, #2
+// CHECK: sysl x9, #7, c5, c9, #7 // encoding: [0xe9,0x59,0x2f,0xd5]
+// CHECK: sysl x1, #0, c15, c15, #2 // encoding: [0x41,0xff,0x28,0xd5]
+
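+// The ic, dc, at and tlbi operations below are assembler aliases of the generic sys
+// instruction, restricted to the architecturally defined maintenance operations.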
+ ic ialluis
+ ic iallu
+ ic ivau, x9
+// CHECK: ic ialluis // encoding: [0x1f,0x71,0x08,0xd5]
+// CHECK: ic iallu // encoding: [0x1f,0x75,0x08,0xd5]
+// CHECK: ic ivau, x9 // encoding: [0x29,0x75,0x0b,0xd5]
+
+ dc zva, x12
+ dc ivac, xzr
+ dc isw, x2
+ dc cvac, x9
+ dc csw, x10
+ dc cvau, x0
+ dc civac, x3
+ dc cisw, x30
+// CHECK: dc zva, x12 // encoding: [0x2c,0x74,0x0b,0xd5]
+// CHECK: dc ivac, xzr // encoding: [0x3f,0x76,0x08,0xd5]
+// CHECK: dc isw, x2 // encoding: [0x42,0x76,0x08,0xd5]
+// CHECK: dc cvac, x9 // encoding: [0x29,0x7a,0x0b,0xd5]
+// CHECK: dc csw, x10 // encoding: [0x4a,0x7a,0x08,0xd5]
+// CHECK: dc cvau, x0 // encoding: [0x20,0x7b,0x0b,0xd5]
+// CHECK: dc civac, x3 // encoding: [0x23,0x7e,0x0b,0xd5]
+// CHECK: dc cisw, x30 // encoding: [0x5e,0x7e,0x08,0xd5]
+
+ at S1E1R, x19
+ at S1E2R, x19
+ at S1E3R, x19
+ at S1E1W, x19
+ at S1E2W, x19
+ at S1E3W, x19
+ at S1E0R, x19
+ at S1E0W, x19
+ at S12E1R, x20
+ at S12E1W, x20
+ at S12E0R, x20
+ at S12E0W, x20
+// CHECK: at s1e1r, x19 // encoding: [0x13,0x78,0x08,0xd5]
+// CHECK: at s1e2r, x19 // encoding: [0x13,0x78,0x0c,0xd5]
+// CHECK: at s1e3r, x19 // encoding: [0x13,0x78,0x0e,0xd5]
+// CHECK: at s1e1w, x19 // encoding: [0x33,0x78,0x08,0xd5]
+// CHECK: at s1e2w, x19 // encoding: [0x33,0x78,0x0c,0xd5]
+// CHECK: at s1e3w, x19 // encoding: [0x33,0x78,0x0e,0xd5]
+// CHECK: at s1e0r, x19 // encoding: [0x53,0x78,0x08,0xd5]
+// CHECK: at s1e0w, x19 // encoding: [0x73,0x78,0x08,0xd5]
+// CHECK: at s12e1r, x20 // encoding: [0x94,0x78,0x0c,0xd5]
+// CHECK: at s12e1w, x20 // encoding: [0xb4,0x78,0x0c,0xd5]
+// CHECK: at s12e0r, x20 // encoding: [0xd4,0x78,0x0c,0xd5]
+// CHECK: at s12e0w, x20 // encoding: [0xf4,0x78,0x0c,0xd5]
+
+ tlbi IPAS2E1IS, x4
+ tlbi IPAS2LE1IS, x9
+ tlbi VMALLE1IS
+ tlbi ALLE2IS
+ tlbi ALLE3IS
+ tlbi VAE1IS, x1
+ tlbi VAE2IS, x2
+ tlbi VAE3IS, x3
+ tlbi ASIDE1IS, x5
+ tlbi VAAE1IS, x9
+ tlbi ALLE1IS
+ tlbi VALE1IS, x10
+ tlbi VALE2IS, x11
+ tlbi VALE3IS, x13
+ tlbi VMALLS12E1IS
+ tlbi VAALE1IS, x14
+ tlbi IPAS2E1, x15
+ tlbi IPAS2LE1, x16
+ tlbi VMALLE1
+ tlbi ALLE2
+ tlbi ALLE3
+ tlbi VAE1, x17
+ tlbi VAE2, x18
+ tlbi VAE3, x19
+ tlbi ASIDE1, x20
+ tlbi VAAE1, x21
+ tlbi ALLE1
+ tlbi VALE1, x22
+ tlbi VALE2, x23
+ tlbi VALE3, x24
+ tlbi VMALLS12E1
+ tlbi VAALE1, x25
+// CHECK: tlbi ipas2e1is, x4 // encoding: [0x24,0x80,0x0c,0xd5]
+// CHECK: tlbi ipas2le1is, x9 // encoding: [0xa9,0x80,0x0c,0xd5]
+// CHECK: tlbi vmalle1is // encoding: [0x1f,0x83,0x08,0xd5]
+// CHECK: tlbi alle2is // encoding: [0x1f,0x83,0x0c,0xd5]
+// CHECK: tlbi alle3is // encoding: [0x1f,0x83,0x0e,0xd5]
+// CHECK: tlbi vae1is, x1 // encoding: [0x21,0x83,0x08,0xd5]
+// CHECK: tlbi vae2is, x2 // encoding: [0x22,0x83,0x0c,0xd5]
+// CHECK: tlbi vae3is, x3 // encoding: [0x23,0x83,0x0e,0xd5]
+// CHECK: tlbi aside1is, x5 // encoding: [0x45,0x83,0x08,0xd5]
+// CHECK: tlbi vaae1is, x9 // encoding: [0x69,0x83,0x08,0xd5]
+// CHECK: tlbi alle1is // encoding: [0x9f,0x83,0x0c,0xd5]
+// CHECK: tlbi vale1is, x10 // encoding: [0xaa,0x83,0x08,0xd5]
+// CHECK: tlbi vale2is, x11 // encoding: [0xab,0x83,0x0c,0xd5]
+// CHECK: tlbi vale3is, x13 // encoding: [0xad,0x83,0x0e,0xd5]
+// CHECK: tlbi vmalls12e1is // encoding: [0xdf,0x83,0x0c,0xd5]
+// CHECK: tlbi vaale1is, x14 // encoding: [0xee,0x83,0x08,0xd5]
+// CHECK: tlbi ipas2e1, x15 // encoding: [0x2f,0x84,0x0c,0xd5]
+// CHECK: tlbi ipas2le1, x16 // encoding: [0xb0,0x84,0x0c,0xd5]
+// CHECK: tlbi vmalle1 // encoding: [0x1f,0x87,0x08,0xd5]
+// CHECK: tlbi alle2 // encoding: [0x1f,0x87,0x0c,0xd5]
+// CHECK: tlbi alle3 // encoding: [0x1f,0x87,0x0e,0xd5]
+// CHECK: tlbi vae1, x17 // encoding: [0x31,0x87,0x08,0xd5]
+// CHECK: tlbi vae2, x18 // encoding: [0x32,0x87,0x0c,0xd5]
+// CHECK: tlbi vae3, x19 // encoding: [0x33,0x87,0x0e,0xd5]
+// CHECK: tlbi aside1, x20 // encoding: [0x54,0x87,0x08,0xd5]
+// CHECK: tlbi vaae1, x21 // encoding: [0x75,0x87,0x08,0xd5]
+// CHECK: tlbi alle1 // encoding: [0x9f,0x87,0x0c,0xd5]
+// CHECK: tlbi vale1, x22 // encoding: [0xb6,0x87,0x08,0xd5]
+// CHECK: tlbi vale2, x23 // encoding: [0xb7,0x87,0x0c,0xd5]
+// CHECK: tlbi vale3, x24 // encoding: [0xb8,0x87,0x0e,0xd5]
+// CHECK: tlbi vmalls12e1 // encoding: [0xdf,0x87,0x0c,0xd5]
+// CHECK: tlbi vaale1, x25 // encoding: [0xf9,0x87,0x08,0xd5]
+
+ msr TEECR32_EL1, x12
+ msr OSDTRRX_EL1, x12
+ msr MDCCINT_EL1, x12
+ msr MDSCR_EL1, x12
+ msr OSDTRTX_EL1, x12
+ msr DBGDTR_EL0, x12
+ msr DBGDTRTX_EL0, x12
+ msr OSECCR_EL1, x12
+ msr DBGVCR32_EL2, x12
+ msr DBGBVR0_EL1, x12
+ msr DBGBVR1_EL1, x12
+ msr DBGBVR2_EL1, x12
+ msr DBGBVR3_EL1, x12
+ msr DBGBVR4_EL1, x12
+ msr DBGBVR5_EL1, x12
+ msr DBGBVR6_EL1, x12
+ msr DBGBVR7_EL1, x12
+ msr DBGBVR8_EL1, x12
+ msr DBGBVR9_EL1, x12
+ msr DBGBVR10_EL1, x12
+ msr DBGBVR11_EL1, x12
+ msr DBGBVR12_EL1, x12
+ msr DBGBVR13_EL1, x12
+ msr DBGBVR14_EL1, x12
+ msr DBGBVR15_EL1, x12
+ msr DBGBCR0_EL1, x12
+ msr DBGBCR1_EL1, x12
+ msr DBGBCR2_EL1, x12
+ msr DBGBCR3_EL1, x12
+ msr DBGBCR4_EL1, x12
+ msr DBGBCR5_EL1, x12
+ msr DBGBCR6_EL1, x12
+ msr DBGBCR7_EL1, x12
+ msr DBGBCR8_EL1, x12
+ msr DBGBCR9_EL1, x12
+ msr DBGBCR10_EL1, x12
+ msr DBGBCR11_EL1, x12
+ msr DBGBCR12_EL1, x12
+ msr DBGBCR13_EL1, x12
+ msr DBGBCR14_EL1, x12
+ msr DBGBCR15_EL1, x12
+ msr DBGWVR0_EL1, x12
+ msr DBGWVR1_EL1, x12
+ msr DBGWVR2_EL1, x12
+ msr DBGWVR3_EL1, x12
+ msr DBGWVR4_EL1, x12
+ msr DBGWVR5_EL1, x12
+ msr DBGWVR6_EL1, x12
+ msr DBGWVR7_EL1, x12
+ msr DBGWVR8_EL1, x12
+ msr DBGWVR9_EL1, x12
+ msr DBGWVR10_EL1, x12
+ msr DBGWVR11_EL1, x12
+ msr DBGWVR12_EL1, x12
+ msr DBGWVR13_EL1, x12
+ msr DBGWVR14_EL1, x12
+ msr DBGWVR15_EL1, x12
+ msr DBGWCR0_EL1, x12
+ msr DBGWCR1_EL1, x12
+ msr DBGWCR2_EL1, x12
+ msr DBGWCR3_EL1, x12
+ msr DBGWCR4_EL1, x12
+ msr DBGWCR5_EL1, x12
+ msr DBGWCR6_EL1, x12
+ msr DBGWCR7_EL1, x12
+ msr DBGWCR8_EL1, x12
+ msr DBGWCR9_EL1, x12
+ msr DBGWCR10_EL1, x12
+ msr DBGWCR11_EL1, x12
+ msr DBGWCR12_EL1, x12
+ msr DBGWCR13_EL1, x12
+ msr DBGWCR14_EL1, x12
+ msr DBGWCR15_EL1, x12
+ msr TEEHBR32_EL1, x12
+ msr OSLAR_EL1, x12
+ msr OSDLR_EL1, x12
+ msr DBGPRCR_EL1, x12
+ msr DBGCLAIMSET_EL1, x12
+ msr DBGCLAIMCLR_EL1, x12
+ msr CSSELR_EL1, x12
+ msr VPIDR_EL2, x12
+ msr VMPIDR_EL2, x12
+ msr SCTLR_EL1, x12
+ msr SCTLR_EL2, x12
+ msr SCTLR_EL3, x12
+ msr ACTLR_EL1, x12
+ msr ACTLR_EL2, x12
+ msr ACTLR_EL3, x12
+ msr CPACR_EL1, x12
+ msr HCR_EL2, x12
+ msr SCR_EL3, x12
+ msr MDCR_EL2, x12
+ msr SDER32_EL3, x12
+ msr CPTR_EL2, x12
+ msr CPTR_EL3, x12
+ msr HSTR_EL2, x12
+ msr HACR_EL2, x12
+ msr MDCR_EL3, x12
+ msr TTBR0_EL1, x12
+ msr TTBR0_EL2, x12
+ msr TTBR0_EL3, x12
+ msr TTBR1_EL1, x12
+ msr TCR_EL1, x12
+ msr TCR_EL2, x12
+ msr TCR_EL3, x12
+ msr VTTBR_EL2, x12
+ msr VTCR_EL2, x12
+ msr DACR32_EL2, x12
+ msr SPSR_EL1, x12
+ msr SPSR_EL2, x12
+ msr SPSR_EL3, x12
+ msr ELR_EL1, x12
+ msr ELR_EL2, x12
+ msr ELR_EL3, x12
+ msr SP_EL0, x12
+ msr SP_EL1, x12
+ msr SP_EL2, x12
+ msr SPSel, x12
+ msr NZCV, x12
+ msr DAIF, x12
+ msr CurrentEL, x12
+ msr SPSR_irq, x12
+ msr SPSR_abt, x12
+ msr SPSR_und, x12
+ msr SPSR_fiq, x12
+ msr FPCR, x12
+ msr FPSR, x12
+ msr DSPSR_EL0, x12
+ msr DLR_EL0, x12
+ msr IFSR32_EL2, x12
+ msr AFSR0_EL1, x12
+ msr AFSR0_EL2, x12
+ msr AFSR0_EL3, x12
+ msr AFSR1_EL1, x12
+ msr AFSR1_EL2, x12
+ msr AFSR1_EL3, x12
+ msr ESR_EL1, x12
+ msr ESR_EL2, x12
+ msr ESR_EL3, x12
+ msr FPEXC32_EL2, x12
+ msr FAR_EL1, x12
+ msr FAR_EL2, x12
+ msr FAR_EL3, x12
+ msr HPFAR_EL2, x12
+ msr PAR_EL1, x12
+ msr PMCR_EL0, x12
+ msr PMCNTENSET_EL0, x12
+ msr PMCNTENCLR_EL0, x12
+ msr PMOVSCLR_EL0, x12
+ msr PMSELR_EL0, x12
+ msr PMCCNTR_EL0, x12
+ msr PMXEVTYPER_EL0, x12
+ msr PMXEVCNTR_EL0, x12
+ msr PMUSERENR_EL0, x12
+ msr PMINTENSET_EL1, x12
+ msr PMINTENCLR_EL1, x12
+ msr PMOVSSET_EL0, x12
+ msr MAIR_EL1, x12
+ msr MAIR_EL2, x12
+ msr MAIR_EL3, x12
+ msr AMAIR_EL1, x12
+ msr AMAIR_EL2, x12
+ msr AMAIR_EL3, x12
+ msr VBAR_EL1, x12
+ msr VBAR_EL2, x12
+ msr VBAR_EL3, x12
+ msr RMR_EL1, x12
+ msr RMR_EL2, x12
+ msr RMR_EL3, x12
+ msr CONTEXTIDR_EL1, x12
+ msr TPIDR_EL0, x12
+ msr TPIDR_EL2, x12
+ msr TPIDR_EL3, x12
+ msr TPIDRRO_EL0, x12
+ msr TPIDR_EL1, x12
+ msr CNTFRQ_EL0, x12
+ msr CNTVOFF_EL2, x12
+ msr CNTKCTL_EL1, x12
+ msr CNTHCTL_EL2, x12
+ msr CNTP_TVAL_EL0, x12
+ msr CNTHP_TVAL_EL2, x12
+ msr CNTPS_TVAL_EL1, x12
+ msr CNTP_CTL_EL0, x12
+ msr CNTHP_CTL_EL2, x12
+ msr CNTPS_CTL_EL1, x12
+ msr CNTP_CVAL_EL0, x12
+ msr CNTHP_CVAL_EL2, x12
+ msr CNTPS_CVAL_EL1, x12
+ msr CNTV_TVAL_EL0, x12
+ msr CNTV_CTL_EL0, x12
+ msr CNTV_CVAL_EL0, x12
+ msr PMEVCNTR0_EL0, x12
+ msr PMEVCNTR1_EL0, x12
+ msr PMEVCNTR2_EL0, x12
+ msr PMEVCNTR3_EL0, x12
+ msr PMEVCNTR4_EL0, x12
+ msr PMEVCNTR5_EL0, x12
+ msr PMEVCNTR6_EL0, x12
+ msr PMEVCNTR7_EL0, x12
+ msr PMEVCNTR8_EL0, x12
+ msr PMEVCNTR9_EL0, x12
+ msr PMEVCNTR10_EL0, x12
+ msr PMEVCNTR11_EL0, x12
+ msr PMEVCNTR12_EL0, x12
+ msr PMEVCNTR13_EL0, x12
+ msr PMEVCNTR14_EL0, x12
+ msr PMEVCNTR15_EL0, x12
+ msr PMEVCNTR16_EL0, x12
+ msr PMEVCNTR17_EL0, x12
+ msr PMEVCNTR18_EL0, x12
+ msr PMEVCNTR19_EL0, x12
+ msr PMEVCNTR20_EL0, x12
+ msr PMEVCNTR21_EL0, x12
+ msr PMEVCNTR22_EL0, x12
+ msr PMEVCNTR23_EL0, x12
+ msr PMEVCNTR24_EL0, x12
+ msr PMEVCNTR25_EL0, x12
+ msr PMEVCNTR26_EL0, x12
+ msr PMEVCNTR27_EL0, x12
+ msr PMEVCNTR28_EL0, x12
+ msr PMEVCNTR29_EL0, x12
+ msr PMEVCNTR30_EL0, x12
+ msr PMCCFILTR_EL0, x12
+ msr PMEVTYPER0_EL0, x12
+ msr PMEVTYPER1_EL0, x12
+ msr PMEVTYPER2_EL0, x12
+ msr PMEVTYPER3_EL0, x12
+ msr PMEVTYPER4_EL0, x12
+ msr PMEVTYPER5_EL0, x12
+ msr PMEVTYPER6_EL0, x12
+ msr PMEVTYPER7_EL0, x12
+ msr PMEVTYPER8_EL0, x12
+ msr PMEVTYPER9_EL0, x12
+ msr PMEVTYPER10_EL0, x12
+ msr PMEVTYPER11_EL0, x12
+ msr PMEVTYPER12_EL0, x12
+ msr PMEVTYPER13_EL0, x12
+ msr PMEVTYPER14_EL0, x12
+ msr PMEVTYPER15_EL0, x12
+ msr PMEVTYPER16_EL0, x12
+ msr PMEVTYPER17_EL0, x12
+ msr PMEVTYPER18_EL0, x12
+ msr PMEVTYPER19_EL0, x12
+ msr PMEVTYPER20_EL0, x12
+ msr PMEVTYPER21_EL0, x12
+ msr PMEVTYPER22_EL0, x12
+ msr PMEVTYPER23_EL0, x12
+ msr PMEVTYPER24_EL0, x12
+ msr PMEVTYPER25_EL0, x12
+ msr PMEVTYPER26_EL0, x12
+ msr PMEVTYPER27_EL0, x12
+ msr PMEVTYPER28_EL0, x12
+ msr PMEVTYPER29_EL0, x12
+ msr PMEVTYPER30_EL0, x12
+// CHECK: msr teecr32_el1, x12 // encoding: [0x0c,0x00,0x12,0xd5]
+// CHECK: msr osdtrrx_el1, x12 // encoding: [0x4c,0x00,0x10,0xd5]
+// CHECK: msr mdccint_el1, x12 // encoding: [0x0c,0x02,0x10,0xd5]
+// CHECK: msr mdscr_el1, x12 // encoding: [0x4c,0x02,0x10,0xd5]
+// CHECK: msr osdtrtx_el1, x12 // encoding: [0x4c,0x03,0x10,0xd5]
+// CHECK: msr dbgdtr_el0, x12 // encoding: [0x0c,0x04,0x13,0xd5]
+// CHECK: msr dbgdtrtx_el0, x12 // encoding: [0x0c,0x05,0x13,0xd5]
+// CHECK: msr oseccr_el1, x12 // encoding: [0x4c,0x06,0x10,0xd5]
+// CHECK: msr dbgvcr32_el2, x12 // encoding: [0x0c,0x07,0x14,0xd5]
+// CHECK: msr dbgbvr0_el1, x12 // encoding: [0x8c,0x00,0x10,0xd5]
+// CHECK: msr dbgbvr1_el1, x12 // encoding: [0x8c,0x01,0x10,0xd5]
+// CHECK: msr dbgbvr2_el1, x12 // encoding: [0x8c,0x02,0x10,0xd5]
+// CHECK: msr dbgbvr3_el1, x12 // encoding: [0x8c,0x03,0x10,0xd5]
+// CHECK: msr dbgbvr4_el1, x12 // encoding: [0x8c,0x04,0x10,0xd5]
+// CHECK: msr dbgbvr5_el1, x12 // encoding: [0x8c,0x05,0x10,0xd5]
+// CHECK: msr dbgbvr6_el1, x12 // encoding: [0x8c,0x06,0x10,0xd5]
+// CHECK: msr dbgbvr7_el1, x12 // encoding: [0x8c,0x07,0x10,0xd5]
+// CHECK: msr dbgbvr8_el1, x12 // encoding: [0x8c,0x08,0x10,0xd5]
+// CHECK: msr dbgbvr9_el1, x12 // encoding: [0x8c,0x09,0x10,0xd5]
+// CHECK: msr dbgbvr10_el1, x12 // encoding: [0x8c,0x0a,0x10,0xd5]
+// CHECK: msr dbgbvr11_el1, x12 // encoding: [0x8c,0x0b,0x10,0xd5]
+// CHECK: msr dbgbvr12_el1, x12 // encoding: [0x8c,0x0c,0x10,0xd5]
+// CHECK: msr dbgbvr13_el1, x12 // encoding: [0x8c,0x0d,0x10,0xd5]
+// CHECK: msr dbgbvr14_el1, x12 // encoding: [0x8c,0x0e,0x10,0xd5]
+// CHECK: msr dbgbvr15_el1, x12 // encoding: [0x8c,0x0f,0x10,0xd5]
+// CHECK: msr dbgbcr0_el1, x12 // encoding: [0xac,0x00,0x10,0xd5]
+// CHECK: msr dbgbcr1_el1, x12 // encoding: [0xac,0x01,0x10,0xd5]
+// CHECK: msr dbgbcr2_el1, x12 // encoding: [0xac,0x02,0x10,0xd5]
+// CHECK: msr dbgbcr3_el1, x12 // encoding: [0xac,0x03,0x10,0xd5]
+// CHECK: msr dbgbcr4_el1, x12 // encoding: [0xac,0x04,0x10,0xd5]
+// CHECK: msr dbgbcr5_el1, x12 // encoding: [0xac,0x05,0x10,0xd5]
+// CHECK: msr dbgbcr6_el1, x12 // encoding: [0xac,0x06,0x10,0xd5]
+// CHECK: msr dbgbcr7_el1, x12 // encoding: [0xac,0x07,0x10,0xd5]
+// CHECK: msr dbgbcr8_el1, x12 // encoding: [0xac,0x08,0x10,0xd5]
+// CHECK: msr dbgbcr9_el1, x12 // encoding: [0xac,0x09,0x10,0xd5]
+// CHECK: msr dbgbcr10_el1, x12 // encoding: [0xac,0x0a,0x10,0xd5]
+// CHECK: msr dbgbcr11_el1, x12 // encoding: [0xac,0x0b,0x10,0xd5]
+// CHECK: msr dbgbcr12_el1, x12 // encoding: [0xac,0x0c,0x10,0xd5]
+// CHECK: msr dbgbcr13_el1, x12 // encoding: [0xac,0x0d,0x10,0xd5]
+// CHECK: msr dbgbcr14_el1, x12 // encoding: [0xac,0x0e,0x10,0xd5]
+// CHECK: msr dbgbcr15_el1, x12 // encoding: [0xac,0x0f,0x10,0xd5]
+// CHECK: msr dbgwvr0_el1, x12 // encoding: [0xcc,0x00,0x10,0xd5]
+// CHECK: msr dbgwvr1_el1, x12 // encoding: [0xcc,0x01,0x10,0xd5]
+// CHECK: msr dbgwvr2_el1, x12 // encoding: [0xcc,0x02,0x10,0xd5]
+// CHECK: msr dbgwvr3_el1, x12 // encoding: [0xcc,0x03,0x10,0xd5]
+// CHECK: msr dbgwvr4_el1, x12 // encoding: [0xcc,0x04,0x10,0xd5]
+// CHECK: msr dbgwvr5_el1, x12 // encoding: [0xcc,0x05,0x10,0xd5]
+// CHECK: msr dbgwvr6_el1, x12 // encoding: [0xcc,0x06,0x10,0xd5]
+// CHECK: msr dbgwvr7_el1, x12 // encoding: [0xcc,0x07,0x10,0xd5]
+// CHECK: msr dbgwvr8_el1, x12 // encoding: [0xcc,0x08,0x10,0xd5]
+// CHECK: msr dbgwvr9_el1, x12 // encoding: [0xcc,0x09,0x10,0xd5]
+// CHECK: msr dbgwvr10_el1, x12 // encoding: [0xcc,0x0a,0x10,0xd5]
+// CHECK: msr dbgwvr11_el1, x12 // encoding: [0xcc,0x0b,0x10,0xd5]
+// CHECK: msr dbgwvr12_el1, x12 // encoding: [0xcc,0x0c,0x10,0xd5]
+// CHECK: msr dbgwvr13_el1, x12 // encoding: [0xcc,0x0d,0x10,0xd5]
+// CHECK: msr dbgwvr14_el1, x12 // encoding: [0xcc,0x0e,0x10,0xd5]
+// CHECK: msr dbgwvr15_el1, x12 // encoding: [0xcc,0x0f,0x10,0xd5]
+// CHECK: msr dbgwcr0_el1, x12 // encoding: [0xec,0x00,0x10,0xd5]
+// CHECK: msr dbgwcr1_el1, x12 // encoding: [0xec,0x01,0x10,0xd5]
+// CHECK: msr dbgwcr2_el1, x12 // encoding: [0xec,0x02,0x10,0xd5]
+// CHECK: msr dbgwcr3_el1, x12 // encoding: [0xec,0x03,0x10,0xd5]
+// CHECK: msr dbgwcr4_el1, x12 // encoding: [0xec,0x04,0x10,0xd5]
+// CHECK: msr dbgwcr5_el1, x12 // encoding: [0xec,0x05,0x10,0xd5]
+// CHECK: msr dbgwcr6_el1, x12 // encoding: [0xec,0x06,0x10,0xd5]
+// CHECK: msr dbgwcr7_el1, x12 // encoding: [0xec,0x07,0x10,0xd5]
+// CHECK: msr dbgwcr8_el1, x12 // encoding: [0xec,0x08,0x10,0xd5]
+// CHECK: msr dbgwcr9_el1, x12 // encoding: [0xec,0x09,0x10,0xd5]
+// CHECK: msr dbgwcr10_el1, x12 // encoding: [0xec,0x0a,0x10,0xd5]
+// CHECK: msr dbgwcr11_el1, x12 // encoding: [0xec,0x0b,0x10,0xd5]
+// CHECK: msr dbgwcr12_el1, x12 // encoding: [0xec,0x0c,0x10,0xd5]
+// CHECK: msr dbgwcr13_el1, x12 // encoding: [0xec,0x0d,0x10,0xd5]
+// CHECK: msr dbgwcr14_el1, x12 // encoding: [0xec,0x0e,0x10,0xd5]
+// CHECK: msr dbgwcr15_el1, x12 // encoding: [0xec,0x0f,0x10,0xd5]
+// CHECK: msr teehbr32_el1, x12 // encoding: [0x0c,0x10,0x12,0xd5]
+// CHECK: msr oslar_el1, x12 // encoding: [0x8c,0x10,0x10,0xd5]
+// CHECK: msr osdlr_el1, x12 // encoding: [0x8c,0x13,0x10,0xd5]
+// CHECK: msr dbgprcr_el1, x12 // encoding: [0x8c,0x14,0x10,0xd5]
+// CHECK: msr dbgclaimset_el1, x12 // encoding: [0xcc,0x78,0x10,0xd5]
+// CHECK: msr dbgclaimclr_el1, x12 // encoding: [0xcc,0x79,0x10,0xd5]
+// CHECK: msr csselr_el1, x12 // encoding: [0x0c,0x00,0x1a,0xd5]
+// CHECK: msr vpidr_el2, x12 // encoding: [0x0c,0x00,0x1c,0xd5]
+// CHECK: msr vmpidr_el2, x12 // encoding: [0xac,0x00,0x1c,0xd5]
+// CHECK: msr sctlr_el1, x12 // encoding: [0x0c,0x10,0x18,0xd5]
+// CHECK: msr sctlr_el2, x12 // encoding: [0x0c,0x10,0x1c,0xd5]
+// CHECK: msr sctlr_el3, x12 // encoding: [0x0c,0x10,0x1e,0xd5]
+// CHECK: msr actlr_el1, x12 // encoding: [0x2c,0x10,0x18,0xd5]
+// CHECK: msr actlr_el2, x12 // encoding: [0x2c,0x10,0x1c,0xd5]
+// CHECK: msr actlr_el3, x12 // encoding: [0x2c,0x10,0x1e,0xd5]
+// CHECK: msr cpacr_el1, x12 // encoding: [0x4c,0x10,0x18,0xd5]
+// CHECK: msr hcr_el2, x12 // encoding: [0x0c,0x11,0x1c,0xd5]
+// CHECK: msr scr_el3, x12 // encoding: [0x0c,0x11,0x1e,0xd5]
+// CHECK: msr mdcr_el2, x12 // encoding: [0x2c,0x11,0x1c,0xd5]
+// CHECK: msr sder32_el3, x12 // encoding: [0x2c,0x11,0x1e,0xd5]
+// CHECK: msr cptr_el2, x12 // encoding: [0x4c,0x11,0x1c,0xd5]
+// CHECK: msr cptr_el3, x12 // encoding: [0x4c,0x11,0x1e,0xd5]
+// CHECK: msr hstr_el2, x12 // encoding: [0x6c,0x11,0x1c,0xd5]
+// CHECK: msr hacr_el2, x12 // encoding: [0xec,0x11,0x1c,0xd5]
+// CHECK: msr mdcr_el3, x12 // encoding: [0x2c,0x13,0x1e,0xd5]
+// CHECK: msr ttbr0_el1, x12 // encoding: [0x0c,0x20,0x18,0xd5]
+// CHECK: msr ttbr0_el2, x12 // encoding: [0x0c,0x20,0x1c,0xd5]
+// CHECK: msr ttbr0_el3, x12 // encoding: [0x0c,0x20,0x1e,0xd5]
+// CHECK: msr ttbr1_el1, x12 // encoding: [0x2c,0x20,0x18,0xd5]
+// CHECK: msr tcr_el1, x12 // encoding: [0x4c,0x20,0x18,0xd5]
+// CHECK: msr tcr_el2, x12 // encoding: [0x4c,0x20,0x1c,0xd5]
+// CHECK: msr tcr_el3, x12 // encoding: [0x4c,0x20,0x1e,0xd5]
+// CHECK: msr vttbr_el2, x12 // encoding: [0x0c,0x21,0x1c,0xd5]
+// CHECK: msr vtcr_el2, x12 // encoding: [0x4c,0x21,0x1c,0xd5]
+// CHECK: msr dacr32_el2, x12 // encoding: [0x0c,0x30,0x1c,0xd5]
+// CHECK: msr spsr_el1, x12 // encoding: [0x0c,0x40,0x18,0xd5]
+// CHECK: msr spsr_el2, x12 // encoding: [0x0c,0x40,0x1c,0xd5]
+// CHECK: msr spsr_el3, x12 // encoding: [0x0c,0x40,0x1e,0xd5]
+// CHECK: msr elr_el1, x12 // encoding: [0x2c,0x40,0x18,0xd5]
+// CHECK: msr elr_el2, x12 // encoding: [0x2c,0x40,0x1c,0xd5]
+// CHECK: msr elr_el3, x12 // encoding: [0x2c,0x40,0x1e,0xd5]
+// CHECK: msr sp_el0, x12 // encoding: [0x0c,0x41,0x18,0xd5]
+// CHECK: msr sp_el1, x12 // encoding: [0x0c,0x41,0x1c,0xd5]
+// CHECK: msr sp_el2, x12 // encoding: [0x0c,0x41,0x1e,0xd5]
+// CHECK: msr spsel, x12 // encoding: [0x0c,0x42,0x18,0xd5]
+// CHECK: msr nzcv, x12 // encoding: [0x0c,0x42,0x1b,0xd5]
+// CHECK: msr daif, x12 // encoding: [0x2c,0x42,0x1b,0xd5]
+// CHECK: msr currentel, x12 // encoding: [0x4c,0x42,0x18,0xd5]
+// CHECK: msr spsr_irq, x12 // encoding: [0x0c,0x43,0x1c,0xd5]
+// CHECK: msr spsr_abt, x12 // encoding: [0x2c,0x43,0x1c,0xd5]
+// CHECK: msr spsr_und, x12 // encoding: [0x4c,0x43,0x1c,0xd5]
+// CHECK: msr spsr_fiq, x12 // encoding: [0x6c,0x43,0x1c,0xd5]
+// CHECK: msr fpcr, x12 // encoding: [0x0c,0x44,0x1b,0xd5]
+// CHECK: msr fpsr, x12 // encoding: [0x2c,0x44,0x1b,0xd5]
+// CHECK: msr dspsr_el0, x12 // encoding: [0x0c,0x45,0x1b,0xd5]
+// CHECK: msr dlr_el0, x12 // encoding: [0x2c,0x45,0x1b,0xd5]
+// CHECK: msr ifsr32_el2, x12 // encoding: [0x2c,0x50,0x1c,0xd5]
+// CHECK: msr afsr0_el1, x12 // encoding: [0x0c,0x51,0x18,0xd5]
+// CHECK: msr afsr0_el2, x12 // encoding: [0x0c,0x51,0x1c,0xd5]
+// CHECK: msr afsr0_el3, x12 // encoding: [0x0c,0x51,0x1e,0xd5]
+// CHECK: msr afsr1_el1, x12 // encoding: [0x2c,0x51,0x18,0xd5]
+// CHECK: msr afsr1_el2, x12 // encoding: [0x2c,0x51,0x1c,0xd5]
+// CHECK: msr afsr1_el3, x12 // encoding: [0x2c,0x51,0x1e,0xd5]
+// CHECK: msr esr_el1, x12 // encoding: [0x0c,0x52,0x18,0xd5]
+// CHECK: msr esr_el2, x12 // encoding: [0x0c,0x52,0x1c,0xd5]
+// CHECK: msr esr_el3, x12 // encoding: [0x0c,0x52,0x1e,0xd5]
+// CHECK: msr fpexc32_el2, x12 // encoding: [0x0c,0x53,0x1c,0xd5]
+// CHECK: msr far_el1, x12 // encoding: [0x0c,0x60,0x18,0xd5]
+// CHECK: msr far_el2, x12 // encoding: [0x0c,0x60,0x1c,0xd5]
+// CHECK: msr far_el3, x12 // encoding: [0x0c,0x60,0x1e,0xd5]
+// CHECK: msr hpfar_el2, x12 // encoding: [0x8c,0x60,0x1c,0xd5]
+// CHECK: msr par_el1, x12 // encoding: [0x0c,0x74,0x18,0xd5]
+// CHECK: msr pmcr_el0, x12 // encoding: [0x0c,0x9c,0x1b,0xd5]
+// CHECK: msr pmcntenset_el0, x12 // encoding: [0x2c,0x9c,0x1b,0xd5]
+// CHECK: msr pmcntenclr_el0, x12 // encoding: [0x4c,0x9c,0x1b,0xd5]
+// CHECK: msr pmovsclr_el0, x12 // encoding: [0x6c,0x9c,0x1b,0xd5]
+// CHECK: msr pmselr_el0, x12 // encoding: [0xac,0x9c,0x1b,0xd5]
+// CHECK: msr pmccntr_el0, x12 // encoding: [0x0c,0x9d,0x1b,0xd5]
+// CHECK: msr pmxevtyper_el0, x12 // encoding: [0x2c,0x9d,0x1b,0xd5]
+// CHECK: msr pmxevcntr_el0, x12 // encoding: [0x4c,0x9d,0x1b,0xd5]
+// CHECK: msr pmuserenr_el0, x12 // encoding: [0x0c,0x9e,0x1b,0xd5]
+// CHECK: msr pmintenset_el1, x12 // encoding: [0x2c,0x9e,0x18,0xd5]
+// CHECK: msr pmintenclr_el1, x12 // encoding: [0x4c,0x9e,0x18,0xd5]
+// CHECK: msr pmovsset_el0, x12 // encoding: [0x6c,0x9e,0x1b,0xd5]
+// CHECK: msr mair_el1, x12 // encoding: [0x0c,0xa2,0x18,0xd5]
+// CHECK: msr mair_el2, x12 // encoding: [0x0c,0xa2,0x1c,0xd5]
+// CHECK: msr mair_el3, x12 // encoding: [0x0c,0xa2,0x1e,0xd5]
+// CHECK: msr amair_el1, x12 // encoding: [0x0c,0xa3,0x18,0xd5]
+// CHECK: msr amair_el2, x12 // encoding: [0x0c,0xa3,0x1c,0xd5]
+// CHECK: msr amair_el3, x12 // encoding: [0x0c,0xa3,0x1e,0xd5]
+// CHECK: msr vbar_el1, x12 // encoding: [0x0c,0xc0,0x18,0xd5]
+// CHECK: msr vbar_el2, x12 // encoding: [0x0c,0xc0,0x1c,0xd5]
+// CHECK: msr vbar_el3, x12 // encoding: [0x0c,0xc0,0x1e,0xd5]
+// CHECK: msr rmr_el1, x12 // encoding: [0x4c,0xc0,0x18,0xd5]
+// CHECK: msr rmr_el2, x12 // encoding: [0x4c,0xc0,0x1c,0xd5]
+// CHECK: msr rmr_el3, x12 // encoding: [0x4c,0xc0,0x1e,0xd5]
+// CHECK: msr contextidr_el1, x12 // encoding: [0x2c,0xd0,0x18,0xd5]
+// CHECK: msr tpidr_el0, x12 // encoding: [0x4c,0xd0,0x1b,0xd5]
+// CHECK: msr tpidr_el2, x12 // encoding: [0x4c,0xd0,0x1c,0xd5]
+// CHECK: msr tpidr_el3, x12 // encoding: [0x4c,0xd0,0x1e,0xd5]
+// CHECK: msr tpidrro_el0, x12 // encoding: [0x6c,0xd0,0x1b,0xd5]
+// CHECK: msr tpidr_el1, x12 // encoding: [0x8c,0xd0,0x18,0xd5]
+// CHECK: msr cntfrq_el0, x12 // encoding: [0x0c,0xe0,0x1b,0xd5]
+// CHECK: msr cntvoff_el2, x12 // encoding: [0x6c,0xe0,0x1c,0xd5]
+// CHECK: msr cntkctl_el1, x12 // encoding: [0x0c,0xe1,0x18,0xd5]
+// CHECK: msr cnthctl_el2, x12 // encoding: [0x0c,0xe1,0x1c,0xd5]
+// CHECK: msr cntp_tval_el0, x12 // encoding: [0x0c,0xe2,0x1b,0xd5]
+// CHECK: msr cnthp_tval_el2, x12 // encoding: [0x0c,0xe2,0x1c,0xd5]
+// CHECK: msr cntps_tval_el1, x12 // encoding: [0x0c,0xe2,0x1f,0xd5]
+// CHECK: msr cntp_ctl_el0, x12 // encoding: [0x2c,0xe2,0x1b,0xd5]
+// CHECK: msr cnthp_ctl_el2, x12 // encoding: [0x2c,0xe2,0x1c,0xd5]
+// CHECK: msr cntps_ctl_el1, x12 // encoding: [0x2c,0xe2,0x1f,0xd5]
+// CHECK: msr cntp_cval_el0, x12 // encoding: [0x4c,0xe2,0x1b,0xd5]
+// CHECK: msr cnthp_cval_el2, x12 // encoding: [0x4c,0xe2,0x1c,0xd5]
+// CHECK: msr cntps_cval_el1, x12 // encoding: [0x4c,0xe2,0x1f,0xd5]
+// CHECK: msr cntv_tval_el0, x12 // encoding: [0x0c,0xe3,0x1b,0xd5]
+// CHECK: msr cntv_ctl_el0, x12 // encoding: [0x2c,0xe3,0x1b,0xd5]
+// CHECK: msr cntv_cval_el0, x12 // encoding: [0x4c,0xe3,0x1b,0xd5]
+// CHECK: msr pmevcntr0_el0, x12 // encoding: [0x0c,0xe8,0x1b,0xd5]
+// CHECK: msr pmevcntr1_el0, x12 // encoding: [0x2c,0xe8,0x1b,0xd5]
+// CHECK: msr pmevcntr2_el0, x12 // encoding: [0x4c,0xe8,0x1b,0xd5]
+// CHECK: msr pmevcntr3_el0, x12 // encoding: [0x6c,0xe8,0x1b,0xd5]
+// CHECK: msr pmevcntr4_el0, x12 // encoding: [0x8c,0xe8,0x1b,0xd5]
+// CHECK: msr pmevcntr5_el0, x12 // encoding: [0xac,0xe8,0x1b,0xd5]
+// CHECK: msr pmevcntr6_el0, x12 // encoding: [0xcc,0xe8,0x1b,0xd5]
+// CHECK: msr pmevcntr7_el0, x12 // encoding: [0xec,0xe8,0x1b,0xd5]
+// CHECK: msr pmevcntr8_el0, x12 // encoding: [0x0c,0xe9,0x1b,0xd5]
+// CHECK: msr pmevcntr9_el0, x12 // encoding: [0x2c,0xe9,0x1b,0xd5]
+// CHECK: msr pmevcntr10_el0, x12 // encoding: [0x4c,0xe9,0x1b,0xd5]
+// CHECK: msr pmevcntr11_el0, x12 // encoding: [0x6c,0xe9,0x1b,0xd5]
+// CHECK: msr pmevcntr12_el0, x12 // encoding: [0x8c,0xe9,0x1b,0xd5]
+// CHECK: msr pmevcntr13_el0, x12 // encoding: [0xac,0xe9,0x1b,0xd5]
+// CHECK: msr pmevcntr14_el0, x12 // encoding: [0xcc,0xe9,0x1b,0xd5]
+// CHECK: msr pmevcntr15_el0, x12 // encoding: [0xec,0xe9,0x1b,0xd5]
+// CHECK: msr pmevcntr16_el0, x12 // encoding: [0x0c,0xea,0x1b,0xd5]
+// CHECK: msr pmevcntr17_el0, x12 // encoding: [0x2c,0xea,0x1b,0xd5]
+// CHECK: msr pmevcntr18_el0, x12 // encoding: [0x4c,0xea,0x1b,0xd5]
+// CHECK: msr pmevcntr19_el0, x12 // encoding: [0x6c,0xea,0x1b,0xd5]
+// CHECK: msr pmevcntr20_el0, x12 // encoding: [0x8c,0xea,0x1b,0xd5]
+// CHECK: msr pmevcntr21_el0, x12 // encoding: [0xac,0xea,0x1b,0xd5]
+// CHECK: msr pmevcntr22_el0, x12 // encoding: [0xcc,0xea,0x1b,0xd5]
+// CHECK: msr pmevcntr23_el0, x12 // encoding: [0xec,0xea,0x1b,0xd5]
+// CHECK: msr pmevcntr24_el0, x12 // encoding: [0x0c,0xeb,0x1b,0xd5]
+// CHECK: msr pmevcntr25_el0, x12 // encoding: [0x2c,0xeb,0x1b,0xd5]
+// CHECK: msr pmevcntr26_el0, x12 // encoding: [0x4c,0xeb,0x1b,0xd5]
+// CHECK: msr pmevcntr27_el0, x12 // encoding: [0x6c,0xeb,0x1b,0xd5]
+// CHECK: msr pmevcntr28_el0, x12 // encoding: [0x8c,0xeb,0x1b,0xd5]
+// CHECK: msr pmevcntr29_el0, x12 // encoding: [0xac,0xeb,0x1b,0xd5]
+// CHECK: msr pmevcntr30_el0, x12 // encoding: [0xcc,0xeb,0x1b,0xd5]
+// CHECK: msr pmccfiltr_el0, x12 // encoding: [0xec,0xef,0x1b,0xd5]
+// CHECK: msr pmevtyper0_el0, x12 // encoding: [0x0c,0xec,0x1b,0xd5]
+// CHECK: msr pmevtyper1_el0, x12 // encoding: [0x2c,0xec,0x1b,0xd5]
+// CHECK: msr pmevtyper2_el0, x12 // encoding: [0x4c,0xec,0x1b,0xd5]
+// CHECK: msr pmevtyper3_el0, x12 // encoding: [0x6c,0xec,0x1b,0xd5]
+// CHECK: msr pmevtyper4_el0, x12 // encoding: [0x8c,0xec,0x1b,0xd5]
+// CHECK: msr pmevtyper5_el0, x12 // encoding: [0xac,0xec,0x1b,0xd5]
+// CHECK: msr pmevtyper6_el0, x12 // encoding: [0xcc,0xec,0x1b,0xd5]
+// CHECK: msr pmevtyper7_el0, x12 // encoding: [0xec,0xec,0x1b,0xd5]
+// CHECK: msr pmevtyper8_el0, x12 // encoding: [0x0c,0xed,0x1b,0xd5]
+// CHECK: msr pmevtyper9_el0, x12 // encoding: [0x2c,0xed,0x1b,0xd5]
+// CHECK: msr pmevtyper10_el0, x12 // encoding: [0x4c,0xed,0x1b,0xd5]
+// CHECK: msr pmevtyper11_el0, x12 // encoding: [0x6c,0xed,0x1b,0xd5]
+// CHECK: msr pmevtyper12_el0, x12 // encoding: [0x8c,0xed,0x1b,0xd5]
+// CHECK: msr pmevtyper13_el0, x12 // encoding: [0xac,0xed,0x1b,0xd5]
+// CHECK: msr pmevtyper14_el0, x12 // encoding: [0xcc,0xed,0x1b,0xd5]
+// CHECK: msr pmevtyper15_el0, x12 // encoding: [0xec,0xed,0x1b,0xd5]
+// CHECK: msr pmevtyper16_el0, x12 // encoding: [0x0c,0xee,0x1b,0xd5]
+// CHECK: msr pmevtyper17_el0, x12 // encoding: [0x2c,0xee,0x1b,0xd5]
+// CHECK: msr pmevtyper18_el0, x12 // encoding: [0x4c,0xee,0x1b,0xd5]
+// CHECK: msr pmevtyper19_el0, x12 // encoding: [0x6c,0xee,0x1b,0xd5]
+// CHECK: msr pmevtyper20_el0, x12 // encoding: [0x8c,0xee,0x1b,0xd5]
+// CHECK: msr pmevtyper21_el0, x12 // encoding: [0xac,0xee,0x1b,0xd5]
+// CHECK: msr pmevtyper22_el0, x12 // encoding: [0xcc,0xee,0x1b,0xd5]
+// CHECK: msr pmevtyper23_el0, x12 // encoding: [0xec,0xee,0x1b,0xd5]
+// CHECK: msr pmevtyper24_el0, x12 // encoding: [0x0c,0xef,0x1b,0xd5]
+// CHECK: msr pmevtyper25_el0, x12 // encoding: [0x2c,0xef,0x1b,0xd5]
+// CHECK: msr pmevtyper26_el0, x12 // encoding: [0x4c,0xef,0x1b,0xd5]
+// CHECK: msr pmevtyper27_el0, x12 // encoding: [0x6c,0xef,0x1b,0xd5]
+// CHECK: msr pmevtyper28_el0, x12 // encoding: [0x8c,0xef,0x1b,0xd5]
+// CHECK: msr pmevtyper29_el0, x12 // encoding: [0xac,0xef,0x1b,0xd5]
+// CHECK: msr pmevtyper30_el0, x12 // encoding: [0xcc,0xef,0x1b,0xd5]
+
+ mrs x9, TEECR32_EL1
+ mrs x9, OSDTRRX_EL1
+ mrs x9, MDCCSR_EL0
+ mrs x9, MDCCINT_EL1
+ mrs x9, MDSCR_EL1
+ mrs x9, OSDTRTX_EL1
+ mrs x9, DBGDTR_EL0
+ mrs x9, DBGDTRRX_EL0
+ mrs x9, OSECCR_EL1
+ mrs x9, DBGVCR32_EL2
+ mrs x9, DBGBVR0_EL1
+ mrs x9, DBGBVR1_EL1
+ mrs x9, DBGBVR2_EL1
+ mrs x9, DBGBVR3_EL1
+ mrs x9, DBGBVR4_EL1
+ mrs x9, DBGBVR5_EL1
+ mrs x9, DBGBVR6_EL1
+ mrs x9, DBGBVR7_EL1
+ mrs x9, DBGBVR8_EL1
+ mrs x9, DBGBVR9_EL1
+ mrs x9, DBGBVR10_EL1
+ mrs x9, DBGBVR11_EL1
+ mrs x9, DBGBVR12_EL1
+ mrs x9, DBGBVR13_EL1
+ mrs x9, DBGBVR14_EL1
+ mrs x9, DBGBVR15_EL1
+ mrs x9, DBGBCR0_EL1
+ mrs x9, DBGBCR1_EL1
+ mrs x9, DBGBCR2_EL1
+ mrs x9, DBGBCR3_EL1
+ mrs x9, DBGBCR4_EL1
+ mrs x9, DBGBCR5_EL1
+ mrs x9, DBGBCR6_EL1
+ mrs x9, DBGBCR7_EL1
+ mrs x9, DBGBCR8_EL1
+ mrs x9, DBGBCR9_EL1
+ mrs x9, DBGBCR10_EL1
+ mrs x9, DBGBCR11_EL1
+ mrs x9, DBGBCR12_EL1
+ mrs x9, DBGBCR13_EL1
+ mrs x9, DBGBCR14_EL1
+ mrs x9, DBGBCR15_EL1
+ mrs x9, DBGWVR0_EL1
+ mrs x9, DBGWVR1_EL1
+ mrs x9, DBGWVR2_EL1
+ mrs x9, DBGWVR3_EL1
+ mrs x9, DBGWVR4_EL1
+ mrs x9, DBGWVR5_EL1
+ mrs x9, DBGWVR6_EL1
+ mrs x9, DBGWVR7_EL1
+ mrs x9, DBGWVR8_EL1
+ mrs x9, DBGWVR9_EL1
+ mrs x9, DBGWVR10_EL1
+ mrs x9, DBGWVR11_EL1
+ mrs x9, DBGWVR12_EL1
+ mrs x9, DBGWVR13_EL1
+ mrs x9, DBGWVR14_EL1
+ mrs x9, DBGWVR15_EL1
+ mrs x9, DBGWCR0_EL1
+ mrs x9, DBGWCR1_EL1
+ mrs x9, DBGWCR2_EL1
+ mrs x9, DBGWCR3_EL1
+ mrs x9, DBGWCR4_EL1
+ mrs x9, DBGWCR5_EL1
+ mrs x9, DBGWCR6_EL1
+ mrs x9, DBGWCR7_EL1
+ mrs x9, DBGWCR8_EL1
+ mrs x9, DBGWCR9_EL1
+ mrs x9, DBGWCR10_EL1
+ mrs x9, DBGWCR11_EL1
+ mrs x9, DBGWCR12_EL1
+ mrs x9, DBGWCR13_EL1
+ mrs x9, DBGWCR14_EL1
+ mrs x9, DBGWCR15_EL1
+ mrs x9, MDRAR_EL1
+ mrs x9, TEEHBR32_EL1
+ mrs x9, OSLSR_EL1
+ mrs x9, OSDLR_EL1
+ mrs x9, DBGPRCR_EL1
+ mrs x9, DBGCLAIMSET_EL1
+ mrs x9, DBGCLAIMCLR_EL1
+ mrs x9, DBGAUTHSTATUS_EL1
+ mrs x9, MIDR_EL1
+ mrs x9, CCSIDR_EL1
+ mrs x9, CSSELR_EL1
+ mrs x9, VPIDR_EL2
+ mrs x9, CLIDR_EL1
+ mrs x9, CTR_EL0
+ mrs x9, MPIDR_EL1
+ mrs x9, VMPIDR_EL2
+ mrs x9, REVIDR_EL1
+ mrs x9, AIDR_EL1
+ mrs x9, DCZID_EL0
+ mrs x9, ID_PFR0_EL1
+ mrs x9, ID_PFR1_EL1
+ mrs x9, ID_DFR0_EL1
+ mrs x9, ID_AFR0_EL1
+ mrs x9, ID_MMFR0_EL1
+ mrs x9, ID_MMFR1_EL1
+ mrs x9, ID_MMFR2_EL1
+ mrs x9, ID_MMFR3_EL1
+ mrs x9, ID_ISAR0_EL1
+ mrs x9, ID_ISAR1_EL1
+ mrs x9, ID_ISAR2_EL1
+ mrs x9, ID_ISAR3_EL1
+ mrs x9, ID_ISAR4_EL1
+ mrs x9, ID_ISAR5_EL1
+ mrs x9, MVFR0_EL1
+ mrs x9, MVFR1_EL1
+ mrs x9, MVFR2_EL1
+ mrs x9, ID_AA64PFR0_EL1
+ mrs x9, ID_AA64PFR1_EL1
+ mrs x9, ID_AA64DFR0_EL1
+ mrs x9, ID_AA64DFR1_EL1
+ mrs x9, ID_AA64AFR0_EL1
+ mrs x9, ID_AA64AFR1_EL1
+ mrs x9, ID_AA64ISAR0_EL1
+ mrs x9, ID_AA64ISAR1_EL1
+ mrs x9, ID_AA64MMFR0_EL1
+ mrs x9, ID_AA64MMFR1_EL1
+ mrs x9, SCTLR_EL1
+ mrs x9, SCTLR_EL2
+ mrs x9, SCTLR_EL3
+ mrs x9, ACTLR_EL1
+ mrs x9, ACTLR_EL2
+ mrs x9, ACTLR_EL3
+ mrs x9, CPACR_EL1
+ mrs x9, HCR_EL2
+ mrs x9, SCR_EL3
+ mrs x9, MDCR_EL2
+ mrs x9, SDER32_EL3
+ mrs x9, CPTR_EL2
+ mrs x9, CPTR_EL3
+ mrs x9, HSTR_EL2
+ mrs x9, HACR_EL2
+ mrs x9, MDCR_EL3
+ mrs x9, TTBR0_EL1
+ mrs x9, TTBR0_EL2
+ mrs x9, TTBR0_EL3
+ mrs x9, TTBR1_EL1
+ mrs x9, TCR_EL1
+ mrs x9, TCR_EL2
+ mrs x9, TCR_EL3
+ mrs x9, VTTBR_EL2
+ mrs x9, VTCR_EL2
+ mrs x9, DACR32_EL2
+ mrs x9, SPSR_EL1
+ mrs x9, SPSR_EL2
+ mrs x9, SPSR_EL3
+ mrs x9, ELR_EL1
+ mrs x9, ELR_EL2
+ mrs x9, ELR_EL3
+ mrs x9, SP_EL0
+ mrs x9, SP_EL1
+ mrs x9, SP_EL2
+ mrs x9, SPSel
+ mrs x9, NZCV
+ mrs x9, DAIF
+ mrs x9, CurrentEL
+ mrs x9, SPSR_irq
+ mrs x9, SPSR_abt
+ mrs x9, SPSR_und
+ mrs x9, SPSR_fiq
+ mrs x9, FPCR
+ mrs x9, FPSR
+ mrs x9, DSPSR_EL0
+ mrs x9, DLR_EL0
+ mrs x9, IFSR32_EL2
+ mrs x9, AFSR0_EL1
+ mrs x9, AFSR0_EL2
+ mrs x9, AFSR0_EL3
+ mrs x9, AFSR1_EL1
+ mrs x9, AFSR1_EL2
+ mrs x9, AFSR1_EL3
+ mrs x9, ESR_EL1
+ mrs x9, ESR_EL2
+ mrs x9, ESR_EL3
+ mrs x9, FPEXC32_EL2
+ mrs x9, FAR_EL1
+ mrs x9, FAR_EL2
+ mrs x9, FAR_EL3
+ mrs x9, HPFAR_EL2
+ mrs x9, PAR_EL1
+ mrs x9, PMCR_EL0
+ mrs x9, PMCNTENSET_EL0
+ mrs x9, PMCNTENCLR_EL0
+ mrs x9, PMOVSCLR_EL0
+ mrs x9, PMSELR_EL0
+ mrs x9, PMCEID0_EL0
+ mrs x9, PMCEID1_EL0
+ mrs x9, PMCCNTR_EL0
+ mrs x9, PMXEVTYPER_EL0
+ mrs x9, PMXEVCNTR_EL0
+ mrs x9, PMUSERENR_EL0
+ mrs x9, PMINTENSET_EL1
+ mrs x9, PMINTENCLR_EL1
+ mrs x9, PMOVSSET_EL0
+ mrs x9, MAIR_EL1
+ mrs x9, MAIR_EL2
+ mrs x9, MAIR_EL3
+ mrs x9, AMAIR_EL1
+ mrs x9, AMAIR_EL2
+ mrs x9, AMAIR_EL3
+ mrs x9, VBAR_EL1
+ mrs x9, VBAR_EL2
+ mrs x9, VBAR_EL3
+ mrs x9, RVBAR_EL1
+ mrs x9, RVBAR_EL2
+ mrs x9, RVBAR_EL3
+ mrs x9, RMR_EL1
+ mrs x9, RMR_EL2
+ mrs x9, RMR_EL3
+ mrs x9, ISR_EL1
+ mrs x9, CONTEXTIDR_EL1
+ mrs x9, TPIDR_EL0
+ mrs x9, TPIDR_EL2
+ mrs x9, TPIDR_EL3
+ mrs x9, TPIDRRO_EL0
+ mrs x9, TPIDR_EL1
+ mrs x9, CNTFRQ_EL0
+ mrs x9, CNTPCT_EL0
+ mrs x9, CNTVCT_EL0
+ mrs x9, CNTVOFF_EL2
+ mrs x9, CNTKCTL_EL1
+ mrs x9, CNTHCTL_EL2
+ mrs x9, CNTP_TVAL_EL0
+ mrs x9, CNTHP_TVAL_EL2
+ mrs x9, CNTPS_TVAL_EL1
+ mrs x9, CNTP_CTL_EL0
+ mrs x9, CNTHP_CTL_EL2
+ mrs x9, CNTPS_CTL_EL1
+ mrs x9, CNTP_CVAL_EL0
+ mrs x9, CNTHP_CVAL_EL2
+ mrs x9, CNTPS_CVAL_EL1
+ mrs x9, CNTV_TVAL_EL0
+ mrs x9, CNTV_CTL_EL0
+ mrs x9, CNTV_CVAL_EL0
+ mrs x9, PMEVCNTR0_EL0
+ mrs x9, PMEVCNTR1_EL0
+ mrs x9, PMEVCNTR2_EL0
+ mrs x9, PMEVCNTR3_EL0
+ mrs x9, PMEVCNTR4_EL0
+ mrs x9, PMEVCNTR5_EL0
+ mrs x9, PMEVCNTR6_EL0
+ mrs x9, PMEVCNTR7_EL0
+ mrs x9, PMEVCNTR8_EL0
+ mrs x9, PMEVCNTR9_EL0
+ mrs x9, PMEVCNTR10_EL0
+ mrs x9, PMEVCNTR11_EL0
+ mrs x9, PMEVCNTR12_EL0
+ mrs x9, PMEVCNTR13_EL0
+ mrs x9, PMEVCNTR14_EL0
+ mrs x9, PMEVCNTR15_EL0
+ mrs x9, PMEVCNTR16_EL0
+ mrs x9, PMEVCNTR17_EL0
+ mrs x9, PMEVCNTR18_EL0
+ mrs x9, PMEVCNTR19_EL0
+ mrs x9, PMEVCNTR20_EL0
+ mrs x9, PMEVCNTR21_EL0
+ mrs x9, PMEVCNTR22_EL0
+ mrs x9, PMEVCNTR23_EL0
+ mrs x9, PMEVCNTR24_EL0
+ mrs x9, PMEVCNTR25_EL0
+ mrs x9, PMEVCNTR26_EL0
+ mrs x9, PMEVCNTR27_EL0
+ mrs x9, PMEVCNTR28_EL0
+ mrs x9, PMEVCNTR29_EL0
+ mrs x9, PMEVCNTR30_EL0
+ mrs x9, PMCCFILTR_EL0
+ mrs x9, PMEVTYPER0_EL0
+ mrs x9, PMEVTYPER1_EL0
+ mrs x9, PMEVTYPER2_EL0
+ mrs x9, PMEVTYPER3_EL0
+ mrs x9, PMEVTYPER4_EL0
+ mrs x9, PMEVTYPER5_EL0
+ mrs x9, PMEVTYPER6_EL0
+ mrs x9, PMEVTYPER7_EL0
+ mrs x9, PMEVTYPER8_EL0
+ mrs x9, PMEVTYPER9_EL0
+ mrs x9, PMEVTYPER10_EL0
+ mrs x9, PMEVTYPER11_EL0
+ mrs x9, PMEVTYPER12_EL0
+ mrs x9, PMEVTYPER13_EL0
+ mrs x9, PMEVTYPER14_EL0
+ mrs x9, PMEVTYPER15_EL0
+ mrs x9, PMEVTYPER16_EL0
+ mrs x9, PMEVTYPER17_EL0
+ mrs x9, PMEVTYPER18_EL0
+ mrs x9, PMEVTYPER19_EL0
+ mrs x9, PMEVTYPER20_EL0
+ mrs x9, PMEVTYPER21_EL0
+ mrs x9, PMEVTYPER22_EL0
+ mrs x9, PMEVTYPER23_EL0
+ mrs x9, PMEVTYPER24_EL0
+ mrs x9, PMEVTYPER25_EL0
+ mrs x9, PMEVTYPER26_EL0
+ mrs x9, PMEVTYPER27_EL0
+ mrs x9, PMEVTYPER28_EL0
+ mrs x9, PMEVTYPER29_EL0
+ mrs x9, PMEVTYPER30_EL0
+// CHECK: mrs x9, teecr32_el1 // encoding: [0x09,0x00,0x32,0xd5]
+// CHECK: mrs x9, osdtrrx_el1 // encoding: [0x49,0x00,0x30,0xd5]
+// CHECK: mrs x9, mdccsr_el0 // encoding: [0x09,0x01,0x33,0xd5]
+// CHECK: mrs x9, mdccint_el1 // encoding: [0x09,0x02,0x30,0xd5]
+// CHECK: mrs x9, mdscr_el1 // encoding: [0x49,0x02,0x30,0xd5]
+// CHECK: mrs x9, osdtrtx_el1 // encoding: [0x49,0x03,0x30,0xd5]
+// CHECK: mrs x9, dbgdtr_el0 // encoding: [0x09,0x04,0x33,0xd5]
+// CHECK: mrs x9, dbgdtrrx_el0 // encoding: [0x09,0x05,0x33,0xd5]
+// CHECK: mrs x9, oseccr_el1 // encoding: [0x49,0x06,0x30,0xd5]
+// CHECK: mrs x9, dbgvcr32_el2 // encoding: [0x09,0x07,0x34,0xd5]
+// CHECK: mrs x9, dbgbvr0_el1 // encoding: [0x89,0x00,0x30,0xd5]
+// CHECK: mrs x9, dbgbvr1_el1 // encoding: [0x89,0x01,0x30,0xd5]
+// CHECK: mrs x9, dbgbvr2_el1 // encoding: [0x89,0x02,0x30,0xd5]
+// CHECK: mrs x9, dbgbvr3_el1 // encoding: [0x89,0x03,0x30,0xd5]
+// CHECK: mrs x9, dbgbvr4_el1 // encoding: [0x89,0x04,0x30,0xd5]
+// CHECK: mrs x9, dbgbvr5_el1 // encoding: [0x89,0x05,0x30,0xd5]
+// CHECK: mrs x9, dbgbvr6_el1 // encoding: [0x89,0x06,0x30,0xd5]
+// CHECK: mrs x9, dbgbvr7_el1 // encoding: [0x89,0x07,0x30,0xd5]
+// CHECK: mrs x9, dbgbvr8_el1 // encoding: [0x89,0x08,0x30,0xd5]
+// CHECK: mrs x9, dbgbvr9_el1 // encoding: [0x89,0x09,0x30,0xd5]
+// CHECK: mrs x9, dbgbvr10_el1 // encoding: [0x89,0x0a,0x30,0xd5]
+// CHECK: mrs x9, dbgbvr11_el1 // encoding: [0x89,0x0b,0x30,0xd5]
+// CHECK: mrs x9, dbgbvr12_el1 // encoding: [0x89,0x0c,0x30,0xd5]
+// CHECK: mrs x9, dbgbvr13_el1 // encoding: [0x89,0x0d,0x30,0xd5]
+// CHECK: mrs x9, dbgbvr14_el1 // encoding: [0x89,0x0e,0x30,0xd5]
+// CHECK: mrs x9, dbgbvr15_el1 // encoding: [0x89,0x0f,0x30,0xd5]
+// CHECK: mrs x9, dbgbcr0_el1 // encoding: [0xa9,0x00,0x30,0xd5]
+// CHECK: mrs x9, dbgbcr1_el1 // encoding: [0xa9,0x01,0x30,0xd5]
+// CHECK: mrs x9, dbgbcr2_el1 // encoding: [0xa9,0x02,0x30,0xd5]
+// CHECK: mrs x9, dbgbcr3_el1 // encoding: [0xa9,0x03,0x30,0xd5]
+// CHECK: mrs x9, dbgbcr4_el1 // encoding: [0xa9,0x04,0x30,0xd5]
+// CHECK: mrs x9, dbgbcr5_el1 // encoding: [0xa9,0x05,0x30,0xd5]
+// CHECK: mrs x9, dbgbcr6_el1 // encoding: [0xa9,0x06,0x30,0xd5]
+// CHECK: mrs x9, dbgbcr7_el1 // encoding: [0xa9,0x07,0x30,0xd5]
+// CHECK: mrs x9, dbgbcr8_el1 // encoding: [0xa9,0x08,0x30,0xd5]
+// CHECK: mrs x9, dbgbcr9_el1 // encoding: [0xa9,0x09,0x30,0xd5]
+// CHECK: mrs x9, dbgbcr10_el1 // encoding: [0xa9,0x0a,0x30,0xd5]
+// CHECK: mrs x9, dbgbcr11_el1 // encoding: [0xa9,0x0b,0x30,0xd5]
+// CHECK: mrs x9, dbgbcr12_el1 // encoding: [0xa9,0x0c,0x30,0xd5]
+// CHECK: mrs x9, dbgbcr13_el1 // encoding: [0xa9,0x0d,0x30,0xd5]
+// CHECK: mrs x9, dbgbcr14_el1 // encoding: [0xa9,0x0e,0x30,0xd5]
+// CHECK: mrs x9, dbgbcr15_el1 // encoding: [0xa9,0x0f,0x30,0xd5]
+// CHECK: mrs x9, dbgwvr0_el1 // encoding: [0xc9,0x00,0x30,0xd5]
+// CHECK: mrs x9, dbgwvr1_el1 // encoding: [0xc9,0x01,0x30,0xd5]
+// CHECK: mrs x9, dbgwvr2_el1 // encoding: [0xc9,0x02,0x30,0xd5]
+// CHECK: mrs x9, dbgwvr3_el1 // encoding: [0xc9,0x03,0x30,0xd5]
+// CHECK: mrs x9, dbgwvr4_el1 // encoding: [0xc9,0x04,0x30,0xd5]
+// CHECK: mrs x9, dbgwvr5_el1 // encoding: [0xc9,0x05,0x30,0xd5]
+// CHECK: mrs x9, dbgwvr6_el1 // encoding: [0xc9,0x06,0x30,0xd5]
+// CHECK: mrs x9, dbgwvr7_el1 // encoding: [0xc9,0x07,0x30,0xd5]
+// CHECK: mrs x9, dbgwvr8_el1 // encoding: [0xc9,0x08,0x30,0xd5]
+// CHECK: mrs x9, dbgwvr9_el1 // encoding: [0xc9,0x09,0x30,0xd5]
+// CHECK: mrs x9, dbgwvr10_el1 // encoding: [0xc9,0x0a,0x30,0xd5]
+// CHECK: mrs x9, dbgwvr11_el1 // encoding: [0xc9,0x0b,0x30,0xd5]
+// CHECK: mrs x9, dbgwvr12_el1 // encoding: [0xc9,0x0c,0x30,0xd5]
+// CHECK: mrs x9, dbgwvr13_el1 // encoding: [0xc9,0x0d,0x30,0xd5]
+// CHECK: mrs x9, dbgwvr14_el1 // encoding: [0xc9,0x0e,0x30,0xd5]
+// CHECK: mrs x9, dbgwvr15_el1 // encoding: [0xc9,0x0f,0x30,0xd5]
+// CHECK: mrs x9, dbgwcr0_el1 // encoding: [0xe9,0x00,0x30,0xd5]
+// CHECK: mrs x9, dbgwcr1_el1 // encoding: [0xe9,0x01,0x30,0xd5]
+// CHECK: mrs x9, dbgwcr2_el1 // encoding: [0xe9,0x02,0x30,0xd5]
+// CHECK: mrs x9, dbgwcr3_el1 // encoding: [0xe9,0x03,0x30,0xd5]
+// CHECK: mrs x9, dbgwcr4_el1 // encoding: [0xe9,0x04,0x30,0xd5]
+// CHECK: mrs x9, dbgwcr5_el1 // encoding: [0xe9,0x05,0x30,0xd5]
+// CHECK: mrs x9, dbgwcr6_el1 // encoding: [0xe9,0x06,0x30,0xd5]
+// CHECK: mrs x9, dbgwcr7_el1 // encoding: [0xe9,0x07,0x30,0xd5]
+// CHECK: mrs x9, dbgwcr8_el1 // encoding: [0xe9,0x08,0x30,0xd5]
+// CHECK: mrs x9, dbgwcr9_el1 // encoding: [0xe9,0x09,0x30,0xd5]
+// CHECK: mrs x9, dbgwcr10_el1 // encoding: [0xe9,0x0a,0x30,0xd5]
+// CHECK: mrs x9, dbgwcr11_el1 // encoding: [0xe9,0x0b,0x30,0xd5]
+// CHECK: mrs x9, dbgwcr12_el1 // encoding: [0xe9,0x0c,0x30,0xd5]
+// CHECK: mrs x9, dbgwcr13_el1 // encoding: [0xe9,0x0d,0x30,0xd5]
+// CHECK: mrs x9, dbgwcr14_el1 // encoding: [0xe9,0x0e,0x30,0xd5]
+// CHECK: mrs x9, dbgwcr15_el1 // encoding: [0xe9,0x0f,0x30,0xd5]
+// CHECK: mrs x9, mdrar_el1 // encoding: [0x09,0x10,0x30,0xd5]
+// CHECK: mrs x9, teehbr32_el1 // encoding: [0x09,0x10,0x32,0xd5]
+// CHECK: mrs x9, oslsr_el1 // encoding: [0x89,0x11,0x30,0xd5]
+// CHECK: mrs x9, osdlr_el1 // encoding: [0x89,0x13,0x30,0xd5]
+// CHECK: mrs x9, dbgprcr_el1 // encoding: [0x89,0x14,0x30,0xd5]
+// CHECK: mrs x9, dbgclaimset_el1 // encoding: [0xc9,0x78,0x30,0xd5]
+// CHECK: mrs x9, dbgclaimclr_el1 // encoding: [0xc9,0x79,0x30,0xd5]
+// CHECK: mrs x9, dbgauthstatus_el1 // encoding: [0xc9,0x7e,0x30,0xd5]
+// CHECK: mrs x9, midr_el1 // encoding: [0x09,0x00,0x38,0xd5]
+// CHECK: mrs x9, ccsidr_el1 // encoding: [0x09,0x00,0x39,0xd5]
+// CHECK: mrs x9, csselr_el1 // encoding: [0x09,0x00,0x3a,0xd5]
+// CHECK: mrs x9, vpidr_el2 // encoding: [0x09,0x00,0x3c,0xd5]
+// CHECK: mrs x9, clidr_el1 // encoding: [0x29,0x00,0x39,0xd5]
+// CHECK: mrs x9, ctr_el0 // encoding: [0x29,0x00,0x3b,0xd5]
+// CHECK: mrs x9, mpidr_el1 // encoding: [0xa9,0x00,0x38,0xd5]
+// CHECK: mrs x9, vmpidr_el2 // encoding: [0xa9,0x00,0x3c,0xd5]
+// CHECK: mrs x9, revidr_el1 // encoding: [0xc9,0x00,0x38,0xd5]
+// CHECK: mrs x9, aidr_el1 // encoding: [0xe9,0x00,0x39,0xd5]
+// CHECK: mrs x9, dczid_el0 // encoding: [0xe9,0x00,0x3b,0xd5]
+// CHECK: mrs x9, id_pfr0_el1 // encoding: [0x09,0x01,0x38,0xd5]
+// CHECK: mrs x9, id_pfr1_el1 // encoding: [0x29,0x01,0x38,0xd5]
+// CHECK: mrs x9, id_dfr0_el1 // encoding: [0x49,0x01,0x38,0xd5]
+// CHECK: mrs x9, id_afr0_el1 // encoding: [0x69,0x01,0x38,0xd5]
+// CHECK: mrs x9, id_mmfr0_el1 // encoding: [0x89,0x01,0x38,0xd5]
+// CHECK: mrs x9, id_mmfr1_el1 // encoding: [0xa9,0x01,0x38,0xd5]
+// CHECK: mrs x9, id_mmfr2_el1 // encoding: [0xc9,0x01,0x38,0xd5]
+// CHECK: mrs x9, id_mmfr3_el1 // encoding: [0xe9,0x01,0x38,0xd5]
+// CHECK: mrs x9, id_isar0_el1 // encoding: [0x09,0x02,0x38,0xd5]
+// CHECK: mrs x9, id_isar1_el1 // encoding: [0x29,0x02,0x38,0xd5]
+// CHECK: mrs x9, id_isar2_el1 // encoding: [0x49,0x02,0x38,0xd5]
+// CHECK: mrs x9, id_isar3_el1 // encoding: [0x69,0x02,0x38,0xd5]
+// CHECK: mrs x9, id_isar4_el1 // encoding: [0x89,0x02,0x38,0xd5]
+// CHECK: mrs x9, id_isar5_el1 // encoding: [0xa9,0x02,0x38,0xd5]
+// CHECK: mrs x9, mvfr0_el1 // encoding: [0x09,0x03,0x38,0xd5]
+// CHECK: mrs x9, mvfr1_el1 // encoding: [0x29,0x03,0x38,0xd5]
+// CHECK: mrs x9, mvfr2_el1 // encoding: [0x49,0x03,0x38,0xd5]
+// CHECK: mrs x9, id_aa64pfr0_el1 // encoding: [0x09,0x04,0x38,0xd5]
+// CHECK: mrs x9, id_aa64pfr1_el1 // encoding: [0x29,0x04,0x38,0xd5]
+// CHECK: mrs x9, id_aa64dfr0_el1 // encoding: [0x09,0x05,0x38,0xd5]
+// CHECK: mrs x9, id_aa64dfr1_el1 // encoding: [0x29,0x05,0x38,0xd5]
+// CHECK: mrs x9, id_aa64afr0_el1 // encoding: [0x89,0x05,0x38,0xd5]
+// CHECK: mrs x9, id_aa64afr1_el1 // encoding: [0xa9,0x05,0x38,0xd5]
+// CHECK: mrs x9, id_aa64isar0_el1 // encoding: [0x09,0x06,0x38,0xd5]
+// CHECK: mrs x9, id_aa64isar1_el1 // encoding: [0x29,0x06,0x38,0xd5]
+// CHECK: mrs x9, id_aa64mmfr0_el1 // encoding: [0x09,0x07,0x38,0xd5]
+// CHECK: mrs x9, id_aa64mmfr1_el1 // encoding: [0x29,0x07,0x38,0xd5]
+// CHECK: mrs x9, sctlr_el1 // encoding: [0x09,0x10,0x38,0xd5]
+// CHECK: mrs x9, sctlr_el2 // encoding: [0x09,0x10,0x3c,0xd5]
+// CHECK: mrs x9, sctlr_el3 // encoding: [0x09,0x10,0x3e,0xd5]
+// CHECK: mrs x9, actlr_el1 // encoding: [0x29,0x10,0x38,0xd5]
+// CHECK: mrs x9, actlr_el2 // encoding: [0x29,0x10,0x3c,0xd5]
+// CHECK: mrs x9, actlr_el3 // encoding: [0x29,0x10,0x3e,0xd5]
+// CHECK: mrs x9, cpacr_el1 // encoding: [0x49,0x10,0x38,0xd5]
+// CHECK: mrs x9, hcr_el2 // encoding: [0x09,0x11,0x3c,0xd5]
+// CHECK: mrs x9, scr_el3 // encoding: [0x09,0x11,0x3e,0xd5]
+// CHECK: mrs x9, mdcr_el2 // encoding: [0x29,0x11,0x3c,0xd5]
+// CHECK: mrs x9, sder32_el3 // encoding: [0x29,0x11,0x3e,0xd5]
+// CHECK: mrs x9, cptr_el2 // encoding: [0x49,0x11,0x3c,0xd5]
+// CHECK: mrs x9, cptr_el3 // encoding: [0x49,0x11,0x3e,0xd5]
+// CHECK: mrs x9, hstr_el2 // encoding: [0x69,0x11,0x3c,0xd5]
+// CHECK: mrs x9, hacr_el2 // encoding: [0xe9,0x11,0x3c,0xd5]
+// CHECK: mrs x9, mdcr_el3 // encoding: [0x29,0x13,0x3e,0xd5]
+// CHECK: mrs x9, ttbr0_el1 // encoding: [0x09,0x20,0x38,0xd5]
+// CHECK: mrs x9, ttbr0_el2 // encoding: [0x09,0x20,0x3c,0xd5]
+// CHECK: mrs x9, ttbr0_el3 // encoding: [0x09,0x20,0x3e,0xd5]
+// CHECK: mrs x9, ttbr1_el1 // encoding: [0x29,0x20,0x38,0xd5]
+// CHECK: mrs x9, tcr_el1 // encoding: [0x49,0x20,0x38,0xd5]
+// CHECK: mrs x9, tcr_el2 // encoding: [0x49,0x20,0x3c,0xd5]
+// CHECK: mrs x9, tcr_el3 // encoding: [0x49,0x20,0x3e,0xd5]
+// CHECK: mrs x9, vttbr_el2 // encoding: [0x09,0x21,0x3c,0xd5]
+// CHECK: mrs x9, vtcr_el2 // encoding: [0x49,0x21,0x3c,0xd5]
+// CHECK: mrs x9, dacr32_el2 // encoding: [0x09,0x30,0x3c,0xd5]
+// CHECK: mrs x9, spsr_el1 // encoding: [0x09,0x40,0x38,0xd5]
+// CHECK: mrs x9, spsr_el2 // encoding: [0x09,0x40,0x3c,0xd5]
+// CHECK: mrs x9, spsr_el3 // encoding: [0x09,0x40,0x3e,0xd5]
+// CHECK: mrs x9, elr_el1 // encoding: [0x29,0x40,0x38,0xd5]
+// CHECK: mrs x9, elr_el2 // encoding: [0x29,0x40,0x3c,0xd5]
+// CHECK: mrs x9, elr_el3 // encoding: [0x29,0x40,0x3e,0xd5]
+// CHECK: mrs x9, sp_el0 // encoding: [0x09,0x41,0x38,0xd5]
+// CHECK: mrs x9, sp_el1 // encoding: [0x09,0x41,0x3c,0xd5]
+// CHECK: mrs x9, sp_el2 // encoding: [0x09,0x41,0x3e,0xd5]
+// CHECK: mrs x9, spsel // encoding: [0x09,0x42,0x38,0xd5]
+// CHECK: mrs x9, nzcv // encoding: [0x09,0x42,0x3b,0xd5]
+// CHECK: mrs x9, daif // encoding: [0x29,0x42,0x3b,0xd5]
+// CHECK: mrs x9, currentel // encoding: [0x49,0x42,0x38,0xd5]
+// CHECK: mrs x9, spsr_irq // encoding: [0x09,0x43,0x3c,0xd5]
+// CHECK: mrs x9, spsr_abt // encoding: [0x29,0x43,0x3c,0xd5]
+// CHECK: mrs x9, spsr_und // encoding: [0x49,0x43,0x3c,0xd5]
+// CHECK: mrs x9, spsr_fiq // encoding: [0x69,0x43,0x3c,0xd5]
+// CHECK: mrs x9, fpcr // encoding: [0x09,0x44,0x3b,0xd5]
+// CHECK: mrs x9, fpsr // encoding: [0x29,0x44,0x3b,0xd5]
+// CHECK: mrs x9, dspsr_el0 // encoding: [0x09,0x45,0x3b,0xd5]
+// CHECK: mrs x9, dlr_el0 // encoding: [0x29,0x45,0x3b,0xd5]
+// CHECK: mrs x9, ifsr32_el2 // encoding: [0x29,0x50,0x3c,0xd5]
+// CHECK: mrs x9, afsr0_el1 // encoding: [0x09,0x51,0x38,0xd5]
+// CHECK: mrs x9, afsr0_el2 // encoding: [0x09,0x51,0x3c,0xd5]
+// CHECK: mrs x9, afsr0_el3 // encoding: [0x09,0x51,0x3e,0xd5]
+// CHECK: mrs x9, afsr1_el1 // encoding: [0x29,0x51,0x38,0xd5]
+// CHECK: mrs x9, afsr1_el2 // encoding: [0x29,0x51,0x3c,0xd5]
+// CHECK: mrs x9, afsr1_el3 // encoding: [0x29,0x51,0x3e,0xd5]
+// CHECK: mrs x9, esr_el1 // encoding: [0x09,0x52,0x38,0xd5]
+// CHECK: mrs x9, esr_el2 // encoding: [0x09,0x52,0x3c,0xd5]
+// CHECK: mrs x9, esr_el3 // encoding: [0x09,0x52,0x3e,0xd5]
+// CHECK: mrs x9, fpexc32_el2 // encoding: [0x09,0x53,0x3c,0xd5]
+// CHECK: mrs x9, far_el1 // encoding: [0x09,0x60,0x38,0xd5]
+// CHECK: mrs x9, far_el2 // encoding: [0x09,0x60,0x3c,0xd5]
+// CHECK: mrs x9, far_el3 // encoding: [0x09,0x60,0x3e,0xd5]
+// CHECK: mrs x9, hpfar_el2 // encoding: [0x89,0x60,0x3c,0xd5]
+// CHECK: mrs x9, par_el1 // encoding: [0x09,0x74,0x38,0xd5]
+// CHECK: mrs x9, pmcr_el0 // encoding: [0x09,0x9c,0x3b,0xd5]
+// CHECK: mrs x9, pmcntenset_el0 // encoding: [0x29,0x9c,0x3b,0xd5]
+// CHECK: mrs x9, pmcntenclr_el0 // encoding: [0x49,0x9c,0x3b,0xd5]
+// CHECK: mrs x9, pmovsclr_el0 // encoding: [0x69,0x9c,0x3b,0xd5]
+// CHECK: mrs x9, pmselr_el0 // encoding: [0xa9,0x9c,0x3b,0xd5]
+// CHECK: mrs x9, pmceid0_el0 // encoding: [0xc9,0x9c,0x3b,0xd5]
+// CHECK: mrs x9, pmceid1_el0 // encoding: [0xe9,0x9c,0x3b,0xd5]
+// CHECK: mrs x9, pmccntr_el0 // encoding: [0x09,0x9d,0x3b,0xd5]
+// CHECK: mrs x9, pmxevtyper_el0 // encoding: [0x29,0x9d,0x3b,0xd5]
+// CHECK: mrs x9, pmxevcntr_el0 // encoding: [0x49,0x9d,0x3b,0xd5]
+// CHECK: mrs x9, pmuserenr_el0 // encoding: [0x09,0x9e,0x3b,0xd5]
+// CHECK: mrs x9, pmintenset_el1 // encoding: [0x29,0x9e,0x38,0xd5]
+// CHECK: mrs x9, pmintenclr_el1 // encoding: [0x49,0x9e,0x38,0xd5]
+// CHECK: mrs x9, pmovsset_el0 // encoding: [0x69,0x9e,0x3b,0xd5]
+// CHECK: mrs x9, mair_el1 // encoding: [0x09,0xa2,0x38,0xd5]
+// CHECK: mrs x9, mair_el2 // encoding: [0x09,0xa2,0x3c,0xd5]
+// CHECK: mrs x9, mair_el3 // encoding: [0x09,0xa2,0x3e,0xd5]
+// CHECK: mrs x9, amair_el1 // encoding: [0x09,0xa3,0x38,0xd5]
+// CHECK: mrs x9, amair_el2 // encoding: [0x09,0xa3,0x3c,0xd5]
+// CHECK: mrs x9, amair_el3 // encoding: [0x09,0xa3,0x3e,0xd5]
+// CHECK: mrs x9, vbar_el1 // encoding: [0x09,0xc0,0x38,0xd5]
+// CHECK: mrs x9, vbar_el2 // encoding: [0x09,0xc0,0x3c,0xd5]
+// CHECK: mrs x9, vbar_el3 // encoding: [0x09,0xc0,0x3e,0xd5]
+// CHECK: mrs x9, rvbar_el1 // encoding: [0x29,0xc0,0x38,0xd5]
+// CHECK: mrs x9, rvbar_el2 // encoding: [0x29,0xc0,0x3c,0xd5]
+// CHECK: mrs x9, rvbar_el3 // encoding: [0x29,0xc0,0x3e,0xd5]
+// CHECK: mrs x9, rmr_el1 // encoding: [0x49,0xc0,0x38,0xd5]
+// CHECK: mrs x9, rmr_el2 // encoding: [0x49,0xc0,0x3c,0xd5]
+// CHECK: mrs x9, rmr_el3 // encoding: [0x49,0xc0,0x3e,0xd5]
+// CHECK: mrs x9, isr_el1 // encoding: [0x09,0xc1,0x38,0xd5]
+// CHECK: mrs x9, contextidr_el1 // encoding: [0x29,0xd0,0x38,0xd5]
+// CHECK: mrs x9, tpidr_el0 // encoding: [0x49,0xd0,0x3b,0xd5]
+// CHECK: mrs x9, tpidr_el2 // encoding: [0x49,0xd0,0x3c,0xd5]
+// CHECK: mrs x9, tpidr_el3 // encoding: [0x49,0xd0,0x3e,0xd5]
+// CHECK: mrs x9, tpidrro_el0 // encoding: [0x69,0xd0,0x3b,0xd5]
+// CHECK: mrs x9, tpidr_el1 // encoding: [0x89,0xd0,0x38,0xd5]
+// CHECK: mrs x9, cntfrq_el0 // encoding: [0x09,0xe0,0x3b,0xd5]
+// CHECK: mrs x9, cntpct_el0 // encoding: [0x29,0xe0,0x3b,0xd5]
+// CHECK: mrs x9, cntvct_el0 // encoding: [0x49,0xe0,0x3b,0xd5]
+// CHECK: mrs x9, cntvoff_el2 // encoding: [0x69,0xe0,0x3c,0xd5]
+// CHECK: mrs x9, cntkctl_el1 // encoding: [0x09,0xe1,0x38,0xd5]
+// CHECK: mrs x9, cnthctl_el2 // encoding: [0x09,0xe1,0x3c,0xd5]
+// CHECK: mrs x9, cntp_tval_el0 // encoding: [0x09,0xe2,0x3b,0xd5]
+// CHECK: mrs x9, cnthp_tval_el2 // encoding: [0x09,0xe2,0x3c,0xd5]
+// CHECK: mrs x9, cntps_tval_el1 // encoding: [0x09,0xe2,0x3f,0xd5]
+// CHECK: mrs x9, cntp_ctl_el0 // encoding: [0x29,0xe2,0x3b,0xd5]
+// CHECK: mrs x9, cnthp_ctl_el2 // encoding: [0x29,0xe2,0x3c,0xd5]
+// CHECK: mrs x9, cntps_ctl_el1 // encoding: [0x29,0xe2,0x3f,0xd5]
+// CHECK: mrs x9, cntp_cval_el0 // encoding: [0x49,0xe2,0x3b,0xd5]
+// CHECK: mrs x9, cnthp_cval_el2 // encoding: [0x49,0xe2,0x3c,0xd5]
+// CHECK: mrs x9, cntps_cval_el1 // encoding: [0x49,0xe2,0x3f,0xd5]
+// CHECK: mrs x9, cntv_tval_el0 // encoding: [0x09,0xe3,0x3b,0xd5]
+// CHECK: mrs x9, cntv_ctl_el0 // encoding: [0x29,0xe3,0x3b,0xd5]
+// CHECK: mrs x9, cntv_cval_el0 // encoding: [0x49,0xe3,0x3b,0xd5]
+// CHECK: mrs x9, pmevcntr0_el0 // encoding: [0x09,0xe8,0x3b,0xd5]
+// CHECK: mrs x9, pmevcntr1_el0 // encoding: [0x29,0xe8,0x3b,0xd5]
+// CHECK: mrs x9, pmevcntr2_el0 // encoding: [0x49,0xe8,0x3b,0xd5]
+// CHECK: mrs x9, pmevcntr3_el0 // encoding: [0x69,0xe8,0x3b,0xd5]
+// CHECK: mrs x9, pmevcntr4_el0 // encoding: [0x89,0xe8,0x3b,0xd5]
+// CHECK: mrs x9, pmevcntr5_el0 // encoding: [0xa9,0xe8,0x3b,0xd5]
+// CHECK: mrs x9, pmevcntr6_el0 // encoding: [0xc9,0xe8,0x3b,0xd5]
+// CHECK: mrs x9, pmevcntr7_el0 // encoding: [0xe9,0xe8,0x3b,0xd5]
+// CHECK: mrs x9, pmevcntr8_el0 // encoding: [0x09,0xe9,0x3b,0xd5]
+// CHECK: mrs x9, pmevcntr9_el0 // encoding: [0x29,0xe9,0x3b,0xd5]
+// CHECK: mrs x9, pmevcntr10_el0 // encoding: [0x49,0xe9,0x3b,0xd5]
+// CHECK: mrs x9, pmevcntr11_el0 // encoding: [0x69,0xe9,0x3b,0xd5]
+// CHECK: mrs x9, pmevcntr12_el0 // encoding: [0x89,0xe9,0x3b,0xd5]
+// CHECK: mrs x9, pmevcntr13_el0 // encoding: [0xa9,0xe9,0x3b,0xd5]
+// CHECK: mrs x9, pmevcntr14_el0 // encoding: [0xc9,0xe9,0x3b,0xd5]
+// CHECK: mrs x9, pmevcntr15_el0 // encoding: [0xe9,0xe9,0x3b,0xd5]
+// CHECK: mrs x9, pmevcntr16_el0 // encoding: [0x09,0xea,0x3b,0xd5]
+// CHECK: mrs x9, pmevcntr17_el0 // encoding: [0x29,0xea,0x3b,0xd5]
+// CHECK: mrs x9, pmevcntr18_el0 // encoding: [0x49,0xea,0x3b,0xd5]
+// CHECK: mrs x9, pmevcntr19_el0 // encoding: [0x69,0xea,0x3b,0xd5]
+// CHECK: mrs x9, pmevcntr20_el0 // encoding: [0x89,0xea,0x3b,0xd5]
+// CHECK: mrs x9, pmevcntr21_el0 // encoding: [0xa9,0xea,0x3b,0xd5]
+// CHECK: mrs x9, pmevcntr22_el0 // encoding: [0xc9,0xea,0x3b,0xd5]
+// CHECK: mrs x9, pmevcntr23_el0 // encoding: [0xe9,0xea,0x3b,0xd5]
+// CHECK: mrs x9, pmevcntr24_el0 // encoding: [0x09,0xeb,0x3b,0xd5]
+// CHECK: mrs x9, pmevcntr25_el0 // encoding: [0x29,0xeb,0x3b,0xd5]
+// CHECK: mrs x9, pmevcntr26_el0 // encoding: [0x49,0xeb,0x3b,0xd5]
+// CHECK: mrs x9, pmevcntr27_el0 // encoding: [0x69,0xeb,0x3b,0xd5]
+// CHECK: mrs x9, pmevcntr28_el0 // encoding: [0x89,0xeb,0x3b,0xd5]
+// CHECK: mrs x9, pmevcntr29_el0 // encoding: [0xa9,0xeb,0x3b,0xd5]
+// CHECK: mrs x9, pmevcntr30_el0 // encoding: [0xc9,0xeb,0x3b,0xd5]
+// CHECK: mrs x9, pmccfiltr_el0 // encoding: [0xe9,0xef,0x3b,0xd5]
+// CHECK: mrs x9, pmevtyper0_el0 // encoding: [0x09,0xec,0x3b,0xd5]
+// CHECK: mrs x9, pmevtyper1_el0 // encoding: [0x29,0xec,0x3b,0xd5]
+// CHECK: mrs x9, pmevtyper2_el0 // encoding: [0x49,0xec,0x3b,0xd5]
+// CHECK: mrs x9, pmevtyper3_el0 // encoding: [0x69,0xec,0x3b,0xd5]
+// CHECK: mrs x9, pmevtyper4_el0 // encoding: [0x89,0xec,0x3b,0xd5]
+// CHECK: mrs x9, pmevtyper5_el0 // encoding: [0xa9,0xec,0x3b,0xd5]
+// CHECK: mrs x9, pmevtyper6_el0 // encoding: [0xc9,0xec,0x3b,0xd5]
+// CHECK: mrs x9, pmevtyper7_el0 // encoding: [0xe9,0xec,0x3b,0xd5]
+// CHECK: mrs x9, pmevtyper8_el0 // encoding: [0x09,0xed,0x3b,0xd5]
+// CHECK: mrs x9, pmevtyper9_el0 // encoding: [0x29,0xed,0x3b,0xd5]
+// CHECK: mrs x9, pmevtyper10_el0 // encoding: [0x49,0xed,0x3b,0xd5]
+// CHECK: mrs x9, pmevtyper11_el0 // encoding: [0x69,0xed,0x3b,0xd5]
+// CHECK: mrs x9, pmevtyper12_el0 // encoding: [0x89,0xed,0x3b,0xd5]
+// CHECK: mrs x9, pmevtyper13_el0 // encoding: [0xa9,0xed,0x3b,0xd5]
+// CHECK: mrs x9, pmevtyper14_el0 // encoding: [0xc9,0xed,0x3b,0xd5]
+// CHECK: mrs x9, pmevtyper15_el0 // encoding: [0xe9,0xed,0x3b,0xd5]
+// CHECK: mrs x9, pmevtyper16_el0 // encoding: [0x09,0xee,0x3b,0xd5]
+// CHECK: mrs x9, pmevtyper17_el0 // encoding: [0x29,0xee,0x3b,0xd5]
+// CHECK: mrs x9, pmevtyper18_el0 // encoding: [0x49,0xee,0x3b,0xd5]
+// CHECK: mrs x9, pmevtyper19_el0 // encoding: [0x69,0xee,0x3b,0xd5]
+// CHECK: mrs x9, pmevtyper20_el0 // encoding: [0x89,0xee,0x3b,0xd5]
+// CHECK: mrs x9, pmevtyper21_el0 // encoding: [0xa9,0xee,0x3b,0xd5]
+// CHECK: mrs x9, pmevtyper22_el0 // encoding: [0xc9,0xee,0x3b,0xd5]
+// CHECK: mrs x9, pmevtyper23_el0 // encoding: [0xe9,0xee,0x3b,0xd5]
+// CHECK: mrs x9, pmevtyper24_el0 // encoding: [0x09,0xef,0x3b,0xd5]
+// CHECK: mrs x9, pmevtyper25_el0 // encoding: [0x29,0xef,0x3b,0xd5]
+// CHECK: mrs x9, pmevtyper26_el0 // encoding: [0x49,0xef,0x3b,0xd5]
+// CHECK: mrs x9, pmevtyper27_el0 // encoding: [0x69,0xef,0x3b,0xd5]
+// CHECK: mrs x9, pmevtyper28_el0 // encoding: [0x89,0xef,0x3b,0xd5]
+// CHECK: mrs x9, pmevtyper29_el0 // encoding: [0xa9,0xef,0x3b,0xd5]
+// CHECK: mrs x9, pmevtyper30_el0 // encoding: [0xc9,0xef,0x3b,0xd5]
+
+ mrs x12, s3_7_c15_c1_5
+ mrs x13, s3_2_c11_c15_7
+ msr s3_0_c15_c0_0, x12
+ msr s3_7_c11_c13_7, x5
+// CHECK: mrs x12, s3_7_c15_c1_5 // encoding: [0xac,0xf1,0x3f,0xd5]
+// CHECK: mrs x13, s3_2_c11_c15_7 // encoding: [0xed,0xbf,0x3a,0xd5]
+// CHECK: msr s3_0_c15_c0_0, x12 // encoding: [0x0c,0xf0,0x18,0xd5]
+// CHECK: msr s3_7_c11_c13_7, x5 // encoding: [0xe5,0xbd,0x1f,0xd5]
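+// System registers without a named alias can be written using the generic
+// s<op0>_<op1>_c<Cn>_c<Cm>_<op2> syntax, as the four lines above demonstrate.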
+
+//------------------------------------------------------------------------------
+// Test & branch (immediate)
+//------------------------------------------------------------------------------
+
+ tbz x5, #0, somewhere
+ tbz xzr, #63, elsewhere
+ tbnz x5, #45, nowhere
+// CHECK: tbz x5, #0, somewhere // encoding: [0x05'A',A,A,0x36'A']
+// CHECK: // fixup A - offset: 0, value: somewhere, kind: fixup_a64_tstbr
+// CHECK: tbz xzr, #63, elsewhere // encoding: [0x1f'A',A,0xf8'A',0xb6'A']
+// CHECK: // fixup A - offset: 0, value: elsewhere, kind: fixup_a64_tstbr
+// CHECK: tbnz x5, #45, nowhere // encoding: [0x05'A',A,0x68'A',0xb7'A']
+// CHECK: // fixup A - offset: 0, value: nowhere, kind: fixup_a64_tstbr
+
+ tbnz w3, #2, there
+ tbnz wzr, #31, nowhere
+ tbz w5, #12, anywhere
+// CHECK: tbnz w3, #2, there // encoding: [0x03'A',A,0x10'A',0x37'A']
+// CHECK: // fixup A - offset: 0, value: there, kind: fixup_a64_tstbr
+// CHECK: tbnz wzr, #31, nowhere // encoding: [0x1f'A',A,0xf8'A',0x37'A']
+// CHECK: // fixup A - offset: 0, value: nowhere, kind: fixup_a64_tstbr
+// CHECK: tbz w5, #12, anywhere // encoding: [0x05'A',A,0x60'A',0x36'A']
+// CHECK: // fixup A - offset: 0, value: anywhere, kind: fixup_a64_tstbr
+
+//------------------------------------------------------------------------------
+// Unconditional branch (immediate)
+//------------------------------------------------------------------------------
+
+ b somewhere
+ bl elsewhere
+// CHECK: b somewhere // encoding: [A,A,A,0x14'A']
+// CHECK: // fixup A - offset: 0, value: somewhere, kind: fixup_a64_uncondbr
+// CHECK: bl elsewhere // encoding: [A,A,A,0x94'A']
+// CHECK: // fixup A - offset: 0, value: elsewhere, kind: fixup_a64_call
+
+ b #4
+ bl #0
+ b #134217724
+ bl #-134217728
+// CHECK: b #4 // encoding: [0x01,0x00,0x00,0x14]
+// CHECK: bl #0 // encoding: [0x00,0x00,0x00,0x94]
+// CHECK: b #134217724 // encoding: [0xff,0xff,0xff,0x15]
+// CHECK: bl #-134217728 // encoding: [0x00,0x00,0x00,0x96]
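+// These offsets are the extremes of the +/-128MiB range representable in the
+// 26-bit, word-scaled branch immediate.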
+
+//------------------------------------------------------------------------------
+// Unconditional branch (register)
+//------------------------------------------------------------------------------
+
+ br x20
+ blr xzr
+ ret x10
+// CHECK: br x20 // encoding: [0x80,0x02,0x1f,0xd6]
+// CHECK: blr xzr // encoding: [0xe0,0x03,0x3f,0xd6]
+// CHECK: ret x10 // encoding: [0x40,0x01,0x5f,0xd6]
+
+ ret
+ eret
+ drps
+// CHECK: ret // encoding: [0xc0,0x03,0x5f,0xd6]
+// CHECK: eret // encoding: [0xe0,0x03,0x9f,0xd6]
+// CHECK: drps // encoding: [0xe0,0x03,0xbf,0xd6]
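+// A bare ret returns through x30 (the link register), as its Rn field above shows.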
+
diff --git a/test/MC/AArch64/elf-globaladdress.ll b/test/MC/AArch64/elf-globaladdress.ll
new file mode 100644
index 0000000000..3ca361d9f2
--- /dev/null
+++ b/test/MC/AArch64/elf-globaladdress.ll
@@ -0,0 +1,111 @@
+;; RUN: llc -march=aarch64 -filetype=obj %s -o - | \
+;; RUN: elf-dump | FileCheck -check-prefix=OBJ %s
+
+; Also take it on a round-trip through llvm-mc to stretch assembly-parsing's legs:
+;; RUN: llc -march=aarch64 %s -o - | \
+;; RUN: llvm-mc -arch=aarch64 -filetype=obj -o - | \
+;; RUN: elf-dump | FileCheck -check-prefix=OBJ %s
+
+@var8 = global i8 0
+@var16 = global i16 0
+@var32 = global i32 0
+@var64 = global i64 0
+
+define void @loadstore() {
+ %val8 = load i8* @var8
+ store volatile i8 %val8, i8* @var8
+
+ %val16 = load i16* @var16
+ store volatile i16 %val16, i16* @var16
+
+ %val32 = load i32* @var32
+ store volatile i32 %val32, i32* @var32
+
+ %val64 = load i64* @var64
+ store volatile i64 %val64, i64* @var64
+
+ ret void
+}
+
+@globaddr = global i64* null
+
+define void @address() {
+ store i64* @var64, i64** @globaddr
+ ret void
+}
+
+; Check we're using EM_AARCH64
+; OBJ: 'e_machine', 0x00b7
+
+; OBJ: .rela.text
+
+; var8
+; R_AARCH64_ADR_PREL_PG_HI21 against var8
+; OBJ: 'r_sym', 0x0000000f
+; OBJ-NEXT: 'r_type', 0x00000113
+
+; R_AARCH64_LDST8_ABS_LO12_NC against var8
+; OBJ: 'r_sym', 0x0000000f
+; OBJ-NEXT: 'r_type', 0x00000116
+
+
+; var16
+; R_AARCH64_ADR_PREL_PG_HI21 against var16
+; OBJ: 'r_sym', 0x0000000c
+; OBJ-NEXT: 'r_type', 0x00000113
+
+; R_AARCH64_LDST16_ABS_LO12_NC against var16
+; OBJ: 'r_sym', 0x0000000c
+; OBJ-NEXT: 'r_type', 0x0000011c
+
+
+; var32
+; R_AARCH64_ADR_PREL_PG_HI21 against var32
+; OBJ: 'r_sym', 0x0000000d
+; OBJ-NEXT: 'r_type', 0x00000113
+
+; R_AARCH64_LDST32_ABS_LO12_NC against var32
+; OBJ: 'r_sym', 0x0000000d
+; OBJ-NEXT: 'r_type', 0x0000011d
+
+
+; var64
+; R_AARCH64_ADR_PREL_PG_HI21 against var64
+; OBJ: 'r_sym', 0x0000000e
+; OBJ-NEXT: 'r_type', 0x00000113
+
+; R_AARCH64_LDST64_ABS_LO12_NC against var64
+; OBJ: 'r_sym', 0x0000000e
+; OBJ-NEXT: 'r_type', 0x0000011e
+
+; This is on the store, so not really important, but it stops the next
+; match working.
+; R_AARCH64_LDST64_ABS_LO12_NC against var64
+; OBJ: 'r_sym', 0x0000000e
+; OBJ-NEXT: 'r_type', 0x0000011e
+
+
+; Pure address-calculation against var64
+; R_AARCH64_ADR_PREL_PG_HI21 against var64
+; OBJ: 'r_sym', 0x0000000e
+; OBJ-NEXT: 'r_type', 0x00000113
+
+; R_AARCH64_ADD_ABS_LO12_NC against var64
+; OBJ: 'r_sym', 0x0000000e
+; OBJ-NEXT: 'r_type', 0x00000115
+
+
+; Make sure the symbols don't move around, otherwise relocation info
+; will be wrong:
+
+; OBJ: Symbol 12
+; OBJ-NEXT: var16
+
+; OBJ: Symbol 13
+; OBJ-NEXT: var32
+
+; OBJ: Symbol 14
+; OBJ-NEXT: var64
+
+; OBJ: Symbol 15
+; OBJ-NEXT: var8
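The relocation pairs checked above follow from how AArch64 materialises a global's address: ADRP supplies the symbol's 4 KiB page (the ADR_PREL_PG_HI21 relocation) and the dependent load/store or ADD supplies the low 12 bits (an *_LO12_NC relocation). A minimal Python sketch of that split, illustrative only and not part of the patch:

    def split_address(sym, pc):
        # Page delta patched into the ADRP, and the low 12 bits patched into
        # the dependent load/store or ADD via its :lo12: relocation.
        hi21 = (sym & ~0xFFF) - (pc & ~0xFFF)
        lo12 = sym & 0xFFF
        return hi21, lo12

    def rebuild(pc, hi21, lo12):
        return (pc & ~0xFFF) + hi21 + lo12

    pc, var = 0x400123, 0x412345
    assert rebuild(pc, *split_address(var, pc)) == var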
diff --git a/test/MC/AArch64/elf-objdump.s b/test/MC/AArch64/elf-objdump.s
new file mode 100644
index 0000000000..c5aa5b1989
--- /dev/null
+++ b/test/MC/AArch64/elf-objdump.s
@@ -0,0 +1,5 @@
+// 64 bit little endian
+// RUN: llvm-mc -filetype=obj -arch=aarch64 -triple aarch64-none-linux-gnu %s -o - | llvm-objdump -d
+
+// We just want to see if llvm-objdump works at all.
+// CHECK: .text
diff --git a/test/MC/AArch64/elf-reloc-addsubimm.s b/test/MC/AArch64/elf-reloc-addsubimm.s
new file mode 100644
index 0000000000..7fa6e90b5d
--- /dev/null
+++ b/test/MC/AArch64/elf-reloc-addsubimm.s
@@ -0,0 +1,13 @@
+// RUN: llvm-mc -arch=aarch64 -filetype=obj %s -o - | \
+// RUN: elf-dump | FileCheck -check-prefix=OBJ %s
+
+ add x2, x3, #:lo12:some_label
+// OBJ: .rela.text
+
+// OBJ: 'r_offset', 0x0000000000000000
+// OBJ-NEXT: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x00000115
+
+// OBJ: .symtab
+// OBJ: Symbol 5
+// OBJ-NEXT: some_label \ No newline at end of file
diff --git a/test/MC/AArch64/elf-reloc-condbr.s b/test/MC/AArch64/elf-reloc-condbr.s
new file mode 100644
index 0000000000..283d3b95d0
--- /dev/null
+++ b/test/MC/AArch64/elf-reloc-condbr.s
@@ -0,0 +1,13 @@
+// RUN: llvm-mc -arch=aarch64 -filetype=obj %s -o - | \
+// RUN: elf-dump | FileCheck -check-prefix=OBJ %s
+
+ b.eq somewhere
+// OBJ: .rela.text
+
+// OBJ: 'r_offset', 0x0000000000000000
+// OBJ-NEXT: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x00000118
+
+// OBJ: .symtab
+// OBJ: Symbol 5
+// OBJ-NEXT: somewhere \ No newline at end of file
diff --git a/test/MC/AArch64/elf-reloc-ldrlit.s b/test/MC/AArch64/elf-reloc-ldrlit.s
new file mode 100644
index 0000000000..ce9ff49db4
--- /dev/null
+++ b/test/MC/AArch64/elf-reloc-ldrlit.s
@@ -0,0 +1,28 @@
+// RUN: llvm-mc -arch=aarch64 -filetype=obj %s -o - | \
+// RUN: elf-dump | FileCheck -check-prefix=OBJ %s
+
+ ldr x0, some_label
+ ldr w3, some_label
+ ldrsw x9, some_label
+ prfm pldl3keep, some_label
+// OBJ: .rela.text
+
+// OBJ: 'r_offset', 0x0000000000000000
+// OBJ: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x00000111
+
+// OBJ: 'r_offset', 0x0000000000000004
+// OBJ: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x00000111
+
+// OBJ: 'r_offset', 0x0000000000000008
+// OBJ: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x00000111
+
+// OBJ: 'r_offset', 0x000000000000000c
+// OBJ: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x00000111
+
+// OBJ: .symtab
+// OBJ: Symbol 5
+// OBJ-NEXT: some_label \ No newline at end of file
diff --git a/test/MC/AArch64/elf-reloc-ldstunsimm.s b/test/MC/AArch64/elf-reloc-ldstunsimm.s
new file mode 100644
index 0000000000..345fc8247d
--- /dev/null
+++ b/test/MC/AArch64/elf-reloc-ldstunsimm.s
@@ -0,0 +1,34 @@
+// RUN: llvm-mc -arch=aarch64 -filetype=obj %s -o - | \
+// RUN: elf-dump | FileCheck -check-prefix=OBJ %s
+
+ ldrb w0, [sp, #:lo12:some_label]
+ ldrh w0, [sp, #:lo12:some_label]
+ ldr w0, [sp, #:lo12:some_label]
+ ldr x0, [sp, #:lo12:some_label]
+ str q0, [sp, #:lo12:some_label]
+
+// OBJ: .rela.text
+
+// OBJ: 'r_offset', 0x0000000000000000
+// OBJ-NEXT: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x00000116
+
+// OBJ: 'r_offset', 0x0000000000000004
+// OBJ-NEXT: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x0000011c
+
+// OBJ: 'r_offset', 0x0000000000000008
+// OBJ-NEXT: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x0000011d
+
+// OBJ: 'r_offset', 0x000000000000000c
+// OBJ-NEXT: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x0000011e
+
+// OBJ: 'r_offset', 0x0000000000000010
+// OBJ-NEXT: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x0000012b
+
+// OBJ: .symtab
+// OBJ: Symbol 5
+// OBJ-NEXT: some_label
diff --git a/test/MC/AArch64/elf-reloc-movw.s b/test/MC/AArch64/elf-reloc-movw.s
new file mode 100644
index 0000000000..cb7dc6768e
--- /dev/null
+++ b/test/MC/AArch64/elf-reloc-movw.s
@@ -0,0 +1,98 @@
+// RUN: llvm-mc -arch=aarch64 -filetype=obj %s -o - | \
+// RUN: elf-dump | FileCheck -check-prefix=OBJ %s
+
+ movz x0, #:abs_g0:some_label
+ movk x0, #:abs_g0_nc:some_label
+
+ movz x3, #:abs_g1:some_label
+ movk x5, #:abs_g1_nc:some_label
+
+ movz x3, #:abs_g2:some_label
+ movk x5, #:abs_g2_nc:some_label
+
+ movz x7, #:abs_g3:some_label
+ movk x11, #:abs_g3:some_label
+
+ movz x13, #:abs_g0_s:some_label
+ movn x17, #:abs_g0_s:some_label
+
+ movz x19, #:abs_g1_s:some_label
+ movn x19, #:abs_g1_s:some_label
+
+ movz x19, #:abs_g2_s:some_label
+ movn x19, #:abs_g2_s:some_label
+// OBJ: .rela.text
+
+// :abs_g0: => R_AARCH64_MOVW_UABS_G0
+// OBJ: 'r_offset', 0x0000000000000000
+// OBJ: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x00000107
+
+// :abs_g0_nc: => R_AARCH64_MOVW_UABS_G0_NC
+// OBJ: 'r_offset', 0x0000000000000004
+// OBJ: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x00000108
+
+// :abs_g1: => R_AARCH64_MOVW_UABS_G1
+// OBJ: 'r_offset', 0x0000000000000008
+// OBJ: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x00000109
+
+// :abs_g1_nc: => R_AARCH64_MOVW_UABS_G1_NC
+// OBJ: 'r_offset', 0x000000000000000c
+// OBJ: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x0000010a
+
+// :abs_g2: => R_AARCH64_MOVW_UABS_G2
+// OBJ: 'r_offset', 0x0000000000000010
+// OBJ: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x0000010b
+
+// :abs_g2_nc: => R_AARCH64_MOVW_UABS_G2_NC
+// OBJ: 'r_offset', 0x0000000000000014
+// OBJ: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x0000010c
+
+// :abs_g3: => R_AARCH64_MOVW_UABS_G3
+// OBJ: 'r_offset', 0x0000000000000018
+// OBJ: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x0000010d
+
+// :abs_g3: => R_AARCH64_MOVW_UABS_G3
+// OBJ: 'r_offset', 0x000000000000001c
+// OBJ: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x0000010d
+
+// :abs_g0_s: => R_AARCH64_MOVW_SABS_G0
+// OBJ: 'r_offset', 0x0000000000000020
+// OBJ: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x0000010e
+
+// :abs_g0_s: => R_AARCH64_MOVW_SABS_G0
+// OBJ: 'r_offset', 0x0000000000000024
+// OBJ: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x0000010e
+
+// :abs_g1_s: => R_AARCH64_MOVW_SABS_G1
+// OBJ: 'r_offset', 0x0000000000000028
+// OBJ: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x0000010f
+
+// :abs_g1_s: => R_AARCH64_MOVW_SABS_G1
+// OBJ: 'r_offset', 0x000000000000002c
+// OBJ: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x0000010f
+
+// :abs_g2_s: => R_AARCH64_MOVW_SABS_G2
+// OBJ: 'r_offset', 0x0000000000000030
+// OBJ: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x00000110
+
+// :abs_g2_s: => R_AARCH64_MOVW_SABS_G2
+// OBJ: 'r_offset', 0x0000000000000034
+// OBJ: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x00000110
+
+// OBJ: .symtab
+// OBJ: Symbol 5
+// OBJ-NEXT: some_label
diff --git a/test/MC/AArch64/elf-reloc-pcreladdressing.s b/test/MC/AArch64/elf-reloc-pcreladdressing.s
new file mode 100644
index 0000000000..39a8ba9402
--- /dev/null
+++ b/test/MC/AArch64/elf-reloc-pcreladdressing.s
@@ -0,0 +1,29 @@
+// RUN: llvm-mc -arch=aarch64 -filetype=obj %s -o - | \
+// RUN: elf-dump | FileCheck -check-prefix=OBJ %s
+
+ adr x2, some_label
+ adrp x5, some_label
+
+ adrp x5, :got:some_label
+ ldr x0, [x5, #:got_lo12:some_label]
+// OBJ: .rela.text
+
+// OBJ: 'r_offset', 0x0000000000000000
+// OBJ-NEXT: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x00000112
+
+// OBJ: 'r_offset', 0x0000000000000004
+// OBJ-NEXT: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x00000113
+
+// OBJ: 'r_offset', 0x0000000000000008
+// OBJ-NEXT: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x00000137
+
+// OBJ: 'r_offset', 0x000000000000000c
+// OBJ-NEXT: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x00000138
+
+// OBJ: .symtab
+// OBJ: Symbol 5
+// OBJ-NEXT: some_label \ No newline at end of file
diff --git a/test/MC/AArch64/elf-reloc-tstb.s b/test/MC/AArch64/elf-reloc-tstb.s
new file mode 100644
index 0000000000..c5e2981a22
--- /dev/null
+++ b/test/MC/AArch64/elf-reloc-tstb.s
@@ -0,0 +1,18 @@
+// RUN: llvm-mc -arch=aarch64 -filetype=obj %s -o - | \
+// RUN: elf-dump | FileCheck -check-prefix=OBJ %s
+
+ tbz x6, #45, somewhere
+ tbnz w3, #15, somewhere
+// OBJ: .rela.text
+
+// OBJ: 'r_offset', 0x0000000000000000
+// OBJ-NEXT: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x00000117
+
+// OBJ: 'r_offset', 0x0000000000000004
+// OBJ-NEXT: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x00000117
+
+// OBJ: .symtab
+// OBJ: Symbol 5
+// OBJ-NEXT: somewhere
diff --git a/test/MC/AArch64/elf-reloc-uncondbrimm.s b/test/MC/AArch64/elf-reloc-uncondbrimm.s
new file mode 100644
index 0000000000..0e97bc6669
--- /dev/null
+++ b/test/MC/AArch64/elf-reloc-uncondbrimm.s
@@ -0,0 +1,18 @@
+// RUN: llvm-mc -arch=aarch64 -filetype=obj %s -o - | \
+// RUN: elf-dump | FileCheck -check-prefix=OBJ %s
+
+ b somewhere
+ bl somewhere
+// OBJ: .rela.text
+
+// OBJ: 'r_offset', 0x0000000000000000
+// OBJ-NEXT: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x0000011a
+
+// OBJ: 'r_offset', 0x0000000000000004
+// OBJ-NEXT: 'r_sym', 0x00000005
+// OBJ-NEXT: 'r_type', 0x0000011b
+
+// OBJ: .symtab
+// OBJ: Symbol 5
+// OBJ-NEXT: somewhere \ No newline at end of file
diff --git a/test/MC/AArch64/lit.local.cfg b/test/MC/AArch64/lit.local.cfg
new file mode 100644
index 0000000000..cc02173c8e
--- /dev/null
+++ b/test/MC/AArch64/lit.local.cfg
@@ -0,0 +1,5 @@
+config.suffixes = ['.ll', '.c', '.cpp', '.s']
+
+targets = set(config.root.targets_to_build.split())
+if not 'AArch64' in targets:
+ config.unsupported = True \ No newline at end of file
diff --git a/test/MC/AArch64/mapping-across-sections.s b/test/MC/AArch64/mapping-across-sections.s
new file mode 100644
index 0000000000..3d32c1dfb4
--- /dev/null
+++ b/test/MC/AArch64/mapping-across-sections.s
@@ -0,0 +1,28 @@
+// RUN: llvm-mc -triple=aarch64-none-linux-gnu -filetype=obj < %s | llvm-objdump -t - | FileCheck %s
+
+ .text
+ add w0, w0, w0
+
+// .wibble should *not* inherit .text's mapping symbol. It's a completely different section.
+ .section .wibble
+ add w0, w0, w0
+
+// A section should be able to start with a $d
+ .section .starts_data
+ .word 42
+
+// Changing back to .text should not emit a redundant $x
+ .text
+ add w0, w0, w0
+
+// With all those constraints, we want:
+// + .text to have $x at 0 and no others
+// + .wibble to have $x at 0
+// + .starts_data to have $d at 0
+
+
+// CHECK: 00000000 .starts_data 00000000 $d
+// CHECK-NEXT: 00000000 .text 00000000 $x
+// CHECK-NEXT: 00000000 .wibble 00000000 $x
+// CHECK-NOT: ${{[adtx]}}
+
diff --git a/test/MC/AArch64/mapping-within-section.s b/test/MC/AArch64/mapping-within-section.s
new file mode 100644
index 0000000000..c8bd804fa0
--- /dev/null
+++ b/test/MC/AArch64/mapping-within-section.s
@@ -0,0 +1,23 @@
+// RUN: llvm-mc -triple=aarch64-none-linux-gnu -filetype=obj < %s | llvm-objdump -t - | FileCheck %s
+
+ .text
+// $x at 0x0000
+ add w0, w0, w0
+// $d at 0x0004
+ .ascii "012"
+ .byte 1
+ .hword 2
+ .word 4
+ .xword 8
+ .single 4.0
+ .double 8.0
+ .space 10
+ .zero 3
+ .fill 10, 2, 42
+ .org 100, 12
+// $x at 0x0064
+ add x0, x0, x0
+
+// CHECK: 00000004 .text 00000000 $d
+// CHECK-NEXT: 00000000 .text 00000000 $x
+// CHECK-NEXT: 00000064 .text 00000000 $x
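The expected offsets in this test are pure directive arithmetic: the data run starts right after the 4-byte add ($d at 0x4), the directives up to .fill occupy 63 bytes, and .org then pads the section to absolute offset 100, so the final add (and its $x) lands at 0x64. A small Python sketch of that sum, illustrative only and not part of the patch:

    # Byte sizes of the directives between the two add instructions.
    sizes = [3,   # .ascii "012"
             1,   # .byte 1
             2,   # .hword 2
             4,   # .word 4
             8,   # .xword 8
             4,   # .single 4.0
             8,   # .double 8.0
             10,  # .space 10
             3,   # .zero 3
             20]  # .fill 10, 2, 42  (10 repeats of 2 bytes each)

    offset = 4 + sum(sizes)        # data ends at 67 (0x43)
    offset = max(offset, 100)      # .org 100, 12 pads to absolute offset 100
    print(hex(offset))             # 0x64, matching the final $x CHECK line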
diff --git a/test/MC/AArch64/tls-relocs.s b/test/MC/AArch64/tls-relocs.s
new file mode 100644
index 0000000000..690fa8c009
--- /dev/null
+++ b/test/MC/AArch64/tls-relocs.s
@@ -0,0 +1,662 @@
+// RUN: llvm-mc -arch=aarch64 -show-encoding < %s | FileCheck %s
+// RUN: llvm-mc -arch=aarch64 -filetype=obj < %s -o %t
+// RUN: elf-dump %t | FileCheck --check-prefix=CHECK-ELF %s
+// RUN: llvm-objdump -r %t | FileCheck --check-prefix=CHECK-ELF-NAMES %s
+
+// CHECK-ELF: .rela.text
+
+ // TLS local-dynamic forms
+ movz x1, #:dtprel_g2:var
+ movn x2, #:dtprel_g2:var
+ movz x3, #:dtprel_g2:var
+ movn x4, #:dtprel_g2:var
+// CHECK: movz x1, #:dtprel_g2:var // encoding: [0x01'A',A,0xc0'A',0x92'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g2:var, kind: fixup_a64_movw_dtprel_g2
+// CHECK-NEXT: movn x2, #:dtprel_g2:var // encoding: [0x02'A',A,0xc0'A',0x92'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g2:var, kind: fixup_a64_movw_dtprel_g2
+// CHECK-NEXT: movz x3, #:dtprel_g2:var // encoding: [0x03'A',A,0xc0'A',0x92'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g2:var, kind: fixup_a64_movw_dtprel_g2
+// CHECK-NEXT: movn x4, #:dtprel_g2:var // encoding: [0x04'A',A,0xc0'A',0x92'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g2:var, kind: fixup_a64_movw_dtprel_g2
+
+// CHECK-ELF: # Relocation 0
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000000)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM:0x[0-9a-f]+]])
+// CHECK-ELF-NEXT: ('r_type', 0x0000020b)
+// CHECK-ELF: # Relocation 1
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000004)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x0000020b)
+// CHECK-ELF: # Relocation 2
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000008)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x0000020b)
+// CHECK-ELF: # Relocation 3
+// CHECK-ELF-NEXT: (('r_offset', 0x000000000000000c)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x0000020b)
+
+// CHECK-ELF-NAMES: 0 R_AARCH64_TLSLD_MOVW_DTPREL_G2
+// CHECK-ELF-NAMES: 4 R_AARCH64_TLSLD_MOVW_DTPREL_G2
+// CHECK-ELF-NAMES: 8 R_AARCH64_TLSLD_MOVW_DTPREL_G2
+// CHECK-ELF-NAMES: 12 R_AARCH64_TLSLD_MOVW_DTPREL_G2
+
+ movz x5, #:dtprel_g1:var
+ movn x6, #:dtprel_g1:var
+ movz w7, #:dtprel_g1:var
+ movn w8, #:dtprel_g1:var
+// CHECK: movz x5, #:dtprel_g1:var // encoding: [0x05'A',A,0xa0'A',0x92'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g1:var, kind: fixup_a64_movw_dtprel_g1
+// CHECK-NEXT: movn x6, #:dtprel_g1:var // encoding: [0x06'A',A,0xa0'A',0x92'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g1:var, kind: fixup_a64_movw_dtprel_g1
+// CHECK-NEXT: movz w7, #:dtprel_g1:var // encoding: [0x07'A',A,0xa0'A',0x12'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g1:var, kind: fixup_a64_movw_dtprel_g1
+// CHECK-NEXT: movn w8, #:dtprel_g1:var // encoding: [0x08'A',A,0xa0'A',0x12'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g1:var, kind: fixup_a64_movw_dtprel_g1
+
+// CHECK-ELF: # Relocation 4
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000010)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x0000020c)
+// CHECK-ELF: # Relocation 5
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000014)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x0000020c)
+// CHECK-ELF: # Relocation 6
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000018)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x0000020c)
+// CHECK-ELF: # Relocation 7
+// CHECK-ELF-NEXT: (('r_offset', 0x000000000000001c)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x0000020c)
+
+// CHECK-ELF-NAMES: 16 R_AARCH64_TLSLD_MOVW_DTPREL_G1
+// CHECK-ELF-NAMES: 20 R_AARCH64_TLSLD_MOVW_DTPREL_G1
+// CHECK-ELF-NAMES: 24 R_AARCH64_TLSLD_MOVW_DTPREL_G1
+// CHECK-ELF-NAMES: 28 R_AARCH64_TLSLD_MOVW_DTPREL_G1
+
+ movk x9, #:dtprel_g1_nc:var
+ movk w10, #:dtprel_g1_nc:var
+// CHECK: movk x9, #:dtprel_g1_nc:var // encoding: [0x09'A',A,0xa0'A',0xf2'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g1_nc:var, kind: fixup_a64_movw_dtprel_g1_nc
+// CHECK-NEXT: movk w10, #:dtprel_g1_nc:var // encoding: [0x0a'A',A,0xa0'A',0x72'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g1_nc:var, kind: fixup_a64_movw_dtprel_g1_nc
+
+// CHECK-ELF: # Relocation 8
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000020)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x0000020d)
+// CHECK-ELF: # Relocation 9
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000024)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x0000020d)
+
+// CHECK-ELF-NAMES: 32 R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC
+// CHECK-ELF-NAMES: 36 R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC
+
+ movz x11, #:dtprel_g0:var
+ movn x12, #:dtprel_g0:var
+ movz w13, #:dtprel_g0:var
+ movn w14, #:dtprel_g0:var
+// CHECK: movz x11, #:dtprel_g0:var // encoding: [0x0b'A',A,0x80'A',0x92'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g0:var, kind: fixup_a64_movw_dtprel_g0
+// CHECK-NEXT: movn x12, #:dtprel_g0:var // encoding: [0x0c'A',A,0x80'A',0x92'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g0:var, kind: fixup_a64_movw_dtprel_g0
+// CHECK-NEXT: movz w13, #:dtprel_g0:var // encoding: [0x0d'A',A,0x80'A',0x12'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g0:var, kind: fixup_a64_movw_dtprel_g0
+// CHECK-NEXT: movn w14, #:dtprel_g0:var // encoding: [0x0e'A',A,0x80'A',0x12'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g0:var, kind: fixup_a64_movw_dtprel_g0
+
+// CHECK-ELF: # Relocation 10
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000028)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x0000020e)
+// CHECK-ELF: # Relocation 11
+// CHECK-ELF-NEXT: (('r_offset', 0x000000000000002c)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x0000020e)
+// CHECK-ELF: # Relocation 12
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000030)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x0000020e)
+// CHECK-ELF: # Relocation 13
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000034)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x0000020e)
+
+// CHECK-ELF-NAMES: 40 R_AARCH64_TLSLD_MOVW_DTPREL_G0
+// CHECK-ELF-NAMES: 44 R_AARCH64_TLSLD_MOVW_DTPREL_G0
+// CHECK-ELF-NAMES: 48 R_AARCH64_TLSLD_MOVW_DTPREL_G0
+// CHECK-ELF-NAMES: 52 R_AARCH64_TLSLD_MOVW_DTPREL_G0
+
+
+ movk x15, #:dtprel_g0_nc:var
+ movk w16, #:dtprel_g0_nc:var
+// CHECK: movk x15, #:dtprel_g0_nc:var // encoding: [0x0f'A',A,0x80'A',0xf2'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g0_nc:var, kind: fixup_a64_movw_dtprel_g0_nc
+// CHECK-NEXT: movk w16, #:dtprel_g0_nc:var // encoding: [0x10'A',A,0x80'A',0x72'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g0_nc:var, kind: fixup_a64_movw_dtprel_g0_nc
+
+// CHECK-ELF: # Relocation 14
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000038)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x0000020f)
+// CHECK-ELF: # Relocation 15
+// CHECK-ELF-NEXT: (('r_offset', 0x000000000000003c)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x0000020f)
+
+// CHECK-ELF-NAMES: 56 R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC
+// CHECK-ELF-NAMES: 60 R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC
+
+ add x17, x18, #:dtprel_hi12:var, lsl #12
+ add w19, w20, #:dtprel_hi12:var, lsl #12
+// CHECK: add x17, x18, #:dtprel_hi12:var, lsl #12 // encoding: [0x51'A',0x02'A',0x40'A',0x91'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_hi12:var, kind: fixup_a64_add_dtprel_hi12
+// CHECK-NEXT: add w19, w20, #:dtprel_hi12:var, lsl #12 // encoding: [0x93'A',0x02'A',0x40'A',0x11'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_hi12:var, kind: fixup_a64_add_dtprel_hi12
+
+// CHECK-ELF: # Relocation 16
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000040)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000210)
+// CHECK-ELF: # Relocation 17
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000044)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000210)
+
+// CHECK-ELF-NAMES: 64 R_AARCH64_TLSLD_ADD_DTPREL_HI12
+// CHECK-ELF-NAMES: 68 R_AARCH64_TLSLD_ADD_DTPREL_HI12
+
+
+ add x21, x22, #:dtprel_lo12:var
+ add w23, w24, #:dtprel_lo12:var
+// CHECK: add x21, x22, #:dtprel_lo12:var // encoding: [0xd5'A',0x02'A',A,0x91'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12:var, kind: fixup_a64_add_dtprel_lo12
+// CHECK-NEXT: add w23, w24, #:dtprel_lo12:var // encoding: [0x17'A',0x03'A',A,0x11'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12:var, kind: fixup_a64_add_dtprel_lo12
+
+// CHECK-ELF: # Relocation 18
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000048)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000211)
+// CHECK-ELF: # Relocation 19
+// CHECK-ELF-NEXT: (('r_offset', 0x000000000000004c)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000211)
+
+// CHECK-ELF-NAMES: 72 R_AARCH64_TLSLD_ADD_DTPREL_LO12
+// CHECK-ELF-NAMES: 76 R_AARCH64_TLSLD_ADD_DTPREL_LO12
+
+ add x25, x26, #:dtprel_lo12_nc:var
+ add w27, w28, #:dtprel_lo12_nc:var
+// CHECK: add x25, x26, #:dtprel_lo12_nc:var // encoding: [0x59'A',0x03'A',A,0x91'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12_nc:var, kind: fixup_a64_add_dtprel_lo12_nc
+// CHECK-NEXT: add w27, w28, #:dtprel_lo12_nc:var // encoding: [0x9b'A',0x03'A',A,0x11'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12_nc:var, kind: fixup_a64_add_dtprel_lo12_nc
+
+// CHECK-ELF: # Relocation 20
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000050)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000212)
+// CHECK-ELF: # Relocation 21
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000054)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000212)
+
+// CHECK-ELF-NAMES: 80 R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC
+// CHECK-ELF-NAMES: 84 R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC
+
+ ldrb w29, [x30, #:dtprel_lo12:var]
+ ldrsb x29, [x28, #:dtprel_lo12_nc:var]
+// CHECK: ldrb w29, [x30, #:dtprel_lo12:var] // encoding: [0xdd'A',0x03'A',0x40'A',0x39'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12:var, kind: fixup_a64_ldst8_dtprel_lo12
+// CHECK-NEXT: ldrsb x29, [x28, #:dtprel_lo12_nc:var] // encoding: [0x9d'A',0x03'A',0x80'A',0x39'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12_nc:var, kind: fixup_a64_ldst8_dtprel_lo12_nc
+
+// CHECK-ELF: # Relocation 22
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000058)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000213)
+// CHECK-ELF: # Relocation 23
+// CHECK-ELF-NEXT: (('r_offset', 0x000000000000005c)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000214)
+
+// CHECK-ELF-NAMES: 88 R_AARCH64_TLSLD_LDST8_DTPREL_LO12
+// CHECK-ELF-NAMES: 92 R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC
+
+ strh w27, [x26, #:dtprel_lo12:var]
+ ldrsh x25, [x24, #:dtprel_lo12_nc:var]
+// CHECK: strh w27, [x26, #:dtprel_lo12:var] // encoding: [0x5b'A',0x03'A',A,0x79'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12:var, kind: fixup_a64_ldst16_dtprel_lo12
+// CHECK-NEXT: ldrsh x25, [x24, #:dtprel_lo12_nc:var] // encoding: [0x19'A',0x03'A',0x80'A',0x79'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12_nc:var, kind: fixup_a64_ldst16_dtprel_lo12_nc
+
+// CHECK-ELF: # Relocation 24
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000060)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000215)
+// CHECK-ELF: # Relocation 25
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000064)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000216)
+
+// CHECK-ELF-NAMES: 96 R_AARCH64_TLSLD_LDST16_DTPREL_LO12
+// CHECK-ELF-NAMES: 100 R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC
+
+ ldr w23, [x22, #:dtprel_lo12:var]
+ ldrsw x21, [x20, #:dtprel_lo12_nc:var]
+// CHECK: ldr w23, [x22, #:dtprel_lo12:var] // encoding: [0xd7'A',0x02'A',0x40'A',0xb9'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12:var, kind: fixup_a64_ldst32_dtprel_lo12
+// CHECK-NEXT: ldrsw x21, [x20, #:dtprel_lo12_nc:var] // encoding: [0x95'A',0x02'A',0x80'A',0xb9'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12_nc:var, kind: fixup_a64_ldst32_dtprel_lo12_nc
+
+// CHECK-ELF: # Relocation 26
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000068)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000217)
+// CHECK-ELF: # Relocation 27
+// CHECK-ELF-NEXT: (('r_offset', 0x000000000000006c)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000218)
+
+// CHECK-ELF-NAMES: 104 R_AARCH64_TLSLD_LDST32_DTPREL_LO12
+// CHECK-ELF-NAMES: 108 R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC
+
+ ldr x19, [x18, #:dtprel_lo12:var]
+ str x17, [x16, #:dtprel_lo12_nc:var]
+// CHECK: ldr x19, [x18, #:dtprel_lo12:var] // encoding: [0x53'A',0x02'A',0x40'A',0xf9'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12:var, kind: fixup_a64_ldst64_dtprel_lo12
+// CHECK-NEXT: str x17, [x16, #:dtprel_lo12_nc:var] // encoding: [0x11'A',0x02'A',A,0xf9'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12_nc:var, kind: fixup_a64_ldst64_dtprel_lo12_nc
+
+
+// CHECK-ELF: # Relocation 28
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000070)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000219)
+// CHECK-ELF: # Relocation 29
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000074)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x0000021a)
+
+// CHECK-ELF-NAMES: 112 R_AARCH64_TLSLD_LDST64_DTPREL_LO12
+// CHECK-ELF-NAMES: 116 R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC
+
+ // TLS initial-exec forms
+ movz x15, #:gottprel_g1:var
+ movz w14, #:gottprel_g1:var
+// CHECK: movz x15, #:gottprel_g1:var // encoding: [0x0f'A',A,0xa0'A',0x92'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :gottprel_g1:var, kind: fixup_a64_movw_gottprel_g1
+// CHECK-NEXT: movz w14, #:gottprel_g1:var // encoding: [0x0e'A',A,0xa0'A',0x12'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :gottprel_g1:var, kind: fixup_a64_movw_gottprel_g1
+
+// CHECK-ELF: # Relocation 30
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000078)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x0000021b)
+// CHECK-ELF: # Relocation 31
+// CHECK-ELF-NEXT: (('r_offset', 0x000000000000007c)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x0000021b)
+
+// CHECK-ELF-NAMES: 120 R_AARCH64_TLSIE_MOVW_GOTTPREL_G1
+// CHECK-ELF-NAMES: 124 R_AARCH64_TLSIE_MOVW_GOTTPREL_G1
+
+ movk x13, #:gottprel_g0_nc:var
+ movk w12, #:gottprel_g0_nc:var
+// CHECK: movk x13, #:gottprel_g0_nc:var // encoding: [0x0d'A',A,0x80'A',0xf2'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :gottprel_g0_nc:var, kind: fixup_a64_movw_gottprel_g0_nc
+// CHECK-NEXT: movk w12, #:gottprel_g0_nc:var // encoding: [0x0c'A',A,0x80'A',0x72'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :gottprel_g0_nc:var, kind: fixup_a64_movw_gottprel_g0_nc
+
+// CHECK-ELF: # Relocation 32
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000080)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x0000021c)
+// CHECK-ELF: # Relocation 33
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000084)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x0000021c)
+
+// CHECK-ELF-NAMES: 128 R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC
+// CHECK-ELF-NAMES: 132 R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC
+
+ adrp x11, :gottprel:var
+ ldr x10, [x0, #:gottprel_lo12:var]
+ ldr x9, :gottprel:var
+// CHECK: adrp x11, :gottprel:var // encoding: [0x0b'A',A,A,0x90'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :gottprel:var, kind: fixup_a64_adr_gottprel_page
+// CHECK-NEXT: ldr x10, [x0, #:gottprel_lo12:var] // encoding: [0x0a'A',A,0x40'A',0xf9'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :gottprel_lo12:var, kind: fixup_a64_ld64_gottprel_lo12_nc
+// CHECK-NEXT: ldr x9, :gottprel:var // encoding: [0x09'A',A,A,0x58'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :gottprel:var, kind: fixup_a64_ld_gottprel_prel19
+
+// CHECK-ELF: # Relocation 34
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000088)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x0000021d)
+// CHECK-ELF: # Relocation 35
+// CHECK-ELF-NEXT: (('r_offset', 0x000000000000008c)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x0000021e)
+// CHECK-ELF: # Relocation 36
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000090)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x0000021f)
+
+// CHECK-ELF-NAMES: 136 R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE
+// CHECK-ELF-NAMES: 140 R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
+// CHECK-ELF-NAMES: 144 R_AARCH64_TLSIE_LD_GOTTPREL_PREL19
+
+ // TLS local-exec forms
+ movz x3, #:tprel_g2:var
+ movn x4, #:tprel_g2:var
+// CHECK: movz x3, #:tprel_g2:var // encoding: [0x03'A',A,0xc0'A',0x92'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g2:var, kind: fixup_a64_movw_tprel_g2
+// CHECK-NEXT: movn x4, #:tprel_g2:var // encoding: [0x04'A',A,0xc0'A',0x92'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g2:var, kind: fixup_a64_movw_tprel_g2
+
+// CHECK-ELF: # Relocation 37
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000094)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000220)
+// CHECK-ELF: # Relocation 38
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000098)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000220)
+
+// CHECK-ELF-NAMES: 148 R_AARCH64_TLSLE_MOVW_TPREL_G2
+// CHECK-ELF-NAMES: 152 R_AARCH64_TLSLE_MOVW_TPREL_G2
+
+ movz x5, #:tprel_g1:var
+ movn x6, #:tprel_g1:var
+ movz w7, #:tprel_g1:var
+ movn w8, #:tprel_g1:var
+// CHECK: movz x5, #:tprel_g1:var // encoding: [0x05'A',A,0xa0'A',0x92'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g1:var, kind: fixup_a64_movw_tprel_g1
+// CHECK-NEXT: movn x6, #:tprel_g1:var // encoding: [0x06'A',A,0xa0'A',0x92'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g1:var, kind: fixup_a64_movw_tprel_g1
+// CHECK-NEXT: movz w7, #:tprel_g1:var // encoding: [0x07'A',A,0xa0'A',0x12'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g1:var, kind: fixup_a64_movw_tprel_g1
+// CHECK-NEXT: movn w8, #:tprel_g1:var // encoding: [0x08'A',A,0xa0'A',0x12'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g1:var, kind: fixup_a64_movw_tprel_g1
+
+// CHECK-ELF: # Relocation 39
+// CHECK-ELF-NEXT: (('r_offset', 0x000000000000009c)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000221)
+// CHECK-ELF: # Relocation 40
+// CHECK-ELF-NEXT: (('r_offset', 0x00000000000000a0)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000221)
+// CHECK-ELF: # Relocation 41
+// CHECK-ELF-NEXT: (('r_offset', 0x00000000000000a4)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000221)
+// CHECK-ELF: # Relocation 42
+// CHECK-ELF-NEXT: (('r_offset', 0x00000000000000a8)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000221)
+
+// CHECK-ELF-NAMES: 156 R_AARCH64_TLSLE_MOVW_TPREL_G1
+// CHECK-ELF-NAMES: 160 R_AARCH64_TLSLE_MOVW_TPREL_G1
+// CHECK-ELF-NAMES: 164 R_AARCH64_TLSLE_MOVW_TPREL_G1
+// CHECK-ELF-NAMES: 168 R_AARCH64_TLSLE_MOVW_TPREL_G1
+
+ movk x9, #:tprel_g1_nc:var
+ movk w10, #:tprel_g1_nc:var
+// CHECK: movk x9, #:tprel_g1_nc:var // encoding: [0x09'A',A,0xa0'A',0xf2'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g1_nc:var, kind: fixup_a64_movw_tprel_g1_nc
+// CHECK-NEXT: movk w10, #:tprel_g1_nc:var // encoding: [0x0a'A',A,0xa0'A',0x72'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g1_nc:var, kind: fixup_a64_movw_tprel_g1_nc
+
+// CHECK-ELF: # Relocation 43
+// CHECK-ELF-NEXT: (('r_offset', 0x00000000000000ac)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000222)
+// CHECK-ELF: # Relocation 44
+// CHECK-ELF-NEXT: (('r_offset', 0x00000000000000b0)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000222)
+
+// CHECK-ELF-NAMES: 172 R_AARCH64_TLSLE_MOVW_TPREL_G1_NC
+// CHECK-ELF-NAMES: 176 R_AARCH64_TLSLE_MOVW_TPREL_G1_NC
+
+ movz x11, #:tprel_g0:var
+ movn x12, #:tprel_g0:var
+ movz w13, #:tprel_g0:var
+ movn w14, #:tprel_g0:var
+// CHECK: movz x11, #:tprel_g0:var // encoding: [0x0b'A',A,0x80'A',0x92'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g0:var, kind: fixup_a64_movw_tprel_g0
+// CHECK-NEXT: movn x12, #:tprel_g0:var // encoding: [0x0c'A',A,0x80'A',0x92'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g0:var, kind: fixup_a64_movw_tprel_g0
+// CHECK-NEXT: movz w13, #:tprel_g0:var // encoding: [0x0d'A',A,0x80'A',0x12'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g0:var, kind: fixup_a64_movw_tprel_g0
+// CHECK-NEXT: movn w14, #:tprel_g0:var // encoding: [0x0e'A',A,0x80'A',0x12'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g0:var, kind: fixup_a64_movw_tprel_g0
+
+// CHECK-ELF: # Relocation 45
+// CHECK-ELF-NEXT: (('r_offset', 0x00000000000000b4)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000223)
+// CHECK-ELF: # Relocation 46
+// CHECK-ELF-NEXT: (('r_offset', 0x00000000000000b8)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000223)
+// CHECK-ELF: # Relocation 47
+// CHECK-ELF-NEXT: (('r_offset', 0x00000000000000bc)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000223)
+// CHECK-ELF: # Relocation 48
+// CHECK-ELF-NEXT: (('r_offset', 0x00000000000000c0)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000223)
+
+// CHECK-ELF-NAMES: 180 R_AARCH64_TLSLE_MOVW_TPREL_G0
+// CHECK-ELF-NAMES: 184 R_AARCH64_TLSLE_MOVW_TPREL_G0
+// CHECK-ELF-NAMES: 188 R_AARCH64_TLSLE_MOVW_TPREL_G0
+// CHECK-ELF-NAMES: 192 R_AARCH64_TLSLE_MOVW_TPREL_G0
+
+ movk x15, #:tprel_g0_nc:var
+ movk w16, #:tprel_g0_nc:var
+// CHECK: movk x15, #:tprel_g0_nc:var // encoding: [0x0f'A',A,0x80'A',0xf2'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g0_nc:var, kind: fixup_a64_movw_tprel_g0_nc
+// CHECK-NEXT: movk w16, #:tprel_g0_nc:var // encoding: [0x10'A',A,0x80'A',0x72'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g0_nc:var, kind: fixup_a64_movw_tprel_g0_nc
+
+// CHECK-ELF: # Relocation 49
+// CHECK-ELF-NEXT: (('r_offset', 0x00000000000000c4)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000224)
+// CHECK-ELF: # Relocation 50
+// CHECK-ELF-NEXT: (('r_offset', 0x00000000000000c8)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000224)
+
+// CHECK-ELF-NAMES: 196 R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
+// CHECK-ELF-NAMES: 200 R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
+
+ add x17, x18, #:tprel_hi12:var, lsl #12
+ add w19, w20, #:tprel_hi12:var, lsl #12
+// CHECK: add x17, x18, #:tprel_hi12:var, lsl #12 // encoding: [0x51'A',0x02'A',0x40'A',0x91'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_hi12:var, kind: fixup_a64_add_tprel_hi12
+// CHECK-NEXT: add w19, w20, #:tprel_hi12:var, lsl #12 // encoding: [0x93'A',0x02'A',0x40'A',0x11'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_hi12:var, kind: fixup_a64_add_tprel_hi12
+
+// CHECK-ELF: # Relocation 51
+// CHECK-ELF-NEXT: (('r_offset', 0x00000000000000cc)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000225)
+// CHECK-ELF: # Relocation 52
+// CHECK-ELF-NEXT: (('r_offset', 0x00000000000000d0)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000225)
+
+// CHECK-ELF-NAMES: 204 R_AARCH64_TLSLE_ADD_TPREL_HI12
+// CHECK-ELF-NAMES: 208 R_AARCH64_TLSLE_ADD_TPREL_HI12
+
+ add x21, x22, #:tprel_lo12:var
+ add w23, w24, #:tprel_lo12:var
+// CHECK: add x21, x22, #:tprel_lo12:var // encoding: [0xd5'A',0x02'A',A,0x91'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12:var, kind: fixup_a64_add_tprel_lo12
+// CHECK-NEXT: add w23, w24, #:tprel_lo12:var // encoding: [0x17'A',0x03'A',A,0x11'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12:var, kind: fixup_a64_add_tprel_lo12
+
+// CHECK-ELF: # Relocation 53
+// CHECK-ELF-NEXT: (('r_offset', 0x00000000000000d4)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000226)
+// CHECK-ELF: # Relocation 54
+// CHECK-ELF-NEXT: (('r_offset', 0x00000000000000d8)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000226)
+
+// CHECK-ELF-NAMES: 212 R_AARCH64_TLSLE_ADD_TPREL_LO12
+// CHECK-ELF-NAMES: 216 R_AARCH64_TLSLE_ADD_TPREL_LO12
+
+ add x25, x26, #:tprel_lo12_nc:var
+ add w27, w28, #:tprel_lo12_nc:var
+// CHECK: add x25, x26, #:tprel_lo12_nc:var // encoding: [0x59'A',0x03'A',A,0x91'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12_nc:var, kind: fixup_a64_add_tprel_lo12_nc
+// CHECK-NEXT: add w27, w28, #:tprel_lo12_nc:var // encoding: [0x9b'A',0x03'A',A,0x11'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12_nc:var, kind: fixup_a64_add_tprel_lo12_nc
+
+// CHECK-ELF: # Relocation 55
+// CHECK-ELF-NEXT: (('r_offset', 0x00000000000000dc)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000227)
+// CHECK-ELF: # Relocation 56
+// CHECK-ELF-NEXT: (('r_offset', 0x00000000000000e0)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000227)
+
+
+// CHECK-ELF-NAMES: 220 R_AARCH64_TLSLE_ADD_TPREL_LO12_NC
+// CHECK-ELF-NAMES: 224 R_AARCH64_TLSLE_ADD_TPREL_LO12_NC
+
+ ldrb w29, [x30, #:tprel_lo12:var]
+ ldrsb x29, [x28, #:tprel_lo12_nc:var]
+// CHECK: ldrb w29, [x30, #:tprel_lo12:var] // encoding: [0xdd'A',0x03'A',0x40'A',0x39'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12:var, kind: fixup_a64_ldst8_tprel_lo12
+// CHECK-NEXT: ldrsb x29, [x28, #:tprel_lo12_nc:var] // encoding: [0x9d'A',0x03'A',0x80'A',0x39'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12_nc:var, kind: fixup_a64_ldst8_tprel_lo12_nc
+
+// CHECK-ELF: # Relocation 57
+// CHECK-ELF-NEXT: (('r_offset', 0x00000000000000e4)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000228)
+// CHECK-ELF: # Relocation 58
+// CHECK-ELF-NEXT: (('r_offset', 0x00000000000000e8)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000229)
+
+// CHECK-ELF-NAMES: 228 R_AARCH64_TLSLE_LDST8_TPREL_LO12
+// CHECK-ELF-NAMES: 232 R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC
+
+ strh w27, [x26, #:tprel_lo12:var]
+ ldrsh x25, [x24, #:tprel_lo12_nc:var]
+// CHECK: strh w27, [x26, #:tprel_lo12:var] // encoding: [0x5b'A',0x03'A',A,0x79'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12:var, kind: fixup_a64_ldst16_tprel_lo12
+// CHECK-NEXT: ldrsh x25, [x24, #:tprel_lo12_nc:var] // encoding: [0x19'A',0x03'A',0x80'A',0x79'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12_nc:var, kind: fixup_a64_ldst16_tprel_lo12_nc
+
+// CHECK-ELF: # Relocation 59
+// CHECK-ELF-NEXT: (('r_offset', 0x00000000000000ec)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x0000022a)
+// CHECK-ELF: # Relocation 60
+// CHECK-ELF-NEXT: (('r_offset', 0x00000000000000f0)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x0000022b)
+
+// CHECK-ELF-NAMES: 236 R_AARCH64_TLSLE_LDST16_TPREL_LO12
+// CHECK-ELF-NAMES: 240 R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC
+
+ ldr w23, [x22, #:tprel_lo12:var]
+ ldrsw x21, [x20, #:tprel_lo12_nc:var]
+// CHECK: ldr w23, [x22, #:tprel_lo12:var] // encoding: [0xd7'A',0x02'A',0x40'A',0xb9'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12:var, kind: fixup_a64_ldst32_tprel_lo12
+// CHECK-NEXT: ldrsw x21, [x20, #:tprel_lo12_nc:var] // encoding: [0x95'A',0x02'A',0x80'A',0xb9'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12_nc:var, kind: fixup_a64_ldst32_tprel_lo12_nc
+
+// CHECK-ELF: # Relocation 61
+// CHECK-ELF-NEXT: (('r_offset', 0x00000000000000f4)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x0000022c)
+// CHECK-ELF: # Relocation 62
+// CHECK-ELF-NEXT: (('r_offset', 0x00000000000000f8)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x0000022d)
+
+// CHECK-ELF-NAMES: 244 R_AARCH64_TLSLE_LDST32_TPREL_LO12
+// CHECK-ELF-NAMES: 248 R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC
+
+ ldr x19, [x18, #:tprel_lo12:var]
+ str x17, [x16, #:tprel_lo12_nc:var]
+// CHECK: ldr x19, [x18, #:tprel_lo12:var] // encoding: [0x53'A',0x02'A',0x40'A',0xf9'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12:var, kind: fixup_a64_ldst64_tprel_lo12
+// CHECK-NEXT: str x17, [x16, #:tprel_lo12_nc:var] // encoding: [0x11'A',0x02'A',A,0xf9'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12_nc:var, kind: fixup_a64_ldst64_tprel_lo12_nc
+
+// CHECK-ELF: # Relocation 63
+// CHECK-ELF-NEXT: (('r_offset', 0x00000000000000fc)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x0000022e)
+// CHECK-ELF: # Relocation 64
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000100)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x0000022f)
+
+// CHECK-ELF-NAMES: 252 R_AARCH64_TLSLE_LDST64_TPREL_LO12
+// CHECK-ELF-NAMES: 256 R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC
+
+ // TLS descriptor forms
+ adrp x8, :tlsdesc:var
+ ldr x7, [x6, :tlsdesc_lo12:var]
+ add x5, x4, #:tlsdesc_lo12:var
+ .tlsdesccall var
+ blr x3
+
+// CHECK: adrp x8, :tlsdesc:var // encoding: [0x08'A',A,A,0x90'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :tlsdesc:var, kind: fixup_a64_tlsdesc_adr_page
+// CHECK-NEXT: ldr x7, [x6, #:tlsdesc_lo12:var] // encoding: [0xc7'A',A,0x40'A',0xf9'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :tlsdesc_lo12:var, kind: fixup_a64_tlsdesc_ld64_lo12_nc
+// CHECK-NEXT: add x5, x4, #:tlsdesc_lo12:var // encoding: [0x85'A',A,A,0x91'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :tlsdesc_lo12:var, kind: fixup_a64_tlsdesc_add_lo12_nc
+// CHECK-NEXT: .tlsdesccall var // encoding: []
+// CHECK-NEXT: // fixup A - offset: 0, value: :tlsdesc:var, kind: fixup_a64_tlsdesc_call
+// CHECK: blr x3 // encoding: [0x60,0x00,0x3f,0xd6]
+
+
+// CHECK-ELF: # Relocation 65
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000104)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000232)
+// CHECK-ELF: # Relocation 66
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000108)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000233)
+// CHECK-ELF: # Relocation 67
+// CHECK-ELF-NEXT: (('r_offset', 0x000000000000010c)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000234)
+// CHECK-ELF: # Relocation 68
+// CHECK-ELF-NEXT: (('r_offset', 0x0000000000000110)
+// CHECK-ELF-NEXT: ('r_sym', [[VARSYM]])
+// CHECK-ELF-NEXT: ('r_type', 0x00000239)
+
+// CHECK-ELF-NAMES: 260 R_AARCH64_TLSDESC_ADR_PAGE
+// CHECK-ELF-NAMES: 264 R_AARCH64_TLSDESC_LD64_LO12_NC
+// CHECK-ELF-NAMES: 268 R_AARCH64_TLSDESC_ADD_LO12_NC
+// CHECK-ELF-NAMES: 272 R_AARCH64_TLSDESC_CALL
+
+
+// Make sure symbol 5 has type STT_TLS:
+
+// CHECK-ELF: # Symbol 5
+// CHECK-ELF-NEXT: (('st_name', 0x00000006) # 'var'
+// CHECK-ELF-NEXT: ('st_bind', 0x1)
+// CHECK-ELF-NEXT: ('st_type', 0x6)
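The movz/movn/movk groups exercised throughout this test each patch one 16-bit slice of the eventual TLS offset: :..._g2:, :..._g1: and :..._g0: select bits [47:32], [31:16] and [15:0] respectively, and the _nc variants simply skip the overflow check. A short Python sketch of that slicing, illustrative only and not part of the patch:

    def movw_slices(offset):
        return ((offset >> 32) & 0xFFFF,   # :..._g2:
                (offset >> 16) & 0xFFFF,   # :..._g1:
                offset & 0xFFFF)           # :..._g0:

    g2, g1, g0 = movw_slices(0x123456789ABC)
    assert (g2 << 32) | (g1 << 16) | g0 == 0x123456789ABC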
diff --git a/test/MC/Disassembler/AArch64/basic-a64-instructions.txt b/test/MC/Disassembler/AArch64/basic-a64-instructions.txt
new file mode 100644
index 0000000000..9c5a5ebcf7
--- /dev/null
+++ b/test/MC/Disassembler/AArch64/basic-a64-instructions.txt
@@ -0,0 +1,4145 @@
+# RUN: llvm-mc -triple=aarch64 -disassemble < %s | FileCheck %s
+
+#------------------------------------------------------------------------------
+# Add/sub (immediate)
+#------------------------------------------------------------------------------
+# CHECK: add w4, w5, #0
+# CHECK: add w2, w3, #4095
+# CHECK: add w30, w29, #1, lsl #12
+# CHECK: add w13, w5, #4095, lsl #12
+# CHECK: add x5, x7, #1638
+0xa4 0x0 0x0 0x11
+0x62 0xfc 0x3f 0x11
+0xbe 0x7 0x40 0x11
+0xad 0xfc 0x7f 0x11
+0xe5 0x98 0x19 0x91
+
+# CHECK: add w20, wsp, #801
+# CHECK: add wsp, wsp, #1104
+# CHECK: add wsp, w30, #4084
+0xf4 0x87 0xc 0x11
+0xff 0x43 0x11 0x11
+0xdf 0xd3 0x3f 0x11
+
+# CHECK: add x0, x24, #291
+# CHECK: add x3, x24, #4095, lsl #12
+# CHECK: add x8, sp, #1074
+# CHECK: add sp, x29, #3816
+0x0 0x8f 0x4 0x91
+0x3 0xff 0x7f 0x91
+0xe8 0xcb 0x10 0x91
+0xbf 0xa3 0x3b 0x91
+
+# CHECK: sub w0, wsp, #4077
+# CHECK: sub w4, w20, #546, lsl #12
+# CHECK: sub sp, sp, #288
+# CHECK: sub wsp, w19, #16
+0xe0 0xb7 0x3f 0x51
+0x84 0x8a 0x48 0x51
+0xff 0x83 0x4 0xd1
+0x7f 0x42 0x0 0x51
+
+
+# CHECK: adds w13, w23, #291, lsl #12
+# CHECK: cmn w2, #4095
+# CHECK: adds w20, wsp, #0
+# CHECK: cmn x3, #1, lsl #12
+0xed 0x8e 0x44 0x31
+0x5f 0xfc 0x3f 0x31
+0xf4 0x3 0x0 0x31
+0x7f 0x4 0x40 0xb1
+
+# CHECK: cmp sp, #20, lsl #12
+# CHECK: cmp x30, #4095
+# CHECK: subs x4, sp, #3822
+0xff 0x53 0x40 0xf1
+0xdf 0xff 0x3f 0xf1
+0xe4 0xbb 0x3b 0xf1
+
+# These should really be CMN
+# CHECK: cmn w3, #291, lsl #12
+# CHECK: cmn wsp, #1365
+# CHECK: cmn sp, #1092, lsl #12
+0x7f 0x8c 0x44 0x31
+0xff 0x57 0x15 0x31
+0xff 0x13 0x51 0xb1
+
+# CHECK: mov sp, x30
+# CHECK: mov wsp, w20
+# CHECK: mov x11, sp
+# CHECK: mov w24, wsp
+0xdf 0x3 0x0 0x91
+0x9f 0x2 0x0 0x11
+0xeb 0x3 0x0 0x91
+0xf8 0x3 0x0 0x11
+
+#------------------------------------------------------------------------------
+# Add-subtract (shifted register)
+#------------------------------------------------------------------------------
+
+# CHECK: add w3, w5, w7
+# CHECK: add wzr, w3, w5
+# CHECK: add w20, wzr, w4
+# CHECK: add w4, w6, wzr
+# CHECK: add w11, w13, w15
+# CHECK: add w9, w3, wzr, lsl #10
+# CHECK: add w17, w29, w20, lsl #31
+# CHECK: add w21, w22, w23, lsr #0
+# CHECK: add w24, w25, w26, lsr #18
+# CHECK: add w27, w28, w29, lsr #31
+# CHECK: add w2, w3, w4, asr #0
+# CHECK: add w5, w6, w7, asr #21
+# CHECK: add w8, w9, w10, asr #31
+0xa3 0x0 0x7 0xb
+0x7f 0x0 0x5 0xb
+0xf4 0x3 0x4 0xb
+0xc4 0x0 0x1f 0xb
+0xab 0x1 0xf 0xb
+0x69 0x28 0x1f 0xb
+0xb1 0x7f 0x14 0xb
+0xd5 0x2 0x57 0xb
+0x38 0x4b 0x5a 0xb
+0x9b 0x7f 0x5d 0xb
+0x62 0x0 0x84 0xb
+0xc5 0x54 0x87 0xb
+0x28 0x7d 0x8a 0xb
+
+# CHECK: add x3, x5, x7
+# CHECK: add xzr, x3, x5
+# CHECK: add x20, xzr, x4
+# CHECK: add x4, x6, xzr
+# CHECK: add x11, x13, x15
+# CHECK: add x9, x3, xzr, lsl #10
+# CHECK: add x17, x29, x20, lsl #63
+# CHECK: add x21, x22, x23, lsr #0
+# CHECK: add x24, x25, x26, lsr #18
+# CHECK: add x27, x28, x29, lsr #63
+# CHECK: add x2, x3, x4, asr #0
+# CHECK: add x5, x6, x7, asr #21
+# CHECK: add x8, x9, x10, asr #63
+0xa3 0x0 0x7 0x8b
+0x7f 0x0 0x5 0x8b
+0xf4 0x3 0x4 0x8b
+0xc4 0x0 0x1f 0x8b
+0xab 0x1 0xf 0x8b
+0x69 0x28 0x1f 0x8b
+0xb1 0xff 0x14 0x8b
+0xd5 0x2 0x57 0x8b
+0x38 0x4b 0x5a 0x8b
+0x9b 0xff 0x5d 0x8b
+0x62 0x0 0x84 0x8b
+0xc5 0x54 0x87 0x8b
+0x28 0xfd 0x8a 0x8b
+
+# CHECK: adds w3, w5, w7
+# CHECK: cmn w3, w5
+# CHECK: adds w20, wzr, w4
+# CHECK: adds w4, w6, wzr
+# CHECK: adds w11, w13, w15
+# CHECK: adds w9, w3, wzr, lsl #10
+# CHECK: adds w17, w29, w20, lsl #31
+# CHECK: adds w21, w22, w23, lsr #0
+# CHECK: adds w24, w25, w26, lsr #18
+# CHECK: adds w27, w28, w29, lsr #31
+# CHECK: adds w2, w3, w4, asr #0
+# CHECK: adds w5, w6, w7, asr #21
+# CHECK: adds w8, w9, w10, asr #31
+0xa3 0x0 0x7 0x2b
+0x7f 0x0 0x5 0x2b
+0xf4 0x3 0x4 0x2b
+0xc4 0x0 0x1f 0x2b
+0xab 0x1 0xf 0x2b
+0x69 0x28 0x1f 0x2b
+0xb1 0x7f 0x14 0x2b
+0xd5 0x2 0x57 0x2b
+0x38 0x4b 0x5a 0x2b
+0x9b 0x7f 0x5d 0x2b
+0x62 0x0 0x84 0x2b
+0xc5 0x54 0x87 0x2b
+0x28 0x7d 0x8a 0x2b
+
+# CHECK: adds x3, x5, x7
+# CHECK: cmn x3, x5
+# CHECK: adds x20, xzr, x4
+# CHECK: adds x4, x6, xzr
+# CHECK: adds x11, x13, x15
+# CHECK: adds x9, x3, xzr, lsl #10
+# CHECK: adds x17, x29, x20, lsl #63
+# CHECK: adds x21, x22, x23, lsr #0
+# CHECK: adds x24, x25, x26, lsr #18
+# CHECK: adds x27, x28, x29, lsr #63
+# CHECK: adds x2, x3, x4, asr #0
+# CHECK: adds x5, x6, x7, asr #21
+# CHECK: adds x8, x9, x10, asr #63
+0xa3 0x0 0x7 0xab
+0x7f 0x0 0x5 0xab
+0xf4 0x3 0x4 0xab
+0xc4 0x0 0x1f 0xab
+0xab 0x1 0xf 0xab
+0x69 0x28 0x1f 0xab
+0xb1 0xff 0x14 0xab
+0xd5 0x2 0x57 0xab
+0x38 0x4b 0x5a 0xab
+0x9b 0xff 0x5d 0xab
+0x62 0x0 0x84 0xab
+0xc5 0x54 0x87 0xab
+0x28 0xfd 0x8a 0xab
+
+# CHECK: sub w3, w5, w7
+# CHECK: sub wzr, w3, w5
+# CHECK: sub w20, wzr, w4
+# CHECK: sub w4, w6, wzr
+# CHECK: sub w11, w13, w15
+# CHECK: sub w9, w3, wzr, lsl #10
+# CHECK: sub w17, w29, w20, lsl #31
+# CHECK: sub w21, w22, w23, lsr #0
+# CHECK: sub w24, w25, w26, lsr #18
+# CHECK: sub w27, w28, w29, lsr #31
+# CHECK: sub w2, w3, w4, asr #0
+# CHECK: sub w5, w6, w7, asr #21
+# CHECK: sub w8, w9, w10, asr #31
+0xa3 0x0 0x7 0x4b
+0x7f 0x0 0x5 0x4b
+0xf4 0x3 0x4 0x4b
+0xc4 0x0 0x1f 0x4b
+0xab 0x1 0xf 0x4b
+0x69 0x28 0x1f 0x4b
+0xb1 0x7f 0x14 0x4b
+0xd5 0x2 0x57 0x4b
+0x38 0x4b 0x5a 0x4b
+0x9b 0x7f 0x5d 0x4b
+0x62 0x0 0x84 0x4b
+0xc5 0x54 0x87 0x4b
+0x28 0x7d 0x8a 0x4b
+
+# CHECK: sub x3, x5, x7
+# CHECK: sub xzr, x3, x5
+# CHECK: sub x20, xzr, x4
+# CHECK: sub x4, x6, xzr
+# CHECK: sub x11, x13, x15
+# CHECK: sub x9, x3, xzr, lsl #10
+# CHECK: sub x17, x29, x20, lsl #63
+# CHECK: sub x21, x22, x23, lsr #0
+# CHECK: sub x24, x25, x26, lsr #18
+# CHECK: sub x27, x28, x29, lsr #63
+# CHECK: sub x2, x3, x4, asr #0
+# CHECK: sub x5, x6, x7, asr #21
+# CHECK: sub x8, x9, x10, asr #63
+0xa3 0x0 0x7 0xcb
+0x7f 0x0 0x5 0xcb
+0xf4 0x3 0x4 0xcb
+0xc4 0x0 0x1f 0xcb
+0xab 0x1 0xf 0xcb
+0x69 0x28 0x1f 0xcb
+0xb1 0xff 0x14 0xcb
+0xd5 0x2 0x57 0xcb
+0x38 0x4b 0x5a 0xcb
+0x9b 0xff 0x5d 0xcb
+0x62 0x0 0x84 0xcb
+0xc5 0x54 0x87 0xcb
+0x28 0xfd 0x8a 0xcb
+
+# CHECK: subs w3, w5, w7
+# CHECK: cmp w3, w5
+# CHECK: subs w20, wzr, w4
+# CHECK: subs w4, w6, wzr
+# CHECK: subs w11, w13, w15
+# CHECK: subs w9, w3, wzr, lsl #10
+# CHECK: subs w17, w29, w20, lsl #31
+# CHECK: subs w21, w22, w23, lsr #0
+# CHECK: subs w24, w25, w26, lsr #18
+# CHECK: subs w27, w28, w29, lsr #31
+# CHECK: subs w2, w3, w4, asr #0
+# CHECK: subs w5, w6, w7, asr #21
+# CHECK: subs w8, w9, w10, asr #31
+0xa3 0x0 0x7 0x6b
+0x7f 0x0 0x5 0x6b
+0xf4 0x3 0x4 0x6b
+0xc4 0x0 0x1f 0x6b
+0xab 0x1 0xf 0x6b
+0x69 0x28 0x1f 0x6b
+0xb1 0x7f 0x14 0x6b
+0xd5 0x2 0x57 0x6b
+0x38 0x4b 0x5a 0x6b
+0x9b 0x7f 0x5d 0x6b
+0x62 0x0 0x84 0x6b
+0xc5 0x54 0x87 0x6b
+0x28 0x7d 0x8a 0x6b
+
+# CHECK: subs x3, x5, x7
+# CHECK: cmp x3, x5
+# CHECK: subs x20, xzr, x4
+# CHECK: subs x4, x6, xzr
+# CHECK: subs x11, x13, x15
+# CHECK: subs x9, x3, xzr, lsl #10
+# CHECK: subs x17, x29, x20, lsl #63
+# CHECK: subs x21, x22, x23, lsr #0
+# CHECK: subs x24, x25, x26, lsr #18
+# CHECK: subs x27, x28, x29, lsr #63
+# CHECK: subs x2, x3, x4, asr #0
+# CHECK: subs x5, x6, x7, asr #21
+# CHECK: subs x8, x9, x10, asr #63
+0xa3 0x0 0x7 0xeb
+0x7f 0x0 0x5 0xeb
+0xf4 0x3 0x4 0xeb
+0xc4 0x0 0x1f 0xeb
+0xab 0x1 0xf 0xeb
+0x69 0x28 0x1f 0xeb
+0xb1 0xff 0x14 0xeb
+0xd5 0x2 0x57 0xeb
+0x38 0x4b 0x5a 0xeb
+0x9b 0xff 0x5d 0xeb
+0x62 0x0 0x84 0xeb
+0xc5 0x54 0x87 0xeb
+0x28 0xfd 0x8a 0xeb
+
+# CHECK: cmn w0, w3
+# CHECK: cmn wzr, w4
+# CHECK: cmn w5, wzr
+# CHECK: cmn w6, w7
+# CHECK: cmn w8, w9, lsl #15
+# CHECK: cmn w10, w11, lsl #31
+# CHECK: cmn w12, w13, lsr #0
+# CHECK: cmn w14, w15, lsr #21
+# CHECK: cmn w16, w17, lsr #31
+# CHECK: cmn w18, w19, asr #0
+# CHECK: cmn w20, w21, asr #22
+# CHECK: cmn w22, w23, asr #31
+0x1f 0x0 0x3 0x2b
+0xff 0x3 0x4 0x2b
+0xbf 0x0 0x1f 0x2b
+0xdf 0x0 0x7 0x2b
+0x1f 0x3d 0x9 0x2b
+0x5f 0x7d 0xb 0x2b
+0x9f 0x1 0x4d 0x2b
+0xdf 0x55 0x4f 0x2b
+0x1f 0x7e 0x51 0x2b
+0x5f 0x2 0x93 0x2b
+0x9f 0x5a 0x95 0x2b
+0xdf 0x7e 0x97 0x2b
+
+# CHECK: cmn x0, x3
+# CHECK: cmn xzr, x4
+# CHECK: cmn x5, xzr
+# CHECK: cmn x6, x7
+# CHECK: cmn x8, x9, lsl #15
+# CHECK: cmn x10, x11, lsl #63
+# CHECK: cmn x12, x13, lsr #0
+# CHECK: cmn x14, x15, lsr #41
+# CHECK: cmn x16, x17, lsr #63
+# CHECK: cmn x18, x19, asr #0
+# CHECK: cmn x20, x21, asr #55
+# CHECK: cmn x22, x23, asr #63
+0x1f 0x0 0x3 0xab
+0xff 0x3 0x4 0xab
+0xbf 0x0 0x1f 0xab
+0xdf 0x0 0x7 0xab
+0x1f 0x3d 0x9 0xab
+0x5f 0xfd 0xb 0xab
+0x9f 0x1 0x4d 0xab
+0xdf 0xa5 0x4f 0xab
+0x1f 0xfe 0x51 0xab
+0x5f 0x2 0x93 0xab
+0x9f 0xde 0x95 0xab
+0xdf 0xfe 0x97 0xab
+
+# CHECK: cmp w0, w3
+# CHECK: cmp wzr, w4
+# CHECK: cmp w5, wzr
+# CHECK: cmp w6, w7
+# CHECK: cmp w8, w9, lsl #15
+# CHECK: cmp w10, w11, lsl #31
+# CHECK: cmp w12, w13, lsr #0
+# CHECK: cmp w14, w15, lsr #21
+# CHECK: cmp w16, w17, lsr #31
+# CHECK: cmp w18, w19, asr #0
+# CHECK: cmp w20, w21, asr #22
+# CHECK: cmp w22, w23, asr #31
+0x1f 0x0 0x3 0x6b
+0xff 0x3 0x4 0x6b
+0xbf 0x0 0x1f 0x6b
+0xdf 0x0 0x7 0x6b
+0x1f 0x3d 0x9 0x6b
+0x5f 0x7d 0xb 0x6b
+0x9f 0x1 0x4d 0x6b
+0xdf 0x55 0x4f 0x6b
+0x1f 0x7e 0x51 0x6b
+0x5f 0x2 0x93 0x6b
+0x9f 0x5a 0x95 0x6b
+0xdf 0x7e 0x97 0x6b
+
+# CHECK: cmp x0, x3
+# CHECK: cmp xzr, x4
+# CHECK: cmp x5, xzr
+# CHECK: cmp x6, x7
+# CHECK: cmp x8, x9, lsl #15
+# CHECK: cmp x10, x11, lsl #63
+# CHECK: cmp x12, x13, lsr #0
+# CHECK: cmp x14, x15, lsr #41
+# CHECK: cmp x16, x17, lsr #63
+# CHECK: cmp x18, x19, asr #0
+# CHECK: cmp x20, x21, asr #55
+# CHECK: cmp x22, x23, asr #63
+0x1f 0x0 0x3 0xeb
+0xff 0x3 0x4 0xeb
+0xbf 0x0 0x1f 0xeb
+0xdf 0x0 0x7 0xeb
+0x1f 0x3d 0x9 0xeb
+0x5f 0xfd 0xb 0xeb
+0x9f 0x1 0x4d 0xeb
+0xdf 0xa5 0x4f 0xeb
+0x1f 0xfe 0x51 0xeb
+0x5f 0x2 0x93 0xeb
+0x9f 0xde 0x95 0xeb
+0xdf 0xfe 0x97 0xeb
+
+# CHECK: sub w29, wzr, w30
+# CHECK: sub w30, wzr, wzr
+# CHECK: sub wzr, wzr, w0
+# CHECK: sub w28, wzr, w27
+# CHECK: sub w26, wzr, w25, lsl #29
+# CHECK: sub w24, wzr, w23, lsl #31
+# CHECK: sub w22, wzr, w21, lsr #0
+# CHECK: sub w20, wzr, w19, lsr #1
+# CHECK: sub w18, wzr, w17, lsr #31
+# CHECK: sub w16, wzr, w15, asr #0
+# CHECK: sub w14, wzr, w13, asr #12
+# CHECK: sub w12, wzr, w11, asr #31
+0xfd 0x3 0x1e 0x4b
+0xfe 0x3 0x1f 0x4b
+0xff 0x3 0x0 0x4b
+0xfc 0x3 0x1b 0x4b
+0xfa 0x77 0x19 0x4b
+0xf8 0x7f 0x17 0x4b
+0xf6 0x3 0x55 0x4b
+0xf4 0x7 0x53 0x4b
+0xf2 0x7f 0x51 0x4b
+0xf0 0x3 0x8f 0x4b
+0xee 0x33 0x8d 0x4b
+0xec 0x7f 0x8b 0x4b
+
+# CHECK: sub x29, xzr, x30
+# CHECK: sub x30, xzr, xzr
+# CHECK: sub xzr, xzr, x0
+# CHECK: sub x28, xzr, x27
+# CHECK: sub x26, xzr, x25, lsl #29
+# CHECK: sub x24, xzr, x23, lsl #31
+# CHECK: sub x22, xzr, x21, lsr #0
+# CHECK: sub x20, xzr, x19, lsr #1
+# CHECK: sub x18, xzr, x17, lsr #31
+# CHECK: sub x16, xzr, x15, asr #0
+# CHECK: sub x14, xzr, x13, asr #12
+# CHECK: sub x12, xzr, x11, asr #31
+0xfd 0x3 0x1e 0xcb
+0xfe 0x3 0x1f 0xcb
+0xff 0x3 0x0 0xcb
+0xfc 0x3 0x1b 0xcb
+0xfa 0x77 0x19 0xcb
+0xf8 0x7f 0x17 0xcb
+0xf6 0x3 0x55 0xcb
+0xf4 0x7 0x53 0xcb
+0xf2 0x7f 0x51 0xcb
+0xf0 0x3 0x8f 0xcb
+0xee 0x33 0x8d 0xcb
+0xec 0x7f 0x8b 0xcb
+
+# CHECK: subs w29, wzr, w30
+# CHECK: subs w30, wzr, wzr
+# CHECK: cmp wzr, w0
+# CHECK: subs w28, wzr, w27
+# CHECK: subs w26, wzr, w25, lsl #29
+# CHECK: subs w24, wzr, w23, lsl #31
+# CHECK: subs w22, wzr, w21, lsr #0
+# CHECK: subs w20, wzr, w19, lsr #1
+# CHECK: subs w18, wzr, w17, lsr #31
+# CHECK: subs w16, wzr, w15, asr #0
+# CHECK: subs w14, wzr, w13, asr #12
+# CHECK: subs w12, wzr, w11, asr #31
+0xfd 0x3 0x1e 0x6b
+0xfe 0x3 0x1f 0x6b
+0xff 0x3 0x0 0x6b
+0xfc 0x3 0x1b 0x6b
+0xfa 0x77 0x19 0x6b
+0xf8 0x7f 0x17 0x6b
+0xf6 0x3 0x55 0x6b
+0xf4 0x7 0x53 0x6b
+0xf2 0x7f 0x51 0x6b
+0xf0 0x3 0x8f 0x6b
+0xee 0x33 0x8d 0x6b
+0xec 0x7f 0x8b 0x6b
+
+# CHECK: subs x29, xzr, x30
+# CHECK: subs x30, xzr, xzr
+# CHECK: cmp xzr, x0
+# CHECK: subs x28, xzr, x27
+# CHECK: subs x26, xzr, x25, lsl #29
+# CHECK: subs x24, xzr, x23, lsl #31
+# CHECK: subs x22, xzr, x21, lsr #0
+# CHECK: subs x20, xzr, x19, lsr #1
+# CHECK: subs x18, xzr, x17, lsr #31
+# CHECK: subs x16, xzr, x15, asr #0
+# CHECK: subs x14, xzr, x13, asr #12
+# CHECK: subs x12, xzr, x11, asr #31
+0xfd 0x3 0x1e 0xeb
+0xfe 0x3 0x1f 0xeb
+0xff 0x3 0x0 0xeb
+0xfc 0x3 0x1b 0xeb
+0xfa 0x77 0x19 0xeb
+0xf8 0x7f 0x17 0xeb
+0xf6 0x3 0x55 0xeb
+0xf4 0x7 0x53 0xeb
+0xf2 0x7f 0x51 0xeb
+0xf0 0x3 0x8f 0xeb
+0xee 0x33 0x8d 0xeb
+0xec 0x7f 0x8b 0xeb
+
+#------------------------------------------------------------------------------
+# Add-subtract (with carry)
+#------------------------------------------------------------------------------
+
+# CHECK: adc w29, w27, w25
+# CHECK: adc wzr, w3, w4
+# CHECK: adc w9, wzr, w10
+# CHECK: adc w20, w0, wzr
+0x7d 0x3 0x19 0x1a
+0x7f 0x0 0x4 0x1a
+0xe9 0x3 0xa 0x1a
+0x14 0x0 0x1f 0x1a
+
+# CHECK: adc x29, x27, x25
+# CHECK: adc xzr, x3, x4
+# CHECK: adc x9, xzr, x10
+# CHECK: adc x20, x0, xzr
+0x7d 0x3 0x19 0x9a
+0x7f 0x0 0x4 0x9a
+0xe9 0x3 0xa 0x9a
+0x14 0x0 0x1f 0x9a
+
+# CHECK: adcs w29, w27, w25
+# CHECK: adcs wzr, w3, w4
+# CHECK: adcs w9, wzr, w10
+# CHECK: adcs w20, w0, wzr
+0x7d 0x3 0x19 0x3a
+0x7f 0x0 0x4 0x3a
+0xe9 0x3 0xa 0x3a
+0x14 0x0 0x1f 0x3a
+
+# CHECK: adcs x29, x27, x25
+# CHECK: adcs xzr, x3, x4
+# CHECK: adcs x9, xzr, x10
+# CHECK: adcs x20, x0, xzr
+0x7d 0x3 0x19 0xba
+0x7f 0x0 0x4 0xba
+0xe9 0x3 0xa 0xba
+0x14 0x0 0x1f 0xba
+
+# CHECK: sbc w29, w27, w25
+# CHECK: sbc wzr, w3, w4
+# CHECK: ngc w9, w10
+# CHECK: sbc w20, w0, wzr
+0x7d 0x3 0x19 0x5a
+0x7f 0x0 0x4 0x5a
+0xe9 0x3 0xa 0x5a
+0x14 0x0 0x1f 0x5a
+
+# CHECK: sbc x29, x27, x25
+# CHECK: sbc xzr, x3, x4
+# CHECK: ngc x9, x10
+# CHECK: sbc x20, x0, xzr
+0x7d 0x3 0x19 0xda
+0x7f 0x0 0x4 0xda
+0xe9 0x3 0xa 0xda
+0x14 0x0 0x1f 0xda
+
+# CHECK: sbcs w29, w27, w25
+# CHECK: sbcs wzr, w3, w4
+# CHECK: ngcs w9, w10
+# CHECK: sbcs w20, w0, wzr
+0x7d 0x3 0x19 0x7a
+0x7f 0x0 0x4 0x7a
+0xe9 0x3 0xa 0x7a
+0x14 0x0 0x1f 0x7a
+
+# CHECK: sbcs x29, x27, x25
+# CHECK: sbcs xzr, x3, x4
+# CHECK: ngcs x9, x10
+# CHECK: sbcs x20, x0, xzr
+0x7d 0x3 0x19 0xfa
+0x7f 0x0 0x4 0xfa
+0xe9 0x3 0xa 0xfa
+0x14 0x0 0x1f 0xfa
+
+# CHECK: ngc w3, w12
+# CHECK: ngc wzr, w9
+# CHECK: ngc w23, wzr
+0xe3 0x3 0xc 0x5a
+0xff 0x3 0x9 0x5a
+0xf7 0x3 0x1f 0x5a
+
+# CHECK: ngc x29, x30
+# CHECK: ngc xzr, x0
+# CHECK: ngc x0, xzr
+0xfd 0x3 0x1e 0xda
+0xff 0x3 0x0 0xda
+0xe0 0x3 0x1f 0xda
+
+# CHECK: ngcs w3, w12
+# CHECK: ngcs wzr, w9
+# CHECK: ngcs w23, wzr
+0xe3 0x3 0xc 0x7a
+0xff 0x3 0x9 0x7a
+0xf7 0x3 0x1f 0x7a
+
+# CHECK: ngcs x29, x30
+# CHECK: ngcs xzr, x0
+# CHECK: ngcs x0, xzr
+0xfd 0x3 0x1e 0xfa
+0xff 0x3 0x0 0xfa
+0xe0 0x3 0x1f 0xfa
+
+#------------------------------------------------------------------------------
+# Bitfield
+#------------------------------------------------------------------------------
+
+# CHECK: sbfx x1, x2, #3, #2
+# CHECK: asr x3, x4, #63
+# CHECK: asr wzr, wzr, #31
+# CHECK: sbfx w12, w9, #0, #1
+0x41 0x10 0x43 0x93
+0x83 0xfc 0x7f 0x93
+0xff 0x7f 0x1f 0x13
+0x2c 0x1 0x0 0x13
+
+# CHECK: ubfiz x4, x5, #52, #11
+# CHECK: ubfx xzr, x4, #0, #1
+# CHECK: ubfiz x4, xzr, #1, #6
+# CHECK: lsr x5, x6, #12
+0xa4 0x28 0x4c 0xd3
+0x9f 0x0 0x40 0xd3
+0xe4 0x17 0x7f 0xd3
+0xc5 0xfc 0x4c 0xd3
+
+# CHECK: bfi x4, x5, #52, #11
+# CHECK: bfxil xzr, x4, #0, #1
+# CHECK: bfi x4, xzr, #1, #6
+# CHECK: bfxil x5, x6, #12, #52
+0xa4 0x28 0x4c 0xb3
+0x9f 0x0 0x40 0xb3
+0xe4 0x17 0x7f 0xb3
+0xc5 0xfc 0x4c 0xb3
+
+# CHECK: sxtb w1, w2
+# CHECK: sxtb xzr, w3
+# CHECK: sxth w9, w10
+# CHECK: sxth x0, w1
+# CHECK: sxtw x3, w30
+0x41 0x1c 0x0 0x13
+0x7f 0x1c 0x40 0x93
+0x49 0x3d 0x0 0x13
+0x20 0x3c 0x40 0x93
+0xc3 0x7f 0x40 0x93
+
+# CHECK: uxtb w1, w2
+# CHECK: uxth w9, w10
+# CHECK: ubfx x3, x30, #0, #32
+0x41 0x1c 0x0 0x53
+0x49 0x3d 0x0 0x53
+0xc3 0x7f 0x40 0xd3
+
+# CHECK: asr w3, w2, #0
+# CHECK: asr w9, w10, #31
+# CHECK: asr x20, x21, #63
+# CHECK: asr w1, wzr, #3
+0x43 0x7c 0x0 0x13
+0x49 0x7d 0x1f 0x13
+0xb4 0xfe 0x7f 0x93
+0xe1 0x7f 0x3 0x13
+
+# CHECK: lsr w3, w2, #0
+# CHECK: lsr w9, w10, #31
+# CHECK: lsr x20, x21, #63
+# CHECK: lsr wzr, wzr, #3
+0x43 0x7c 0x0 0x53
+0x49 0x7d 0x1f 0x53
+0xb4 0xfe 0x7f 0xd3
+0xff 0x7f 0x3 0x53
+
+# CHECK: lsr w3, w2, #0
+# CHECK: lsl w9, w10, #31
+# CHECK: lsl x20, x21, #63
+# CHECK: lsl w1, wzr, #3
+0x43 0x7c 0x0 0x53
+0x49 0x1 0x1 0x53
+0xb4 0x2 0x41 0xd3
+0xe1 0x73 0x1d 0x53
+
+# CHECK: sbfx w9, w10, #0, #1
+# CHECK: sbfiz x2, x3, #63, #1
+# CHECK: asr x19, x20, #0
+# CHECK: sbfiz x9, x10, #5, #59
+# CHECK: asr w9, w10, #0
+# CHECK: sbfiz w11, w12, #31, #1
+# CHECK: sbfiz w13, w14, #29, #3
+# CHECK: sbfiz xzr, xzr, #10, #11
+0x49 0x1 0x0 0x13
+0x62 0x0 0x41 0x93
+0x93 0xfe 0x40 0x93
+0x49 0xe9 0x7b 0x93
+0x49 0x7d 0x0 0x13
+0x8b 0x1 0x1 0x13
+0xcd 0x9 0x3 0x13
+0xff 0x2b 0x76 0x93
+
+# CHECK: sbfx w9, w10, #0, #1
+# CHECK: asr x2, x3, #63
+# CHECK: asr x19, x20, #0
+# CHECK: asr x9, x10, #5
+# CHECK: asr w9, w10, #0
+# CHECK: asr w11, w12, #31
+# CHECK: asr w13, w14, #29
+# CHECK: sbfx xzr, xzr, #10, #11
+0x49 0x1 0x0 0x13
+0x62 0xfc 0x7f 0x93
+0x93 0xfe 0x40 0x93
+0x49 0xfd 0x45 0x93
+0x49 0x7d 0x0 0x13
+0x8b 0x7d 0x1f 0x13
+0xcd 0x7d 0x1d 0x13
+0xff 0x53 0x4a 0x93
+
+# CHECK: bfxil w9, w10, #0, #1
+# CHECK: bfi x2, x3, #63, #1
+# CHECK: bfxil x19, x20, #0, #64
+# CHECK: bfi x9, x10, #5, #59
+# CHECK: bfxil w9, w10, #0, #32
+# CHECK: bfi w11, w12, #31, #1
+# CHECK: bfi w13, w14, #29, #3
+# CHECK: bfi xzr, xzr, #10, #11
+0x49 0x1 0x0 0x33
+0x62 0x0 0x41 0xb3
+0x93 0xfe 0x40 0xb3
+0x49 0xe9 0x7b 0xb3
+0x49 0x7d 0x0 0x33
+0x8b 0x1 0x1 0x33
+0xcd 0x9 0x3 0x33
+0xff 0x2b 0x76 0xb3
+
+# CHECK: bfxil w9, w10, #0, #1
+# CHECK: bfxil x2, x3, #63, #1
+# CHECK: bfxil x19, x20, #0, #64
+# CHECK: bfxil x9, x10, #5, #59
+# CHECK: bfxil w9, w10, #0, #32
+# CHECK: bfxil w11, w12, #31, #1
+# CHECK: bfxil w13, w14, #29, #3
+# CHECK: bfxil xzr, xzr, #10, #11
+0x49 0x1 0x0 0x33
+0x62 0xfc 0x7f 0xb3
+0x93 0xfe 0x40 0xb3
+0x49 0xfd 0x45 0xb3
+0x49 0x7d 0x0 0x33
+0x8b 0x7d 0x1f 0x33
+0xcd 0x7d 0x1d 0x33
+0xff 0x53 0x4a 0xb3
+
+# CHECK: ubfx w9, w10, #0, #1
+# CHECK: lsl x2, x3, #63
+# CHECK: lsr x19, x20, #0
+# CHECK: lsl x9, x10, #5
+# CHECK: lsr w9, w10, #0
+# CHECK: lsl w11, w12, #31
+# CHECK: lsl w13, w14, #29
+# CHECK: ubfiz xzr, xzr, #10, #11
+0x49 0x1 0x0 0x53
+0x62 0x0 0x41 0xd3
+0x93 0xfe 0x40 0xd3
+0x49 0xe9 0x7b 0xd3
+0x49 0x7d 0x0 0x53
+0x8b 0x1 0x1 0x53
+0xcd 0x9 0x3 0x53
+0xff 0x2b 0x76 0xd3
+
+# CHECK: ubfx w9, w10, #0, #1
+# CHECK: lsr x2, x3, #63
+# CHECK: lsr x19, x20, #0
+# CHECK: lsr x9, x10, #5
+# CHECK: lsr w9, w10, #0
+# CHECK: lsr w11, w12, #31
+# CHECK: lsr w13, w14, #29
+# CHECK: ubfx xzr, xzr, #10, #11
+0x49 0x1 0x0 0x53
+0x62 0xfc 0x7f 0xd3
+0x93 0xfe 0x40 0xd3
+0x49 0xfd 0x45 0xd3
+0x49 0x7d 0x0 0x53
+0x8b 0x7d 0x1f 0x53
+0xcd 0x7d 0x1d 0x53
+0xff 0x53 0x4a 0xd3
+
+#------------------------------------------------------------------------------
+# Compare and branch (immediate)
+#------------------------------------------------------------------------------
+
+# CHECK: cbz w5, #4
+# CHECK: cbz x5, #0
+# CHECK: cbnz x2, #-4
+# CHECK: cbnz x26, #1048572
+0x25 0x0 0x0 0x34
+0x05 0x0 0x0 0xb4
+0xe2 0xff 0xff 0xb5
+0xfa 0xff 0x7f 0xb5
+
+# CHECK: cbz wzr, #0
+# CHECK: cbnz xzr, #0
+0x1f 0x0 0x0 0x34
+0x1f 0x0 0x0 0xb5
+
+#------------------------------------------------------------------------------
+# Conditional branch (immediate)
+#------------------------------------------------------------------------------
+
+# CHECK: b.ne #4
+# CHECK: b.ge #1048572
+# CHECK: b.ge #-4
+0x21 0x00 0x00 0x54
+0xea 0xff 0x7f 0x54
+0xea 0xff 0xff 0x54
+
+#------------------------------------------------------------------------------
+# Conditional compare (immediate)
+#------------------------------------------------------------------------------
+
+# CHECK: ccmp w1, #31, #0, eq
+# CHECK: ccmp w3, #0, #15, hs
+# CHECK: ccmp wzr, #15, #13, hs
+0x20 0x08 0x5f 0x7a
+0x6f 0x28 0x40 0x7a
+0xed 0x2b 0x4f 0x7a
+
+# CHECK: ccmp x9, #31, #0, le
+# CHECK: ccmp x3, #0, #15, gt
+# CHECK: ccmp xzr, #5, #7, ne
+0x20 0xd9 0x5f 0xfa
+0x6f 0xc8 0x40 0xfa
+0xe7 0x1b 0x45 0xfa
+
+# CHECK: ccmn w1, #31, #0, eq
+# CHECK: ccmn w3, #0, #15, hs
+# CHECK: ccmn wzr, #15, #13, hs
+0x20 0x08 0x5f 0x3a
+0x6f 0x28 0x40 0x3a
+0xed 0x2b 0x4f 0x3a
+
+# CHECK: ccmn x9, #31, #0, le
+# CHECK: ccmn x3, #0, #15, gt
+# CHECK: ccmn xzr, #5, #7, ne
+0x20 0xd9 0x5f 0xba
+0x6f 0xc8 0x40 0xba
+0xe7 0x1b 0x45 0xba
+
+#------------------------------------------------------------------------------
+# Conditional compare (register)
+#------------------------------------------------------------------------------
+
+# CHECK: ccmp w1, wzr, #0, eq
+# CHECK: ccmp w3, w0, #15, hs
+# CHECK: ccmp wzr, w15, #13, hs
+0x20 0x00 0x5f 0x7a
+0x6f 0x20 0x40 0x7a
+0xed 0x23 0x4f 0x7a
+
+# CHECK: ccmp x9, xzr, #0, le
+# CHECK: ccmp x3, x0, #15, gt
+# CHECK: ccmp xzr, x5, #7, ne
+0x20 0xd1 0x5f 0xfa
+0x6f 0xc0 0x40 0xfa
+0xe7 0x13 0x45 0xfa
+
+# CHECK: ccmn w1, wzr, #0, eq
+# CHECK: ccmn w3, w0, #15, hs
+# CHECK: ccmn wzr, w15, #13, hs
+0x20 0x00 0x5f 0x3a
+0x6f 0x20 0x40 0x3a
+0xed 0x23 0x4f 0x3a
+
+# CHECK: ccmn x9, xzr, #0, le
+# CHECK: ccmn x3, x0, #15, gt
+# CHECK: ccmn xzr, x5, #7, ne
+0x20 0xd1 0x5f 0xba
+0x6f 0xc0 0x40 0xba
+0xe7 0x13 0x45 0xba
+
+#------------------------------------------------------------------------------
+# Conditional select
+#------------------------------------------------------------------------------
+# CHECK: csel w1, w0, w19, ne
+# CHECK: csel wzr, w5, w9, eq
+# CHECK: csel w9, wzr, w30, gt
+# CHECK: csel w1, w28, wzr, mi
+# CHECK: csel x19, x23, x29, lt
+# CHECK: csel xzr, x3, x4, ge
+# CHECK: csel x5, xzr, x6, hs
+# CHECK: csel x7, x8, xzr, lo
+0x1 0x10 0x93 0x1a
+0xbf 0x0 0x89 0x1a
+0xe9 0xc3 0x9e 0x1a
+0x81 0x43 0x9f 0x1a
+0xf3 0xb2 0x9d 0x9a
+0x7f 0xa0 0x84 0x9a
+0xe5 0x23 0x86 0x9a
+0x7 0x31 0x9f 0x9a
+
+# CHECK: csinc w1, w0, w19, ne
+# CHECK: csinc wzr, w5, w9, eq
+# CHECK: csinc w9, wzr, w30, gt
+# CHECK: csinc w1, w28, wzr, mi
+# CHECK: csinc x19, x23, x29, lt
+# CHECK: csinc xzr, x3, x4, ge
+# CHECK: csinc x5, xzr, x6, hs
+# CHECK: csinc x7, x8, xzr, lo
+0x1 0x14 0x93 0x1a
+0xbf 0x4 0x89 0x1a
+0xe9 0xc7 0x9e 0x1a
+0x81 0x47 0x9f 0x1a
+0xf3 0xb6 0x9d 0x9a
+0x7f 0xa4 0x84 0x9a
+0xe5 0x27 0x86 0x9a
+0x7 0x35 0x9f 0x9a
+
+# CHECK: csinv w1, w0, w19, ne
+# CHECK: csinv wzr, w5, w9, eq
+# CHECK: csinv w9, wzr, w30, gt
+# CHECK: csinv w1, w28, wzr, mi
+# CHECK: csinv x19, x23, x29, lt
+# CHECK: csinv xzr, x3, x4, ge
+# CHECK: csinv x5, xzr, x6, hs
+# CHECK: csinv x7, x8, xzr, lo
+0x1 0x10 0x93 0x5a
+0xbf 0x0 0x89 0x5a
+0xe9 0xc3 0x9e 0x5a
+0x81 0x43 0x9f 0x5a
+0xf3 0xb2 0x9d 0xda
+0x7f 0xa0 0x84 0xda
+0xe5 0x23 0x86 0xda
+0x7 0x31 0x9f 0xda
+
+# CHECK: csneg w1, w0, w19, ne
+# CHECK: csneg wzr, w5, w9, eq
+# CHECK: csneg w9, wzr, w30, gt
+# CHECK: csneg w1, w28, wzr, mi
+# CHECK: csneg x19, x23, x29, lt
+# CHECK: csneg xzr, x3, x4, ge
+# CHECK: csneg x5, xzr, x6, hs
+# CHECK: csneg x7, x8, xzr, lo
+0x1 0x14 0x93 0x5a
+0xbf 0x4 0x89 0x5a
+0xe9 0xc7 0x9e 0x5a
+0x81 0x47 0x9f 0x5a
+0xf3 0xb6 0x9d 0xda
+0x7f 0xa4 0x84 0xda
+0xe5 0x27 0x86 0xda
+0x7 0x35 0x9f 0xda
+
+# CHECK: csinc w3, wzr, wzr, ne
+# CHECK: csinc x9, xzr, xzr, mi
+# CHECK: csinv w20, wzr, wzr, eq
+# CHECK: csinv x30, xzr, xzr, lt
+0xe3 0x17 0x9f 0x1a
+0xe9 0x47 0x9f 0x9a
+0xf4 0x3 0x9f 0x5a
+0xfe 0xb3 0x9f 0xda
+
+# CHECK: csinc w3, w5, w5, le
+# CHECK: csinc wzr, w4, w4, gt
+# CHECK: csinc w9, wzr, wzr, ge
+# CHECK: csinc x3, x5, x5, le
+# CHECK: csinc xzr, x4, x4, gt
+# CHECK: csinc x9, xzr, xzr, ge
+0xa3 0xd4 0x85 0x1a
+0x9f 0xc4 0x84 0x1a
+0xe9 0xa7 0x9f 0x1a
+0xa3 0xd4 0x85 0x9a
+0x9f 0xc4 0x84 0x9a
+0xe9 0xa7 0x9f 0x9a
+
+# CHECK: csinv w3, w5, w5, le
+# CHECK: csinv wzr, w4, w4, gt
+# CHECK: csinv w9, wzr, wzr, ge
+# CHECK: csinv x3, x5, x5, le
+# CHECK: csinv xzr, x4, x4, gt
+# CHECK: csinv x9, xzr, xzr, ge
+0xa3 0xd0 0x85 0x5a
+0x9f 0xc0 0x84 0x5a
+0xe9 0xa3 0x9f 0x5a
+0xa3 0xd0 0x85 0xda
+0x9f 0xc0 0x84 0xda
+0xe9 0xa3 0x9f 0xda
+
+# CHECK: csneg w3, w5, w5, le
+# CHECK: csneg wzr, w4, w4, gt
+# CHECK: csneg w9, wzr, wzr, ge
+# CHECK: csneg x3, x5, x5, le
+# CHECK: csneg xzr, x4, x4, gt
+# CHECK: csneg x9, xzr, xzr, ge
+0xa3 0xd4 0x85 0x5a
+0x9f 0xc4 0x84 0x5a
+0xe9 0xa7 0x9f 0x5a
+0xa3 0xd4 0x85 0xda
+0x9f 0xc4 0x84 0xda
+0xe9 0xa7 0x9f 0xda
+
+#------------------------------------------------------------------------------
+# Data-processing (1 source)
+#------------------------------------------------------------------------------
+
+# CHECK: rbit w0, w7
+# CHECK: rbit x18, x3
+# CHECK: rev16 w17, w1
+# CHECK: rev16 x5, x2
+# CHECK: rev w18, w0
+# CHECK: rev32 x20, x1
+0xe0 0x00 0xc0 0x5a
+0x72 0x00 0xc0 0xda
+0x31 0x04 0xc0 0x5a
+0x45 0x04 0xc0 0xda
+0x12 0x08 0xc0 0x5a
+0x34 0x08 0xc0 0xda
+
+# CHECK: rev x22, x2
+# CHECK: clz w24, w3
+# CHECK: clz x26, x4
+# CHECK: cls w3, w5
+# CHECK: cls x20, x5
+0x56 0x0c 0xc0 0xda
+0x78 0x10 0xc0 0x5a
+0x9a 0x10 0xc0 0xda
+0xa3 0x14 0xc0 0x5a
+0xb4 0x14 0xc0 0xda
+
+#------------------------------------------------------------------------------
+# Data-processing (2 source)
+#------------------------------------------------------------------------------
+
+# CHECK: udiv w0, w7, w10
+# CHECK: udiv x9, x22, x4
+# CHECK: sdiv w12, w21, w0
+# CHECK: sdiv x13, x2, x1
+# CHECK: lsl w11, w12, w13
+# CHECK: lsl x14, x15, x16
+# CHECK: lsr w17, w18, w19
+# CHECK: lsr x20, x21, x22
+# CHECK: asr w23, w24, w25
+# CHECK: asr x26, x27, x28
+# CHECK: ror w0, w1, w2
+# CHECK: ror x3, x4, x5
+0xe0 0x08 0xca 0x1a
+0xc9 0x0a 0xc4 0x9a
+0xac 0x0e 0xc0 0x1a
+0x4d 0x0c 0xc1 0x9a
+0x8b 0x21 0xcd 0x1a
+0xee 0x21 0xd0 0x9a
+0x51 0x26 0xd3 0x1a
+0xb4 0x26 0xd6 0x9a
+0x17 0x2b 0xd9 0x1a
+0x7a 0x2b 0xdc 0x9a
+0x20 0x2c 0xc2 0x1a
+0x83 0x2c 0xc5 0x9a
+
+# CHECK: lsl w6, w7, w8
+# CHECK: lsl x9, x10, x11
+# CHECK: lsr w12, w13, w14
+# CHECK: lsr x15, x16, x17
+# CHECK: asr w18, w19, w20
+# CHECK: asr x21, x22, x23
+# CHECK: ror w24, w25, w26
+# CHECK: ror x27, x28, x29
+0xe6 0x20 0xc8 0x1a
+0x49 0x21 0xcb 0x9a
+0xac 0x25 0xce 0x1a
+0x0f 0x26 0xd1 0x9a
+0x72 0x2a 0xd4 0x1a
+0xd5 0x2a 0xd7 0x9a
+0x38 0x2f 0xda 0x1a
+0x9b 0x2f 0xdd 0x9a
+
+#------------------------------------------------------------------------------
+# Data-processing (3 sources)
+#------------------------------------------------------------------------------
+
+# First check some non-canonical encodings where Ra is not 0b11111 (only umulh
+# and smulh have such encodings).
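+# Ra occupies bits [14:10] of these encodings; the four words below set it to
+# values other than 0b11111 and should still disassemble as shown.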
+
+# CHECK: smulh x30, x29, x28
+# CHECK: smulh xzr, x27, x26
+# CHECK: umulh x30, x29, x28
+# CHECK: umulh x23, x30, xzr
+0xbe 0x73 0x5c 0x9b
+0x7f 0x2f 0x5a 0x9b
+0xbe 0x3f 0xdc 0x9b
+0xd7 0x77 0xdf 0x9b
+
+# Now onto the boilerplate stuff
+
+# CHECK: madd w1, w3, w7, w4
+# CHECK: madd wzr, w0, w9, w11
+# CHECK: madd w13, wzr, w4, w4
+# CHECK: madd w19, w30, wzr, w29
+# CHECK: mul w4, w5, w6
+0x61 0x10 0x7 0x1b
+0x1f 0x2c 0x9 0x1b
+0xed 0x13 0x4 0x1b
+0xd3 0x77 0x1f 0x1b
+0xa4 0x7c 0x6 0x1b
+
+# CHECK: madd x1, x3, x7, x4
+# CHECK: madd xzr, x0, x9, x11
+# CHECK: madd x13, xzr, x4, x4
+# CHECK: madd x19, x30, xzr, x29
+# CHECK: mul x4, x5, x6
+0x61 0x10 0x7 0x9b
+0x1f 0x2c 0x9 0x9b
+0xed 0x13 0x4 0x9b
+0xd3 0x77 0x1f 0x9b
+0xa4 0x7c 0x6 0x9b
+
+# CHECK: msub w1, w3, w7, w4
+# CHECK: msub wzr, w0, w9, w11
+# CHECK: msub w13, wzr, w4, w4
+# CHECK: msub w19, w30, wzr, w29
+# CHECK: mneg w4, w5, w6
+0x61 0x90 0x7 0x1b
+0x1f 0xac 0x9 0x1b
+0xed 0x93 0x4 0x1b
+0xd3 0xf7 0x1f 0x1b
+0xa4 0xfc 0x6 0x1b
+
+# CHECK: msub x1, x3, x7, x4
+# CHECK: msub xzr, x0, x9, x11
+# CHECK: msub x13, xzr, x4, x4
+# CHECK: msub x19, x30, xzr, x29
+# CHECK: mneg x4, x5, x6
+0x61 0x90 0x7 0x9b
+0x1f 0xac 0x9 0x9b
+0xed 0x93 0x4 0x9b
+0xd3 0xf7 0x1f 0x9b
+0xa4 0xfc 0x6 0x9b
+
+# CHECK: smaddl x3, w5, w2, x9
+# CHECK: smaddl xzr, w10, w11, x12
+# CHECK: smaddl x13, wzr, w14, x15
+# CHECK: smaddl x16, w17, wzr, x18
+# CHECK: smull x19, w20, w21
+0xa3 0x24 0x22 0x9b
+0x5f 0x31 0x2b 0x9b
+0xed 0x3f 0x2e 0x9b
+0x30 0x4a 0x3f 0x9b
+0x93 0x7e 0x35 0x9b
+
+# CHECK: smsubl x3, w5, w2, x9
+# CHECK: smsubl xzr, w10, w11, x12
+# CHECK: smsubl x13, wzr, w14, x15
+# CHECK: smsubl x16, w17, wzr, x18
+# CHECK: smnegl x19, w20, w21
+0xa3 0xa4 0x22 0x9b
+0x5f 0xb1 0x2b 0x9b
+0xed 0xbf 0x2e 0x9b
+0x30 0xca 0x3f 0x9b
+0x93 0xfe 0x35 0x9b
+
+# CHECK: umaddl x3, w5, w2, x9
+# CHECK: umaddl xzr, w10, w11, x12
+# CHECK: umaddl x13, wzr, w14, x15
+# CHECK: umaddl x16, w17, wzr, x18
+# CHECK: umull x19, w20, w21
+0xa3 0x24 0xa2 0x9b
+0x5f 0x31 0xab 0x9b
+0xed 0x3f 0xae 0x9b
+0x30 0x4a 0xbf 0x9b
+0x93 0x7e 0xb5 0x9b
+
+# CHECK: umsubl x3, w5, w2, x9
+# CHECK: umsubl xzr, w10, w11, x12
+# CHECK: umsubl x13, wzr, w14, x15
+# CHECK: umsubl x16, w17, wzr, x18
+# CHECK: umnegl x19, w20, w21
+0xa3 0xa4 0xa2 0x9b
+0x5f 0xb1 0xab 0x9b
+0xed 0xbf 0xae 0x9b
+0x30 0xca 0xbf 0x9b
+0x93 0xfe 0xb5 0x9b
+
+# CHECK: smulh x30, x29, x28
+# CHECK: smulh xzr, x27, x26
+# CHECK: smulh x25, xzr, x24
+# CHECK: smulh x23, x22, xzr
+0xbe 0x7f 0x5c 0x9b
+0x7f 0x7f 0x5a 0x9b
+0xf9 0x7f 0x58 0x9b
+0xd7 0x7e 0x5f 0x9b
+
+# CHECK: umulh x30, x29, x28
+# CHECK: umulh xzr, x27, x26
+# CHECK: umulh x25, xzr, x24
+# CHECK: umulh x23, x22, xzr
+0xbe 0x7f 0xdc 0x9b
+0x7f 0x7f 0xda 0x9b
+0xf9 0x7f 0xd8 0x9b
+0xd7 0x7e 0xdf 0x9b
+
+# CHECK: mul w3, w4, w5
+# CHECK: mul wzr, w6, w7
+# CHECK: mul w8, wzr, w9
+# CHECK: mul w10, w11, wzr
+# CHECK: mul x12, x13, x14
+# CHECK: mul xzr, x15, x16
+# CHECK: mul x17, xzr, x18
+# CHECK: mul x19, x20, xzr
+0x83 0x7c 0x5 0x1b
+0xdf 0x7c 0x7 0x1b
+0xe8 0x7f 0x9 0x1b
+0x6a 0x7d 0x1f 0x1b
+0xac 0x7d 0xe 0x9b
+0xff 0x7d 0x10 0x9b
+0xf1 0x7f 0x12 0x9b
+0x93 0x7e 0x1f 0x9b
+
+# CHECK: mneg w21, w22, w23
+# CHECK: mneg wzr, w24, w25
+# CHECK: mneg w26, wzr, w27
+# CHECK: mneg w28, w29, wzr
+0xd5 0xfe 0x17 0x1b
+0x1f 0xff 0x19 0x1b
+0xfa 0xff 0x1b 0x1b
+0xbc 0xff 0x1f 0x1b
+
+# CHECK: smull x11, w13, w17
+# CHECK: umull x11, w13, w17
+# CHECK: smnegl x11, w13, w17
+# CHECK: umnegl x11, w13, w17
+0xab 0x7d 0x31 0x9b
+0xab 0x7d 0xb1 0x9b
+0xab 0xfd 0x31 0x9b
+0xab 0xfd 0xb1 0x9b
+
+#------------------------------------------------------------------------------
+# Exception generation
+#------------------------------------------------------------------------------
+
+# CHECK: svc #0
+# CHECK: svc #65535
+0x1 0x0 0x0 0xd4
+0xe1 0xff 0x1f 0xd4
+
+# CHECK: hvc #1
+# CHECK: smc #12000
+# CHECK: brk #12
+# CHECK: hlt #123
+0x22 0x0 0x0 0xd4
+0x3 0xdc 0x5 0xd4
+0x80 0x1 0x20 0xd4
+0x60 0xf 0x40 0xd4
+
+# CHECK: dcps1 #42
+# CHECK: dcps2 #9
+# CHECK: dcps3 #1000
+0x41 0x5 0xa0 0xd4
+0x22 0x1 0xa0 0xd4
+0x3 0x7d 0xa0 0xd4
+
+# CHECK: dcps1
+# CHECK: dcps2
+# CHECK: dcps3
+0x1 0x0 0xa0 0xd4
+0x2 0x0 0xa0 0xd4
+0x3 0x0 0xa0 0xd4
+
+#------------------------------------------------------------------------------
+# Extract (immediate)
+#------------------------------------------------------------------------------
+
+# CHECK: extr w3, w5, w7, #0
+# CHECK: extr w11, w13, w17, #31
+0xa3 0x0 0x87 0x13
+0xab 0x7d 0x91 0x13
+
+# CHECK: extr x3, x5, x7, #15
+# CHECK: extr x11, x13, x17, #63
+0xa3 0x3c 0xc7 0x93
+0xab 0xfd 0xd1 0x93
+
+# CHECK: extr x19, x23, x23, #24
+# CHECK: extr x29, xzr, xzr, #63
+# CHECK: extr w9, w13, w13, #31
+0xf3 0x62 0xd7 0x93
+0xfd 0xff 0xdf 0x93
+0xa9 0x7d 0x8d 0x13
+
+#------------------------------------------------------------------------------
+# Floating-point compare
+#------------------------------------------------------------------------------
+
+# CHECK: fcmp s3, s5
+# CHECK: fcmp s31, #0.0
+# CHECK: fcmp s31, #0.0
+0x60 0x20 0x25 0x1e
+0xe8 0x23 0x20 0x1e
+0xe8 0x23 0x3f 0x1e
+
+# CHECK: fcmpe s29, s30
+# CHECK: fcmpe s15, #0.0
+# CHECK: fcmpe s15, #0.0
+0xb0 0x23 0x3e 0x1e
+0xf8 0x21 0x20 0x1e
+0xf8 0x21 0x2f 0x1e
+
+# CHECK: fcmp d4, d12
+# CHECK: fcmp d23, #0.0
+# CHECK: fcmp d23, #0.0
+0x80 0x20 0x6c 0x1e
+0xe8 0x22 0x60 0x1e
+0xe8 0x22 0x77 0x1e
+
+# CHECK: fcmpe d26, d22
+# CHECK: fcmpe d29, #0.0
+# CHECK: fcmpe d29, #0.0
+0x50 0x23 0x76 0x1e
+0xb8 0x23 0x60 0x1e
+0xb8 0x23 0x6d 0x1e
+
+#------------------------------------------------------------------------------
+# Floating-point conditional compare
+#------------------------------------------------------------------------------
+
+# CHECK: fccmp s1, s31, #0, eq
+# CHECK: fccmp s3, s0, #15, hs
+# CHECK: fccmp s31, s15, #13, hs
+0x20 0x04 0x3f 0x1e
+0x6f 0x24 0x20 0x1e
+0xed 0x27 0x2f 0x1e
+
+# CHECK: fccmp d9, d31, #0, le
+# CHECK: fccmp d3, d0, #15, gt
+# CHECK: fccmp d31, d5, #7, ne
+0x20 0xd5 0x7f 0x1e
+0x6f 0xc4 0x60 0x1e
+0xe7 0x17 0x65 0x1e
+
+# CHECK: fccmpe s1, s31, #0, eq
+# CHECK: fccmpe s3, s0, #15, hs
+# CHECK: fccmpe s31, s15, #13, hs
+0x30 0x04 0x3f 0x1e
+0x7f 0x24 0x20 0x1e
+0xfd 0x27 0x2f 0x1e
+
+# CHECK: fccmpe d9, d31, #0, le
+# CHECK: fccmpe d3, d0, #15, gt
+# CHECK: fccmpe d31, d5, #7, ne
+0x30 0xd5 0x7f 0x1e
+0x7f 0xc4 0x60 0x1e
+0xf7 0x17 0x65 0x1e
+
+#-------------------------------------------------------------------------------
+# Floating-point conditional select
+#-------------------------------------------------------------------------------
+
+# CHECK: fcsel s3, s20, s9, pl
+# CHECK: fcsel d9, d10, d11, mi
+0x83 0x5e 0x29 0x1e
+0x49 0x4d 0x6b 0x1e
+
+#------------------------------------------------------------------------------
+# Floating-point data-processing (1 source)
+#------------------------------------------------------------------------------
+
+# CHECK: fmov s0, s1
+# CHECK: fabs s2, s3
+# CHECK: fneg s4, s5
+# CHECK: fsqrt s6, s7
+# CHECK: fcvt d8, s9
+# CHECK: fcvt h10, s11
+# CHECK: frintn s12, s13
+# CHECK: frintp s14, s15
+# CHECK: frintm s16, s17
+# CHECK: frintz s18, s19
+# CHECK: frinta s20, s21
+# CHECK: frintx s22, s23
+# CHECK: frinti s24, s25
+0x20 0x40 0x20 0x1e
+0x62 0xc0 0x20 0x1e
+0xa4 0x40 0x21 0x1e
+0xe6 0xc0 0x21 0x1e
+0x28 0xc1 0x22 0x1e
+0x6a 0xc1 0x23 0x1e
+0xac 0x41 0x24 0x1e
+0xee 0xc1 0x24 0x1e
+0x30 0x42 0x25 0x1e
+0x72 0xc2 0x25 0x1e
+0xb4 0x42 0x26 0x1e
+0xf6 0x42 0x27 0x1e
+0x38 0xc3 0x27 0x1e
+
+# CHECK: fmov d0, d1
+# CHECK: fabs d2, d3
+# CHECK: fneg d4, d5
+# CHECK: fsqrt d6, d7
+# CHECK: fcvt s8, d9
+# CHECK: fcvt h10, d11
+# CHECK: frintn d12, d13
+# CHECK: frintp d14, d15
+# CHECK: frintm d16, d17
+# CHECK: frintz d18, d19
+# CHECK: frinta d20, d21
+# CHECK: frintx d22, d23
+# CHECK: frinti d24, d25
+0x20 0x40 0x60 0x1e
+0x62 0xc0 0x60 0x1e
+0xa4 0x40 0x61 0x1e
+0xe6 0xc0 0x61 0x1e
+0x28 0x41 0x62 0x1e
+0x6a 0xc1 0x63 0x1e
+0xac 0x41 0x64 0x1e
+0xee 0xc1 0x64 0x1e
+0x30 0x42 0x65 0x1e
+0x72 0xc2 0x65 0x1e
+0xb4 0x42 0x66 0x1e
+0xf6 0x42 0x67 0x1e
+0x38 0xc3 0x67 0x1e
+
+# CHECK: fcvt s26, h27
+# CHECK: fcvt d28, h29
+0x7a 0x43 0xe2 0x1e
+0xbc 0xc3 0xe2 0x1e
+
+#------------------------------------------------------------------------------
+# Floating-point data-processing (2 sources)
+#------------------------------------------------------------------------------
+
+# CHECK: fmul s20, s19, s17
+# CHECK: fdiv s1, s2, s3
+# CHECK: fadd s4, s5, s6
+# CHECK: fsub s7, s8, s9
+# CHECK: fmax s10, s11, s12
+# CHECK: fmin s13, s14, s15
+# CHECK: fmaxnm s16, s17, s18
+# CHECK: fminnm s19, s20, s21
+# CHECK: fnmul s22, s23, s24
+0x74 0xa 0x31 0x1e
+0x41 0x18 0x23 0x1e
+0xa4 0x28 0x26 0x1e
+0x7 0x39 0x29 0x1e
+0x6a 0x49 0x2c 0x1e
+0xcd 0x59 0x2f 0x1e
+0x30 0x6a 0x32 0x1e
+0x93 0x7a 0x35 0x1e
+0xf6 0x8a 0x38 0x1e
+
+# CHECK: fmul d20, d19, d17
+# CHECK: fdiv d1, d2, d3
+# CHECK: fadd d4, d5, d6
+# CHECK: fsub d7, d8, d9
+# CHECK: fmax d10, d11, d12
+# CHECK: fmin d13, d14, d15
+# CHECK: fmaxnm d16, d17, d18
+# CHECK: fminnm d19, d20, d21
+# CHECK: fnmul d22, d23, d24
+0x74 0xa 0x71 0x1e
+0x41 0x18 0x63 0x1e
+0xa4 0x28 0x66 0x1e
+0x7 0x39 0x69 0x1e
+0x6a 0x49 0x6c 0x1e
+0xcd 0x59 0x6f 0x1e
+0x30 0x6a 0x72 0x1e
+0x93 0x7a 0x75 0x1e
+0xf6 0x8a 0x78 0x1e
+
+#------------------------------------------------------------------------------
+# Floating-point data-processing (3 sources)
+#------------------------------------------------------------------------------
+
+# CHECK: fmadd s3, s5, s6, s31
+# CHECK: fmadd d3, d13, d0, d23
+# CHECK: fmsub s3, s5, s6, s31
+# CHECK: fmsub d3, d13, d0, d23
+# CHECK: fnmadd s3, s5, s6, s31
+# CHECK: fnmadd d3, d13, d0, d23
+# CHECK: fnmsub s3, s5, s6, s31
+# CHECK: fnmsub d3, d13, d0, d23
+0xa3 0x7c 0x06 0x1f
+0xa3 0x5d 0x40 0x1f
+0xa3 0xfc 0x06 0x1f
+0xa3 0xdd 0x40 0x1f
+0xa3 0x7c 0x26 0x1f
+0xa3 0x5d 0x60 0x1f
+0xa3 0xfc 0x26 0x1f
+0xa3 0xdd 0x60 0x1f
+
+#------------------------------------------------------------------------------
+# Floating-point <-> fixed-point conversion
+#------------------------------------------------------------------------------
+
+# CHECK: fcvtzs w3, s5, #1
+# CHECK: fcvtzs wzr, s20, #13
+# CHECK: fcvtzs w19, s0, #32
+0xa3 0xfc 0x18 0x1e
+0x9f 0xce 0x18 0x1e
+0x13 0x80 0x18 0x1e
+
+# CHECK: fcvtzs x3, s5, #1
+# CHECK: fcvtzs x12, s30, #45
+# CHECK: fcvtzs x19, s0, #64
+0xa3 0xfc 0x18 0x9e
+0xcc 0x4f 0x18 0x9e
+0x13 0x00 0x18 0x9e
+
+# CHECK: fcvtzs w3, d5, #1
+# CHECK: fcvtzs wzr, d20, #13
+# CHECK: fcvtzs w19, d0, #32
+0xa3 0xfc 0x58 0x1e
+0x9f 0xce 0x58 0x1e
+0x13 0x80 0x58 0x1e
+
+# CHECK: fcvtzs x3, d5, #1
+# CHECK: fcvtzs x12, d30, #45
+# CHECK: fcvtzs x19, d0, #64
+0xa3 0xfc 0x58 0x9e
+0xcc 0x4f 0x58 0x9e
+0x13 0x00 0x58 0x9e
+
+# CHECK: fcvtzu w3, s5, #1
+# CHECK: fcvtzu wzr, s20, #13
+# CHECK: fcvtzu w19, s0, #32
+0xa3 0xfc 0x19 0x1e
+0x9f 0xce 0x19 0x1e
+0x13 0x80 0x19 0x1e
+
+# CHECK: fcvtzu x3, s5, #1
+# CHECK: fcvtzu x12, s30, #45
+# CHECK: fcvtzu x19, s0, #64
+0xa3 0xfc 0x19 0x9e
+0xcc 0x4f 0x19 0x9e
+0x13 0x00 0x19 0x9e
+
+# CHECK: fcvtzu w3, d5, #1
+# CHECK: fcvtzu wzr, d20, #13
+# CHECK: fcvtzu w19, d0, #32
+0xa3 0xfc 0x59 0x1e
+0x9f 0xce 0x59 0x1e
+0x13 0x80 0x59 0x1e
+
+# CHECK: fcvtzu x3, d5, #1
+# CHECK: fcvtzu x12, d30, #45
+# CHECK: fcvtzu x19, d0, #64
+0xa3 0xfc 0x59 0x9e
+0xcc 0x4f 0x59 0x9e
+0x13 0x00 0x59 0x9e
+
+# CHECK: scvtf s23, w19, #1
+# CHECK: scvtf s31, wzr, #20
+# CHECK: scvtf s14, w0, #32
+0x77 0xfe 0x02 0x1e
+0xff 0xb3 0x02 0x1e
+0x0e 0x80 0x02 0x1e
+
+# CHECK: scvtf s23, x19, #1
+# CHECK: scvtf s31, xzr, #20
+# CHECK: scvtf s14, x0, #64
+0x77 0xfe 0x02 0x9e
+0xff 0xb3 0x02 0x9e
+0x0e 0x00 0x02 0x9e
+
+# CHECK: scvtf d23, w19, #1
+# CHECK: scvtf d31, wzr, #20
+# CHECK: scvtf d14, w0, #32
+0x77 0xfe 0x42 0x1e
+0xff 0xb3 0x42 0x1e
+0x0e 0x80 0x42 0x1e
+
+# CHECK: scvtf d23, x19, #1
+# CHECK: scvtf d31, xzr, #20
+# CHECK: scvtf d14, x0, #64
+0x77 0xfe 0x42 0x9e
+0xff 0xb3 0x42 0x9e
+0x0e 0x00 0x42 0x9e
+
+# CHECK: ucvtf s23, w19, #1
+# CHECK: ucvtf s31, wzr, #20
+# CHECK: ucvtf s14, w0, #32
+0x77 0xfe 0x03 0x1e
+0xff 0xb3 0x03 0x1e
+0x0e 0x80 0x03 0x1e
+
+# CHECK: ucvtf s23, x19, #1
+# CHECK: ucvtf s31, xzr, #20
+# CHECK: ucvtf s14, x0, #64
+0x77 0xfe 0x03 0x9e
+0xff 0xb3 0x03 0x9e
+0x0e 0x00 0x03 0x9e
+
+# CHECK: ucvtf d23, w19, #1
+# CHECK: ucvtf d31, wzr, #20
+# CHECK: ucvtf d14, w0, #32
+0x77 0xfe 0x43 0x1e
+0xff 0xb3 0x43 0x1e
+0x0e 0x80 0x43 0x1e
+
+# CHECK: ucvtf d23, x19, #1
+# CHECK: ucvtf d31, xzr, #20
+# CHECK: ucvtf d14, x0, #64
+0x77 0xfe 0x43 0x9e
+0xff 0xb3 0x43 0x9e
+0x0e 0x00 0x43 0x9e
+
+#------------------------------------------------------------------------------
+# Floating-point <-> integer conversion
+#------------------------------------------------------------------------------
+# CHECK: fcvtns w3, s31
+# CHECK: fcvtns xzr, s12
+# CHECK: fcvtnu wzr, s12
+# CHECK: fcvtnu x0, s0
+0xe3 0x3 0x20 0x1e
+0x9f 0x1 0x20 0x9e
+0x9f 0x1 0x21 0x1e
+0x0 0x0 0x21 0x9e
+
+# CHECK: fcvtps wzr, s9
+# CHECK: fcvtps x12, s20
+# CHECK: fcvtpu w30, s23
+# CHECK: fcvtpu x29, s3
+0x3f 0x1 0x28 0x1e
+0x8c 0x2 0x28 0x9e
+0xfe 0x2 0x29 0x1e
+0x7d 0x0 0x29 0x9e
+
+# CHECK: fcvtms w2, s3
+# CHECK: fcvtms x4, s5
+# CHECK: fcvtmu w6, s7
+# CHECK: fcvtmu x8, s9
+0x62 0x0 0x30 0x1e
+0xa4 0x0 0x30 0x9e
+0xe6 0x0 0x31 0x1e
+0x28 0x1 0x31 0x9e
+
+# CHECK: fcvtzs w10, s11
+# CHECK: fcvtzs x12, s13
+# CHECK: fcvtzu w14, s15
+# CHECK: fcvtzu x15, s16
+0x6a 0x1 0x38 0x1e
+0xac 0x1 0x38 0x9e
+0xee 0x1 0x39 0x1e
+0xf 0x2 0x39 0x9e
+
+# CHECK: scvtf s17, w18
+# CHECK: scvtf s19, x20
+# CHECK: ucvtf s21, w22
+# CHECK: scvtf s23, x24
+0x51 0x2 0x22 0x1e
+0x93 0x2 0x22 0x9e
+0xd5 0x2 0x23 0x1e
+0x17 0x3 0x22 0x9e
+
+# CHECK: fcvtas w25, s26
+# CHECK: fcvtas x27, s28
+# CHECK: fcvtau w29, s30
+# CHECK: fcvtau xzr, s0
+0x59 0x3 0x24 0x1e
+0x9b 0x3 0x24 0x9e
+0xdd 0x3 0x25 0x1e
+0x1f 0x0 0x25 0x9e
+
+# CHECK: fcvtns w3, d31
+# CHECK: fcvtns xzr, d12
+# CHECK: fcvtnu wzr, d12
+# CHECK: fcvtnu x0, d0
+0xe3 0x3 0x60 0x1e
+0x9f 0x1 0x60 0x9e
+0x9f 0x1 0x61 0x1e
+0x0 0x0 0x61 0x9e
+
+# CHECK: fcvtps wzr, d9
+# CHECK: fcvtps x12, d20
+# CHECK: fcvtpu w30, d23
+# CHECK: fcvtpu x29, d3
+0x3f 0x1 0x68 0x1e
+0x8c 0x2 0x68 0x9e
+0xfe 0x2 0x69 0x1e
+0x7d 0x0 0x69 0x9e
+
+# CHECK: fcvtms w2, d3
+# CHECK: fcvtms x4, d5
+# CHECK: fcvtmu w6, d7
+# CHECK: fcvtmu x8, d9
+0x62 0x0 0x70 0x1e
+0xa4 0x0 0x70 0x9e
+0xe6 0x0 0x71 0x1e
+0x28 0x1 0x71 0x9e
+
+# CHECK: fcvtzs w10, d11
+# CHECK: fcvtzs x12, d13
+# CHECK: fcvtzu w14, d15
+# CHECK: fcvtzu x15, d16
+0x6a 0x1 0x78 0x1e
+0xac 0x1 0x78 0x9e
+0xee 0x1 0x79 0x1e
+0xf 0x2 0x79 0x9e
+
+# CHECK: scvtf d17, w18
+# CHECK: scvtf d19, x20
+# CHECK: ucvtf d21, w22
+# CHECK: ucvtf d23, x24
+0x51 0x2 0x62 0x1e
+0x93 0x2 0x62 0x9e
+0xd5 0x2 0x63 0x1e
+0x17 0x3 0x63 0x9e
+
+# CHECK: fcvtas w25, d26
+# CHECK: fcvtas x27, d28
+# CHECK: fcvtau w29, d30
+# CHECK: fcvtau xzr, d0
+0x59 0x3 0x64 0x1e
+0x9b 0x3 0x64 0x9e
+0xdd 0x3 0x65 0x1e
+0x1f 0x0 0x65 0x9e
+
+# CHECK: fmov w3, s9
+# CHECK: fmov s9, w3
+0x23 0x1 0x26 0x1e
+0x69 0x0 0x27 0x1e
+
+# CHECK: fmov x20, d31
+# CHECK: fmov d1, x15
+0xf4 0x3 0x66 0x9e
+0xe1 0x1 0x67 0x9e
+
+# CHECK: fmov x3, v12.d[1]
+# CHECK: fmov v1.d[1], x19
+0x83 0x1 0xae 0x9e
+0x61 0x2 0xaf 0x9e
+
+#------------------------------------------------------------------------------
+# Floating-point immediate
+#------------------------------------------------------------------------------
+
+# CHECK: fmov s2, #0.12500000
+# CHECK: fmov s3, #1.00000000
+# CHECK: fmov d30, #16.00000000
+0x2 0x10 0x28 0x1e
+0x3 0x10 0x2e 0x1e
+0x1e 0x10 0x66 0x1e
+
+# CHECK: fmov s4, #1.06250000
+# CHECK: fmov d10, #1.93750000
+0x4 0x30 0x2e 0x1e
+0xa 0xf0 0x6f 0x1e
+
+# CHECK: fmov s12, #-1.00000000
+0xc 0x10 0x3e 0x1e
+
+# CHECK: fmov d16, #8.50000000
+0x10 0x30 0x64 0x1e
+
+#------------------------------------------------------------------------------
+# Load-register (literal)
+#------------------------------------------------------------------------------
+
+# CHECK: ldr w3, #0
+# CHECK: ldr x29, #4
+# CHECK: ldrsw xzr, #-4
+0x03 0x00 0x00 0x18
+0x3d 0x00 0x00 0x58
+0xff 0xff 0xff 0x98
+
+# CHECK: ldr s0, #8
+# CHECK: ldr d0, #1048572
+# CHECK: ldr q0, #-1048576
+0x40 0x00 0x00 0x1c
+0xe0 0xff 0x7f 0x5c
+0x00 0x00 0x80 0x9c
+
+# CHECK: prfm pldl1strm, #0
+# CHECK: prfm #22, #0
+0x01 0x00 0x00 0xd8
+0x16 0x00 0x00 0xd8
+
+#------------------------------------------------------------------------------
+# Load/store exclusive
+#------------------------------------------------------------------------------
+
+#CHECK: stxrb w18, w8, [sp]
+#CHECK: stxrh w24, w15, [x16]
+#CHECK: stxr w5, w6, [x17]
+#CHECK: stxr w1, x10, [x21]
+#CHECK: stxr w1, x10, [x21]
+0xe8 0x7f 0x12 0x08
+0x0f 0x7e 0x18 0x48
+0x26 0x7e 0x05 0x88
+0xaa 0x7e 0x01 0xc8
+0xaa 0x7a 0x01 0xc8
+
+#CHECK: ldxrb w30, [x0]
+#CHECK: ldxrh w17, [x4]
+#CHECK: ldxr w22, [sp]
+#CHECK: ldxr x11, [x29]
+#CHECK: ldxr x11, [x29]
+#CHECK: ldxr x11, [x29]
+0x1e 0x7c 0x5f 0x08
+0x91 0x7c 0x5f 0x48
+0xf6 0x7f 0x5f 0x88
+0xab 0x7f 0x5f 0xc8
+0xab 0x6f 0x5f 0xc8
+0xab 0x7f 0x5e 0xc8
+
+#CHECK: stxp w12, w11, w10, [sp]
+#CHECK: stxp wzr, x27, x9, [x12]
+0xeb 0x2b 0x2c 0x88
+0x9b 0x25 0x3f 0xc8
+
+#CHECK: ldxp w0, wzr, [sp]
+#CHECK: ldxp x17, x0, [x18]
+#CHECK: ldxp x17, x0, [x18]
+0xe0 0x7f 0x7f 0x88
+0x51 0x02 0x7f 0xc8
+0x51 0x02 0x7e 0xc8
+
+#CHECK: stlxrb w12, w22, [x0]
+#CHECK: stlxrh w10, w1, [x1]
+#CHECK: stlxr w9, w2, [x2]
+#CHECK: stlxr w9, x3, [sp]
+0x16 0xfc 0x0c 0x08
+0x21 0xfc 0x0a 0x48
+0x42 0xfc 0x09 0x88
+0xe3 0xff 0x09 0xc8
+
+#CHECK: ldaxrb w8, [x4]
+#CHECK: ldaxrh w7, [x5]
+#CHECK: ldaxr w6, [sp]
+#CHECK: ldaxr x5, [x6]
+#CHECK: ldaxr x5, [x6]
+#CHECK: ldaxr x5, [x6]
+0x88 0xfc 0x5f 0x08
+0xa7 0xfc 0x5f 0x48
+0xe6 0xff 0x5f 0x88
+0xc5 0xfc 0x5f 0xc8
+0xc5 0xec 0x5f 0xc8
+0xc5 0xfc 0x5e 0xc8
+
+#CHECK: stlxp w4, w5, w6, [sp]
+#CHECK: stlxp wzr, x6, x7, [x1]
+0xe5 0x9b 0x24 0x88
+0x26 0x9c 0x3f 0xc8
+
+#CHECK: ldaxp w5, w18, [sp]
+#CHECK: ldaxp x6, x19, [x22]
+#CHECK: ldaxp x6, x19, [x22]
+0xe5 0xcb 0x7f 0x88
+0xc6 0xce 0x7f 0xc8
+0xc6 0xce 0x7e 0xc8
+
+#CHECK: stlrb w24, [sp]
+#CHECK: stlrh w25, [x30]
+#CHECK: stlr w26, [x29]
+#CHECK: stlr x27, [x28]
+#CHECK: stlr x27, [x28]
+#CHECK: stlr x27, [x28]
+0xf8 0xff 0x9f 0x08
+0xd9 0xff 0x9f 0x48
+0xba 0xff 0x9f 0x88
+0x9b 0xff 0x9f 0xc8
+0x9b 0xef 0x9f 0xc8
+0x9b 0xff 0x9e 0xc8
+
+#CHECK: ldarb w23, [sp]
+#CHECK: ldarh w22, [x30]
+#CHECK: ldar wzr, [x29]
+#CHECK: ldar x21, [x28]
+#CHECK: ldar x21, [x28]
+#CHECK: ldar x21, [x28]
+0xf7 0xff 0xdf 0x08
+0xd6 0xff 0xdf 0x48
+0xbf 0xff 0xdf 0x88
+0x95 0xff 0xdf 0xc8
+0x95 0xef 0xdf 0xc8
+0x95 0xff 0xde 0xc8
+
+#------------------------------------------------------------------------------
+# Load/store (unscaled immediate)
+#------------------------------------------------------------------------------
+
+# CHECK: sturb w9, [sp]
+# CHECK: sturh wzr, [x12, #255]
+# CHECK: stur w16, [x0, #-256]
+# CHECK: stur x28, [x14, #1]
+0xe9 0x3 0x0 0x38
+0x9f 0xf1 0xf 0x78
+0x10 0x0 0x10 0xb8
+0xdc 0x11 0x0 0xf8
+
+# CHECK: ldurb w1, [x20, #255]
+# CHECK: ldurh w20, [x1, #255]
+# CHECK: ldur w12, [sp, #255]
+# CHECK: ldur xzr, [x12, #255]
+0x81 0xf2 0x4f 0x38
+0x34 0xf0 0x4f 0x78
+0xec 0xf3 0x4f 0xb8
+0x9f 0xf1 0x4f 0xf8
+
+# CHECK: ldursb x9, [x7, #-256]
+# CHECK: ldursh x17, [x19, #-256]
+# CHECK: ldursw x20, [x15, #-256]
+# CHECK: prfum pldl2keep, [sp, #-256]
+# CHECK: ldursb w19, [x1, #-256]
+# CHECK: ldursh w15, [x21, #-256]
+0xe9 0x0 0x90 0x38
+0x71 0x2 0x90 0x78
+0xf4 0x1 0x90 0xb8
+0xe2 0x3 0x90 0xf8
+0x33 0x0 0xd0 0x38
+0xaf 0x2 0xd0 0x78
+
+# CHECK: stur b0, [sp, #1]
+# CHECK: stur h12, [x12, #-1]
+# CHECK: stur s15, [x0, #255]
+# CHECK: stur d31, [x5, #25]
+# CHECK: stur q9, [x5]
+0xe0 0x13 0x0 0x3c
+0x8c 0xf1 0x1f 0x7c
+0xf 0xf0 0xf 0xbc
+0xbf 0x90 0x1 0xfc
+0xa9 0x0 0x80 0x3c
+
+# CHECK: ldur b3, [sp]
+# CHECK: ldur h5, [x4, #-256]
+# CHECK: ldur s7, [x12, #-1]
+# CHECK: ldur d11, [x19, #4]
+# CHECK: ldur q13, [x1, #2]
+0xe3 0x3 0x40 0x3c
+0x85 0x0 0x50 0x7c
+0x87 0xf1 0x5f 0xbc
+0x6b 0x42 0x40 0xfc
+0x2d 0x20 0xc0 0x3c
+
+#------------------------------------------------------------------------------
+# Load/store (immediate post-indexed)
+#------------------------------------------------------------------------------
+
+# E.g. "str xzr, [sp], #4" is *not* unpredictable
+# CHECK-NOT: warning: potentially undefined instruction encoding
+0xff 0x47 0x40 0xb8
+
+# CHECK: strb w9, [x2], #255
+# CHECK: strb w10, [x3], #1
+# CHECK: strb w10, [x3], #-256
+# CHECK: strh w9, [x2], #255
+# CHECK: strh w9, [x2], #1
+# CHECK: strh w10, [x3], #-256
+0x49 0xf4 0xf 0x38
+0x6a 0x14 0x0 0x38
+0x6a 0x4 0x10 0x38
+0x49 0xf4 0xf 0x78
+0x49 0x14 0x0 0x78
+0x6a 0x4 0x10 0x78
+
+# CHECK: str w19, [sp], #255
+# CHECK: str w20, [x30], #1
+# CHECK: str w21, [x12], #-256
+# CHECK: str xzr, [x9], #255
+# CHECK: str x2, [x3], #1
+# CHECK: str x19, [x12], #-256
+0xf3 0xf7 0xf 0xb8
+0xd4 0x17 0x0 0xb8
+0x95 0x5 0x10 0xb8
+0x3f 0xf5 0xf 0xf8
+0x62 0x14 0x0 0xf8
+0x93 0x5 0x10 0xf8
+
+# CHECK: ldrb w9, [x2], #255
+# CHECK: ldrb w10, [x3], #1
+# CHECK: ldrb w10, [x3], #-256
+# CHECK: ldrh w9, [x2], #255
+# CHECK: ldrh w9, [x2], #1
+# CHECK: ldrh w10, [x3], #-256
+0x49 0xf4 0x4f 0x38
+0x6a 0x14 0x40 0x38
+0x6a 0x4 0x50 0x38
+0x49 0xf4 0x4f 0x78
+0x49 0x14 0x40 0x78
+0x6a 0x4 0x50 0x78
+
+# CHECK: ldr w19, [sp], #255
+# CHECK: ldr w20, [x30], #1
+# CHECK: ldr w21, [x12], #-256
+# CHECK: ldr xzr, [x9], #255
+# CHECK: ldr x2, [x3], #1
+# CHECK: ldr x19, [x12], #-256
+0xf3 0xf7 0x4f 0xb8
+0xd4 0x17 0x40 0xb8
+0x95 0x5 0x50 0xb8
+0x3f 0xf5 0x4f 0xf8
+0x62 0x14 0x40 0xf8
+0x93 0x5 0x50 0xf8
+
+# CHECK: ldrsb xzr, [x9], #255
+# CHECK: ldrsb x2, [x3], #1
+# CHECK: ldrsb x19, [x12], #-256
+# CHECK: ldrsh xzr, [x9], #255
+# CHECK: ldrsh x2, [x3], #1
+# CHECK: ldrsh x19, [x12], #-256
+# CHECK: ldrsw xzr, [x9], #255
+# CHECK: ldrsw x2, [x3], #1
+# CHECK: ldrsw x19, [x12], #-256
+0x3f 0xf5 0x8f 0x38
+0x62 0x14 0x80 0x38
+0x93 0x5 0x90 0x38
+0x3f 0xf5 0x8f 0x78
+0x62 0x14 0x80 0x78
+0x93 0x5 0x90 0x78
+0x3f 0xf5 0x8f 0xb8
+0x62 0x14 0x80 0xb8
+0x93 0x5 0x90 0xb8
+
+# CHECK: ldrsb wzr, [x9], #255
+# CHECK: ldrsb w2, [x3], #1
+# CHECK: ldrsb w19, [x12], #-256
+# CHECK: ldrsh wzr, [x9], #255
+# CHECK: ldrsh w2, [x3], #1
+# CHECK: ldrsh w19, [x12], #-256
+0x3f 0xf5 0xcf 0x38
+0x62 0x14 0xc0 0x38
+0x93 0x5 0xd0 0x38
+0x3f 0xf5 0xcf 0x78
+0x62 0x14 0xc0 0x78
+0x93 0x5 0xd0 0x78
+
+# CHECK: str b0, [x0], #255
+# CHECK: str b3, [x3], #1
+# CHECK: str b5, [sp], #-256
+# CHECK: str h10, [x10], #255
+# CHECK: str h13, [x23], #1
+# CHECK: str h15, [sp], #-256
+# CHECK: str s20, [x20], #255
+# CHECK: str s23, [x23], #1
+# CHECK: str s25, [x0], #-256
+# CHECK: str d20, [x20], #255
+# CHECK: str d23, [x23], #1
+# CHECK: str d25, [x0], #-256
+0x0 0xf4 0xf 0x3c
+0x63 0x14 0x0 0x3c
+0xe5 0x7 0x10 0x3c
+0x4a 0xf5 0xf 0x7c
+0xed 0x16 0x0 0x7c
+0xef 0x7 0x10 0x7c
+0x94 0xf6 0xf 0xbc
+0xf7 0x16 0x0 0xbc
+0x19 0x4 0x10 0xbc
+0x94 0xf6 0xf 0xfc
+0xf7 0x16 0x0 0xfc
+0x19 0x4 0x10 0xfc
+
+# CHECK: ldr b0, [x0], #255
+# CHECK: ldr b3, [x3], #1
+# CHECK: ldr b5, [sp], #-256
+# CHECK: ldr h10, [x10], #255
+# CHECK: ldr h13, [x23], #1
+# CHECK: ldr h15, [sp], #-256
+# CHECK: ldr s20, [x20], #255
+# CHECK: ldr s23, [x23], #1
+# CHECK: ldr s25, [x0], #-256
+# CHECK: ldr d20, [x20], #255
+# CHECK: ldr d23, [x23], #1
+# CHECK: ldr d25, [x0], #-256
+0x0 0xf4 0x4f 0x3c
+0x63 0x14 0x40 0x3c
+0xe5 0x7 0x50 0x3c
+0x4a 0xf5 0x4f 0x7c
+0xed 0x16 0x40 0x7c
+0xef 0x7 0x50 0x7c
+0x94 0xf6 0x4f 0xbc
+0xf7 0x16 0x40 0xbc
+0x19 0x4 0x50 0xbc
+0x94 0xf6 0x4f 0xfc
+0xf7 0x16 0x40 0xfc
+0x19 0x4 0x50 0xfc
+
+# CHECK: ldr q20, [x1], #255
+# CHECK: ldr q23, [x9], #1
+# CHECK: ldr q25, [x20], #-256
+# CHECK: str q10, [x1], #255
+# CHECK: str q22, [sp], #1
+# CHECK: str q21, [x20], #-256
+0x34 0xf4 0xcf 0x3c
+0x37 0x15 0xc0 0x3c
+0x99 0x6 0xd0 0x3c
+0x2a 0xf4 0x8f 0x3c
+0xf6 0x17 0x80 0x3c
+0x95 0x6 0x90 0x3c
+
+#-------------------------------------------------------------------------------
+# Load/store (immediate pre-indexed)
+#-------------------------------------------------------------------------------
+
+# E.g. "str xzr, [sp, #4]!" is *not* unpredictable
+# CHECK-NOT: warning: potentially undefined instruction encoding
+0xff 0xf 0x40 0xf8
+
+# CHECK: ldr x3, [x4, #0]!
+0x83 0xc 0x40 0xf8
+
+# CHECK: strb w9, [x2, #255]!
+# CHECK: strb w10, [x3, #1]!
+# CHECK: strb w10, [x3, #-256]!
+# CHECK: strh w9, [x2, #255]!
+# CHECK: strh w9, [x2, #1]!
+# CHECK: strh w10, [x3, #-256]!
+0x49 0xfc 0xf 0x38
+0x6a 0x1c 0x0 0x38
+0x6a 0xc 0x10 0x38
+0x49 0xfc 0xf 0x78
+0x49 0x1c 0x0 0x78
+0x6a 0xc 0x10 0x78
+
+# CHECK: str w19, [sp, #255]!
+# CHECK: str w20, [x30, #1]!
+# CHECK: str w21, [x12, #-256]!
+# CHECK: str xzr, [x9, #255]!
+# CHECK: str x2, [x3, #1]!
+# CHECK: str x19, [x12, #-256]!
+0xf3 0xff 0xf 0xb8
+0xd4 0x1f 0x0 0xb8
+0x95 0xd 0x10 0xb8
+0x3f 0xfd 0xf 0xf8
+0x62 0x1c 0x0 0xf8
+0x93 0xd 0x10 0xf8
+
+# CHECK: ldrb w9, [x2, #255]!
+# CHECK: ldrb w10, [x3, #1]!
+# CHECK: ldrb w10, [x3, #-256]!
+# CHECK: ldrh w9, [x2, #255]!
+# CHECK: ldrh w9, [x2, #1]!
+# CHECK: ldrh w10, [x3, #-256]!
+0x49 0xfc 0x4f 0x38
+0x6a 0x1c 0x40 0x38
+0x6a 0xc 0x50 0x38
+0x49 0xfc 0x4f 0x78
+0x49 0x1c 0x40 0x78
+0x6a 0xc 0x50 0x78
+
+# CHECK: ldr w19, [sp, #255]!
+# CHECK: ldr w20, [x30, #1]!
+# CHECK: ldr w21, [x12, #-256]!
+# CHECK: ldr xzr, [x9, #255]!
+# CHECK: ldr x2, [x3, #1]!
+# CHECK: ldr x19, [x12, #-256]!
+0xf3 0xff 0x4f 0xb8
+0xd4 0x1f 0x40 0xb8
+0x95 0xd 0x50 0xb8
+0x3f 0xfd 0x4f 0xf8
+0x62 0x1c 0x40 0xf8
+0x93 0xd 0x50 0xf8
+
+# CHECK: ldrsb xzr, [x9, #255]!
+# CHECK: ldrsb x2, [x3, #1]!
+# CHECK: ldrsb x19, [x12, #-256]!
+# CHECK: ldrsh xzr, [x9, #255]!
+# CHECK: ldrsh x2, [x3, #1]!
+# CHECK: ldrsh x19, [x12, #-256]!
+# CHECK: ldrsw xzr, [x9, #255]!
+# CHECK: ldrsw x2, [x3, #1]!
+# CHECK: ldrsw x19, [x12, #-256]!
+0x3f 0xfd 0x8f 0x38
+0x62 0x1c 0x80 0x38
+0x93 0xd 0x90 0x38
+0x3f 0xfd 0x8f 0x78
+0x62 0x1c 0x80 0x78
+0x93 0xd 0x90 0x78
+0x3f 0xfd 0x8f 0xb8
+0x62 0x1c 0x80 0xb8
+0x93 0xd 0x90 0xb8
+
+# CHECK: ldrsb wzr, [x9, #255]!
+# CHECK: ldrsb w2, [x3, #1]!
+# CHECK: ldrsb w19, [x12, #-256]!
+# CHECK: ldrsh wzr, [x9, #255]!
+# CHECK: ldrsh w2, [x3, #1]!
+# CHECK: ldrsh w19, [x12, #-256]!
+0x3f 0xfd 0xcf 0x38
+0x62 0x1c 0xc0 0x38
+0x93 0xd 0xd0 0x38
+0x3f 0xfd 0xcf 0x78
+0x62 0x1c 0xc0 0x78
+0x93 0xd 0xd0 0x78
+
+# CHECK: str b0, [x0, #255]!
+# CHECK: str b3, [x3, #1]!
+# CHECK: str b5, [sp, #-256]!
+# CHECK: str h10, [x10, #255]!
+# CHECK: str h13, [x23, #1]!
+# CHECK: str h15, [sp, #-256]!
+# CHECK: str s20, [x20, #255]!
+# CHECK: str s23, [x23, #1]!
+# CHECK: str s25, [x0, #-256]!
+# CHECK: str d20, [x20, #255]!
+# CHECK: str d23, [x23, #1]!
+# CHECK: str d25, [x0, #-256]!
+0x0 0xfc 0xf 0x3c
+0x63 0x1c 0x0 0x3c
+0xe5 0xf 0x10 0x3c
+0x4a 0xfd 0xf 0x7c
+0xed 0x1e 0x0 0x7c
+0xef 0xf 0x10 0x7c
+0x94 0xfe 0xf 0xbc
+0xf7 0x1e 0x0 0xbc
+0x19 0xc 0x10 0xbc
+0x94 0xfe 0xf 0xfc
+0xf7 0x1e 0x0 0xfc
+0x19 0xc 0x10 0xfc
+
+# CHECK: ldr b0, [x0, #255]!
+# CHECK: ldr b3, [x3, #1]!
+# CHECK: ldr b5, [sp, #-256]!
+# CHECK: ldr h10, [x10, #255]!
+# CHECK: ldr h13, [x23, #1]!
+# CHECK: ldr h15, [sp, #-256]!
+# CHECK: ldr s20, [x20, #255]!
+# CHECK: ldr s23, [x23, #1]!
+# CHECK: ldr s25, [x0, #-256]!
+# CHECK: ldr d20, [x20, #255]!
+# CHECK: ldr d23, [x23, #1]!
+# CHECK: ldr d25, [x0, #-256]!
+0x0 0xfc 0x4f 0x3c
+0x63 0x1c 0x40 0x3c
+0xe5 0xf 0x50 0x3c
+0x4a 0xfd 0x4f 0x7c
+0xed 0x1e 0x40 0x7c
+0xef 0xf 0x50 0x7c
+0x94 0xfe 0x4f 0xbc
+0xf7 0x1e 0x40 0xbc
+0x19 0xc 0x50 0xbc
+0x94 0xfe 0x4f 0xfc
+0xf7 0x1e 0x40 0xfc
+0x19 0xc 0x50 0xfc
+
+# CHECK: ldr q20, [x1, #255]!
+# CHECK: ldr q23, [x9, #1]!
+# CHECK: ldr q25, [x20, #-256]!
+# CHECK: str q10, [x1, #255]!
+# CHECK: str q22, [sp, #1]!
+# CHECK: str q21, [x20, #-256]!
+0x34 0xfc 0xcf 0x3c
+0x37 0x1d 0xc0 0x3c
+0x99 0xe 0xd0 0x3c
+0x2a 0xfc 0x8f 0x3c
+0xf6 0x1f 0x80 0x3c
+0x95 0xe 0x90 0x3c
+
+#------------------------------------------------------------------------------
+# Load/store (unprivileged)
+#------------------------------------------------------------------------------
+
+# CHECK: sttrb w9, [sp]
+# CHECK: sttrh wzr, [x12, #255]
+# CHECK: sttr w16, [x0, #-256]
+# CHECK: sttr x28, [x14, #1]
+0xe9 0x0b 0x0 0x38
+0x9f 0xf9 0xf 0x78
+0x10 0x08 0x10 0xb8
+0xdc 0x19 0x0 0xf8
+
+# CHECK: ldtrb w1, [x20, #255]
+# CHECK: ldtrh w20, [x1, #255]
+# CHECK: ldtr w12, [sp, #255]
+# CHECK: ldtr xzr, [x12, #255]
+0x81 0xfa 0x4f 0x38
+0x34 0xf8 0x4f 0x78
+0xec 0xfb 0x4f 0xb8
+0x9f 0xf9 0x4f 0xf8
+
+# CHECK: ldtrsb x9, [x7, #-256]
+# CHECK: ldtrsh x17, [x19, #-256]
+# CHECK: ldtrsw x20, [x15, #-256]
+# CHECK: ldtrsb w19, [x1, #-256]
+# CHECK: ldtrsh w15, [x21, #-256]
+0xe9 0x08 0x90 0x38
+0x71 0x0a 0x90 0x78
+0xf4 0x09 0x90 0xb8
+0x33 0x08 0xd0 0x38
+0xaf 0x0a 0xd0 0x78
+
+#------------------------------------------------------------------------------
+# Load/store (unsigned immediate)
+#------------------------------------------------------------------------------
+
+# CHECK: ldr x0, [x0]
+# CHECK: ldr x4, [x29]
+# CHECK: ldr x30, [x12, #32760]
+# CHECK: ldr x20, [sp, #8]
+0x0 0x0 0x40 0xf9
+0xa4 0x3 0x40 0xf9
+0x9e 0xfd 0x7f 0xf9
+0xf4 0x7 0x40 0xf9
+
+# CHECK: ldr xzr, [sp]
+0xff 0x3 0x40 0xf9
+
+# CHECK: ldr w2, [sp]
+# CHECK: ldr w17, [sp, #16380]
+# CHECK: ldr w13, [x2, #4]
+0xe2 0x3 0x40 0xb9
+0xf1 0xff 0x7f 0xb9
+0x4d 0x4 0x40 0xb9
+
+# CHECK: ldrsw x2, [x5, #4]
+# CHECK: ldrsw x23, [sp, #16380]
+0xa2 0x4 0x80 0xb9
+0xf7 0xff 0xbf 0xb9
+
+# CHECK: ldrh w2, [x4]
+# CHECK: ldrsh w23, [x6, #8190]
+# CHECK: ldrsh wzr, [sp, #2]
+# CHECK: ldrsh x29, [x2, #2]
+0x82 0x0 0x40 0x79
+0xd7 0xfc 0xff 0x79
+0xff 0x7 0xc0 0x79
+0x5d 0x4 0x80 0x79
+
+# CHECK: ldrb w26, [x3, #121]
+# CHECK: ldrb w12, [x2]
+# CHECK: ldrsb w27, [sp, #4095]
+# CHECK: ldrsb xzr, [x15]
+0x7a 0xe4 0x41 0x39
+0x4c 0x0 0x40 0x39
+0xfb 0xff 0xff 0x39
+0xff 0x1 0x80 0x39
+
+# CHECK: str x30, [sp]
+# CHECK: str w20, [x4, #16380]
+# CHECK: strh w20, [x10, #14]
+# CHECK: strh w17, [sp, #8190]
+# CHECK: strb w23, [x3, #4095]
+# CHECK: strb wzr, [x2]
+0xfe 0x3 0x0 0xf9
+0x94 0xfc 0x3f 0xb9
+0x54 0x1d 0x0 0x79
+0xf1 0xff 0x3f 0x79
+0x77 0xfc 0x3f 0x39
+0x5f 0x0 0x0 0x39
+
+# CHECK: ldr b31, [sp, #4095]
+# CHECK: ldr h20, [x2, #8190]
+# CHECK: ldr s10, [x19, #16380]
+# CHECK: ldr d3, [x10, #32760]
+# CHECK: str q12, [sp, #65520]
+0xff 0xff 0x7f 0x3d
+0x54 0xfc 0x7f 0x7d
+0x6a 0xfe 0x7f 0xbd
+0x43 0xfd 0x7f 0xfd
+0xec 0xff 0xbf 0x3d
+
+#------------------------------------------------------------------------------
+# Load/store (register offset)
+#------------------------------------------------------------------------------
+
+# CHECK: ldrb w3, [sp, x5]
+# CHECK: ldrb w9, [x27, x6]
+# CHECK: ldrsb w10, [x30, x7]
+# CHECK: ldrb w11, [x29, x3, sxtx]
+# CHECK: strb w12, [x28, xzr, sxtx]
+# CHECK: ldrb w14, [x26, w6, uxtw]
+# CHECK: ldrsb w15, [x25, w7, uxtw]
+# CHECK: ldrb w17, [x23, w9, sxtw]
+# CHECK: ldrsb x18, [x22, w10, sxtw]
+0xe3 0x6b 0x65 0x38
+0x69 0x6b 0x66 0x38
+0xca 0x6b 0xe7 0x38
+0xab 0xeb 0x63 0x38
+0x8c 0xeb 0x3f 0x38
+0x4e 0x4b 0x66 0x38
+0x2f 0x4b 0xe7 0x38
+0xf1 0xca 0x69 0x38
+0xd2 0xca 0xaa 0x38
+
+# CHECK: ldrsh w3, [sp, x5]
+# CHECK: ldrsh w9, [x27, x6]
+# CHECK: ldrh w10, [x30, x7, lsl #1]
+# CHECK: strh w11, [x29, x3, sxtx]
+# CHECK: ldrh w12, [x28, xzr, sxtx]
+# CHECK: ldrsh x13, [x27, x5, sxtx #1]
+# CHECK: ldrh w14, [x26, w6, uxtw]
+# CHECK: ldrh w15, [x25, w7, uxtw]
+# CHECK: ldrsh w16, [x24, w8, uxtw #1]
+# CHECK: ldrh w17, [x23, w9, sxtw]
+# CHECK: ldrh w18, [x22, w10, sxtw]
+# CHECK: strh w19, [x21, wzr, sxtw #1]
+0xe3 0x6b 0xe5 0x78
+0x69 0x6b 0xe6 0x78
+0xca 0x7b 0x67 0x78
+0xab 0xeb 0x23 0x78
+0x8c 0xeb 0x7f 0x78
+0x6d 0xfb 0xa5 0x78
+0x4e 0x4b 0x66 0x78
+0x2f 0x4b 0x67 0x78
+0x10 0x5b 0xe8 0x78
+0xf1 0xca 0x69 0x78
+0xd2 0xca 0x6a 0x78
+0xb3 0xda 0x3f 0x78
+
+# CHECK: ldr w3, [sp, x5]
+# CHECK: ldr s9, [x27, x6]
+# CHECK: ldr w10, [x30, x7, lsl #2]
+# CHECK: ldr w11, [x29, x3, sxtx]
+# CHECK: str s12, [x28, xzr, sxtx]
+# CHECK: str w13, [x27, x5, sxtx #2]
+# CHECK: str w14, [x26, w6, uxtw]
+# CHECK: ldr w15, [x25, w7, uxtw]
+# CHECK: ldr w16, [x24, w8, uxtw #2]
+# CHECK: ldrsw x17, [x23, w9, sxtw]
+# CHECK: ldr w18, [x22, w10, sxtw]
+# CHECK: ldrsw x19, [x21, wzr, sxtw #2]
+0xe3 0x6b 0x65 0xb8
+0x69 0x6b 0x66 0xbc
+0xca 0x7b 0x67 0xb8
+0xab 0xeb 0x63 0xb8
+0x8c 0xeb 0x3f 0xbc
+0x6d 0xfb 0x25 0xb8
+0x4e 0x4b 0x26 0xb8
+0x2f 0x4b 0x67 0xb8
+0x10 0x5b 0x68 0xb8
+0xf1 0xca 0xa9 0xb8
+0xd2 0xca 0x6a 0xb8
+0xb3 0xda 0xbf 0xb8
+
+# CHECK: ldr x3, [sp, x5]
+# CHECK: str x9, [x27, x6]
+# CHECK: ldr d10, [x30, x7, lsl #3]
+# CHECK: str x11, [x29, x3, sxtx]
+# CHECK: ldr x12, [x28, xzr, sxtx]
+# CHECK: ldr x13, [x27, x5, sxtx #3]
+# CHECK: prfm pldl1keep, [x26, w6, uxtw]
+# CHECK: ldr x15, [x25, w7, uxtw]
+# CHECK: ldr x16, [x24, w8, uxtw #3]
+# CHECK: ldr x17, [x23, w9, sxtw]
+# CHECK: ldr x18, [x22, w10, sxtw]
+# CHECK: str d19, [x21, wzr, sxtw #3]
+0xe3 0x6b 0x65 0xf8
+0x69 0x6b 0x26 0xf8
+0xca 0x7b 0x67 0xfc
+0xab 0xeb 0x23 0xf8
+0x8c 0xeb 0x7f 0xf8
+0x6d 0xfb 0x65 0xf8
+0x40 0x4b 0xa6 0xf8
+0x2f 0x4b 0x67 0xf8
+0x10 0x5b 0x68 0xf8
+0xf1 0xca 0x69 0xf8
+0xd2 0xca 0x6a 0xf8
+0xb3 0xda 0x3f 0xfc
+
+# CHECK: ldr q3, [sp, x5]
+# CHECK: ldr q9, [x27, x6]
+# CHECK: ldr q10, [x30, x7, lsl #4]
+# CHECK: str q11, [x29, x3, sxtx]
+# CHECK: str q12, [x28, xzr, sxtx]
+# CHECK: str q13, [x27, x5, sxtx #4]
+# CHECK: ldr q14, [x26, w6, uxtw]
+# CHECK: ldr q15, [x25, w7, uxtw]
+# CHECK: ldr q16, [x24, w8, uxtw #4]
+# CHECK: ldr q17, [x23, w9, sxtw]
+# CHECK: str q18, [x22, w10, sxtw]
+# CHECK: ldr q19, [x21, wzr, sxtw #4]
+0xe3 0x6b 0xe5 0x3c
+0x69 0x6b 0xe6 0x3c
+0xca 0x7b 0xe7 0x3c
+0xab 0xeb 0xa3 0x3c
+0x8c 0xeb 0xbf 0x3c
+0x6d 0xfb 0xa5 0x3c
+0x4e 0x4b 0xe6 0x3c
+0x2f 0x4b 0xe7 0x3c
+0x10 0x5b 0xe8 0x3c
+0xf1 0xca 0xe9 0x3c
+0xd2 0xca 0xaa 0x3c
+0xb3 0xda 0xff 0x3c
+
+#------------------------------------------------------------------------------
+# Load/store register pair (offset)
+#------------------------------------------------------------------------------
+
+# CHECK: ldp w3, w5, [sp]
+# CHECK: stp wzr, w9, [sp, #252]
+# CHECK: ldp w2, wzr, [sp, #-256]
+# CHECK: ldp w9, w10, [sp, #4]
+0xe3 0x17 0x40 0x29
+0xff 0xa7 0x1f 0x29
+0xe2 0x7f 0x60 0x29
+0xe9 0xab 0x40 0x29
+
+# CHECK: ldpsw x9, x10, [sp, #4]
+# CHECK: ldpsw x9, x10, [x2, #-256]
+# CHECK: ldpsw x20, x30, [sp, #252]
+0xe9 0xab 0x40 0x69
+0x49 0x28 0x60 0x69
+0xf4 0xfb 0x5f 0x69
+
+# CHECK: ldp x21, x29, [x2, #504]
+# CHECK: ldp x22, x23, [x3, #-512]
+# CHECK: ldp x24, x25, [x4, #8]
+0x55 0xf4 0x5f 0xa9
+0x76 0x5c 0x60 0xa9
+0x98 0xe4 0x40 0xa9
+
+# CHECK: ldp s29, s28, [sp, #252]
+# CHECK: stp s27, s26, [sp, #-256]
+# CHECK: ldp s1, s2, [x3, #44]
+0xfd 0xf3 0x5f 0x2d
+0xfb 0x6b 0x20 0x2d
+0x61 0x88 0x45 0x2d
+
+# CHECK: stp d3, d5, [x9, #504]
+# CHECK: stp d7, d11, [x10, #-512]
+# CHECK: ldp d2, d3, [x30, #-8]
+0x23 0x95 0x1f 0x6d
+0x47 0x2d 0x20 0x6d
+0xc2 0x8f 0x7f 0x6d
+
+# CHECK: stp q3, q5, [sp]
+# CHECK: stp q17, q19, [sp, #1008]
+# CHECK: ldp q23, q29, [x1, #-1024]
+0xe3 0x17 0x0 0xad
+0xf1 0xcf 0x1f 0xad
+0x37 0x74 0x60 0xad
+
+#------------------------------------------------------------------------------
+# Load/store register pair (post-indexed)
+#------------------------------------------------------------------------------
+
+# CHECK: ldp w3, w5, [sp], #0
+# CHECK: stp wzr, w9, [sp], #252
+# CHECK: ldp w2, wzr, [sp], #-256
+# CHECK: ldp w9, w10, [sp], #4
+0xe3 0x17 0xc0 0x28
+0xff 0xa7 0x9f 0x28
+0xe2 0x7f 0xe0 0x28
+0xe9 0xab 0xc0 0x28
+
+# CHECK: ldpsw x9, x10, [sp], #4
+# CHECK: ldpsw x9, x10, [x2], #-256
+# CHECK: ldpsw x20, x30, [sp], #252
+0xe9 0xab 0xc0 0x68
+0x49 0x28 0xe0 0x68
+0xf4 0xfb 0xdf 0x68
+
+# CHECK: ldp x21, x29, [x2], #504
+# CHECK: ldp x22, x23, [x3], #-512
+# CHECK: ldp x24, x25, [x4], #8
+0x55 0xf4 0xdf 0xa8
+0x76 0x5c 0xe0 0xa8
+0x98 0xe4 0xc0 0xa8
+
+# CHECK: ldp s29, s28, [sp], #252
+# CHECK: stp s27, s26, [sp], #-256
+# CHECK: ldp s1, s2, [x3], #44
+0xfd 0xf3 0xdf 0x2c
+0xfb 0x6b 0xa0 0x2c
+0x61 0x88 0xc5 0x2c
+
+# CHECK: stp d3, d5, [x9], #504
+# CHECK: stp d7, d11, [x10], #-512
+# CHECK: ldp d2, d3, [x30], #-8
+0x23 0x95 0x9f 0x6c
+0x47 0x2d 0xa0 0x6c
+0xc2 0x8f 0xff 0x6c
+
+# CHECK: stp q3, q5, [sp], #0
+# CHECK: stp q17, q19, [sp], #1008
+# CHECK: ldp q23, q29, [x1], #-1024
+0xe3 0x17 0x80 0xac
+0xf1 0xcf 0x9f 0xac
+0x37 0x74 0xe0 0xac
+
+#------------------------------------------------------------------------------
+# Load/store register pair (pre-indexed)
+#------------------------------------------------------------------------------
+
+# CHECK: ldp w3, w5, [sp, #0]!
+# CHECK: stp wzr, w9, [sp, #252]!
+# CHECK: ldp w2, wzr, [sp, #-256]!
+# CHECK: ldp w9, w10, [sp, #4]!
+0xe3 0x17 0xc0 0x29
+0xff 0xa7 0x9f 0x29
+0xe2 0x7f 0xe0 0x29
+0xe9 0xab 0xc0 0x29
+
+# CHECK: ldpsw x9, x10, [sp, #4]!
+# CHECK: ldpsw x9, x10, [x2, #-256]!
+# CHECK: ldpsw x20, x30, [sp, #252]!
+0xe9 0xab 0xc0 0x69
+0x49 0x28 0xe0 0x69
+0xf4 0xfb 0xdf 0x69
+
+# CHECK: ldp x21, x29, [x2, #504]!
+# CHECK: ldp x22, x23, [x3, #-512]!
+# CHECK: ldp x24, x25, [x4, #8]!
+0x55 0xf4 0xdf 0xa9
+0x76 0x5c 0xe0 0xa9
+0x98 0xe4 0xc0 0xa9
+
+# CHECK: ldp s29, s28, [sp, #252]!
+# CHECK: stp s27, s26, [sp, #-256]!
+# CHECK: ldp s1, s2, [x3, #44]!
+0xfd 0xf3 0xdf 0x2d
+0xfb 0x6b 0xa0 0x2d
+0x61 0x88 0xc5 0x2d
+
+# CHECK: stp d3, d5, [x9, #504]!
+# CHECK: stp d7, d11, [x10, #-512]!
+# CHECK: ldp d2, d3, [x30, #-8]!
+0x23 0x95 0x9f 0x6d
+0x47 0x2d 0xa0 0x6d
+0xc2 0x8f 0xff 0x6d
+
+# CHECK: stp q3, q5, [sp, #0]!
+# CHECK: stp q17, q19, [sp, #1008]!
+# CHECK: ldp q23, q29, [x1, #-1024]!
+0xe3 0x17 0x80 0xad
+0xf1 0xcf 0x9f 0xad
+0x37 0x74 0xe0 0xad
+
+#------------------------------------------------------------------------------
+# Load/store non-temporal register pair (offset)
+#------------------------------------------------------------------------------
+
+# CHECK: ldnp w3, w5, [sp]
+# CHECK: stnp wzr, w9, [sp, #252]
+# CHECK: ldnp w2, wzr, [sp, #-256]
+# CHECK: ldnp w9, w10, [sp, #4]
+0xe3 0x17 0x40 0x28
+0xff 0xa7 0x1f 0x28
+0xe2 0x7f 0x60 0x28
+0xe9 0xab 0x40 0x28
+
+# CHECK: ldnp x21, x29, [x2, #504]
+# CHECK: ldnp x22, x23, [x3, #-512]
+# CHECK: ldnp x24, x25, [x4, #8]
+0x55 0xf4 0x5f 0xa8
+0x76 0x5c 0x60 0xa8
+0x98 0xe4 0x40 0xa8
+
+# CHECK: ldnp s29, s28, [sp, #252]
+# CHECK: stnp s27, s26, [sp, #-256]
+# CHECK: ldnp s1, s2, [x3, #44]
+0xfd 0xf3 0x5f 0x2c
+0xfb 0x6b 0x20 0x2c
+0x61 0x88 0x45 0x2c
+
+# CHECK: stnp d3, d5, [x9, #504]
+# CHECK: stnp d7, d11, [x10, #-512]
+# CHECK: ldnp d2, d3, [x30, #-8]
+0x23 0x95 0x1f 0x6c
+0x47 0x2d 0x20 0x6c
+0xc2 0x8f 0x7f 0x6c
+
+# CHECK: stnp q3, q5, [sp]
+# CHECK: stnp q17, q19, [sp, #1008]
+# CHECK: ldnp q23, q29, [x1, #-1024]
+0xe3 0x17 0x0 0xac
+0xf1 0xcf 0x1f 0xac
+0x37 0x74 0x60 0xac
+
+#------------------------------------------------------------------------------
+# Logical (immediate)
+#------------------------------------------------------------------------------
+# CHECK: orr w3, w9, #0xffff0000
+# CHECK: orr wsp, w10, #0xe00000ff
+# CHECK: orr w9, w10, #0x3ff
+0x23 0x3d 0x10 0x32
+0x5f 0x29 0x3 0x32
+0x49 0x25 0x0 0x32
+
+# CHECK: and w14, w15, #0x80008000
+# CHECK: and w12, w13, #0xffc3ffc3
+# CHECK: and w11, wzr, #0x30003
+0xee 0x81 0x1 0x12
+0xac 0xad 0xa 0x12
+0xeb 0x87 0x0 0x12
+
+# CHECK: eor w3, w6, #0xe0e0e0e0
+# CHECK: eor wsp, wzr, #0x3030303
+# CHECK: eor w16, w17, #0x81818181
+0xc3 0xc8 0x3 0x52
+0xff 0xc7 0x0 0x52
+0x30 0xc6 0x1 0x52
+
+# CHECK: ands wzr, w18, #0xcccccccc
+# CHECK: ands w19, w20, #0x33333333
+# CHECK: ands w21, w22, #0x99999999
+0x5f 0xe6 0x2 0x72
+0x93 0xe6 0x0 0x72
+0xd5 0xe6 0x1 0x72
+
+# CHECK: ands wzr, w3, #0xaaaaaaaa
+# CHECK: ands wzr, wzr, #0x55555555
+0x7f 0xf0 0x1 0x72
+0xff 0xf3 0x0 0x72
+
+# CHECK: eor x3, x5, #0xffffffffc000000
+# CHECK: and x9, x10, #0x7fffffffffff
+# CHECK: orr x11, x12, #0x8000000000000fff
+0xa3 0x84 0x66 0xd2
+0x49 0xb9 0x40 0x92
+0x8b 0x31 0x41 0xb2
+
+# CHECK: orr x3, x9, #0xffff0000ffff0000
+# CHECK: orr sp, x10, #0xe00000ffe00000ff
+# CHECK: orr x9, x10, #0x3ff000003ff
+0x23 0x3d 0x10 0xb2
+0x5f 0x29 0x3 0xb2
+0x49 0x25 0x0 0xb2
+
+# CHECK: and x14, x15, #0x8000800080008000
+# CHECK: and x12, x13, #0xffc3ffc3ffc3ffc3
+# CHECK: and x11, xzr, #0x3000300030003
+0xee 0x81 0x1 0x92
+0xac 0xad 0xa 0x92
+0xeb 0x87 0x0 0x92
+
+# CHECK: eor x3, x6, #0xe0e0e0e0e0e0e0e0
+# CHECK: eor sp, xzr, #0x303030303030303
+# CHECK: eor x16, x17, #0x8181818181818181
+0xc3 0xc8 0x3 0xd2
+0xff 0xc7 0x0 0xd2
+0x30 0xc6 0x1 0xd2
+
+# CHECK: ands xzr, x18, #0xcccccccccccccccc
+# CHECK: ands x19, x20, #0x3333333333333333
+# CHECK: ands x21, x22, #0x9999999999999999
+0x5f 0xe6 0x2 0xf2
+0x93 0xe6 0x0 0xf2
+0xd5 0xe6 0x1 0xf2
+
+# CHECK: ands xzr, x3, #0xaaaaaaaaaaaaaaaa
+# CHECK: ands xzr, xzr, #0x5555555555555555
+0x7f 0xf0 0x1 0xf2
+0xff 0xf3 0x0 0xf2
+
+# CHECK: orr w3, wzr, #0xf000f
+# CHECK: orr x10, xzr, #0xaaaaaaaaaaaaaaaa
+0xe3 0x8f 0x0 0x32
+0xea 0xf3 0x1 0xb2
+
+# CHECK: orr w3, wzr, #0xffff
+# CHECK: orr x9, xzr, #0xffff00000000
+0xe3 0x3f 0x0 0x32
+0xe9 0x3f 0x60 0xb2
+
+#------------------------------------------------------------------------------
+# Logical (shifted register)
+#------------------------------------------------------------------------------
+
+# CHECK: and w12, w23, w21
+# CHECK: and w16, w15, w1, lsl #1
+# CHECK: and w9, w4, w10, lsl #31
+# CHECK: and w3, w30, w11
+# CHECK: and x3, x5, x7, lsl #63
+0xec 0x2 0x15 0xa
+0xf0 0x5 0x1 0xa
+0x89 0x7c 0xa 0xa
+0xc3 0x3 0xb 0xa
+0xa3 0xfc 0x7 0x8a
+
+# CHECK: and x5, x14, x19, asr #4
+# CHECK: and w3, w17, w19, ror #31
+# CHECK: and w0, w2, wzr, lsr #17
+# CHECK: and w3, w30, w11, asr
+0xc5 0x11 0x93 0x8a
+0x23 0x7e 0xd3 0xa
+0x40 0x44 0x5f 0xa
+0xc3 0x3 0x8b 0xa
+
+# CHECK: and xzr, x4, x26
+# CHECK: and w3, wzr, w20, ror
+# CHECK: and x7, x20, xzr, asr #63
+0x9f 0x0 0x1a 0x8a
+0xe3 0x3 0xd4 0xa
+0x87 0xfe 0x9f 0x8a
+
+# CHECK: bic x13, x20, x14, lsl #47
+# CHECK: bic w2, w7, w9
+# CHECK: orr w2, w7, w0, asr #31
+# CHECK: orr x8, x9, x10, lsl #12
+# CHECK: orn x3, x5, x7, asr
+# CHECK: orn w2, w5, w29
+0x8d 0xbe 0x2e 0x8a
+0xe2 0x0 0x29 0xa
+0xe2 0x7c 0x80 0x2a
+0x28 0x31 0xa 0xaa
+0xa3 0x0 0xa7 0xaa
+0xa2 0x0 0x3d 0x2a
+
+# CHECK: ands w7, wzr, w9, lsl #1
+# CHECK: ands x3, x5, x20, ror #63
+# CHECK: bics w3, w5, w7
+# CHECK: bics x3, xzr, x3, lsl #1
+# CHECK: tst w3, w7, lsl #31
+# CHECK: tst x2, x20, asr
+0xe7 0x7 0x9 0x6a
+0xa3 0xfc 0xd4 0xea
+0xa3 0x0 0x27 0x6a
+0xe3 0x7 0x23 0xea
+0x7f 0x7c 0x7 0x6a
+0x5f 0x0 0x94 0xea
+
+# CHECK: mov x3, x6
+# CHECK: mov x3, xzr
+# CHECK: mov wzr, w2
+# CHECK: mov w3, w5
+0xe3 0x3 0x6 0xaa
+0xe3 0x3 0x1f 0xaa
+0xff 0x3 0x2 0x2a
+0xe3 0x3 0x5 0x2a
+
+#------------------------------------------------------------------------------
+# Move wide (immediate)
+#------------------------------------------------------------------------------
+
+# N.b. (FIXME) canonical aliases aren't produced here because of a
+# limitation in InstAlias. Many of the "mov[nz]" instructions below
+# should really be "mov".
+
+# CHECK: movz w1, #65535
+# CHECK: movz w2, #0, lsl #16
+# CHECK: movn w2, #1234
+0xe1 0xff 0x9f 0x52
+0x2 0x0 0xa0 0x52
+0x42 0x9a 0x80 0x12
+
+# CHECK: movz x2, #1234, lsl #32
+# CHECK: movk xzr, #4321, lsl #48
+0x42 0x9a 0xc0 0xd2
+0x3f 0x1c 0xe2 0xf2
+
+# CHECK: movz x2, #0
+# CHECK: movk w3, #0
+# CHECK: movz x4, #0, lsl #16
+# CHECK: movk w5, #0, lsl #16
+# CHECK: movz x6, #0, lsl #32
+# CHECK: movk x7, #0, lsl #32
+# CHECK: movz x8, #0, lsl #48
+# CHECK: movk x9, #0, lsl #48
+0x2 0x0 0x80 0xd2
+0x3 0x0 0x80 0x72
+0x4 0x0 0xa0 0xd2
+0x5 0x0 0xa0 0x72
+0x6 0x0 0xc0 0xd2
+0x7 0x0 0xc0 0xf2
+0x8 0x0 0xe0 0xd2
+0x9 0x0 0xe0 0xf2
+
+#------------------------------------------------------------------------------
+# PC-relative addressing
+#------------------------------------------------------------------------------
+
+# It's slightly dodgy using immediates here, but harmless enough when
+# it's all that's available.
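+# (N.b. adrp's immediate is a multiple of the 4KiB page size, so the 1600
+# of the adr below shows up as 1600 * 4096 = 6553600 in the adrp.)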
+
+# CHECK: adr x2, #1600
+# CHECK: adrp x21, #6553600
+# CHECK: adr x0, #262144
+0x02 0x32 0x00 0x10
+0x15 0x32 0x00 0x90
+0x00 0x00 0x20 0x10
+
+#------------------------------------------------------------------------------
+# System
+#------------------------------------------------------------------------------
+
+# CHECK: nop
+# CHECK: hint #127
+# CHECK: nop
+# CHECK: yield
+# CHECK: wfe
+# CHECK: wfi
+# CHECK: sev
+# CHECK: sevl
+0x1f 0x20 0x3 0xd5
+0xff 0x2f 0x3 0xd5
+0x1f 0x20 0x3 0xd5
+0x3f 0x20 0x3 0xd5
+0x5f 0x20 0x3 0xd5
+0x7f 0x20 0x3 0xd5
+0x9f 0x20 0x3 0xd5
+0xbf 0x20 0x3 0xd5
+
+# CHECK: clrex
+# CHECK: clrex #0
+# CHECK: clrex #7
+# CHECK: clrex
+0x5f 0x3f 0x3 0xd5
+0x5f 0x30 0x3 0xd5
+0x5f 0x37 0x3 0xd5
+0x5f 0x3f 0x3 0xd5
+
+# CHECK: dsb #0
+# CHECK: dsb #12
+# CHECK: dsb sy
+# CHECK: dsb oshld
+# CHECK: dsb oshst
+# CHECK: dsb osh
+# CHECK: dsb nshld
+# CHECK: dsb nshst
+# CHECK: dsb nsh
+# CHECK: dsb ishld
+# CHECK: dsb ishst
+# CHECK: dsb ish
+# CHECK: dsb ld
+# CHECK: dsb st
+# CHECK: dsb sy
+0x9f 0x30 0x3 0xd5
+0x9f 0x3c 0x3 0xd5
+0x9f 0x3f 0x3 0xd5
+0x9f 0x31 0x3 0xd5
+0x9f 0x32 0x3 0xd5
+0x9f 0x33 0x3 0xd5
+0x9f 0x35 0x3 0xd5
+0x9f 0x36 0x3 0xd5
+0x9f 0x37 0x3 0xd5
+0x9f 0x39 0x3 0xd5
+0x9f 0x3a 0x3 0xd5
+0x9f 0x3b 0x3 0xd5
+0x9f 0x3d 0x3 0xd5
+0x9f 0x3e 0x3 0xd5
+0x9f 0x3f 0x3 0xd5
+
+# CHECK: dmb #0
+# CHECK: dmb #12
+# CHECK: dmb sy
+# CHECK: dmb oshld
+# CHECK: dmb oshst
+# CHECK: dmb osh
+# CHECK: dmb nshld
+# CHECK: dmb nshst
+# CHECK: dmb nsh
+# CHECK: dmb ishld
+# CHECK: dmb ishst
+# CHECK: dmb ish
+# CHECK: dmb ld
+# CHECK: dmb st
+# CHECK: dmb sy
+0xbf 0x30 0x3 0xd5
+0xbf 0x3c 0x3 0xd5
+0xbf 0x3f 0x3 0xd5
+0xbf 0x31 0x3 0xd5
+0xbf 0x32 0x3 0xd5
+0xbf 0x33 0x3 0xd5
+0xbf 0x35 0x3 0xd5
+0xbf 0x36 0x3 0xd5
+0xbf 0x37 0x3 0xd5
+0xbf 0x39 0x3 0xd5
+0xbf 0x3a 0x3 0xd5
+0xbf 0x3b 0x3 0xd5
+0xbf 0x3d 0x3 0xd5
+0xbf 0x3e 0x3 0xd5
+0xbf 0x3f 0x3 0xd5
+
+# CHECK: isb
+# CHECK: isb #12
+0xdf 0x3f 0x3 0xd5
+0xdf 0x3c 0x3 0xd5
+
+# CHECK: msr spsel, #0
+# CHECK: msr daifset, #15
+# CHECK: msr daifclr, #12
+0xbf 0x40 0x0 0xd5
+0xdf 0x4f 0x3 0xd5
+0xff 0x4c 0x3 0xd5
+
+# CHECK: sys #7, c5, c9, #7, x5
+# CHECK: sys #0, c15, c15, #2
+# CHECK: sysl x9, #7, c5, c9, #7
+# CHECK: sysl x1, #0, c15, c15, #2
+0xe5 0x59 0xf 0xd5
+0x5f 0xff 0x8 0xd5
+0xe9 0x59 0x2f 0xd5
+0x41 0xff 0x28 0xd5
+
+# CHECK: sys #0, c7, c1, #0, xzr
+# CHECK: sys #0, c7, c5, #0, xzr
+# CHECK: sys #3, c7, c5, #1, x9
+0x1f 0x71 0x8 0xd5
+0x1f 0x75 0x8 0xd5
+0x29 0x75 0xb 0xd5
+
+# CHECK: sys #3, c7, c4, #1, x12
+# CHECK: sys #0, c7, c6, #1, xzr
+# CHECK: sys #0, c7, c6, #2, x2
+# CHECK: sys #3, c7, c10, #1, x9
+# CHECK: sys #0, c7, c10, #2, x10
+# CHECK: sys #3, c7, c11, #1, x0
+# CHECK: sys #3, c7, c14, #1, x3
+# CHECK: sys #0, c7, c14, #2, x30
+0x2c 0x74 0xb 0xd5
+0x3f 0x76 0x8 0xd5
+0x42 0x76 0x8 0xd5
+0x29 0x7a 0xb 0xd5
+0x4a 0x7a 0x8 0xd5
+0x20 0x7b 0xb 0xd5
+0x23 0x7e 0xb 0xd5
+0x5e 0x7e 0x8 0xd5
+
+
+# CHECK: msr teecr32_el1, x12
+# CHECK: msr osdtrrx_el1, x12
+# CHECK: msr mdccint_el1, x12
+# CHECK: msr mdscr_el1, x12
+# CHECK: msr osdtrtx_el1, x12
+# CHECK: msr dbgdtr_el0, x12
+# CHECK: msr dbgdtrtx_el0, x12
+# CHECK: msr oseccr_el1, x12
+# CHECK: msr dbgvcr32_el2, x12
+# CHECK: msr dbgbvr0_el1, x12
+# CHECK: msr dbgbvr1_el1, x12
+# CHECK: msr dbgbvr2_el1, x12
+# CHECK: msr dbgbvr3_el1, x12
+# CHECK: msr dbgbvr4_el1, x12
+# CHECK: msr dbgbvr5_el1, x12
+# CHECK: msr dbgbvr6_el1, x12
+# CHECK: msr dbgbvr7_el1, x12
+# CHECK: msr dbgbvr8_el1, x12
+# CHECK: msr dbgbvr9_el1, x12
+# CHECK: msr dbgbvr10_el1, x12
+# CHECK: msr dbgbvr11_el1, x12
+# CHECK: msr dbgbvr12_el1, x12
+# CHECK: msr dbgbvr13_el1, x12
+# CHECK: msr dbgbvr14_el1, x12
+# CHECK: msr dbgbvr15_el1, x12
+# CHECK: msr dbgbcr0_el1, x12
+# CHECK: msr dbgbcr1_el1, x12
+# CHECK: msr dbgbcr2_el1, x12
+# CHECK: msr dbgbcr3_el1, x12
+# CHECK: msr dbgbcr4_el1, x12
+# CHECK: msr dbgbcr5_el1, x12
+# CHECK: msr dbgbcr6_el1, x12
+# CHECK: msr dbgbcr7_el1, x12
+# CHECK: msr dbgbcr8_el1, x12
+# CHECK: msr dbgbcr9_el1, x12
+# CHECK: msr dbgbcr10_el1, x12
+# CHECK: msr dbgbcr11_el1, x12
+# CHECK: msr dbgbcr12_el1, x12
+# CHECK: msr dbgbcr13_el1, x12
+# CHECK: msr dbgbcr14_el1, x12
+# CHECK: msr dbgbcr15_el1, x12
+# CHECK: msr dbgwvr0_el1, x12
+# CHECK: msr dbgwvr1_el1, x12
+# CHECK: msr dbgwvr2_el1, x12
+# CHECK: msr dbgwvr3_el1, x12
+# CHECK: msr dbgwvr4_el1, x12
+# CHECK: msr dbgwvr5_el1, x12
+# CHECK: msr dbgwvr6_el1, x12
+# CHECK: msr dbgwvr7_el1, x12
+# CHECK: msr dbgwvr8_el1, x12
+# CHECK: msr dbgwvr9_el1, x12
+# CHECK: msr dbgwvr10_el1, x12
+# CHECK: msr dbgwvr11_el1, x12
+# CHECK: msr dbgwvr12_el1, x12
+# CHECK: msr dbgwvr13_el1, x12
+# CHECK: msr dbgwvr14_el1, x12
+# CHECK: msr dbgwvr15_el1, x12
+# CHECK: msr dbgwcr0_el1, x12
+# CHECK: msr dbgwcr1_el1, x12
+# CHECK: msr dbgwcr2_el1, x12
+# CHECK: msr dbgwcr3_el1, x12
+# CHECK: msr dbgwcr4_el1, x12
+# CHECK: msr dbgwcr5_el1, x12
+# CHECK: msr dbgwcr6_el1, x12
+# CHECK: msr dbgwcr7_el1, x12
+# CHECK: msr dbgwcr8_el1, x12
+# CHECK: msr dbgwcr9_el1, x12
+# CHECK: msr dbgwcr10_el1, x12
+# CHECK: msr dbgwcr11_el1, x12
+# CHECK: msr dbgwcr12_el1, x12
+# CHECK: msr dbgwcr13_el1, x12
+# CHECK: msr dbgwcr14_el1, x12
+# CHECK: msr dbgwcr15_el1, x12
+# CHECK: msr teehbr32_el1, x12
+# CHECK: msr oslar_el1, x12
+# CHECK: msr osdlr_el1, x12
+# CHECK: msr dbgprcr_el1, x12
+# CHECK: msr dbgclaimset_el1, x12
+# CHECK: msr dbgclaimclr_el1, x12
+# CHECK: msr csselr_el1, x12
+# CHECK: msr vpidr_el2, x12
+# CHECK: msr vmpidr_el2, x12
+# CHECK: msr sctlr_el1, x12
+# CHECK: msr sctlr_el2, x12
+# CHECK: msr sctlr_el3, x12
+# CHECK: msr actlr_el1, x12
+# CHECK: msr actlr_el2, x12
+# CHECK: msr actlr_el3, x12
+# CHECK: msr cpacr_el1, x12
+# CHECK: msr hcr_el2, x12
+# CHECK: msr scr_el3, x12
+# CHECK: msr mdcr_el2, x12
+# CHECK: msr sder32_el3, x12
+# CHECK: msr cptr_el2, x12
+# CHECK: msr cptr_el3, x12
+# CHECK: msr hstr_el2, x12
+# CHECK: msr hacr_el2, x12
+# CHECK: msr mdcr_el3, x12
+# CHECK: msr ttbr0_el1, x12
+# CHECK: msr ttbr0_el2, x12
+# CHECK: msr ttbr0_el3, x12
+# CHECK: msr ttbr1_el1, x12
+# CHECK: msr tcr_el1, x12
+# CHECK: msr tcr_el2, x12
+# CHECK: msr tcr_el3, x12
+# CHECK: msr vttbr_el2, x12
+# CHECK: msr vtcr_el2, x12
+# CHECK: msr dacr32_el2, x12
+# CHECK: msr spsr_el1, x12
+# CHECK: msr spsr_el2, x12
+# CHECK: msr spsr_el3, x12
+# CHECK: msr elr_el1, x12
+# CHECK: msr elr_el2, x12
+# CHECK: msr elr_el3, x12
+# CHECK: msr sp_el0, x12
+# CHECK: msr sp_el1, x12
+# CHECK: msr sp_el2, x12
+# CHECK: msr spsel, x12
+# CHECK: msr nzcv, x12
+# CHECK: msr daif, x12
+# CHECK: msr currentel, x12
+# CHECK: msr spsr_irq, x12
+# CHECK: msr spsr_abt, x12
+# CHECK: msr spsr_und, x12
+# CHECK: msr spsr_fiq, x12
+# CHECK: msr fpcr, x12
+# CHECK: msr fpsr, x12
+# CHECK: msr dspsr_el0, x12
+# CHECK: msr dlr_el0, x12
+# CHECK: msr ifsr32_el2, x12
+# CHECK: msr afsr0_el1, x12
+# CHECK: msr afsr0_el2, x12
+# CHECK: msr afsr0_el3, x12
+# CHECK: msr afsr1_el1, x12
+# CHECK: msr afsr1_el2, x12
+# CHECK: msr afsr1_el3, x12
+# CHECK: msr esr_el1, x12
+# CHECK: msr esr_el2, x12
+# CHECK: msr esr_el3, x12
+# CHECK: msr fpexc32_el2, x12
+# CHECK: msr far_el1, x12
+# CHECK: msr far_el2, x12
+# CHECK: msr far_el3, x12
+# CHECK: msr hpfar_el2, x12
+# CHECK: msr par_el1, x12
+# CHECK: msr pmcr_el0, x12
+# CHECK: msr pmcntenset_el0, x12
+# CHECK: msr pmcntenclr_el0, x12
+# CHECK: msr pmovsclr_el0, x12
+# CHECK: msr pmselr_el0, x12
+# CHECK: msr pmccntr_el0, x12
+# CHECK: msr pmxevtyper_el0, x12
+# CHECK: msr pmxevcntr_el0, x12
+# CHECK: msr pmuserenr_el0, x12
+# CHECK: msr pmintenset_el1, x12
+# CHECK: msr pmintenclr_el1, x12
+# CHECK: msr pmovsset_el0, x12
+# CHECK: msr mair_el1, x12
+# CHECK: msr mair_el2, x12
+# CHECK: msr mair_el3, x12
+# CHECK: msr amair_el1, x12
+# CHECK: msr amair_el2, x12
+# CHECK: msr amair_el3, x12
+# CHECK: msr vbar_el1, x12
+# CHECK: msr vbar_el2, x12
+# CHECK: msr vbar_el3, x12
+# CHECK: msr rmr_el1, x12
+# CHECK: msr rmr_el2, x12
+# CHECK: msr rmr_el3, x12
+# CHECK: msr tpidr_el0, x12
+# CHECK: msr tpidr_el2, x12
+# CHECK: msr tpidr_el3, x12
+# CHECK: msr tpidrro_el0, x12
+# CHECK: msr tpidr_el1, x12
+# CHECK: msr cntfrq_el0, x12
+# CHECK: msr cntvoff_el2, x12
+# CHECK: msr cntkctl_el1, x12
+# CHECK: msr cnthctl_el2, x12
+# CHECK: msr cntp_tval_el0, x12
+# CHECK: msr cnthp_tval_el2, x12
+# CHECK: msr cntps_tval_el1, x12
+# CHECK: msr cntp_ctl_el0, x12
+# CHECK: msr cnthp_ctl_el2, x12
+# CHECK: msr cntps_ctl_el1, x12
+# CHECK: msr cntp_cval_el0, x12
+# CHECK: msr cnthp_cval_el2, x12
+# CHECK: msr cntps_cval_el1, x12
+# CHECK: msr cntv_tval_el0, x12
+# CHECK: msr cntv_ctl_el0, x12
+# CHECK: msr cntv_cval_el0, x12
+# CHECK: msr pmevcntr0_el0, x12
+# CHECK: msr pmevcntr1_el0, x12
+# CHECK: msr pmevcntr2_el0, x12
+# CHECK: msr pmevcntr3_el0, x12
+# CHECK: msr pmevcntr4_el0, x12
+# CHECK: msr pmevcntr5_el0, x12
+# CHECK: msr pmevcntr6_el0, x12
+# CHECK: msr pmevcntr7_el0, x12
+# CHECK: msr pmevcntr8_el0, x12
+# CHECK: msr pmevcntr9_el0, x12
+# CHECK: msr pmevcntr10_el0, x12
+# CHECK: msr pmevcntr11_el0, x12
+# CHECK: msr pmevcntr12_el0, x12
+# CHECK: msr pmevcntr13_el0, x12
+# CHECK: msr pmevcntr14_el0, x12
+# CHECK: msr pmevcntr15_el0, x12
+# CHECK: msr pmevcntr16_el0, x12
+# CHECK: msr pmevcntr17_el0, x12
+# CHECK: msr pmevcntr18_el0, x12
+# CHECK: msr pmevcntr19_el0, x12
+# CHECK: msr pmevcntr20_el0, x12
+# CHECK: msr pmevcntr21_el0, x12
+# CHECK: msr pmevcntr22_el0, x12
+# CHECK: msr pmevcntr23_el0, x12
+# CHECK: msr pmevcntr24_el0, x12
+# CHECK: msr pmevcntr25_el0, x12
+# CHECK: msr pmevcntr26_el0, x12
+# CHECK: msr pmevcntr27_el0, x12
+# CHECK: msr pmevcntr28_el0, x12
+# CHECK: msr pmevcntr29_el0, x12
+# CHECK: msr pmevcntr30_el0, x12
+# CHECK: msr pmccfiltr_el0, x12
+# CHECK: msr pmevtyper0_el0, x12
+# CHECK: msr pmevtyper1_el0, x12
+# CHECK: msr pmevtyper2_el0, x12
+# CHECK: msr pmevtyper3_el0, x12
+# CHECK: msr pmevtyper4_el0, x12
+# CHECK: msr pmevtyper5_el0, x12
+# CHECK: msr pmevtyper6_el0, x12
+# CHECK: msr pmevtyper7_el0, x12
+# CHECK: msr pmevtyper8_el0, x12
+# CHECK: msr pmevtyper9_el0, x12
+# CHECK: msr pmevtyper10_el0, x12
+# CHECK: msr pmevtyper11_el0, x12
+# CHECK: msr pmevtyper12_el0, x12
+# CHECK: msr pmevtyper13_el0, x12
+# CHECK: msr pmevtyper14_el0, x12
+# CHECK: msr pmevtyper15_el0, x12
+# CHECK: msr pmevtyper16_el0, x12
+# CHECK: msr pmevtyper17_el0, x12
+# CHECK: msr pmevtyper18_el0, x12
+# CHECK: msr pmevtyper19_el0, x12
+# CHECK: msr pmevtyper20_el0, x12
+# CHECK: msr pmevtyper21_el0, x12
+# CHECK: msr pmevtyper22_el0, x12
+# CHECK: msr pmevtyper23_el0, x12
+# CHECK: msr pmevtyper24_el0, x12
+# CHECK: msr pmevtyper25_el0, x12
+# CHECK: msr pmevtyper26_el0, x12
+# CHECK: msr pmevtyper27_el0, x12
+# CHECK: msr pmevtyper28_el0, x12
+# CHECK: msr pmevtyper29_el0, x12
+# CHECK: msr pmevtyper30_el0, x12
+# CHECK: mrs x9, teecr32_el1
+# CHECK: mrs x9, osdtrrx_el1
+# CHECK: mrs x9, mdccsr_el0
+# CHECK: mrs x9, mdccint_el1
+# CHECK: mrs x9, mdscr_el1
+# CHECK: mrs x9, osdtrtx_el1
+# CHECK: mrs x9, dbgdtr_el0
+# CHECK: mrs x9, dbgdtrrx_el0
+# CHECK: mrs x9, oseccr_el1
+# CHECK: mrs x9, dbgvcr32_el2
+# CHECK: mrs x9, dbgbvr0_el1
+# CHECK: mrs x9, dbgbvr1_el1
+# CHECK: mrs x9, dbgbvr2_el1
+# CHECK: mrs x9, dbgbvr3_el1
+# CHECK: mrs x9, dbgbvr4_el1
+# CHECK: mrs x9, dbgbvr5_el1
+# CHECK: mrs x9, dbgbvr6_el1
+# CHECK: mrs x9, dbgbvr7_el1
+# CHECK: mrs x9, dbgbvr8_el1
+# CHECK: mrs x9, dbgbvr9_el1
+# CHECK: mrs x9, dbgbvr10_el1
+# CHECK: mrs x9, dbgbvr11_el1
+# CHECK: mrs x9, dbgbvr12_el1
+# CHECK: mrs x9, dbgbvr13_el1
+# CHECK: mrs x9, dbgbvr14_el1
+# CHECK: mrs x9, dbgbvr15_el1
+# CHECK: mrs x9, dbgbcr0_el1
+# CHECK: mrs x9, dbgbcr1_el1
+# CHECK: mrs x9, dbgbcr2_el1
+# CHECK: mrs x9, dbgbcr3_el1
+# CHECK: mrs x9, dbgbcr4_el1
+# CHECK: mrs x9, dbgbcr5_el1
+# CHECK: mrs x9, dbgbcr6_el1
+# CHECK: mrs x9, dbgbcr7_el1
+# CHECK: mrs x9, dbgbcr8_el1
+# CHECK: mrs x9, dbgbcr9_el1
+# CHECK: mrs x9, dbgbcr10_el1
+# CHECK: mrs x9, dbgbcr11_el1
+# CHECK: mrs x9, dbgbcr12_el1
+# CHECK: mrs x9, dbgbcr13_el1
+# CHECK: mrs x9, dbgbcr14_el1
+# CHECK: mrs x9, dbgbcr15_el1
+# CHECK: mrs x9, dbgwvr0_el1
+# CHECK: mrs x9, dbgwvr1_el1
+# CHECK: mrs x9, dbgwvr2_el1
+# CHECK: mrs x9, dbgwvr3_el1
+# CHECK: mrs x9, dbgwvr4_el1
+# CHECK: mrs x9, dbgwvr5_el1
+# CHECK: mrs x9, dbgwvr6_el1
+# CHECK: mrs x9, dbgwvr7_el1
+# CHECK: mrs x9, dbgwvr8_el1
+# CHECK: mrs x9, dbgwvr9_el1
+# CHECK: mrs x9, dbgwvr10_el1
+# CHECK: mrs x9, dbgwvr11_el1
+# CHECK: mrs x9, dbgwvr12_el1
+# CHECK: mrs x9, dbgwvr13_el1
+# CHECK: mrs x9, dbgwvr14_el1
+# CHECK: mrs x9, dbgwvr15_el1
+# CHECK: mrs x9, dbgwcr0_el1
+# CHECK: mrs x9, dbgwcr1_el1
+# CHECK: mrs x9, dbgwcr2_el1
+# CHECK: mrs x9, dbgwcr3_el1
+# CHECK: mrs x9, dbgwcr4_el1
+# CHECK: mrs x9, dbgwcr5_el1
+# CHECK: mrs x9, dbgwcr6_el1
+# CHECK: mrs x9, dbgwcr7_el1
+# CHECK: mrs x9, dbgwcr8_el1
+# CHECK: mrs x9, dbgwcr9_el1
+# CHECK: mrs x9, dbgwcr10_el1
+# CHECK: mrs x9, dbgwcr11_el1
+# CHECK: mrs x9, dbgwcr12_el1
+# CHECK: mrs x9, dbgwcr13_el1
+# CHECK: mrs x9, dbgwcr14_el1
+# CHECK: mrs x9, dbgwcr15_el1
+# CHECK: mrs x9, mdrar_el1
+# CHECK: mrs x9, teehbr32_el1
+# CHECK: mrs x9, oslsr_el1
+# CHECK: mrs x9, osdlr_el1
+# CHECK: mrs x9, dbgprcr_el1
+# CHECK: mrs x9, dbgclaimset_el1
+# CHECK: mrs x9, dbgclaimclr_el1
+# CHECK: mrs x9, dbgauthstatus_el1
+# CHECK: mrs x9, midr_el1
+# CHECK: mrs x9, ccsidr_el1
+# CHECK: mrs x9, csselr_el1
+# CHECK: mrs x9, vpidr_el2
+# CHECK: mrs x9, clidr_el1
+# CHECK: mrs x9, ctr_el0
+# CHECK: mrs x9, mpidr_el1
+# CHECK: mrs x9, vmpidr_el2
+# CHECK: mrs x9, revidr_el1
+# CHECK: mrs x9, aidr_el1
+# CHECK: mrs x9, dczid_el0
+# CHECK: mrs x9, id_pfr0_el1
+# CHECK: mrs x9, id_pfr1_el1
+# CHECK: mrs x9, id_dfr0_el1
+# CHECK: mrs x9, id_afr0_el1
+# CHECK: mrs x9, id_mmfr0_el1
+# CHECK: mrs x9, id_mmfr1_el1
+# CHECK: mrs x9, id_mmfr2_el1
+# CHECK: mrs x9, id_mmfr3_el1
+# CHECK: mrs x9, id_isar0_el1
+# CHECK: mrs x9, id_isar1_el1
+# CHECK: mrs x9, id_isar2_el1
+# CHECK: mrs x9, id_isar3_el1
+# CHECK: mrs x9, id_isar4_el1
+# CHECK: mrs x9, id_isar5_el1
+# CHECK: mrs x9, mvfr0_el1
+# CHECK: mrs x9, mvfr1_el1
+# CHECK: mrs x9, mvfr2_el1
+# CHECK: mrs x9, id_aa64pfr0_el1
+# CHECK: mrs x9, id_aa64pfr1_el1
+# CHECK: mrs x9, id_aa64dfr0_el1
+# CHECK: mrs x9, id_aa64dfr1_el1
+# CHECK: mrs x9, id_aa64afr0_el1
+# CHECK: mrs x9, id_aa64afr1_el1
+# CHECK: mrs x9, id_aa64isar0_el1
+# CHECK: mrs x9, id_aa64isar1_el1
+# CHECK: mrs x9, id_aa64mmfr0_el1
+# CHECK: mrs x9, id_aa64mmfr1_el1
+# CHECK: mrs x9, sctlr_el1
+# CHECK: mrs x9, sctlr_el2
+# CHECK: mrs x9, sctlr_el3
+# CHECK: mrs x9, actlr_el1
+# CHECK: mrs x9, actlr_el2
+# CHECK: mrs x9, actlr_el3
+# CHECK: mrs x9, cpacr_el1
+# CHECK: mrs x9, hcr_el2
+# CHECK: mrs x9, scr_el3
+# CHECK: mrs x9, mdcr_el2
+# CHECK: mrs x9, sder32_el3
+# CHECK: mrs x9, cptr_el2
+# CHECK: mrs x9, cptr_el3
+# CHECK: mrs x9, hstr_el2
+# CHECK: mrs x9, hacr_el2
+# CHECK: mrs x9, mdcr_el3
+# CHECK: mrs x9, ttbr0_el1
+# CHECK: mrs x9, ttbr0_el2
+# CHECK: mrs x9, ttbr0_el3
+# CHECK: mrs x9, ttbr1_el1
+# CHECK: mrs x9, tcr_el1
+# CHECK: mrs x9, tcr_el2
+# CHECK: mrs x9, tcr_el3
+# CHECK: mrs x9, vttbr_el2
+# CHECK: mrs x9, vtcr_el2
+# CHECK: mrs x9, dacr32_el2
+# CHECK: mrs x9, spsr_el1
+# CHECK: mrs x9, spsr_el2
+# CHECK: mrs x9, spsr_el3
+# CHECK: mrs x9, elr_el1
+# CHECK: mrs x9, elr_el2
+# CHECK: mrs x9, elr_el3
+# CHECK: mrs x9, sp_el0
+# CHECK: mrs x9, sp_el1
+# CHECK: mrs x9, sp_el2
+# CHECK: mrs x9, spsel
+# CHECK: mrs x9, nzcv
+# CHECK: mrs x9, daif
+# CHECK: mrs x9, currentel
+# CHECK: mrs x9, spsr_irq
+# CHECK: mrs x9, spsr_abt
+# CHECK: mrs x9, spsr_und
+# CHECK: mrs x9, spsr_fiq
+# CHECK: mrs x9, fpcr
+# CHECK: mrs x9, fpsr
+# CHECK: mrs x9, dspsr_el0
+# CHECK: mrs x9, dlr_el0
+# CHECK: mrs x9, ifsr32_el2
+# CHECK: mrs x9, afsr0_el1
+# CHECK: mrs x9, afsr0_el2
+# CHECK: mrs x9, afsr0_el3
+# CHECK: mrs x9, afsr1_el1
+# CHECK: mrs x9, afsr1_el2
+# CHECK: mrs x9, afsr1_el3
+# CHECK: mrs x9, esr_el1
+# CHECK: mrs x9, esr_el2
+# CHECK: mrs x9, esr_el3
+# CHECK: mrs x9, fpexc32_el2
+# CHECK: mrs x9, far_el1
+# CHECK: mrs x9, far_el2
+# CHECK: mrs x9, far_el3
+# CHECK: mrs x9, hpfar_el2
+# CHECK: mrs x9, par_el1
+# CHECK: mrs x9, pmcr_el0
+# CHECK: mrs x9, pmcntenset_el0
+# CHECK: mrs x9, pmcntenclr_el0
+# CHECK: mrs x9, pmovsclr_el0
+# CHECK: mrs x9, pmselr_el0
+# CHECK: mrs x9, pmceid0_el0
+# CHECK: mrs x9, pmceid1_el0
+# CHECK: mrs x9, pmccntr_el0
+# CHECK: mrs x9, pmxevtyper_el0
+# CHECK: mrs x9, pmxevcntr_el0
+# CHECK: mrs x9, pmuserenr_el0
+# CHECK: mrs x9, pmintenset_el1
+# CHECK: mrs x9, pmintenclr_el1
+# CHECK: mrs x9, pmovsset_el0
+# CHECK: mrs x9, mair_el1
+# CHECK: mrs x9, mair_el2
+# CHECK: mrs x9, mair_el3
+# CHECK: mrs x9, amair_el1
+# CHECK: mrs x9, amair_el2
+# CHECK: mrs x9, amair_el3
+# CHECK: mrs x9, vbar_el1
+# CHECK: mrs x9, vbar_el2
+# CHECK: mrs x9, vbar_el3
+# CHECK: mrs x9, rvbar_el1
+# CHECK: mrs x9, rvbar_el2
+# CHECK: mrs x9, rvbar_el3
+# CHECK: mrs x9, rmr_el1
+# CHECK: mrs x9, rmr_el2
+# CHECK: mrs x9, rmr_el3
+# CHECK: mrs x9, isr_el1
+# CHECK: mrs x9, contextidr_el1
+# CHECK: mrs x9, tpidr_el0
+# CHECK: mrs x9, tpidr_el2
+# CHECK: mrs x9, tpidr_el3
+# CHECK: mrs x9, tpidrro_el0
+# CHECK: mrs x9, tpidr_el1
+# CHECK: mrs x9, cntfrq_el0
+# CHECK: mrs x9, cntpct_el0
+# CHECK: mrs x9, cntvct_el0
+# CHECK: mrs x9, cntvoff_el2
+# CHECK: mrs x9, cntkctl_el1
+# CHECK: mrs x9, cnthctl_el2
+# CHECK: mrs x9, cntp_tval_el0
+# CHECK: mrs x9, cnthp_tval_el2
+# CHECK: mrs x9, cntps_tval_el1
+# CHECK: mrs x9, cntp_ctl_el0
+# CHECK: mrs x9, cnthp_ctl_el2
+# CHECK: mrs x9, cntps_ctl_el1
+# CHECK: mrs x9, cntp_cval_el0
+# CHECK: mrs x9, cnthp_cval_el2
+# CHECK: mrs x9, cntps_cval_el1
+# CHECK: mrs x9, cntv_tval_el0
+# CHECK: mrs x9, cntv_ctl_el0
+# CHECK: mrs x9, cntv_cval_el0
+# CHECK: mrs x9, pmevcntr0_el0
+# CHECK: mrs x9, pmevcntr1_el0
+# CHECK: mrs x9, pmevcntr2_el0
+# CHECK: mrs x9, pmevcntr3_el0
+# CHECK: mrs x9, pmevcntr4_el0
+# CHECK: mrs x9, pmevcntr5_el0
+# CHECK: mrs x9, pmevcntr6_el0
+# CHECK: mrs x9, pmevcntr7_el0
+# CHECK: mrs x9, pmevcntr8_el0
+# CHECK: mrs x9, pmevcntr9_el0
+# CHECK: mrs x9, pmevcntr10_el0
+# CHECK: mrs x9, pmevcntr11_el0
+# CHECK: mrs x9, pmevcntr12_el0
+# CHECK: mrs x9, pmevcntr13_el0
+# CHECK: mrs x9, pmevcntr14_el0
+# CHECK: mrs x9, pmevcntr15_el0
+# CHECK: mrs x9, pmevcntr16_el0
+# CHECK: mrs x9, pmevcntr17_el0
+# CHECK: mrs x9, pmevcntr18_el0
+# CHECK: mrs x9, pmevcntr19_el0
+# CHECK: mrs x9, pmevcntr20_el0
+# CHECK: mrs x9, pmevcntr21_el0
+# CHECK: mrs x9, pmevcntr22_el0
+# CHECK: mrs x9, pmevcntr23_el0
+# CHECK: mrs x9, pmevcntr24_el0
+# CHECK: mrs x9, pmevcntr25_el0
+# CHECK: mrs x9, pmevcntr26_el0
+# CHECK: mrs x9, pmevcntr27_el0
+# CHECK: mrs x9, pmevcntr28_el0
+# CHECK: mrs x9, pmevcntr29_el0
+# CHECK: mrs x9, pmevcntr30_el0
+# CHECK: mrs x9, pmccfiltr_el0
+# CHECK: mrs x9, pmevtyper0_el0
+# CHECK: mrs x9, pmevtyper1_el0
+# CHECK: mrs x9, pmevtyper2_el0
+# CHECK: mrs x9, pmevtyper3_el0
+# CHECK: mrs x9, pmevtyper4_el0
+# CHECK: mrs x9, pmevtyper5_el0
+# CHECK: mrs x9, pmevtyper6_el0
+# CHECK: mrs x9, pmevtyper7_el0
+# CHECK: mrs x9, pmevtyper8_el0
+# CHECK: mrs x9, pmevtyper9_el0
+# CHECK: mrs x9, pmevtyper10_el0
+# CHECK: mrs x9, pmevtyper11_el0
+# CHECK: mrs x9, pmevtyper12_el0
+# CHECK: mrs x9, pmevtyper13_el0
+# CHECK: mrs x9, pmevtyper14_el0
+# CHECK: mrs x9, pmevtyper15_el0
+# CHECK: mrs x9, pmevtyper16_el0
+# CHECK: mrs x9, pmevtyper17_el0
+# CHECK: mrs x9, pmevtyper18_el0
+# CHECK: mrs x9, pmevtyper19_el0
+# CHECK: mrs x9, pmevtyper20_el0
+# CHECK: mrs x9, pmevtyper21_el0
+# CHECK: mrs x9, pmevtyper22_el0
+# CHECK: mrs x9, pmevtyper23_el0
+# CHECK: mrs x9, pmevtyper24_el0
+# CHECK: mrs x9, pmevtyper25_el0
+# CHECK: mrs x9, pmevtyper26_el0
+# CHECK: mrs x9, pmevtyper27_el0
+# CHECK: mrs x9, pmevtyper28_el0
+# CHECK: mrs x9, pmevtyper29_el0
+# CHECK: mrs x9, pmevtyper30_el0
+
+0xc 0x0 0x12 0xd5
+0x4c 0x0 0x10 0xd5
+0xc 0x2 0x10 0xd5
+0x4c 0x2 0x10 0xd5
+0x4c 0x3 0x10 0xd5
+0xc 0x4 0x13 0xd5
+0xc 0x5 0x13 0xd5
+0x4c 0x6 0x10 0xd5
+0xc 0x7 0x14 0xd5
+0x8c 0x0 0x10 0xd5
+0x8c 0x1 0x10 0xd5
+0x8c 0x2 0x10 0xd5
+0x8c 0x3 0x10 0xd5
+0x8c 0x4 0x10 0xd5
+0x8c 0x5 0x10 0xd5
+0x8c 0x6 0x10 0xd5
+0x8c 0x7 0x10 0xd5
+0x8c 0x8 0x10 0xd5
+0x8c 0x9 0x10 0xd5
+0x8c 0xa 0x10 0xd5
+0x8c 0xb 0x10 0xd5
+0x8c 0xc 0x10 0xd5
+0x8c 0xd 0x10 0xd5
+0x8c 0xe 0x10 0xd5
+0x8c 0xf 0x10 0xd5
+0xac 0x0 0x10 0xd5
+0xac 0x1 0x10 0xd5
+0xac 0x2 0x10 0xd5
+0xac 0x3 0x10 0xd5
+0xac 0x4 0x10 0xd5
+0xac 0x5 0x10 0xd5
+0xac 0x6 0x10 0xd5
+0xac 0x7 0x10 0xd5
+0xac 0x8 0x10 0xd5
+0xac 0x9 0x10 0xd5
+0xac 0xa 0x10 0xd5
+0xac 0xb 0x10 0xd5
+0xac 0xc 0x10 0xd5
+0xac 0xd 0x10 0xd5
+0xac 0xe 0x10 0xd5
+0xac 0xf 0x10 0xd5
+0xcc 0x0 0x10 0xd5
+0xcc 0x1 0x10 0xd5
+0xcc 0x2 0x10 0xd5
+0xcc 0x3 0x10 0xd5
+0xcc 0x4 0x10 0xd5
+0xcc 0x5 0x10 0xd5
+0xcc 0x6 0x10 0xd5
+0xcc 0x7 0x10 0xd5
+0xcc 0x8 0x10 0xd5
+0xcc 0x9 0x10 0xd5
+0xcc 0xa 0x10 0xd5
+0xcc 0xb 0x10 0xd5
+0xcc 0xc 0x10 0xd5
+0xcc 0xd 0x10 0xd5
+0xcc 0xe 0x10 0xd5
+0xcc 0xf 0x10 0xd5
+0xec 0x0 0x10 0xd5
+0xec 0x1 0x10 0xd5
+0xec 0x2 0x10 0xd5
+0xec 0x3 0x10 0xd5
+0xec 0x4 0x10 0xd5
+0xec 0x5 0x10 0xd5
+0xec 0x6 0x10 0xd5
+0xec 0x7 0x10 0xd5
+0xec 0x8 0x10 0xd5
+0xec 0x9 0x10 0xd5
+0xec 0xa 0x10 0xd5
+0xec 0xb 0x10 0xd5
+0xec 0xc 0x10 0xd5
+0xec 0xd 0x10 0xd5
+0xec 0xe 0x10 0xd5
+0xec 0xf 0x10 0xd5
+0xc 0x10 0x12 0xd5
+0x8c 0x10 0x10 0xd5
+0x8c 0x13 0x10 0xd5
+0x8c 0x14 0x10 0xd5
+0xcc 0x78 0x10 0xd5
+0xcc 0x79 0x10 0xd5
+0xc 0x0 0x1a 0xd5
+0xc 0x0 0x1c 0xd5
+0xac 0x0 0x1c 0xd5
+0xc 0x10 0x18 0xd5
+0xc 0x10 0x1c 0xd5
+0xc 0x10 0x1e 0xd5
+0x2c 0x10 0x18 0xd5
+0x2c 0x10 0x1c 0xd5
+0x2c 0x10 0x1e 0xd5
+0x4c 0x10 0x18 0xd5
+0xc 0x11 0x1c 0xd5
+0xc 0x11 0x1e 0xd5
+0x2c 0x11 0x1c 0xd5
+0x2c 0x11 0x1e 0xd5
+0x4c 0x11 0x1c 0xd5
+0x4c 0x11 0x1e 0xd5
+0x6c 0x11 0x1c 0xd5
+0xec 0x11 0x1c 0xd5
+0x2c 0x13 0x1e 0xd5
+0xc 0x20 0x18 0xd5
+0xc 0x20 0x1c 0xd5
+0xc 0x20 0x1e 0xd5
+0x2c 0x20 0x18 0xd5
+0x4c 0x20 0x18 0xd5
+0x4c 0x20 0x1c 0xd5
+0x4c 0x20 0x1e 0xd5
+0xc 0x21 0x1c 0xd5
+0x4c 0x21 0x1c 0xd5
+0xc 0x30 0x1c 0xd5
+0xc 0x40 0x18 0xd5
+0xc 0x40 0x1c 0xd5
+0xc 0x40 0x1e 0xd5
+0x2c 0x40 0x18 0xd5
+0x2c 0x40 0x1c 0xd5
+0x2c 0x40 0x1e 0xd5
+0xc 0x41 0x18 0xd5
+0xc 0x41 0x1c 0xd5
+0xc 0x41 0x1e 0xd5
+0xc 0x42 0x18 0xd5
+0xc 0x42 0x1b 0xd5
+0x2c 0x42 0x1b 0xd5
+0x4c 0x42 0x18 0xd5
+0xc 0x43 0x1c 0xd5
+0x2c 0x43 0x1c 0xd5
+0x4c 0x43 0x1c 0xd5
+0x6c 0x43 0x1c 0xd5
+0xc 0x44 0x1b 0xd5
+0x2c 0x44 0x1b 0xd5
+0xc 0x45 0x1b 0xd5
+0x2c 0x45 0x1b 0xd5
+0x2c 0x50 0x1c 0xd5
+0xc 0x51 0x18 0xd5
+0xc 0x51 0x1c 0xd5
+0xc 0x51 0x1e 0xd5
+0x2c 0x51 0x18 0xd5
+0x2c 0x51 0x1c 0xd5
+0x2c 0x51 0x1e 0xd5
+0xc 0x52 0x18 0xd5
+0xc 0x52 0x1c 0xd5
+0xc 0x52 0x1e 0xd5
+0xc 0x53 0x1c 0xd5
+0xc 0x60 0x18 0xd5
+0xc 0x60 0x1c 0xd5
+0xc 0x60 0x1e 0xd5
+0x8c 0x60 0x1c 0xd5
+0xc 0x74 0x18 0xd5
+0xc 0x9c 0x1b 0xd5
+0x2c 0x9c 0x1b 0xd5
+0x4c 0x9c 0x1b 0xd5
+0x6c 0x9c 0x1b 0xd5
+0xac 0x9c 0x1b 0xd5
+0xc 0x9d 0x1b 0xd5
+0x2c 0x9d 0x1b 0xd5
+0x4c 0x9d 0x1b 0xd5
+0xc 0x9e 0x1b 0xd5
+0x2c 0x9e 0x18 0xd5
+0x4c 0x9e 0x18 0xd5
+0x6c 0x9e 0x1b 0xd5
+0xc 0xa2 0x18 0xd5
+0xc 0xa2 0x1c 0xd5
+0xc 0xa2 0x1e 0xd5
+0xc 0xa3 0x18 0xd5
+0xc 0xa3 0x1c 0xd5
+0xc 0xa3 0x1e 0xd5
+0xc 0xc0 0x18 0xd5
+0xc 0xc0 0x1c 0xd5
+0xc 0xc0 0x1e 0xd5
+0x4c 0xc0 0x18 0xd5
+0x4c 0xc0 0x1c 0xd5
+0x4c 0xc0 0x1e 0xd5
+0x4c 0xd0 0x1b 0xd5
+0x4c 0xd0 0x1c 0xd5
+0x4c 0xd0 0x1e 0xd5
+0x6c 0xd0 0x1b 0xd5
+0x8c 0xd0 0x18 0xd5
+0xc 0xe0 0x1b 0xd5
+0x6c 0xe0 0x1c 0xd5
+0xc 0xe1 0x18 0xd5
+0xc 0xe1 0x1c 0xd5
+0xc 0xe2 0x1b 0xd5
+0xc 0xe2 0x1c 0xd5
+0xc 0xe2 0x1f 0xd5
+0x2c 0xe2 0x1b 0xd5
+0x2c 0xe2 0x1c 0xd5
+0x2c 0xe2 0x1f 0xd5
+0x4c 0xe2 0x1b 0xd5
+0x4c 0xe2 0x1c 0xd5
+0x4c 0xe2 0x1f 0xd5
+0xc 0xe3 0x1b 0xd5
+0x2c 0xe3 0x1b 0xd5
+0x4c 0xe3 0x1b 0xd5
+0xc 0xe8 0x1b 0xd5
+0x2c 0xe8 0x1b 0xd5
+0x4c 0xe8 0x1b 0xd5
+0x6c 0xe8 0x1b 0xd5
+0x8c 0xe8 0x1b 0xd5
+0xac 0xe8 0x1b 0xd5
+0xcc 0xe8 0x1b 0xd5
+0xec 0xe8 0x1b 0xd5
+0xc 0xe9 0x1b 0xd5
+0x2c 0xe9 0x1b 0xd5
+0x4c 0xe9 0x1b 0xd5
+0x6c 0xe9 0x1b 0xd5
+0x8c 0xe9 0x1b 0xd5
+0xac 0xe9 0x1b 0xd5
+0xcc 0xe9 0x1b 0xd5
+0xec 0xe9 0x1b 0xd5
+0xc 0xea 0x1b 0xd5
+0x2c 0xea 0x1b 0xd5
+0x4c 0xea 0x1b 0xd5
+0x6c 0xea 0x1b 0xd5
+0x8c 0xea 0x1b 0xd5
+0xac 0xea 0x1b 0xd5
+0xcc 0xea 0x1b 0xd5
+0xec 0xea 0x1b 0xd5
+0xc 0xeb 0x1b 0xd5
+0x2c 0xeb 0x1b 0xd5
+0x4c 0xeb 0x1b 0xd5
+0x6c 0xeb 0x1b 0xd5
+0x8c 0xeb 0x1b 0xd5
+0xac 0xeb 0x1b 0xd5
+0xcc 0xeb 0x1b 0xd5
+0xec 0xef 0x1b 0xd5
+0xc 0xec 0x1b 0xd5
+0x2c 0xec 0x1b 0xd5
+0x4c 0xec 0x1b 0xd5
+0x6c 0xec 0x1b 0xd5
+0x8c 0xec 0x1b 0xd5
+0xac 0xec 0x1b 0xd5
+0xcc 0xec 0x1b 0xd5
+0xec 0xec 0x1b 0xd5
+0xc 0xed 0x1b 0xd5
+0x2c 0xed 0x1b 0xd5
+0x4c 0xed 0x1b 0xd5
+0x6c 0xed 0x1b 0xd5
+0x8c 0xed 0x1b 0xd5
+0xac 0xed 0x1b 0xd5
+0xcc 0xed 0x1b 0xd5
+0xec 0xed 0x1b 0xd5
+0xc 0xee 0x1b 0xd5
+0x2c 0xee 0x1b 0xd5
+0x4c 0xee 0x1b 0xd5
+0x6c 0xee 0x1b 0xd5
+0x8c 0xee 0x1b 0xd5
+0xac 0xee 0x1b 0xd5
+0xcc 0xee 0x1b 0xd5
+0xec 0xee 0x1b 0xd5
+0xc 0xef 0x1b 0xd5
+0x2c 0xef 0x1b 0xd5
+0x4c 0xef 0x1b 0xd5
+0x6c 0xef 0x1b 0xd5
+0x8c 0xef 0x1b 0xd5
+0xac 0xef 0x1b 0xd5
+0xcc 0xef 0x1b 0xd5
+0x9 0x0 0x32 0xd5
+0x49 0x0 0x30 0xd5
+0x9 0x1 0x33 0xd5
+0x9 0x2 0x30 0xd5
+0x49 0x2 0x30 0xd5
+0x49 0x3 0x30 0xd5
+0x9 0x4 0x33 0xd5
+0x9 0x5 0x33 0xd5
+0x49 0x6 0x30 0xd5
+0x9 0x7 0x34 0xd5
+0x89 0x0 0x30 0xd5
+0x89 0x1 0x30 0xd5
+0x89 0x2 0x30 0xd5
+0x89 0x3 0x30 0xd5
+0x89 0x4 0x30 0xd5
+0x89 0x5 0x30 0xd5
+0x89 0x6 0x30 0xd5
+0x89 0x7 0x30 0xd5
+0x89 0x8 0x30 0xd5
+0x89 0x9 0x30 0xd5
+0x89 0xa 0x30 0xd5
+0x89 0xb 0x30 0xd5
+0x89 0xc 0x30 0xd5
+0x89 0xd 0x30 0xd5
+0x89 0xe 0x30 0xd5
+0x89 0xf 0x30 0xd5
+0xa9 0x0 0x30 0xd5
+0xa9 0x1 0x30 0xd5
+0xa9 0x2 0x30 0xd5
+0xa9 0x3 0x30 0xd5
+0xa9 0x4 0x30 0xd5
+0xa9 0x5 0x30 0xd5
+0xa9 0x6 0x30 0xd5
+0xa9 0x7 0x30 0xd5
+0xa9 0x8 0x30 0xd5
+0xa9 0x9 0x30 0xd5
+0xa9 0xa 0x30 0xd5
+0xa9 0xb 0x30 0xd5
+0xa9 0xc 0x30 0xd5
+0xa9 0xd 0x30 0xd5
+0xa9 0xe 0x30 0xd5
+0xa9 0xf 0x30 0xd5
+0xc9 0x0 0x30 0xd5
+0xc9 0x1 0x30 0xd5
+0xc9 0x2 0x30 0xd5
+0xc9 0x3 0x30 0xd5
+0xc9 0x4 0x30 0xd5
+0xc9 0x5 0x30 0xd5
+0xc9 0x6 0x30 0xd5
+0xc9 0x7 0x30 0xd5
+0xc9 0x8 0x30 0xd5
+0xc9 0x9 0x30 0xd5
+0xc9 0xa 0x30 0xd5
+0xc9 0xb 0x30 0xd5
+0xc9 0xc 0x30 0xd5
+0xc9 0xd 0x30 0xd5
+0xc9 0xe 0x30 0xd5
+0xc9 0xf 0x30 0xd5
+0xe9 0x0 0x30 0xd5
+0xe9 0x1 0x30 0xd5
+0xe9 0x2 0x30 0xd5
+0xe9 0x3 0x30 0xd5
+0xe9 0x4 0x30 0xd5
+0xe9 0x5 0x30 0xd5
+0xe9 0x6 0x30 0xd5
+0xe9 0x7 0x30 0xd5
+0xe9 0x8 0x30 0xd5
+0xe9 0x9 0x30 0xd5
+0xe9 0xa 0x30 0xd5
+0xe9 0xb 0x30 0xd5
+0xe9 0xc 0x30 0xd5
+0xe9 0xd 0x30 0xd5
+0xe9 0xe 0x30 0xd5
+0xe9 0xf 0x30 0xd5
+0x9 0x10 0x30 0xd5
+0x9 0x10 0x32 0xd5
+0x89 0x11 0x30 0xd5
+0x89 0x13 0x30 0xd5
+0x89 0x14 0x30 0xd5
+0xc9 0x78 0x30 0xd5
+0xc9 0x79 0x30 0xd5
+0xc9 0x7e 0x30 0xd5
+0x9 0x0 0x38 0xd5
+0x9 0x0 0x39 0xd5
+0x9 0x0 0x3a 0xd5
+0x9 0x0 0x3c 0xd5
+0x29 0x0 0x39 0xd5
+0x29 0x0 0x3b 0xd5
+0xa9 0x0 0x38 0xd5
+0xa9 0x0 0x3c 0xd5
+0xc9 0x0 0x38 0xd5
+0xe9 0x0 0x39 0xd5
+0xe9 0x0 0x3b 0xd5
+0x9 0x1 0x38 0xd5
+0x29 0x1 0x38 0xd5
+0x49 0x1 0x38 0xd5
+0x69 0x1 0x38 0xd5
+0x89 0x1 0x38 0xd5
+0xa9 0x1 0x38 0xd5
+0xc9 0x1 0x38 0xd5
+0xe9 0x1 0x38 0xd5
+0x9 0x2 0x38 0xd5
+0x29 0x2 0x38 0xd5
+0x49 0x2 0x38 0xd5
+0x69 0x2 0x38 0xd5
+0x89 0x2 0x38 0xd5
+0xa9 0x2 0x38 0xd5
+0x9 0x3 0x38 0xd5
+0x29 0x3 0x38 0xd5
+0x49 0x3 0x38 0xd5
+0x9 0x4 0x38 0xd5
+0x29 0x4 0x38 0xd5
+0x9 0x5 0x38 0xd5
+0x29 0x5 0x38 0xd5
+0x89 0x5 0x38 0xd5
+0xa9 0x5 0x38 0xd5
+0x9 0x6 0x38 0xd5
+0x29 0x6 0x38 0xd5
+0x9 0x7 0x38 0xd5
+0x29 0x7 0x38 0xd5
+0x9 0x10 0x38 0xd5
+0x9 0x10 0x3c 0xd5
+0x9 0x10 0x3e 0xd5
+0x29 0x10 0x38 0xd5
+0x29 0x10 0x3c 0xd5
+0x29 0x10 0x3e 0xd5
+0x49 0x10 0x38 0xd5
+0x9 0x11 0x3c 0xd5
+0x9 0x11 0x3e 0xd5
+0x29 0x11 0x3c 0xd5
+0x29 0x11 0x3e 0xd5
+0x49 0x11 0x3c 0xd5
+0x49 0x11 0x3e 0xd5
+0x69 0x11 0x3c 0xd5
+0xe9 0x11 0x3c 0xd5
+0x29 0x13 0x3e 0xd5
+0x9 0x20 0x38 0xd5
+0x9 0x20 0x3c 0xd5
+0x9 0x20 0x3e 0xd5
+0x29 0x20 0x38 0xd5
+0x49 0x20 0x38 0xd5
+0x49 0x20 0x3c 0xd5
+0x49 0x20 0x3e 0xd5
+0x9 0x21 0x3c 0xd5
+0x49 0x21 0x3c 0xd5
+0x9 0x30 0x3c 0xd5
+0x9 0x40 0x38 0xd5
+0x9 0x40 0x3c 0xd5
+0x9 0x40 0x3e 0xd5
+0x29 0x40 0x38 0xd5
+0x29 0x40 0x3c 0xd5
+0x29 0x40 0x3e 0xd5
+0x9 0x41 0x38 0xd5
+0x9 0x41 0x3c 0xd5
+0x9 0x41 0x3e 0xd5
+0x9 0x42 0x38 0xd5
+0x9 0x42 0x3b 0xd5
+0x29 0x42 0x3b 0xd5
+0x49 0x42 0x38 0xd5
+0x9 0x43 0x3c 0xd5
+0x29 0x43 0x3c 0xd5
+0x49 0x43 0x3c 0xd5
+0x69 0x43 0x3c 0xd5
+0x9 0x44 0x3b 0xd5
+0x29 0x44 0x3b 0xd5
+0x9 0x45 0x3b 0xd5
+0x29 0x45 0x3b 0xd5
+0x29 0x50 0x3c 0xd5
+0x9 0x51 0x38 0xd5
+0x9 0x51 0x3c 0xd5
+0x9 0x51 0x3e 0xd5
+0x29 0x51 0x38 0xd5
+0x29 0x51 0x3c 0xd5
+0x29 0x51 0x3e 0xd5
+0x9 0x52 0x38 0xd5
+0x9 0x52 0x3c 0xd5
+0x9 0x52 0x3e 0xd5
+0x9 0x53 0x3c 0xd5
+0x9 0x60 0x38 0xd5
+0x9 0x60 0x3c 0xd5
+0x9 0x60 0x3e 0xd5
+0x89 0x60 0x3c 0xd5
+0x9 0x74 0x38 0xd5
+0x9 0x9c 0x3b 0xd5
+0x29 0x9c 0x3b 0xd5
+0x49 0x9c 0x3b 0xd5
+0x69 0x9c 0x3b 0xd5
+0xa9 0x9c 0x3b 0xd5
+0xc9 0x9c 0x3b 0xd5
+0xe9 0x9c 0x3b 0xd5
+0x9 0x9d 0x3b 0xd5
+0x29 0x9d 0x3b 0xd5
+0x49 0x9d 0x3b 0xd5
+0x9 0x9e 0x3b 0xd5
+0x29 0x9e 0x38 0xd5
+0x49 0x9e 0x38 0xd5
+0x69 0x9e 0x3b 0xd5
+0x9 0xa2 0x38 0xd5
+0x9 0xa2 0x3c 0xd5
+0x9 0xa2 0x3e 0xd5
+0x9 0xa3 0x38 0xd5
+0x9 0xa3 0x3c 0xd5
+0x9 0xa3 0x3e 0xd5
+0x9 0xc0 0x38 0xd5
+0x9 0xc0 0x3c 0xd5
+0x9 0xc0 0x3e 0xd5
+0x29 0xc0 0x38 0xd5
+0x29 0xc0 0x3c 0xd5
+0x29 0xc0 0x3e 0xd5
+0x49 0xc0 0x38 0xd5
+0x49 0xc0 0x3c 0xd5
+0x49 0xc0 0x3e 0xd5
+0x9 0xc1 0x38 0xd5
+0x29 0xd0 0x38 0xd5
+0x49 0xd0 0x3b 0xd5
+0x49 0xd0 0x3c 0xd5
+0x49 0xd0 0x3e 0xd5
+0x69 0xd0 0x3b 0xd5
+0x89 0xd0 0x38 0xd5
+0x9 0xe0 0x3b 0xd5
+0x29 0xe0 0x3b 0xd5
+0x49 0xe0 0x3b 0xd5
+0x69 0xe0 0x3c 0xd5
+0x9 0xe1 0x38 0xd5
+0x9 0xe1 0x3c 0xd5
+0x9 0xe2 0x3b 0xd5
+0x9 0xe2 0x3c 0xd5
+0x9 0xe2 0x3f 0xd5
+0x29 0xe2 0x3b 0xd5
+0x29 0xe2 0x3c 0xd5
+0x29 0xe2 0x3f 0xd5
+0x49 0xe2 0x3b 0xd5
+0x49 0xe2 0x3c 0xd5
+0x49 0xe2 0x3f 0xd5
+0x9 0xe3 0x3b 0xd5
+0x29 0xe3 0x3b 0xd5
+0x49 0xe3 0x3b 0xd5
+0x9 0xe8 0x3b 0xd5
+0x29 0xe8 0x3b 0xd5
+0x49 0xe8 0x3b 0xd5
+0x69 0xe8 0x3b 0xd5
+0x89 0xe8 0x3b 0xd5
+0xa9 0xe8 0x3b 0xd5
+0xc9 0xe8 0x3b 0xd5
+0xe9 0xe8 0x3b 0xd5
+0x9 0xe9 0x3b 0xd5
+0x29 0xe9 0x3b 0xd5
+0x49 0xe9 0x3b 0xd5
+0x69 0xe9 0x3b 0xd5
+0x89 0xe9 0x3b 0xd5
+0xa9 0xe9 0x3b 0xd5
+0xc9 0xe9 0x3b 0xd5
+0xe9 0xe9 0x3b 0xd5
+0x9 0xea 0x3b 0xd5
+0x29 0xea 0x3b 0xd5
+0x49 0xea 0x3b 0xd5
+0x69 0xea 0x3b 0xd5
+0x89 0xea 0x3b 0xd5
+0xa9 0xea 0x3b 0xd5
+0xc9 0xea 0x3b 0xd5
+0xe9 0xea 0x3b 0xd5
+0x9 0xeb 0x3b 0xd5
+0x29 0xeb 0x3b 0xd5
+0x49 0xeb 0x3b 0xd5
+0x69 0xeb 0x3b 0xd5
+0x89 0xeb 0x3b 0xd5
+0xa9 0xeb 0x3b 0xd5
+0xc9 0xeb 0x3b 0xd5
+0xe9 0xef 0x3b 0xd5
+0x9 0xec 0x3b 0xd5
+0x29 0xec 0x3b 0xd5
+0x49 0xec 0x3b 0xd5
+0x69 0xec 0x3b 0xd5
+0x89 0xec 0x3b 0xd5
+0xa9 0xec 0x3b 0xd5
+0xc9 0xec 0x3b 0xd5
+0xe9 0xec 0x3b 0xd5
+0x9 0xed 0x3b 0xd5
+0x29 0xed 0x3b 0xd5
+0x49 0xed 0x3b 0xd5
+0x69 0xed 0x3b 0xd5
+0x89 0xed 0x3b 0xd5
+0xa9 0xed 0x3b 0xd5
+0xc9 0xed 0x3b 0xd5
+0xe9 0xed 0x3b 0xd5
+0x9 0xee 0x3b 0xd5
+0x29 0xee 0x3b 0xd5
+0x49 0xee 0x3b 0xd5
+0x69 0xee 0x3b 0xd5
+0x89 0xee 0x3b 0xd5
+0xa9 0xee 0x3b 0xd5
+0xc9 0xee 0x3b 0xd5
+0xe9 0xee 0x3b 0xd5
+0x9 0xef 0x3b 0xd5
+0x29 0xef 0x3b 0xd5
+0x49 0xef 0x3b 0xd5
+0x69 0xef 0x3b 0xd5
+0x89 0xef 0x3b 0xd5
+0xa9 0xef 0x3b 0xd5
+0xc9 0xef 0x3b 0xd5
+
+# CHECK: mrs x12, s3_7_c15_c1_5
+# CHECK: mrs x13, s3_2_c11_c15_7
+# CHECK: msr s3_0_c15_c0_0, x12
+# CHECK: msr s3_7_c11_c13_7, x5
+0xac 0xf1 0x3f 0xd5
+0xed 0xbf 0x3a 0xd5
+0x0c 0xf0 0x18 0xd5
+0xe5 0xbd 0x1f 0xd5
+
+#------------------------------------------------------------------------------
+# Test and branch (immediate)
+#------------------------------------------------------------------------------
+
+# CHECK: tbz x12, #62, #0
+# CHECK: tbz x12, #62, #4
+# CHECK: tbz x12, #62, #-32768
+# CHECK: tbnz x12, #60, #32764
+0x0c 0x00 0xf0 0xb6
+0x2c 0x00 0xf0 0xb6
+0x0c 0x00 0xf4 0xb6
+0xec 0xff 0xe3 0xb7
+
+#------------------------------------------------------------------------------
+# Unconditional branch (immediate)
+#------------------------------------------------------------------------------
+
+# CHECK: b #4
+# CHECK: b #-4
+# CHECK: b #134217724
+0x01 0x00 0x00 0x14
+0xff 0xff 0xff 0x17
+0xff 0xff 0xff 0x15
+
+#------------------------------------------------------------------------------
+# Unconditional branch (register)
+#------------------------------------------------------------------------------
+
+# CHECK: br x20
+# CHECK: blr xzr
+# CHECK: ret x10
+0x80 0x2 0x1f 0xd6
+0xe0 0x3 0x3f 0xd6
+0x40 0x1 0x5f 0xd6
+
+# CHECK: ret
+# CHECK: eret
+# CHECK: drps
+0xc0 0x3 0x5f 0xd6
+0xe0 0x3 0x9f 0xd6
+0xe0 0x3 0xbf 0xd6
+
diff --git a/test/MC/Disassembler/AArch64/basic-a64-undefined.txt b/test/MC/Disassembler/AArch64/basic-a64-undefined.txt
new file mode 100644
index 0000000000..a17579cb16
--- /dev/null
+++ b/test/MC/Disassembler/AArch64/basic-a64-undefined.txt
@@ -0,0 +1,43 @@
+# These spawn another process so they're rather expensive. Not many.
+
+# Instructions notionally in the add/sub (extended register) sheet, but with
+# an invalid shift amount or "opt" field.
+# RUN: echo "0x00 0x10 0xa0 0x0b" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
+# RUN: echo "0x00 0x10 0x60 0x0b" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
+# RUN: echo "0x00 0x14 0x20 0x0b" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
+
+# Instructions notionally in the add/sub (immediate) sheet, but with an
+# invalid "shift" field.
+# RUN: echo "0xdf 0x3 0x80 0x91" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
+# RUN: echo "0xed 0x8e 0xc4 0x31" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
+# RUN: echo "0x62 0xfc 0xbf 0x11" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
+# RUN: echo "0x3 0xff 0xff 0x91" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
+
+# Instructions notionally in the load/store (unsigned immediate) sheet.
+# The only unallocated (integer-register) variants are opc=0b11 with size=0b10 or 0b11.
+# RUN: echo "0xd7 0xfc 0xff 0xb9" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
+# RUN: echo "0xd7 0xfc 0xcf 0xf9" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
+
+# Instructions notionally in the floating-point <-> fixed-point conversion sheet.
+# The scale field is 64-<imm>, and <imm> should be 1-32 for a 32-bit integer register.
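+# (Given that formula, a valid encoded scale field lies in the range 32-63;
+# smaller values would imply <imm> greater than 32.)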
+# RUN: echo "0x23 0x01 0x18 0x1e" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
+# RUN: echo "0x23 0x25 0x42 0x1e" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
+
+# Instructions notionally in the logical (shifted register) sheet, but with an
+# out-of-range shift: w-registers only allow shifts of 0-31.
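+# (The encoding below appears to set imm6 to 32 with sf clear.)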
+# RUN: echo "0x00 0x80 0x00 0x0a" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
+
+# Instructions notionally in the move wide (immediate) sheet, but with an
+# out-of-range shift: w-registers only allow shifts of 0 or 16.
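+# (The two encodings below appear to set hw to 2 and 3, i.e. shifts of 32 and 48.)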
+# RUN: echo "0x00 0x00 0xc0 0x12" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
+# RUN: echo "0x12 0x34 0xe0 0x52" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
+
+# Data-processing instructions are undefined when S=1 and for the 0b0000111 value in opcode:sf
+# RUN: echo "0x00 0x00 0xc0 0x5f" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
+# RUN: echo "0x56 0x0c 0xc0 0x5a" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
+
+# Data-processing instructions (2 source) are undefined for a value of 0001xx:0:x or 0011xx:0:x for opcode:S:sf
+# RUN: echo "0x00 0x30 0xc1 0x1a" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
+# RUN: echo "0x00 0x10 0xc1 0x1a" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
+
+# CHECK: invalid instruction encoding
diff --git a/test/MC/Disassembler/AArch64/basic-a64-unpredictable.txt b/test/MC/Disassembler/AArch64/basic-a64-unpredictable.txt
new file mode 100644
index 0000000000..adb8f75ed9
--- /dev/null
+++ b/test/MC/Disassembler/AArch64/basic-a64-unpredictable.txt
@@ -0,0 +1,96 @@
+# RUN: llvm-mc -triple=aarch64 -disassemble < %s 2>&1 | FileCheck %s
+
+#------------------------------------------------------------------------------
+# Load-store exclusive
+#------------------------------------------------------------------------------
+
+#ldxp x14, x14, [sp]
+0xee 0x3b 0x7f 0xc8
+#CHECK: warning: potentially undefined instruction encoding
+#CHECK-NEXT: 0xee 0x3b 0x7f 0xc8
+
+#ldaxp w19, w19, [x1]
+0x33 0xcc 0x7f 0x88
+#CHECK: warning: potentially undefined instruction encoding
+#CHECK-NEXT: 0x33 0xcc 0x7f 0x88
+
+#------------------------------------------------------------------------------
+# Load-store register (immediate post-indexed)
+#------------------------------------------------------------------------------
+
+0x63 0x44 0x40 0xf8
+#CHECK: warning: potentially undefined instruction encoding
+#CHECK-NEXT: 0x63 0x44 0x40 0xf8
+
+0x42 0x14 0xc0 0x38
+#CHECK: warning: potentially undefined instruction encoding
+#CHECK-NEXT: 0x42 0x14 0xc0 0x38
+
+#------------------------------------------------------------------------------
+# Load-store register (immediate pre-indexed)
+#------------------------------------------------------------------------------
+
+0x63 0x4c 0x40 0xf8
+#CHECK: warning: potentially undefined instruction encoding
+#CHECK-NEXT: 0x63 0x4c 0x40 0xf8
+
+0x42 0x1c 0xc0 0x38
+#CHECK: warning: potentially undefined instruction encoding
+#CHECK-NEXT: 0x42 0x1c 0xc0 0x38
+
+#------------------------------------------------------------------------------
+# Load-store register pair (offset)
+#------------------------------------------------------------------------------
+
+# Unpredictable if Rt == Rt2 on a load.
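+# (Each encoding below has Rt == Rt2; the first, for example, appears to be
+# ldp x3, x3, [sp].)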
+
+0xe3 0x0f 0x40 0xa9
+# CHECK: warning: potentially undefined instruction encoding
+# CHECK-NEXT: 0xe3 0x0f 0x40 0xa9
+# CHECK-NEXT: ^
+
+0xe2 0x8b 0x41 0x69
+# CHECK: warning: potentially undefined instruction encoding
+# CHECK-NEXT: 0xe2 0x8b 0x41 0x69
+# CHECK-NEXT: ^
+
+0x82 0x88 0x40 0x2d
+# CHECK: warning: potentially undefined instruction encoding
+# CHECK-NEXT: 0x82 0x88 0x40 0x2d
+# CHECK-NEXT: ^
+
+#------------------------------------------------------------------------------
+# Load-store register pair (post-indexed)
+#------------------------------------------------------------------------------
+
+# Unpredictable if Rt == Rt2 on a load.
+
+0xe3 0x0f 0xc0 0xa8
+# CHECK: warning: potentially undefined instruction encoding
+# CHECK-NEXT: 0xe3 0x0f 0xc0 0xa8
+# CHECK-NEXT: ^
+
+0xe2 0x8b 0xc1 0x68
+# CHECK: warning: potentially undefined instruction encoding
+# CHECK-NEXT: 0xe2 0x8b 0xc1 0x68
+# CHECK-NEXT: ^
+
+0x82 0x88 0xc0 0x2c
+# CHECK: warning: potentially undefined instruction encoding
+# CHECK-NEXT: 0x82 0x88 0xc0 0x2c
+# CHECK-NEXT: ^
+
+# Also unpredictable if the writeback base register clashes with either transfer register.
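+# (For example, the first word below appears to be ldp x3, x5, [x3], #8, where
+# the base register is also a destination.)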
+
+0x63 0x94 0xc0 0xa8
+# CHECK: warning: potentially undefined instruction encoding
+# CHECK-NEXT: 0x63 0x94 0xc0 0xa8
+
+0x69 0x2d 0x81 0xa8
+# CHECK: warning: potentially undefined instruction encoding
+# CHECK-NEXT: 0x69 0x2d 0x81 0xa8
+
+0x29 0xad 0xc0 0x28
+# CHECK: warning: potentially undefined instruction encoding
+# CHECK-NEXT: 0x29 0xad 0xc0 0x28
+
diff --git a/test/MC/Disassembler/AArch64/ldp-offset-predictable.txt b/test/MC/Disassembler/AArch64/ldp-offset-predictable.txt
new file mode 100644
index 0000000000..7ff495f499
--- /dev/null
+++ b/test/MC/Disassembler/AArch64/ldp-offset-predictable.txt
@@ -0,0 +1,7 @@
+# RUN: llvm-mc -triple=aarch64 -disassemble < %s 2>&1 | FileCheck %s
+
+# Stores are OK.
+0xe0 0x83 0x00 0xa9
+# CHECK-NOT: potentially undefined instruction encoding
+# CHECK: stp x0, x0, [sp, #8]
+
diff --git a/test/MC/Disassembler/AArch64/ldp-postind.predictable.txt b/test/MC/Disassembler/AArch64/ldp-postind.predictable.txt
new file mode 100644
index 0000000000..775660bba8
--- /dev/null
+++ b/test/MC/Disassembler/AArch64/ldp-postind.predictable.txt
@@ -0,0 +1,17 @@
+# RUN: llvm-mc -triple=aarch64 -disassemble < %s 2>&1 | FileCheck %s
+
+# None of these instructions should be classified as unpredictable:
+
+# CHECK-NOT: potentially undefined instruction encoding
+
+# Stores from duplicated registers should be fine.
+0xe3 0x0f 0x80 0xa8
+# CHECK: stp x3, x3, [sp], #0
+
+# d5 != x5 so "ldp d5, d6, [x5], #24" is fine.
+0xa5 0x98 0xc1 0x6c
+# CHECK: ldp d5, d6, [x5], #24
+
+# xzr != sp so "stp xzr, xzr, [sp], #8" is fine.
+0xff 0xff 0x80 0xa8
+# CHECK: stp xzr, xzr, [sp], #8
diff --git a/test/MC/Disassembler/AArch64/ldp-preind.predictable.txt b/test/MC/Disassembler/AArch64/ldp-preind.predictable.txt
new file mode 100644
index 0000000000..48ea8170ba
--- /dev/null
+++ b/test/MC/Disassembler/AArch64/ldp-preind.predictable.txt
@@ -0,0 +1,17 @@
+# RUN: llvm-mc -triple=aarch64 -disassemble < %s 2>&1 | FileCheck %s
+
+# None of these instructions should be classified as unpredictable:
+
+# CHECK-NOT: potentially undefined instruction encoding
+
+# Stores from duplicated registers should be fine.
+0xe3 0x0f 0x80 0xa9
+# CHECK: stp x3, x3, [sp, #0]!
+
+# d5 != x5 so "ldp d5, d6, [x5, #24]!" is fine.
+0xa5 0x98 0xc1 0x6d
+# CHECK: ldp d5, d6, [x5, #24]!
+
+# xzr != sp so "stp xzr, xzr, [sp, #8]!" is fine.
+0xff 0xff 0x80 0xa9
+# CHECK: stp xzr, xzr, [sp, #8]!
diff --git a/test/MC/Disassembler/AArch64/lit.local.cfg b/test/MC/Disassembler/AArch64/lit.local.cfg
new file mode 100644
index 0000000000..f9df30e4d3
--- /dev/null
+++ b/test/MC/Disassembler/AArch64/lit.local.cfg
@@ -0,0 +1,6 @@
+config.suffixes = ['.txt']
+
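+# These tests are only supported when the AArch64 target has been built.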
+targets = set(config.root.targets_to_build.split())
+if 'AArch64' not in targets:
+ config.unsupported = True
+