Diffstat (limited to 'test')
107 files changed, 22769 insertions, 0 deletions
diff --git a/test/CodeGen/AArch64/adc.ll b/test/CodeGen/AArch64/adc.ll
new file mode 100644
index 0000000000..45bf07928f
--- /dev/null
+++ b/test/CodeGen/AArch64/adc.ll
@@ -0,0 +1,54 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
+define i128 @test_simple(i128 %a, i128 %b, i128 %c) {
+; CHECK: test_simple:
+
+  %valadd = add i128 %a, %b
+; CHECK: adds [[ADDLO:x[0-9]+]], x0, x2
+; CHECK-NEXT: adcs [[ADDHI:x[0-9]+]], x1, x3
+
+  %valsub = sub i128 %valadd, %c
+; CHECK: subs x0, [[ADDLO]], x4
+; CHECK: sbcs x1, [[ADDHI]], x5
+
+  ret i128 %valsub
+; CHECK: ret
+}
+
+define i128 @test_imm(i128 %a) {
+; CHECK: test_imm:
+
+  %val = add i128 %a, 12
+; CHECK: adds x0, x0, #12
+; CHECK: adcs x1, x1, {{x[0-9]|xzr}}
+
+  ret i128 %val
+; CHECK: ret
+}
+
+define i128 @test_shifted(i128 %a, i128 %b) {
+; CHECK: test_shifted:
+
+  %rhs = shl i128 %b, 45
+
+  %val = add i128 %a, %rhs
+; CHECK: adds x0, x0, x2, lsl #45
+; CHECK: adcs x1, x1, {{x[0-9]}}
+
+  ret i128 %val
+; CHECK: ret
+}
+
+define i128 @test_extended(i128 %a, i16 %b) {
+; CHECK: test_extended:
+
+  %ext = sext i16 %b to i128
+  %rhs = shl i128 %ext, 3
+
+  %val = add i128 %a, %rhs
+; CHECK: adds x0, x0, w2, sxth #3
+; CHECK: adcs x1, x1, {{x[0-9]}}
+
+  ret i128 %val
+; CHECK: ret
+}
diff --git a/test/CodeGen/AArch64/addsub-shifted.ll b/test/CodeGen/AArch64/addsub-shifted.ll
new file mode 100644
index 0000000000..ed8ef0d59a
--- /dev/null
+++ b/test/CodeGen/AArch64/addsub-shifted.ll
@@ -0,0 +1,295 @@
+; RUN: llc -verify-machineinstrs < %s -march=aarch64 | FileCheck %s
+
+@var32 = global i32 0
+@var64 = global i64 0
+
+define void @test_lsl_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
+; CHECK: test_lsl_arith:
+
+  %rhs1 = load volatile i32* @var32
+  %shift1 = shl i32 %rhs1, 18
+  %val1 = add i32 %lhs32, %shift1
+  store volatile i32 %val1, i32* @var32
+; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #18
+
+  %rhs2 = load volatile i32* @var32
+  %shift2 = shl i32 %rhs2, 31
+  %val2 = add i32 %shift2, %lhs32
+  store volatile i32 %val2, i32* @var32
+; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
+
+  %rhs3 = load volatile i32* @var32
+  %shift3 = shl i32 %rhs3, 5
+  %val3 = sub i32 %lhs32, %shift3
+  store volatile i32 %val3, i32* @var32
+; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #5
+
+; Subtraction is not commutative!
+  %rhs4 = load volatile i32* @var32
+  %shift4 = shl i32 %rhs4, 19
+  %val4 = sub i32 %shift4, %lhs32
+  store volatile i32 %val4, i32* @var32
+; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #19
+
+  %lhs4a = load volatile i32* @var32
+  %shift4a = shl i32 %lhs4a, 15
+  %val4a = sub i32 0, %shift4a
+  store volatile i32 %val4a, i32* @var32
+; CHECK: sub {{w[0-9]+}}, wzr, {{w[0-9]+}}, lsl #15
+
+  %rhs5 = load volatile i64* @var64
+  %shift5 = shl i64 %rhs5, 18
+  %val5 = add i64 %lhs64, %shift5
+  store volatile i64 %val5, i64* @var64
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #18
+
+  %rhs6 = load volatile i64* @var64
+  %shift6 = shl i64 %rhs6, 31
+  %val6 = add i64 %shift6, %lhs64
+  store volatile i64 %val6, i64* @var64
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #31
+
+  %rhs7 = load volatile i64* @var64
+  %shift7 = shl i64 %rhs7, 5
+  %val7 = sub i64 %lhs64, %shift7
+  store volatile i64 %val7, i64* @var64
+; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #5
+
+; Subtraction is not commutative!
+  %rhs8 = load volatile i64* @var64
+  %shift8 = shl i64 %rhs8, 19
+  %val8 = sub i64 %shift8, %lhs64
+  store volatile i64 %val8, i64* @var64
+; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #19
+
+  %lhs8a = load volatile i64* @var64
+  %shift8a = shl i64 %lhs8a, 60
+  %val8a = sub i64 0, %shift8a
+  store volatile i64 %val8a, i64* @var64
+; CHECK: sub {{x[0-9]+}}, xzr, {{x[0-9]+}}, lsl #60
+
+  ret void
+; CHECK: ret
+}
+
+define void @test_lsr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
+; CHECK: test_lsr_arith:
+
+  %shift1 = lshr i32 %rhs32, 18
+  %val1 = add i32 %lhs32, %shift1
+  store volatile i32 %val1, i32* @var32
+; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #18
+
+  %shift2 = lshr i32 %rhs32, 31
+  %val2 = add i32 %shift2, %lhs32
+  store volatile i32 %val2, i32* @var32
+; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #31
+
+  %shift3 = lshr i32 %rhs32, 5
+  %val3 = sub i32 %lhs32, %shift3
+  store volatile i32 %val3, i32* @var32
+; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #5
+
+; Subtraction is not commutative!
+  %shift4 = lshr i32 %rhs32, 19
+  %val4 = sub i32 %shift4, %lhs32
+  store volatile i32 %val4, i32* @var32
+; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #19
+
+  %shift4a = lshr i32 %lhs32, 15
+  %val4a = sub i32 0, %shift4a
+  store volatile i32 %val4a, i32* @var32
+; CHECK: sub {{w[0-9]+}}, wzr, {{w[0-9]+}}, lsr #15
+
+  %shift5 = lshr i64 %rhs64, 18
+  %val5 = add i64 %lhs64, %shift5
+  store volatile i64 %val5, i64* @var64
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #18
+
+  %shift6 = lshr i64 %rhs64, 31
+  %val6 = add i64 %shift6, %lhs64
+  store volatile i64 %val6, i64* @var64
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #31
+
+  %shift7 = lshr i64 %rhs64, 5
+  %val7 = sub i64 %lhs64, %shift7
+  store volatile i64 %val7, i64* @var64
+; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #5
+
+; Subtraction is not commutative!
+  %shift8 = lshr i64 %rhs64, 19
+  %val8 = sub i64 %shift8, %lhs64
+  store volatile i64 %val8, i64* @var64
+; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #19
+
+  %shift8a = lshr i64 %lhs64, 45
+  %val8a = sub i64 0, %shift8a
+  store volatile i64 %val8a, i64* @var64
+; CHECK: sub {{x[0-9]+}}, xzr, {{x[0-9]+}}, lsr #45
+
+  ret void
+; CHECK: ret
+}
+
+define void @test_asr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
+; CHECK: test_asr_arith:
+
+  %shift1 = ashr i32 %rhs32, 18
+  %val1 = add i32 %lhs32, %shift1
+  store volatile i32 %val1, i32* @var32
+; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #18
+
+  %shift2 = ashr i32 %rhs32, 31
+  %val2 = add i32 %shift2, %lhs32
+  store volatile i32 %val2, i32* @var32
+; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #31
+
+  %shift3 = ashr i32 %rhs32, 5
+  %val3 = sub i32 %lhs32, %shift3
+  store volatile i32 %val3, i32* @var32
+; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #5
+
+; Subtraction is not commutative!
+  %shift4 = ashr i32 %rhs32, 19
+  %val4 = sub i32 %shift4, %lhs32
+  store volatile i32 %val4, i32* @var32
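
As the RUN lines above indicate, each of these tests pipes its own file through llc and validates the generated assembly with FileCheck. Outside the lit harness, a minimal standalone invocation would look like the following sketch (assuming llc and FileCheck from the same LLVM build are on PATH and the command is run from the source tree root):

  $ llc -verify-machineinstrs -march=aarch64 < test/CodeGen/AArch64/adc.ll \
      | FileCheck test/CodeGen/AArch64/adc.ll

FileCheck takes the test file itself as its check file: it collects the "; CHECK:" comments and matches them, in order, against llc's stdout. CHECK-NEXT requires a match on the immediately following output line, CHECK-NOT forbids a match between the surrounding positive checks, and a pattern such as [[ADDLO:x[0-9]+]] captures whichever register llc actually chose so that later lines like "subs x0, [[ADDLO]], x4" can refer back to it.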