| author | Derek Schuff <dschuff@chromium.org> | 2013-01-30 11:34:40 -0800 |
|---|---|---|
| committer | Derek Schuff <dschuff@chromium.org> | 2013-01-30 11:34:40 -0800 |
| commit | 1843e19bce9b11fc840858e136c6c52cf8b42e0b | |
| tree | e8bfc928152e2d3b3dd120d141d13dc08a9b49e4 /test/CodeGen | |
| parent | aa0fa8a8df25807f784ec9ca9deeb40328636595 | |
| parent | a662a9862501fc86904e90054f7c1519101d9126 | |
Merge commit 'a662a9862501fc86904e90054f7c1519101d9126'
Conflicts:
include/llvm/CodeGen/IntrinsicLowering.h
include/llvm/MC/MCAssembler.h
include/llvm/MC/MCObjectStreamer.h
lib/LLVMBuild.txt
lib/Linker/LinkArchives.cpp
lib/MC/MCAssembler.cpp
lib/MC/MCELFStreamer.cpp
lib/MC/MCParser/AsmParser.cpp
lib/MC/MCPureStreamer.cpp
lib/MC/WinCOFFStreamer.cpp
lib/Makefile
lib/Support/Unix/Memory.inc
lib/Support/Unix/Process.inc
lib/Support/Unix/Program.inc
lib/Target/ARM/ARM.h
lib/Target/ARM/ARMFastISel.cpp
lib/Target/ARM/ARMISelLowering.cpp
lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
lib/Target/Mips/MipsInstrFPU.td
lib/Target/X86/CMakeLists.txt
lib/Target/X86/X86ISelLowering.cpp
lib/Target/X86/X86TargetMachine.cpp
lib/Target/X86/X86TargetObjectFile.cpp
lib/Transforms/InstCombine/InstCombineCalls.cpp
test/CodeGen/X86/fast-isel-x86-64.ll
tools/llc/llc.cpp
tools/lto/LTOModule.cpp
utils/TableGen/EDEmitter.cpp
Diffstat (limited to 'test/CodeGen')
123 files changed, 6219 insertions, 294 deletions
diff --git a/test/CodeGen/ARM/2010-11-30-reloc-movt.ll b/test/CodeGen/ARM/2010-11-30-reloc-movt.ll
index 8b164c5d91..94a05412f5 100644
--- a/test/CodeGen/ARM/2010-11-30-reloc-movt.ll
+++ b/test/CodeGen/ARM/2010-11-30-reloc-movt.ll
@@ -23,7 +23,7 @@ entry:
 ; OBJ: Relocation 0
 ; OBJ-NEXT: 'r_offset', 0x00000004
-; OBJ-NEXT: 'r_sym', 0x000007
+; OBJ-NEXT: 'r_sym', 0x000009
 ; OBJ-NEXT: 'r_type', 0x2b
 
 ; OBJ: Relocation 1
@@ -33,7 +33,7 @@ entry:
 ; OBJ: # Relocation 2
 ; OBJ-NEXT: 'r_offset', 0x0000000c
-; OBJ-NEXT: 'r_sym', 0x000008
+; OBJ-NEXT: 'r_sym', 0x00000a
 ; OBJ-NEXT: 'r_type', 0x1c
 
 }
diff --git a/test/CodeGen/ARM/alloc-no-stack-realign-error.ll b/test/CodeGen/ARM/alloc-no-stack-realign-error.ll
new file mode 100644
index 0000000000..96c00174db
--- /dev/null
+++ b/test/CodeGen/ARM/alloc-no-stack-realign-error.ll
@@ -0,0 +1,19 @@
+; RUN: llc < %s -mtriple=armv7-apple-ios -O0 -realign-stack=0 2>&1 | FileCheck %s
+
+; rdar://12713765
+@T3_retval = common global <16 x float> zeroinitializer, align 16
+
+; If alignment for alloc is smaller than or equal to stack alignment, but the
+; preferred type alignment is bigger, the alignment will be clamped.
+; If alignment for alloca is bigger than stack alignment, the compiler
+; will emit an error.
+define void @test(<16 x float>* noalias sret %agg.result) nounwind ssp {
+entry:
+; CHECK: Requested Minimal Alignment exceeds the Stack Alignment!
+  %retval = alloca <16 x float>, align 16
+  %0 = load <16 x float>* @T3_retval, align 16
+  store <16 x float> %0, <16 x float>* %retval
+  %1 = load <16 x float>* %retval
+  store <16 x float> %1, <16 x float>* %agg.result, align 16
+  ret void
+}
diff --git a/test/CodeGen/ARM/alloc-no-stack-realign.ll b/test/CodeGen/ARM/alloc-no-stack-realign.ll
index 273041dee3..94adc9c67d 100644
--- a/test/CodeGen/ARM/alloc-no-stack-realign.ll
+++ b/test/CodeGen/ARM/alloc-no-stack-realign.ll
@@ -39,7 +39,7 @@ entry:
 ; NO-REALIGN: add [[R2:r[0-9]+]], [[R1:r[0-9]+]], #16
 ; NO-REALIGN: vst1.64
 ; NO-REALIGN: vst1.64
-  %retval = alloca <16 x float>, align 16
+  %retval = alloca <16 x float>, align 4
   %0 = load <16 x float>* @T3_retval, align 16
   store <16 x float> %0, <16 x float>* %retval
   %1 = load <16 x float>* %retval
diff --git a/test/CodeGen/ARM/avoid-cpsr-rmw.ll b/test/CodeGen/ARM/avoid-cpsr-rmw.ll
index 96e83dd88e..d98925ef8f 100644
--- a/test/CodeGen/ARM/avoid-cpsr-rmw.ll
+++ b/test/CodeGen/ARM/avoid-cpsr-rmw.ll
@@ -49,3 +49,37 @@ while.body:
 while.end:
   ret void
 }
+
+; Allow partial CPSR dependency when code size is the priority.
+; rdar://12878928
+define void @t3(i32* nocapture %ptr1, i32* %ptr2, i32 %c) nounwind minsize {
+entry:
+; CHECK: t3:
+  %tobool7 = icmp eq i32* %ptr2, null
+  br i1 %tobool7, label %while.end, label %while.body
+
+while.body:
+; CHECK: while.body
+; CHECK: mul r{{[0-9]+}}
+; CHECK: muls
+  %ptr1.addr.09 = phi i32* [ %add.ptr, %while.body ], [ %ptr1, %entry ]
+  %ptr2.addr.08 = phi i32* [ %incdec.ptr, %while.body ], [ %ptr2, %entry ]
+  %0 = load i32* %ptr1.addr.09, align 4
+  %arrayidx1 = getelementptr inbounds i32* %ptr1.addr.09, i32 1
+  %1 = load i32* %arrayidx1, align 4
+  %arrayidx3 = getelementptr inbounds i32* %ptr1.addr.09, i32 2
+  %2 = load i32* %arrayidx3, align 4
+  %arrayidx4 = getelementptr inbounds i32* %ptr1.addr.09, i32 3
+  %3 = load i32* %arrayidx4, align 4
+  %add.ptr = getelementptr inbounds i32* %ptr1.addr.09, i32 4
+  %mul = mul i32 %1, %0
+  %mul5 = mul i32 %mul, %2
+  %mul6 = mul i32 %mul5, %3
+  store i32 %mul6, i32* %ptr2.addr.08, align 4
+  %incdec.ptr = getelementptr inbounds i32* %ptr2.addr.08, i32 -1
+  %tobool = icmp eq i32* %incdec.ptr, null
+  br i1 %tobool, label %while.end, label %while.body
+
+while.end:
+  ret void
+}
diff --git a/test/CodeGen/ARM/bfx.ll b/test/CodeGen/ARM/bfx.ll
index 519c1353a3..394da9e157 100644
--- a/test/CodeGen/ARM/bfx.ll
+++ b/test/CodeGen/ARM/bfx.ll
@@ -26,3 +26,28 @@ define i32 @ubfx2(i32 %a) {
   ret i32 %t2
 }
 
+; rdar://12870177
+define i32 @ubfx_opt(i32* nocapture %ctx, i32 %x) nounwind readonly ssp {
+entry:
+; CHECK: ubfx_opt
+; CHECK: lsr [[REG1:(lr|r[0-9]+)]], r1, #24
+; CHECK: ldr {{lr|r[0-9]+}}, [r0, [[REG1]], lsl #2]
+; CHECK: ubfx [[REG2:(lr|r[0-9]+)]], r1, #16, #8
+; CHECK: ldr {{lr|r[0-9]+}}, [r0, [[REG2]], lsl #2]
+; CHECK: ubfx [[REG3:(lr|r[0-9]+)]], r1, #8, #8
+; CHECK: ldr {{lr|r[0-9]+}}, [r0, [[REG3]], lsl #2]
+  %and = lshr i32 %x, 8
+  %shr = and i32 %and, 255
+  %and1 = lshr i32 %x, 16
+  %shr2 = and i32 %and1, 255
+  %shr4 = lshr i32 %x, 24
+  %arrayidx = getelementptr inbounds i32* %ctx, i32 %shr4
+  %0 = load i32* %arrayidx, align 4
+  %arrayidx5 = getelementptr inbounds i32* %ctx, i32 %shr2
+  %1 = load i32* %arrayidx5, align 4
+  %add = add i32 %1, %0
+  %arrayidx6 = getelementptr inbounds i32* %ctx, i32 %shr
+  %2 = load i32* %arrayidx6, align 4
+  %add7 = add i32 %add, %2
+  ret i32 %add7
+}
diff --git a/test/CodeGen/ARM/global-merge-addrspace.ll b/test/CodeGen/ARM/global-merge-addrspace.ll
new file mode 100644
index 0000000000..0efa690bde
--- /dev/null
+++ b/test/CodeGen/ARM/global-merge-addrspace.ll
@@ -0,0 +1,12 @@
+; RUN: llc < %s -mtriple=thumb-apple-darwin | FileCheck %s
+; Test the GlobalMerge pass. Check that the pass does not crash when using
+; multiple address spaces.
+
+; CHECK: _MergedGlobals:
+@g1 = internal addrspace(1) global i32 1
+@g2 = internal addrspace(1) global i32 2
+
+
+; CHECK: _MergedGlobals1:
+@g3 = internal addrspace(2) global i32 3
+@g4 = internal addrspace(2) global i32 4
diff --git a/test/CodeGen/Generic/dag-combine-crash.ll b/test/CodeGen/Generic/dag-combine-crash.ll
new file mode 100644
index 0000000000..a7810b5c05
--- /dev/null
+++ b/test/CodeGen/Generic/dag-combine-crash.ll
@@ -0,0 +1,21 @@
+; RUN: llc < %s
+
+define void @main() {
+if.end:
+  br label %block.i.i
+
+block.i.i:
+  %tmpbb = load i8* undef
+  %tmp54 = zext i8 %tmpbb to i64
+  %tmp59 = and i64 %tmp54, 8
+  %tmp60 = add i64 %tmp59, 3691045929300498764
+  %tmp62 = sub i64 %tmp60, 3456506383779105993
+  %tmp63 = xor i64 1050774804270620004, %tmp62
+  %tmp65 = xor i64 %tmp62, 234539545521392771
+  %tmp67 = or i64 %tmp65, %tmp63
+  %tmp71 = xor i64 %tmp67, 6781485823212740913
+  %tmp72 = trunc i64 %tmp71 to i32
+  %tmp74 = lshr i32 2, %tmp72
+  store i32 %tmp74, i32* undef
+  br label %block.i.i
+}
diff --git a/test/CodeGen/Generic/inline-asm-mem-clobber.ll b/test/CodeGen/Generic/inline-asm-mem-clobber.ll
new file mode 100644
index 0000000000..e523d031dc
--- /dev/null
+++ b/test/CodeGen/Generic/inline-asm-mem-clobber.ll
@@ -0,0 +1,21 @@
+; RUN: llc -O2 < %s | FileCheck %s
+
+@G = common global i32 0, align 4
+
+define i32 @foo(i8* %p) nounwind uwtable {
+entry:
+  %p.addr = alloca i8*, align 8
+  %rv = alloca i32, align 4
+  store i8* %p, i8** %p.addr, align 8
+  store i32 0, i32* @G, align 4
+  %0 = load i8** %p.addr, align 8
+; CHECK: blah
+  %1 = call i32 asm "blah", "=r,r,~{memory}"(i8* %0) nounwind
+; CHECK: @G
+  store i32 %1, i32* %rv, align 4
+  %2 = load i32* %rv, align 4
+  %3 = load i32* @G, align 4
+  %add = add nsw i32 %2, %3
+  ret i32 %add
+}
+
diff --git a/test/CodeGen/Mips/2012-12-12-ExpandMemcpy.ll b/test/CodeGen/Mips/2012-12-12-ExpandMemcpy.ll
new file mode 100644
index 0000000000..9d4daee696
--- /dev/null
+++ b/test/CodeGen/Mips/2012-12-12-ExpandMemcpy.ll
@@ -0,0 +1,11 @@
+; RUN: llc -march=mips64el -mcpu=mips64r2 < %s
+
+@.str = private unnamed_addr constant [7 x i8] c"hello\0A\00", align 1
+
+define void @t(i8* %ptr) {
+entry:
+  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %ptr, i8* getelementptr inbounds ([7 x i8]* @.str, i64 0, i64 0), i64 7, i32 1, i1 false)
+  ret void
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
diff --git a/test/CodeGen/Mips/alloca.ll b/test/CodeGen/Mips/alloca.ll
index 29f43c8afa..220f33bd45 100644
--- a/test/CodeGen/Mips/alloca.ll
+++ b/test/CodeGen/Mips/alloca.ll
@@ -3,11 +3,11 @@ define i32 @twoalloca(i32 %size) nounwind {
 entry:
 ; CHECK: subu $[[T0:[0-9]+]], $sp, $[[SZ:[0-9]+]]
-; CHECK: addu $sp, $zero, $[[T0]]
+; CHECK: or $sp, $[[T0]], $zero
 ; CHECK: subu $[[T2:[0-9]+]], $sp, $[[SZ]]
-; CHECK: addu $sp, $zero, $[[T2]]
-; CHECK: addu $4, $zero, $[[T0]]
-; CHECK: addu $4, $zero, $[[T2]]
+; CHECK: or $sp, $[[T2]], $zero
+; CHECK: or $4, $[[T0]], $zero
+; CHECK: or $4, $[[T2]], $zero
   %tmp1 = alloca i8, i32 %size, align 4
   %add.ptr = getelementptr inbounds i8* %tmp1, i32 5
   store i8 97, i8* %add.ptr, align 1
@@ -29,7 +29,7 @@ define i32 @alloca2(i32 %size) nounwind {
 entry:
 ; CHECK: alloca2
 ; CHECK: subu $[[T0:[0-9]+]], $sp
-; CHECK: addu $sp, $zero, $[[T0]]
+; CHECK: or $sp, $[[T0]], $zero
 
   %tmp1 = alloca i8, i32 %size, align 4
   %0 = bitcast i8* %tmp1 to i32*
diff --git a/test/CodeGen/Mips/alloca16.ll b/test/CodeGen/Mips/alloca16.ll
index 731edae43c..5ae9a84791 100644
--- a/test/CodeGen/Mips/alloca16.ll
+++ b/test/CodeGen/Mips/alloca16.ll
@@ -68,8 +68,8 @@ entry:
   %21 = load i32** %ip, align 4
   %arrayidx6 = getelementptr inbounds i32* %21, i32 %20
   %22 = load i32* %arrayidx6, align 4
-; 16: save 16
+; 16: addiu $sp, -16
   call void @temp(i32 %22)
-; 16: restore 16
+; 16: addiu $sp, 16
   ret void
 }
diff --git a/test/CodeGen/Mips/ex2.ll b/test/CodeGen/Mips/ex2.ll
new file mode 100644
index 0000000000..67d19e4b84
--- /dev/null
+++ b/test/CodeGen/Mips/ex2.ll
@@ -0,0 +1,29 @@
+; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16
+
+@.str = private unnamed_addr constant [6 x i8] c"hello\00", align 1
+@_ZTIPKc = external constant i8*
+
+define i32 @main() {
+; 16: main:
+; 16: .cfi_startproc
+; 16: save $ra, $s0, $s1, 32
+; 16: .cfi_offset 17, -8
+; 16: .cfi_offset 16, -12
+; 16: .cfi_offset 31, -4
+entry:
+  %retval = alloca i32, align 4
+  store i32 0, i32* %retval
+  %exception = call i8* @__cxa_allocate_exception(i32 4) nounwind
+  %0 = bitcast i8* %exception to i8**
+  store i8* getelementptr inbounds ([6 x i8]* @.str, i32 0, i32 0), i8** %0
+  call void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIPKc to i8*), i8* null) noreturn
+  unreachable
+
+return:                                           ; No predecessors!
+  %1 = load i32* %retval
+  ret i32 %1
+}
+
+declare i8* @__cxa_allocate_exception(i32)
+
+declare void @__cxa_throw(i8*, i8*, i8*)
diff --git a/test/CodeGen/Mips/frame-address.ll b/test/CodeGen/Mips/frame-address.ll
index 9df1808fde..e64e6d8cfe 100644
--- a/test/CodeGen/Mips/frame-address.ll
+++ b/test/CodeGen/Mips/frame-address.ll
@@ -8,5 +8,5 @@ entry:
   ret i8* %0
 
 ; CHECK: addu $fp, $sp, $zero
-; CHECK: addu $2, $zero, $fp
+; CHECK: or $2, $fp, $zero
 }
diff --git a/test/CodeGen/Mips/gpreg-lazy-binding.ll b/test/CodeGen/Mips/gpreg-lazy-binding.ll
new file mode 100644
index 0000000000..bb3ad4264e
--- /dev/null
+++ b/test/CodeGen/Mips/gpreg-lazy-binding.ll
@@ -0,0 +1,27 @@
+; RUN: llc -march=mipsel -disable-mips-delay-filler < %s | FileCheck %s
+
+@g = external global i32
+
+; CHECK: or $gp
+; CHECK: jalr $25
+; CHECK: nop
+; CHECK-NOT: or $gp
+; CHECK: jalr $25
+
+define void @f0() nounwind {
+entry:
+  tail call void @externalFunc() nounwind
+  tail call fastcc void @internalFunc()
+  ret void
+}
+
+declare void @externalFunc()
+
+define internal fastcc void @internalFunc() nounwind noinline {
+entry:
+  %0 = load i32* @g, align 4
+  %inc = add nsw i32 %0, 1
+  store i32 %inc, i32* @g, align 4
+  ret void
+}
+
diff --git a/test/CodeGen/Mips/i64arg.ll b/test/CodeGen/Mips/i64arg.ll
index 8b1f71b69f..e16e126af4 100644
--- a/test/CodeGen/Mips/i64arg.ll
+++ b/test/CodeGen/Mips/i64arg.ll
@@ -2,8 +2,8 @@
 define void @f1(i64 %ll1, float %f, i64 %ll, i32 %i, float %f2) nounwind {
 entry:
-; CHECK: addu $[[R1:[0-9]+]], $zero, $5
-; CHECK: addu $[[R0:[0-9]+]], $zero, $4
+; CHECK: or $[[R1:[0-9]+]], $5, $zero
+; CHECK: or $[[R0:[0-9]+]], $4, $zero
 ; CHECK: ori $6, ${{[0-9]+}}, 3855
 ; CHECK: ori $7, ${{[0-9]+}}, 22136
 ; CHECK: lw $25, %call16(ff1)
@@ -12,16 +12,16 @@ entry:
 ; CHECK: lw $25, %call16(ff2)
 ; CHECK: lw $[[R2:[0-9]+]], 80($sp)
 ; CHECK: lw $[[R3:[0-9]+]], 84($sp)
-; CHECK: addu $4, $zero, $[[R2]]
-; CHECK: addu $5, $zero, $[[R3]]
+; CHECK: or $4, $[[R2]], $zero
+; CHECK: or $5, $[[R3]], $zero
 ; CHECK: jalr $25
   tail call void @ff2(i64 %ll, double 3.000000e+00) nounwind
   %sub = add nsw i32 %i, -1
 ; CHECK: sw $[[R1]], 28($sp)
 ; CHECK: sw $[[R0]], 24($sp)
 ; CHECK: lw $25, %call16(ff3)
-; CHECK: addu $6, $zero, $[[R2]]
-; CHECK: addu $7, $zero, $[[R3]]
+; CHECK: or $6, $[[R2]], $zero
+; CHECK: or $7, $[[R3]], $zero
 ; CHECK: jalr $25
   tail call void @ff3(i32 %i, i64 %ll, i32 %sub, i64 %ll1) nounwind
   ret void
diff --git a/test/CodeGen/Mips/mips16ex.ll b/test/CodeGen/Mips/mips16ex.ll
new file mode 100644
index 0000000000..ecb30b5c63
--- /dev/null
+++ b/test/CodeGen/Mips/mips16ex.ll
@@ -0,0 +1,87 @@
+; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16
+
+;16: $eh_func_begin0=.
+@.str = private unnamed_addr constant [7 x i8] c"hello\0A\00", align 1
+@_ZTIi = external constant i8*
+@.str1 = private unnamed_addr constant [15 x i8] c"exception %i \0A\00", align 1
+
+define i32 @main() {
+entry:
+  %retval = alloca i32, align 4
+  %exn.slot = alloca i8*
+  %ehselector.slot = alloca i32
+  %e = alloca i32, align 4
+  store i32 0, i32* %retval
+  %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([7 x i8]* @.str, i32 0, i32 0))
+  %exception = call i8* @__cxa_allocate_exception(i32 4) nounwind
+  %0 = bitcast i8* %exception to i32*
+  store i32 20, i32* %0
+  invoke void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null) noreturn
+          to label %unreachable unwind label %lpad
+
+lpad:                                             ; preds = %entry
+  %1 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+          catch i8* bitcast (i8** @_ZTIi to i8*)
+  %2 = extractvalue { i8*, i32 } %1, 0
+  store i8* %2, i8** %exn.slot
+  %3 = extractvalue { i8*, i32 } %1, 1
+  store i32 %3, i32* %ehselector.slot
+  br label %catch.dispatch
+
+catch.dispatch:                                   ; preds = %lpad
+  %sel = load i32* %ehselector.slot
+  %4 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*)) nounwind
+  %matches = icmp eq i32 %sel, %4
+  br i1 %matches, label %catch, label %eh.resume
+
+catch:                                            ; preds = %catch.dispatch
+  %exn = load i8** %exn.slot
+  %5 = call i8* @__cxa_begin_catch(i8* %exn) nounwind
+  %6 = bitcast i8* %5 to i32*
+  %exn.scalar = load i32* %6
+  store i32 %exn.scalar, i32* %e, align 4
+  %7 = load i32* %e, align 4
+  %call2 = invoke i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([15 x i8]* @.str1, i32 0, i32 0), i32 %7)
+          to label %invoke.cont unwind label %lpad1
+
+invoke.cont:                                      ; preds = %catch
+  call void @__cxa_end_catch() nounwind
+  br label %try.cont
+
+try.cont:                                         ; preds = %invoke.cont
+  ret i32 0
+
+lpad1:                                            ; preds = %catch
+  %8 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+          cleanup
+  %9 = extractvalue { i8*, i32 } %8, 0
+  store i8* %9, i8** %exn.slot
+  %10 = extractvalue { i8*, i32 } %8, 1
+  store i32 %10, i32* %ehselector.slot
+  call void @__cxa_end_catch() nounwind
+  br label %eh.resume
+
+eh.resume:                                        ; preds = %lpad1, %catch.dispatch
+  %exn3 = load i8** %exn.slot
+  %sel4 = load i32* %ehselector.slot
+  %lpad.val = insertvalue { i8*, i32 } undef, i8* %exn3, 0
+  %lpad.val5 = insertvalue { i8*, i32 } %lpad.val, i32 %sel4, 1
+  resume { i8*, i32 } %lpad.val5
+
+unreachable:                                      ; preds = %entry
+  unreachable
+}
+
+declare i32 @printf(i8*, ...)
+
+declare i8* @__cxa_allocate_exception(i32)
+
+declare i32 @__gxx_personality_v0(...)
+
+declare void @__cxa_throw(i8*, i8*, i8*)
+
+declare i32 @llvm.eh.typeid.for(i8*) nounwind readnone
+
+declare i8* @__cxa_begin_catch(i8*)
+
+declare void @__cxa_end_catch()
diff --git a/test/CodeGen/Mips/mips16fpe.ll b/test/CodeGen/Mips/mips16fpe.ll
new file mode 100644
index 0000000000..4335436079
--- /dev/null
+++ b/test/CodeGen/Mips/mips16fpe.ll
@@ -0,0 +1,381 @@
+; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 -soft-float -mips16-hard-float < %s | FileCheck %s -check-prefix=16hf
+
+@x = global float 5.000000e+00, align 4
+@y = global float 1.500000e+01, align 4
+@xd = global double 6.000000e+00, align 8
+@yd = global double 1.800000e+01, align 8
+@two = global i32 2, align 4
+@addsf3_result = common global float 0.000000e+00, align 4
+@adddf3_result = common global double 0.000000e+00, align 8
+@subsf3_result = common global float 0.000000e+00, align 4
+@subdf3_result = common global double 0.000000e+00, align 8
+@mulsf3_result = common global float 0.000000e+00, align 4
+@muldf3_result = common global double 0.000000e+00, align 8
+@divsf3_result = common global float 0.000000e+00, align 4
+@divdf3_result = common global double 0.000000e+00, align 8
+@extendsfdf2_result = common global double 0.000000e+00, align 8
+@xd2 = global double 0x40147E6B74B4CF6A, align 8
+@truncdfsf2_result = common global float 0.000000e+00, align 4
+@fix_truncsfsi_result = common global i32 0, align 4
+@fix_truncdfsi_result = common global i32 0, align 4
+@si = global i32 -9, align 4
+@ui = global i32 9, align 4
+@floatsisf_result = common global float 0.000000e+00, align 4
+@floatsidf_result = common global double 0.000000e+00, align 8
+@floatunsisf_result = common global float 0.000000e+00, align 4
+@floatunsidf_result = common global double 0.000000e+00, align 8
+@xx = global float 5.000000e+00, align 4
+@eqsf2_result = common global i32 0, align 4
+@xxd = global double 6.000000e+00, align 8
+@eqdf2_result = common global i32 0, align 4
+@nesf2_result = common global i32 0, align 4
+@nedf2_result = common global i32 0, align 4
+@gesf2_result = common global i32 0, align 4
+@gedf2_result = common global i32 0, align 4
+@ltsf2_result = common global i32 0, align 4
+@ltdf2_result = common global i32 0, align 4
+@lesf2_result = common global i32 0, align 4
+@ledf2_result = common global i32 0, align 4
+@gtsf2_result = common global i32 0, align 4
+@gtdf2_result = common global i32 0, align 4
+
+define void @test_addsf3() nounwind {
+entry:
+;16hf: test_addsf3:
+  %0 = load float* @x, align 4
+  %1 = load float* @y, align 4
+  %add = fadd float %0, %1
+  store float %add, float* @addsf3_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_addsf3)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_adddf3() nounwind {
+entry:
+;16hf: test_adddf3:
+  %0 = load double* @xd, align 8
+  %1 = load double* @yd, align 8
+  %add = fadd double %0, %1
+  store double %add, double* @adddf3_result, align 8
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_adddf3)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_subsf3() nounwind {
+entry:
+;16hf: test_subsf3:
+  %0 = load float* @x, align 4
+  %1 = load float* @y, align 4
+  %sub = fsub float %0, %1
+  store float %sub, float* @subsf3_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_subsf3)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_subdf3() nounwind {
+entry:
+;16hf: test_subdf3:
+  %0 = load double* @xd, align 8
+  %1 = load double* @yd, align 8
+  %sub = fsub double %0, %1
+  store double %sub, double* @subdf3_result, align 8
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_subdf3)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_mulsf3() nounwind {
+entry:
+;16hf: test_mulsf3:
+  %0 = load float* @x, align 4
+  %1 = load float* @y, align 4
+  %mul = fmul float %0, %1
+  store float %mul, float* @mulsf3_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_mulsf3)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_muldf3() nounwind {
+entry:
+;16hf: test_muldf3:
+  %0 = load double* @xd, align 8
+  %1 = load double* @yd, align 8
+  %mul = fmul double %0, %1
+  store double %mul, double* @muldf3_result, align 8
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_muldf3)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_divsf3() nounwind {
+entry:
+;16hf: test_divsf3:
+  %0 = load float* @y, align 4
+  %1 = load float* @x, align 4
+  %div = fdiv float %0, %1
+  store float %div, float* @divsf3_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_divsf3)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_divdf3() nounwind {
+entry:
+;16hf: test_divdf3:
+  %0 = load double* @yd, align 8
+  %mul = fmul double %0, 2.000000e+00
+  %1 = load double* @xd, align 8
+  %div = fdiv double %mul, %1
+  store double %div, double* @divdf3_result, align 8
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_divdf3)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_extendsfdf2() nounwind {
+entry:
+;16hf: test_extendsfdf2:
+  %0 = load float* @x, align 4
+  %conv = fpext float %0 to double
+  store double %conv, double* @extendsfdf2_result, align 8
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_extendsfdf2)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_truncdfsf2() nounwind {
+entry:
+;16hf: test_truncdfsf2:
+  %0 = load double* @xd2, align 8
+  %conv = fptrunc double %0 to float
+  store float %conv, float* @truncdfsf2_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_truncdfsf2)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_fix_truncsfsi() nounwind {
+entry:
+;16hf: test_fix_truncsfsi:
+  %0 = load float* @x, align 4
+  %conv = fptosi float %0 to i32
+  store i32 %conv, i32* @fix_truncsfsi_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_fix_truncsfsi)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_fix_truncdfsi() nounwind {
+entry:
+;16hf: test_fix_truncdfsi:
+  %0 = load double* @xd, align 8
+  %conv = fptosi double %0 to i32
+  store i32 %conv, i32* @fix_truncdfsi_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_fix_truncdfsi)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_floatsisf() nounwind {
+entry:
+;16hf: test_floatsisf:
+  %0 = load i32* @si, align 4
+  %conv = sitofp i32 %0 to float
+  store float %conv, float* @floatsisf_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_floatsisf)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_floatsidf() nounwind {
+entry:
+;16hf: test_floatsidf:
+  %0 = load i32* @si, align 4
+  %conv = sitofp i32 %0 to double
+  store double %conv, double* @floatsidf_result, align 8
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_floatsidf)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_floatunsisf() nounwind {
+entry:
+;16hf: test_floatunsisf:
+  %0 = load i32* @ui, align 4
+  %conv = uitofp i32 %0 to float
+  store float %conv, float* @floatunsisf_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_floatunsisf)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_floatunsidf() nounwind {
+entry:
+;16hf: test_floatunsidf:
+  %0 = load i32* @ui, align 4
+  %conv = uitofp i32 %0 to double
+  store double %conv, double* @floatunsidf_result, align 8
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_floatunsidf)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_eqsf2() nounwind {
+entry:
+;16hf: test_eqsf2:
+  %0 = load float* @x, align 4
+  %1 = load float* @xx, align 4
+  %cmp = fcmp oeq float %0, %1
+  %conv = zext i1 %cmp to i32
+  store i32 %conv, i32* @eqsf2_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_eqsf2)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_eqdf2() nounwind {
+entry:
+;16hf: test_eqdf2:
+  %0 = load double* @xd, align 8
+  %1 = load double* @xxd, align 8
+  %cmp = fcmp oeq double %0, %1
+  %conv = zext i1 %cmp to i32
+  store i32 %conv, i32* @eqdf2_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_eqdf2)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_nesf2() nounwind {
+entry:
+;16hf: test_nesf2:
+  %0 = load float* @x, align 4
+  %1 = load float* @y, align 4
+  %cmp = fcmp une float %0, %1
+  %conv = zext i1 %cmp to i32
+  store i32 %conv, i32* @nesf2_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_nesf2)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_nedf2() nounwind {
+entry:
+;16hf: test_nedf2:
+  %0 = load double* @xd, align 8
+  %1 = load double* @yd, align 8
+  %cmp = fcmp une double %0, %1
+  %conv = zext i1 %cmp to i32
+  store i32 %conv, i32* @nedf2_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_nedf2)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_gesf2() nounwind {
+entry:
+;16hf: test_gesf2:
+  %0 = load float* @x, align 4
+  %1 = load float* @xx, align 4
+  %cmp = fcmp oge float %0, %1
+  %2 = load float* @y, align 4
+  %cmp1 = fcmp oge float %2, %0
+  %and3 = and i1 %cmp, %cmp1
+  %and = zext i1 %and3 to i32
+  store i32 %and, i32* @gesf2_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_gesf2)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_gedf2() nounwind {
+entry:
+;16hf: test_gedf2:
+  %0 = load double* @xd, align 8
+  %1 = load double* @xxd, align 8
+  %cmp = fcmp oge double %0, %1
+  %2 = load double* @yd, align 8
+  %cmp1 = fcmp oge double %2, %0
+  %and3 = and i1 %cmp, %cmp1
+  %and = zext i1 %and3 to i32
+  store i32 %and, i32* @gedf2_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_gedf2)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_ltsf2() nounwind {
+entry:
+;16hf: test_ltsf2:
+  %0 = load float* @x, align 4
+  %1 = load float* @xx, align 4
+  %lnot = fcmp uge float %0, %1
+  %2 = load float* @y, align 4
+  %cmp1 = fcmp olt float %0, %2
+  %and2 = and i1 %lnot, %cmp1
+  %and = zext i1 %and2 to i32
+  store i32 %and, i32* @ltsf2_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_unordsf2)(${{[0-9]+}})
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_ltsf2)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_ltdf2() nounwind {
+entry:
+;16hf: test_ltdf2:
+  %0 = load double* @xd, align 8
+  %1 = load double* @xxd, align 8
+  %lnot = fcmp uge double %0, %1
+  %2 = load double* @yd, align 8
+  %cmp1 = fcmp olt double %0, %2
+  %and2 = and i1 %lnot, %cmp1
+  %and = zext i1 %and2 to i32
+  store i32 %and, i32* @ltdf2_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_unorddf2)(${{[0-9]+}})
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_ltdf2)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_lesf2() nounwind {
+entry:
+;16hf: test_lesf2:
+  %0 = load float* @x, align 4
+  %1 = load float* @xx, align 4
+  %cmp = fcmp ole float %0, %1
+  %2 = load float* @y, align 4
+  %cmp1 = fcmp ole float %0, %2
+  %and3 = and i1 %cmp, %cmp1
+  %and = zext i1 %and3 to i32
+  store i32 %and, i32* @lesf2_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_lesf2)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_ledf2() nounwind {
+entry:
+;16hf: test_ledf2:
+  %0 = load double* @xd, align 8
+  %1 = load double* @xxd, align 8
+  %cmp = fcmp ole double %0, %1
+  %2 = load double* @yd, align 8
+  %cmp1 = fcmp ole double %0, %2
+  %and3 = and i1 %cmp, %cmp1
+  %and = zext i1 %and3 to i32
+  store i32 %and, i32* @ledf2_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_ledf2)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_gtsf2() nounwind {
+entry:
+;16hf: test_gtsf2:
+  %0 = load float* @x, align 4
+  %1 = load float* @xx, align 4
+  %lnot = fcmp ule float %0, %1
+  %2 = load float* @y, align 4
+  %cmp1 = fcmp ogt float %2, %0
+  %and2 = and i1 %lnot, %cmp1
+  %and = zext i1 %and2 to i32
+  store i32 %and, i32* @gtsf2_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_gtsf2)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_gtdf2() nounwind {
+entry:
+;16hf: test_gtdf2:
+  %0 = load double* @xd, align 8
+  %1 = load double* @xxd, align 8
+  %lnot = fcmp ule double %0, %1
+  %2 = load double* @yd, align 8
+  %cmp1 = fcmp ogt double %2, %0
+  %and2 = and i1 %lnot, %cmp1
+  %and = zext i1 %and2 to i32
+  store i32 %and, i32* @gtdf2_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_gtdf2)(${{[0-9]+}})
+  ret void
+}
+
+
diff --git a/test/CodeGen/Mips/mips64-sret.ll b/test/CodeGen/Mips/mips64-sret.ll
index e26b0223b4..eb08e700bc 100644
--- a/test/CodeGen/Mips/mips64-sret.ll
+++ b/test/CodeGen/Mips/mips64-sret.ll
@@ -6,7 +6,7 @@ define void @f(%struct.S* noalias sret %agg.result) nounwind {
 entry:
-; CHECK: daddu $2, $zero, $4
+; CHECK: or $2, $4, $zero
   %0 = bitcast %struct.S* %agg.result to i8*
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast (%struct.S* @g to i8*), i64 32, i32 4, i1 false)
diff --git a/test/CodeGen/Mips/return_address.ll b/test/CodeGen/Mips/return_address.ll
index e1c9241984..3bcd5601ee 100644
--- a/test/CodeGen/Mips/return_address.ll
+++ b/test/CodeGen/Mips/return_address.ll
@@ -5,7 +5,7 @@ entry:
   %0 = call i8* @llvm.returnaddress(i32 0)
   ret i8* %0
 
-; CHECK: addu $2, $zero, $ra
+; CHECK: or $2, $ra, $zero
 }
 
 define i8* @f2() nounwind {
@@ -14,9 +14,9 @@ entry:
   %0 = call i8* @llvm.returnaddress(i32 0)
   ret i8* %0
 
-; CHECK: addu $[[R0:[0-9]+]], $zero, $ra
+; CHECK: or $[[R0:[0-9]+]], $ra, $zero
 ; CHECK: jal
-; CHECK: addu $2, $zero, $[[R0]]
+; CHECK: or $2, $[[R0]], $zero
 }
 
 declare i8* @llvm.returnaddress(i32) nounwind readnone
diff --git a/test/CodeGen/Mips/vector-setcc.ll b/test/CodeGen/Mips/vector-setcc.ll
new file mode 100644
index 0000000000..aeff4918c8
--- /dev/null
+++ b/test/CodeGen/Mips/vector-setcc.ll
@@ -0,0 +1,16 @@
+; RUN: llc -march=mipsel < %s
+
+@a = common global <4 x i32> zeroinitializer, align 16
+@b = common global <4 x i32> zeroinitializer, align 16
+@g0 = common global <4 x i32> zeroinitializer, align 16
+
+define void @foo0() nounwind {
+entry:
+  %0 = load <4 x i32>* @a, align 16
+  %1 = load <4 x i32>* @b, align 16
+  %cmp = icmp slt <4 x i32> %0, %1
+  %sext = sext <4 x i1> %cmp to <4 x i32>
+  store <4 x i32> %sext, <4 x i32>* @g0, align 16
+  ret void
+}
+
diff --git a/test/CodeGen/PowerPC/atomic-2.ll b/test/CodeGen/PowerPC/atomic-2.ll
index a427379a8b..40b4a2eea9 100644
--- a/test/CodeGen/PowerPC/atomic-2.ll
+++ b/test/CodeGen/PowerPC/atomic-2.ll
@@ -24,3 +24,23 @@ define i64 @exchange(i64* %mem, i64 %val) nounwind {
 ; CHECK: stdcx.
   ret i64 %tmp
 }
+
+define void @atomic_store(i64* %mem, i64 %val) nounwind {
+entry:
+; CHECK: @atomic_store
+  store atomic i64 %val, i64* %mem release, align 64
+; CHECK: ldarx
+; CHECK: stdcx.
+  ret void
+}
+
+define i64 @atomic_load(i64* %mem) nounwind {
+entry:
+; CHECK: @atomic_load
+  %tmp = load atomic i64* %mem acquire, align 64
+; CHECK: ldarx
+; CHECK: stdcx.
+; CHECK: stdcx.
+  ret i64 %tmp
+}
+
diff --git a/test/CodeGen/PowerPC/dcbt-sched.ll b/test/CodeGen/PowerPC/dcbt-sched.ll
new file mode 100644
index 0000000000..dfa1b75bd7
--- /dev/null
+++ b/test/CodeGen/PowerPC/dcbt-sched.ll
@@ -0,0 +1,22 @@
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+; RUN: llc -mcpu=a2 -enable-misched -enable-aa-sched-mi < %s | FileCheck %s
+
+define i8 @test1(i8* noalias %a, i8* noalias %b, i8* noalias %c) nounwind {
+entry:
+  %q = load i8* %b
+  call void @llvm.prefetch(i8* %a, i32 0, i32 3, i32 1)
+  %r = load i8* %c
+  %s = add i8 %q, %r
+  ret i8 %s
+}
+
+declare void @llvm.prefetch(i8*, i32, i32, i32)
+
+; Test that we've moved the second load to before the dcbt to better
+; hide its latency.
+; CHECK: @test1
+; CHECK: lbz
+; CHECK: lbz
+; CHECK: dcbt
+
diff --git a/test/CodeGen/PowerPC/float-asmprint.ll b/test/CodeGen/PowerPC/float-asmprint.ll
new file mode 100644
index 0000000000..c9dc02862a
--- /dev/null
+++ b/test/CodeGen/PowerPC/float-asmprint.ll
@@ -0,0 +1,34 @@
+; RUN: llc -mtriple=powerpc64-none-linux < %s | FileCheck %s
+
+; Check that all current floating-point types are correctly emitted to assembly
+; on a big-endian target. x86_fp80 can't actually print for unrelated reasons,
+; but that's not really a problem.
+
+@var128 = global fp128 0xL00000000000000008000000000000000, align 16
+@varppc128 = global ppc_fp128 0xM80000000000000000000000000000000, align 16
+@var64 = global double -0.0, align 8
+@var32 = global float -0.0, align 4
+@var16 = global half -0.0, align 2
+
+; CHECK: var128:
+; CHECK-NEXT: .quad -9223372036854775808  # fp128 -0
+; CHECK-NEXT: .quad 0
+; CHECK-NEXT: .size
+
+; CHECK: varppc128:
+; CHECK-NEXT: .quad -9223372036854775808  # ppc_fp128 -0
+; CHECK-NEXT: .quad 0
+; CHECK-NEXT: .size
+
+; CHECK: var64:
+; CHECK-NEXT: .quad -9223372036854775808  # double -0
+; CHECK-NEXT: .size
+
+; CHECK: var32:
+; CHECK-NEXT: .long 2147483648  # float -0
+; CHECK-NEXT: .size
+
+; CHECK: var16:
+; CHECK-NEXT: .short 32768  # half -0
+; CHECK-NEXT: .size
+
diff --git a/test/CodeGen/PowerPC/in-asm-f64-reg.ll b/test/CodeGen/PowerPC/in-asm-f64-reg.ll
new file mode 100644
index 0000000000..1321dfce20
--- /dev/null
+++ b/test/CodeGen/PowerPC/in-asm-f64-reg.ll
@@ -0,0 +1,11 @@
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu | FileCheck %s
+
+define void @f() {
+; CHECK: @f
+
+entry:
+  %0 = tail call double* asm sideeffect "qvstfdux $2,$0,$1", "=b,{r7},{f11},0,~{memory}"(i32 64, double undef, double* undef)
+  ret void
+
+; CHECK: qvstfdux 11,{{[0-9]+}},7
+}
diff --git a/test/CodeGen/PowerPC/mcm-8.ll b/test/CodeGen/PowerPC/mcm-8.ll
new file mode 100644
index 0000000000..9381a976a4
--- /dev/null
+++ b/test/CodeGen/PowerPC/mcm-8.ll
@@ -0,0 +1,24 @@
+; RUN: llc -mcpu=pwr7 -O0 -code-model=medium < %s | FileCheck %s
+
+; Test correct code generation for medium code model (32-bit TOC offsets)
+; for loading a variable with available-externally linkage.
+
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+@x = available_externally constant [13 x i8] c"St9bad_alloc\00"
+
+define signext i8 @test_avext() nounwind {
+entry:
+  %0 = getelementptr inbounds [13 x i8]* @x, i32 0, i32 0
+  %1 = load i8* %0, align 1
+  ret i8 %1
+}
+
+; CHECK: test_avext:
+; CHECK: addis [[REG1:[0-9]+]], 2, .LC[[TOCNUM:[0-9]+]]@toc@ha
+; CHECK: ld [[REG2:[0-9]+]], .LC[[TOCNUM]]@toc@l([[REG1]])
+; CHECK: lbz {{[0-9]+}}, 0([[REG2]])
+; CHECK: .section .toc
+; CHECK: .LC[[TOCNUM]]:
+; CHECK: .tc {{[a-z0-9A-Z_.]+}}[TC],{{[a-z0-9A-Z_.]+}}
diff --git a/test/CodeGen/PowerPC/mcm-9.ll b/test/CodeGen/PowerPC/mcm-9.ll
new file mode 100644
index 0000000000..422607c5bc
--- /dev/null
+++ b/test/CodeGen/PowerPC/mcm-9.ll
@@ -0,0 +1,27 @@
+; RUN: llc -mcpu=pwr7 -O0 -code-model=medium <%s | FileCheck %s
+
+; Test correct code generation for medium code model (32-bit TOC offsets)
+; for loading and storing an aliased external variable.
+
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+@ei = external global i32
+@a = alias i32* @ei
+
+define signext i32 @test_external() nounwind {
+entry:
+  %0 = load i32* @a, align 4
+  %inc = add nsw i32 %0, 1
+  store i32 %inc, i32* @a, align 4
+  ret i32 %0
+}
+
+; CHECK: test_external:
+; CHECK: addis [[REG1:[0-9]+]], 2, .LC[[TOCNUM:[0-9]+]]@toc@ha
+; CHECK: ld [[REG2:[0-9]+]], .LC[[TOCNUM]]@toc@l([[REG1]])
+; CHECK: lwz {{[0-9]+}}, 0([[REG2]])
+; CHECK: stw {{[0-9]+}}, 0([[REG2]])
+; CHECK: .section .toc
+; CHECK: .LC[[TOCNUM]]:
+; CHECK: .tc {{[a-z0-9A-Z_.]+}}[TC],{{[a-z0-9A-Z_.]+}}
diff --git a/test/CodeGen/PowerPC/misched-inorder-latency.ll b/test/CodeGen/PowerPC/misched-inorder-latency.ll
new file mode 100644
index 0000000000..8fae7ad4d1
--- /dev/null
+++ b/test/CodeGen/PowerPC/misched-inorder-latency.ll
@@ -0,0 +1,55 @@
+; RUN: llc < %s -enable-misched -pre-RA-sched=source -scheditins=false \
+; RUN:          -disable-ifcvt-triangle-false -disable-post-ra | FileCheck %s
+;
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
+target triple = "powerpc64-bgq-linux"
+
+; %val1 is a load live out of %entry. It should be hoisted
+; above the add.
+; CHECK: testload:
+; CHECK: %entry
+; CHECK: lwz
+; CHECK: addi
+; CHECK: bne
+; CHECK: %true
+define i32 @testload(i32 *%ptr, i32 %sumin) {
+entry:
+  %sum1 = add i32 %sumin, 1
+  %val1 = load i32* %ptr
+  %p = icmp eq i32 %sumin, 0
+  br i1 %p, label %true, label %end
+true:
+  %sum2 = add i32 %sum1, 1
+  %ptr2 = getelementptr i32* %ptr, i32 1
+  %val = load i32* %ptr2
+  %val2 = add i32 %val1, %val
+  br label %end
+end:
+  %valmerge = phi i32 [ %val1, %entry], [ %val2, %true ]
+  %summerge = phi i32 [ %sum1, %entry], [ %sum2, %true ]
+  %sumout = add i32 %valmerge, %summerge
+  ret i32 %sumout
+}
+
+; The prefetch gets a default latency of 3 cycles and should be hoisted
+; above the add.
+;
+
+; CHECK: testprefetch:
+; CHECK: %entry
+; CHECK: dcbt
+; CHECK: addi
+; CHECK: blr
+define i32 @testprefetch(i8 *%ptr, i32 %i) {
+entry:
+  %val1 = add i32 %i, 1
+  tail call void @llvm.prefetch( i8* %ptr, i32 0, i32 3, i32 1 )
+  %p = icmp eq i32 %i, 0
+  br i1 %p, label %true, label %end
+true:
+  %val2 = add i32 %val1, 1
+  br label %end
+end:
+  %valmerge = phi i32 [ %val1, %entry], [ %val2, %true ]
+  ret i32 %valmerge
+}
+declare void @llvm.prefetch(i8*, i32, i32, i32) nounwind
diff --git a/test/CodeGen/PowerPC/sdag-ppcf128.ll b/test/CodeGen/PowerPC/sdag-ppcf128.ll
new file mode 100644
index 0000000000..535ece6d3d
--- /dev/null
+++ b/test/CodeGen/PowerPC/sdag-ppcf128.ll
@@ -0,0 +1,15 @@
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck %s
+;
+; PR14751: Unsupported type in SelectionDAG::getConstantFP()
+
+define fastcc void @_D3std4math4sqrtFNaNbNfcZc() {
+entry:
+  br i1 undef, label %if, label %else
+; CHECK: cmplwi 0, 3, 0
+if:                                               ; preds = %entry
+  store { ppc_fp128, ppc_fp128 } zeroinitializer, { ppc_fp128, ppc_fp128 }* undef
+  ret void
+
+else:                                             ; preds = %entry
+  unreachable
+}
diff --git a/test/CodeGen/PowerPC/tls-gd-obj.ll b/test/CodeGen/PowerPC/tls-gd-obj.ll
new file mode 100644
index 0000000000..00b537d532
--- /dev/null
+++ b/test/CodeGen/PowerPC/tls-gd-obj.ll
@@ -0,0 +1,41 @@
+; RUN: llc -mcpu=pwr7 -O0 -filetype=obj -relocation-model=pic %s -o - | \
+; RUN: elf-dump --dump-section-data | FileCheck %s
+
+; Test correct relocation generation for thread-local storage using
+; the general dynamic model and integrated assembly.
+
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+@a = thread_local global i32 0, align 4
+
+define signext i32 @main() nounwind {
+entry:
+  %retval = alloca i32, align 4
+  store i32 0, i32* %retval
+  %0 = load i32* @a, align 4
+  ret i32 %0
+}
+
+; Verify generation of R_PPC64_GOT_TLSGD16_HA, R_PPC64_GOT_TLSGD16_LO,
+; and R_PPC64_TLSGD for accessing external variable a, and R_PPC64_REL24
+; for the call to __tls_get_addr.
+;
+; CHECK: '.rela.text'
+; CHECK: Relocation 0
+; CHECK-NEXT: 'r_offset'
+; CHECK-NEXT: 'r_sym', 0x[[SYM1:[0-9a-f]+]]
+; CHECK-NEXT: 'r_type', 0x00000052
+; CHECK: Relocation 1
+; CHECK-NEXT: 'r_offset'
+; CHECK-NEXT: 'r_sym', 0x[[SYM1]]
+; CHECK-NEXT: 'r_type', 0x00000050
+; CHECK: Relocation 2
+; CHECK-NEXT: 'r_offset'
+; CHECK-NEXT: 'r_sym', 0x[[SYM1]]
+; CHECK-NEXT: 'r_type', 0x0000006b
+; CHECK: Relocation 3
+; CHECK-NEXT: 'r_offset'
+; CHECK-NEXT: 'r_sym', 0x{{[0-9a-f]+}}
+; CHECK-NEXT: 'r_type', 0x0000000a
+
diff --git a/test/CodeGen/PowerPC/tls-gd.ll b/test/CodeGen/PowerPC/tls-gd.ll
new file mode 100644
index 0000000000..fb8dfaf04a
--- /dev/null
+++ b/test/CodeGen/PowerPC/tls-gd.ll
@@ -0,0 +1,23 @@
+; RUN: llc -mcpu=pwr7 -O0 -relocation-model=pic < %s | FileCheck %s
+
+; Test correct assembly code generation for thread-local storage using
+; the general dynamic model.
+
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+@a = thread_local global i32 0, align 4
+
+define signext i32 @main() nounwind {
+entry:
+  %retval = alloca i32, align 4
+  store i32 0, i32* %retval
+  %0 = load i32* @a, align 4
+  ret i32 %0
+}
+
+; CHECK: addis [[REG:[0-9]+]], 2, a@got@tlsgd@ha
+; CHECK-NEXT: addi 3, [[REG]], a@got@tlsgd@l
+; CHECK-NEXT: bl __tls_get_addr(a@tlsgd)
+; CHECK-NEXT: nop
+
diff --git a/test/CodeGen/PowerPC/tls-ie-obj.ll b/test/CodeGen/PowerPC/tls-ie-obj.ll
index 5cc0b187f6..3600cc52ba 100644
--- a/test/CodeGen/PowerPC/tls-ie-obj.ll
+++ b/test/CodeGen/PowerPC/tls-ie-obj.ll
@@ -24,9 +24,13 @@ entry:
 ; CHECK: Relocation 0
 ; CHECK-NEXT: 'r_offset'
 ; CHECK-NEXT: 'r_sym', 0x[[SYM1:[0-9a-f]+]]
-; CHECK-NEXT: 'r_type', 0x00000057
+; CHECK-NEXT: 'r_type', 0x0000005a
 ; CHECK: Relocation 1
 ; CHECK-NEXT: 'r_offset'
 ; CHECK-NEXT: 'r_sym', 0x[[SYM1]]
+; CHECK-NEXT: 'r_type', 0x00000058
+; CHECK: Relocation 2
+; CHECK-NEXT: 'r_offset'
+; CHECK-NEXT: 'r_sym', 0x[[SYM1]]
 ; CHECK-NEXT: 'r_type', 0x00000043
 
diff --git a/test/CodeGen/PowerPC/tls-ie.ll b/test/CodeGen/PowerPC/tls-ie.ll
index cc6f084efb..c5cfba7b3f 100644
--- a/test/CodeGen/PowerPC/tls-ie.ll
+++ b/test/CodeGen/PowerPC/tls-ie.ll
@@ -16,6 +16,7 @@ entry:
   ret i32 %0
 }
 
-; CHECK: ld [[REG:[0-9]+]], a@got@tprel(2)
-; CHECK: add {{[0-9]+}}, [[REG]], a@tls
+; CHECK: addis [[REG1:[0-9]+]], 2, a@got@tprel@ha
+; CHECK: ld [[REG2:[0-9]+]], a@got@tprel@l([[REG1]])
+; CHECK: add {{[0-9]+}}, [[REG2]], a@tls
 
diff --git a/test/CodeGen/PowerPC/tls-ld-obj.ll b/test/CodeGen/PowerPC/tls-ld-obj.ll
new file mode 100644
index 0000000000..c521ae405f
--- /dev/null
+++ b/test/CodeGen/PowerPC/tls-ld-obj.ll
@@ -0,0 +1,50 @@
+; RUN: llc -mcpu=pwr7 -O0 -filetype=obj -relocation-model=pic %s -o - | \
+; RUN: elf-dump --dump-section-data | FileCheck %s
+
+; Test correct relocation generation for thread-local storage using
+; the local dynamic model.
+
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+@a = hidden thread_local global i32 0, align 4
+
+define signext i32 @main() nounwind {
+entry:
+  %retval = alloca i32, align 4
+  store i32 0, i32* %retval
+  %0 = load i32* @a, align 4
+  ret i32 %0
+}
+
+; Verify generation of R_PPC64_GOT_TLSLD16_HA, R_PPC64_GOT_TLSLD16_LO,
+; R_PPC64_TLSLD, R_PPC64_DTPREL16_HA, and R_PPC64_DTPREL16_LO for
+; accessing external variable a, and R_PPC64_REL24 for the call to
+; __tls_get_addr.
+;
+
+; CHECK: '.rela.text'
+; CHECK: Relocation 0
+; CHECK-NEXT: 'r_offset'
+; CHECK-NEXT: 'r_sym', 0x[[SYM1:[0-9a-f]+]]
+; CHECK-NEXT: 'r_type', 0x00000056
+; CHECK: Relocation 1
+; CHECK-NEXT: 'r_offset'
+; CHECK-NEXT: 'r_sym', 0x[[SYM1]]
+; CHECK-NEXT: 'r_type', 0x00000054
+; CHECK: Relocation 2
+; CHECK-NEXT: 'r_offset'
+; CHECK-NEXT: 'r_sym', 0x[[SYM1]]
+; CHECK-NEXT: 'r_type', 0x0000006c
+; CHECK: Relocation 3
+; CHECK-NEXT: 'r_offset'
+; CHECK-NEXT: 'r_sym', 0x{{[0-9a-f]+}}
+; CHECK-NEXT: 'r_type', 0x0000000a
+; CHECK: Relocation 4
+; CHECK-NEXT: 'r_offset'
+; CHECK-NEXT: 'r_sym', 0x[[SYM1]]
+; CHECK-NEXT: 'r_type', 0x0000004d
+; CHECK: Relocation 5
+; CHECK-NEXT: 'r_offset'
+; CHECK-NEXT: 'r_sym', 0x[[SYM1]]
+; CHECK-NEXT: 'r_type', 0x0000004b
+
diff --git a/test/CodeGen/PowerPC/tls-ld.ll b/test/CodeGen/PowerPC/tls-ld.ll
new file mode 100644
index 0000000000..1ebc6129e2
--- /dev/null
+++ b/test/CodeGen/PowerPC/tls-ld.ll
@@ -0,0 +1,24 @@
+; RUN: llc -mcpu=pwr7 -O0 -relocation-model=pic < %s | FileCheck %s
+
+; Test correct assembly code generation for thread-local storage using
+; the local dynamic model.
+
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+@a = hidden thread_local global i32 0, align 4
+
+define signext i32 @main() nounwind {
+entry:
+  %retval = alloca i32, align 4
+  store i32 0, i32* %retval
+  %0 = load i32* @a, align 4
+  ret i32 %0
+}
+
+; CHECK: addis [[REG:[0-9]+]], 2, a@got@tlsld@ha
+; CHECK-NEXT: addi 3, [[REG]], a@got@tlsld@l
+; CHECK-NEXT: bl __tls_get_addr(a@tlsld)
+; CHECK-NEXT: nop
+; CHECK-NEXT: addis [[REG2:[0-9]+]], 3, a@dtprel@ha
+; CHECK-NEXT: addi {{[0-9]+}}, [[REG2]], a@dtprel@l
diff --git a/test/CodeGen/PowerPC/vec_extload.ll b/test/CodeGen/PowerPC/vec_extload.ll
index 15a3f9f295..998645d90d 100644
--- a/test/CodeGen/PowerPC/vec_extload.ll
+++ b/test/CodeGen/PowerPC/vec_extload.ll
@@ -15,55 +15,9 @@ define <16 x i8> @v16si8_sext_in_reg(<16 x i8> %a) {
   ret <16 x i8> %c
 }
 ; CHECK: v16si8_sext_in_reg:
-; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
-; CHECK: lbz
-; CHECK: stb
-; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
-; CHECK: lbz
-; CHECK: stb
-; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
-; CHECK: lbz
-; CHECK: stb
-; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
-; CHECK: lbz
-; CHECK: stb
-; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
-; CHECK: lbz
-; CHECK: stb
-; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
-; CHECK: lbz
-; CHECK: stb
-; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
-; CHECK: lbz
-; CHECK: stb
-; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
-; CHECK: lbz
-; CHECK: stb
-; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
-; CHECK: lbz
-; CHECK: stb
-; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
-; CHECK: lbz
-; CHECK: stb
-; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
-; CHECK: lbz
-; CHECK: stb
-; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
-; CHECK: lbz
-; CHECK: stb
-; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
-; CHECK: lbz
-; CHECK: stb
-; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
-; CHECK: lbz
-; CHECK: stb
-; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
-; CHECK: lbz
-; CHECK: stb
-; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
-; CHECK: lbz
-; CHECK: stb
-; CHECK: lvx 2, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vslb
+; CHECK: vsrab
+; CHECK: blr
 
 ; The zero extend uses a more clever logic: a vector splat
 ; and a logic and to set higher bits to 0.
@@ -83,31 +37,9 @@ define <8 x i16> @v8si16_sext_in_reg(<8 x i16> %a) { ret <8 x i16> %c } ; CHECK: v8si16_sext_in_reg: -; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} -; CHECK: lhz -; CHECK: sth -; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} -; CHECK: lhz -; CHECK: sth -; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} -; CHECK: lhz -; CHECK: sth -; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} -; CHECK: lhz -; CHECK: sth -; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} -; CHECK: lhz -; CHECK: sth -; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} -; CHECK: lhz -; CHECK: sth -; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} -; CHECK: lhz -; CHECK: sth -; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} -; CHECK: lhz -; CHECK: sth -; CHECK: lvx 2, {{[0-9]+}}, {{[0-9]+}} +; CHECK: vslh +; CHECK: vsrah +; CHECK: blr ; Same as v8si16_sext_in_reg, but instead of creating the mask ; with a splat, loads it from memory. @@ -129,19 +61,9 @@ define <4 x i32> @v4si32_sext_in_reg(<4 x i32> %a) { ret <4 x i32> %c } ; CHECK: v4si32_sext_in_reg: -; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} -; CHECK: lha -; CHECK: stw -; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} -; CHECK: lha -; CHECK: stw -; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} -; CHECK: lha -; CHECK: stw -; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} -; CHECK: lha -; CHECK: stw -; CHECK: lvx 2, {{[0-9]+}}, {{[0-9]+}} +; CHECK: vslw +; CHECK: vsraw +; CHECK: blr ; Same as v8si16_sext_in_reg. define <4 x i32> @v4si32_zext_in_reg(<4 x i32> %a) { diff --git a/test/CodeGen/PowerPC/vec_select.ll b/test/CodeGen/PowerPC/vec_select.ll new file mode 100644 index 0000000000..4ad0acca00 --- /dev/null +++ b/test/CodeGen/PowerPC/vec_select.ll @@ -0,0 +1,7 @@ +; RUN: llc < %s -mtriple=powerpc64-linux-gnu -mattr=+altivec | FileCheck %s + +; CHECK: vsel_float +define <4 x float> @vsel_float(<4 x float> %v1, <4 x float> %v2) { + %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x float> %v1, <4 x float> %v2 + ret <4 x float> %vsel +} diff --git a/test/CodeGen/R600/add.v4i32.ll b/test/CodeGen/R600/add.v4i32.ll new file mode 100644 index 0000000000..ac4a87417b --- /dev/null +++ b/test/CodeGen/R600/add.v4i32.ll @@ -0,0 +1,15 @@ +;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s + +;CHECK: ADD_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}} +;CHECK: ADD_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}} +;CHECK: ADD_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}} +;CHECK: ADD_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}} + +define void @test(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) { + %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1 + %a = load <4 x i32> addrspace(1) * %in + %b = load <4 x i32> addrspace(1) * %b_ptr + %result = add <4 x i32> %a, %b + store <4 x i32> %result, <4 x i32> addrspace(1)* %out + ret void +} diff --git a/test/CodeGen/R600/and.v4i32.ll b/test/CodeGen/R600/and.v4i32.ll new file mode 100644 index 0000000000..662085e2d6 --- /dev/null +++ b/test/CodeGen/R600/and.v4i32.ll @@ -0,0 +1,15 @@ +;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s + +;CHECK: AND_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}} +;CHECK: AND_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}} +;CHECK: AND_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}} +;CHECK: AND_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}} + +define void @test(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) { + %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1 + %a = load <4 x i32> addrspace(1) * %in + %b = load <4 x i32> addrspace(1) * 
%b_ptr
+ %result = and <4 x i32> %a, %b
+ store <4 x i32> %result, <4 x i32> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/dagcombiner-bug-illegal-vec4-int-to-fp.ll b/test/CodeGen/R600/dagcombiner-bug-illegal-vec4-int-to-fp.ll
new file mode 100644
index 0000000000..1acf905955
--- /dev/null
+++ b/test/CodeGen/R600/dagcombiner-bug-illegal-vec4-int-to-fp.ll
@@ -0,0 +1,33 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+;CHECK: INT_TO_FLT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+; This test is for a bug in
+; DAGCombiner::reduceBuildVecConvertToConvertBuildVec() where
+; the wrong type was being passed to
+; TargetLowering::getOperationAction() when checking the legality of
+; ISD::UINT_TO_FP and ISD::SINT_TO_FP opcodes.
+
+define void @sint(<4 x float> addrspace(1)* %out, i32 addrspace(1)* %in) {
+entry:
+ %ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %sint = load i32 addrspace(1) * %in
+ %conv = sitofp i32 %sint to float
+ %0 = insertelement <4 x float> undef, float %conv, i32 0
+ %splat = shufflevector <4 x float> %0, <4 x float> undef, <4 x i32> zeroinitializer
+ store <4 x float> %splat, <4 x float> addrspace(1)* %out
+ ret void
+}
+
+;CHECK: UINT_TO_FLT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @uint(<4 x float> addrspace(1)* %out, i32 addrspace(1)* %in) {
+entry:
+ %ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %uint = load i32 addrspace(1) * %in
+ %conv = uitofp i32 %uint to float
+ %0 = insertelement <4 x float> undef, float %conv, i32 0
+ %splat = shufflevector <4 x float> %0, <4 x float> undef, <4 x i32> zeroinitializer
+ store <4 x float> %splat, <4 x float> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/fabs.ll b/test/CodeGen/R600/fabs.ll
new file mode 100644
index 0000000000..0407533eaa
--- /dev/null
+++ b/test/CodeGen/R600/fabs.ll
@@ -0,0 +1,16 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+;CHECK: MOV T{{[0-9]+\.[XYZW], \|T[0-9]+\.[XYZW]\|}}
+
+define void @test() {
+ %r0 = call float @llvm.R600.load.input(i32 0)
+ %r1 = call float @fabs( float %r0)
+ call void @llvm.AMDGPU.store.output(float %r1, i32 0)
+ ret void
+}
+
+declare float @llvm.R600.load.input(i32) readnone
+
+declare void @llvm.AMDGPU.store.output(float, i32)
+
+declare float @fabs(float ) readnone
diff --git a/test/CodeGen/R600/fadd.ll b/test/CodeGen/R600/fadd.ll
new file mode 100644
index 0000000000..d7d1b6572c
--- /dev/null
+++ b/test/CodeGen/R600/fadd.ll
@@ -0,0 +1,16 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+; CHECK: ADD T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @test() {
+ %r0 = call float @llvm.R600.load.input(i32 0)
+ %r1 = call float @llvm.R600.load.input(i32 1)
+ %r2 = fadd float %r0, %r1
+ call void @llvm.AMDGPU.store.output(float %r2, i32 0)
+ ret void
+}
+
+declare float @llvm.R600.load.input(i32) readnone
+
+declare void @llvm.AMDGPU.store.output(float, i32)
+
diff --git a/test/CodeGen/R600/fadd.v4f32.ll b/test/CodeGen/R600/fadd.v4f32.ll
new file mode 100644
index 0000000000..85dbfd52cb
--- /dev/null
+++ b/test/CodeGen/R600/fadd.v4f32.ll
@@ -0,0 +1,15 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+;CHECK: ADD T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: ADD T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: ADD T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: ADD T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @test(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
+ %b_ptr = getelementptr <4 x float> addrspace(1)* %in, i32 1
+ %a = load <4 x float> addrspace(1) * %in
+ %b = load <4 x float> addrspace(1) * %b_ptr
+ %result = fadd <4 x float> %a, %b
+ store <4 x float> %result, <4 x float> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/fcmp-cnd.ll b/test/CodeGen/R600/fcmp-cnd.ll
new file mode 100644
index 0000000000..a94cfb5cf2
--- /dev/null
+++ b/test/CodeGen/R600/fcmp-cnd.ll
@@ -0,0 +1,14 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+;Not checking arguments 2 and 3 to CNDE, because they may change between
+;registers and literal.x depending on what the optimizer does.
+;CHECK: CNDE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @test(i32 addrspace(1)* %out, float addrspace(1)* %in) {
+entry:
+ %0 = load float addrspace(1)* %in
+ %cmp = fcmp oeq float %0, 0.000000e+00
+ %value = select i1 %cmp, i32 2, i32 3
+ store i32 %value, i32 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/fcmp-cnde-int-args.ll b/test/CodeGen/R600/fcmp-cnde-int-args.ll
new file mode 100644
index 0000000000..5c981efa9d
--- /dev/null
+++ b/test/CodeGen/R600/fcmp-cnde-int-args.ll
@@ -0,0 +1,16 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+; This test checks a bug in R600TargetLowering::LowerSELECT_CC where the
+; chance to optimize the fcmp + select instructions to CNDE was missed
+; due to the fact that the operands to fcmp and select had different types.
+
+;CHECK: CNDE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], literal.x, 0.0, -1}}
+
+define void @test(i32 addrspace(1)* %out, float addrspace(1)* %in) {
+entry:
+ %0 = load float addrspace(1)* %in
+ %cmp = fcmp oeq float %0, 0.000000e+00
+ %value = select i1 %cmp, i32 -1, i32 0
+ store i32 %value, i32 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/fcmp.ll b/test/CodeGen/R600/fcmp.ll
new file mode 100644
index 0000000000..1dcd07c0b3
--- /dev/null
+++ b/test/CodeGen/R600/fcmp.ll
@@ -0,0 +1,16 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+;CHECK: SETE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: MOV T{{[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
+;CHECK: FLT_TO_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @test(i32 addrspace(1)* %out, float addrspace(1)* %in) {
+entry:
+ %0 = load float addrspace(1)* %in
+ %arrayidx1 = getelementptr inbounds float addrspace(1)* %in, i32 1
+ %1 = load float addrspace(1)* %arrayidx1
+ %cmp = fcmp oeq float %0, %1
+ %sext = sext i1 %cmp to i32
+ store i32 %sext, i32 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/fdiv.v4f32.ll b/test/CodeGen/R600/fdiv.v4f32.ll
new file mode 100644
index 0000000000..b013fd647c
--- /dev/null
+++ b/test/CodeGen/R600/fdiv.v4f32.ll
@@ -0,0 +1,19 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+;CHECK: RECIP_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: MUL NON-IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: RECIP_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: MUL NON-IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: RECIP_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: MUL NON-IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: RECIP_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: MUL NON-IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @test(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
+ %b_ptr = getelementptr <4 x float> addrspace(1)* %in, i32 1
+ %a = load <4 x float> addrspace(1) * %in
+ %b = load <4 x float> addrspace(1) * %b_ptr
+ %result = fdiv <4 x float> %a, %b
+ store <4 x float> %result, <4 x float> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/floor.ll b/test/CodeGen/R600/floor.ll
new file mode 100644
index 0000000000..845330f284
--- /dev/null
+++ b/test/CodeGen/R600/floor.ll
@@ -0,0 +1,16 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+;CHECK: FLOOR T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @test() {
+ %r0 = call float @llvm.R600.load.input(i32 0)
+ %r1 = call float @floor(float %r0)
+ call void @llvm.AMDGPU.store.output(float %r1, i32 0)
+ ret void
+}
+
+declare float @llvm.R600.load.input(i32) readnone
+
+declare void @llvm.AMDGPU.store.output(float, i32)
+
+declare float @floor(float) readonly
diff --git a/test/CodeGen/R600/fmax.ll b/test/CodeGen/R600/fmax.ll
new file mode 100644
index 0000000000..3708f0b9ee
--- /dev/null
+++ b/test/CodeGen/R600/fmax.ll
@@ -0,0 +1,16 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+;CHECK: MAX T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @test() {
+ %r0 = call float @llvm.R600.load.input(i32 0)
+ %r1 = call float @llvm.R600.load.input(i32 1)
+ %r2 = fcmp uge float %r0, %r1
+ %r3 = select i1 %r2, float %r0, float %r1
+ call void @llvm.AMDGPU.store.output(float %r3, i32 0)
+ ret void
+}
+
+declare float @llvm.R600.load.input(i32) readnone
+
+declare void @llvm.AMDGPU.store.output(float, i32)
diff --git a/test/CodeGen/R600/fmin.ll b/test/CodeGen/R600/fmin.ll
new file mode 100644
index 0000000000..19d59ab306
--- /dev/null
+++ b/test/CodeGen/R600/fmin.ll
@@ -0,0 +1,16 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+;CHECK: MIN T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @test() {
+ %r0 = call float @llvm.R600.load.input(i32 0)
+ %r1 = call float @llvm.R600.load.input(i32 1)
+ %r2 = fcmp uge float %r0, %r1
+ %r3 = select i1 %r2, float %r1, float %r0
+ call void @llvm.AMDGPU.store.output(float %r3, i32 0)
+ ret void
+}
+
+declare float @llvm.R600.load.input(i32) readnone
+
+declare void @llvm.AMDGPU.store.output(float, i32)
diff --git a/test/CodeGen/R600/fmul.ll b/test/CodeGen/R600/fmul.ll
new file mode 100644
index 0000000000..eb1d523c0b
--- /dev/null
+++ b/test/CodeGen/R600/fmul.ll
@@ -0,0 +1,16 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+; CHECK: MUL_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @test() {
+ %r0 = call float @llvm.R600.load.input(i32 0)
+ %r1 = call float @llvm.R600.load.input(i32 1)
+ %r2 = fmul float %r0, %r1
+ call void @llvm.AMDGPU.store.output(float %r2, i32 0)
+ ret void
+}
+
+declare float @llvm.R600.load.input(i32) readnone
+
+declare void @llvm.AMDGPU.store.output(float, i32)
+
diff --git a/test/CodeGen/R600/fmul.v4f32.ll b/test/CodeGen/R600/fmul.v4f32.ll
new file mode 100644
index 0000000000..6d44a0c5c7
--- /dev/null
+++ b/test/CodeGen/R600/fmul.v4f32.ll
@@ -0,0 +1,15 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+;CHECK: MUL_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: MUL_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: MUL_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: MUL_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @test(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
+ %b_ptr = getelementptr <4 x float> addrspace(1)* %in, i32 1
+ %a = load <4 x float> addrspace(1) * %in
+ %b = load <4 x float> addrspace(1) * %b_ptr
+ %result = fmul <4 x float> %a, %b
+ store <4 x float> %result, <4 x float> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/fsub.ll b/test/CodeGen/R600/fsub.ll
new file mode 100644
index 0000000000..0ec1c376df
--- /dev/null
+++ b/test/CodeGen/R600/fsub.ll
@@ -0,0 +1,17 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+; CHECK: MOV T{{[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
+; CHECK: ADD T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @test() {
+ %r0 = call float @llvm.R600.load.input(i32 0)
+ %r1 = call float @llvm.R600.load.input(i32 1)
+ %r2 = fsub float %r0, %r1
+ call void @llvm.AMDGPU.store.output(float %r2, i32 0)
+ ret void
+}
+
+declare float @llvm.R600.load.input(i32) readnone
+
+declare void @llvm.AMDGPU.store.output(float, i32)
+
diff --git a/test/CodeGen/R600/fsub.v4f32.ll b/test/CodeGen/R600/fsub.v4f32.ll
new file mode 100644
index 0000000000..612a57e4b6
--- /dev/null
+++ b/test/CodeGen/R600/fsub.v4f32.ll
@@ -0,0 +1,15 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+;CHECK: ADD T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: ADD T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: ADD T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: ADD T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @test(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
+ %b_ptr = getelementptr <4 x float> addrspace(1)* %in, i32 1
+ %a = load <4 x float> addrspace(1) * %in
+ %b = load <4 x float> addrspace(1) * %b_ptr
+ %result = fsub <4 x float> %a, %b
+ store <4 x float> %result, <4 x float> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/i8_to_double_to_float.ll b/test/CodeGen/R600/i8_to_double_to_float.ll
new file mode 100644
index 0000000000..39f33227fa
--- /dev/null
+++ b/test/CodeGen/R600/i8_to_double_to_float.ll
@@ -0,0 +1,11 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+;CHECK: UINT_TO_FLT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @test(float addrspace(1)* %out, i8 addrspace(1)* %in) {
+ %1 = load i8 addrspace(1)* %in
+ %2 = uitofp i8 %1 to double
+ %3 = fptrunc double %2 to float
+ store float %3, float addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/icmp-select-sete-reverse-args.ll b/test/CodeGen/R600/icmp-select-sete-reverse-args.ll
new file mode 100644
index 0000000000..aad44d9edf
--- /dev/null
+++ b/test/CodeGen/R600/icmp-select-sete-reverse-args.ll
@@ -0,0 +1,18 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+;Test that a select with reversed True/False values is correctly lowered
+;to a SETNE_INT. There should only be one SETNE_INT instruction.
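; [Editor's note - illustrative sketch, not part of the committed test: a
; select whose arms are swapped relative to an equality compare, as in
;   %cmp = icmp eq i32 %x, %y
;   %val = select i1 %cmp, i32 0, i32 -1
; is equivalent to a select on the inverted predicate, so the R600 backend
; can lower the pair to a single SETNE_INT rather than a SETE_INT plus a
; separate negation. The %x and %y names here are placeholders.]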
+
+;CHECK: SETNE_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK-NOT: SETNE_INT
+
+define void @test(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+entry:
+ %0 = load i32 addrspace(1)* %in
+ %arrayidx1 = getelementptr inbounds i32 addrspace(1)* %in, i32 1
+ %1 = load i32 addrspace(1)* %arrayidx1
+ %cmp = icmp eq i32 %0, %1
+ %value = select i1 %cmp, i32 0, i32 -1
+ store i32 %value, i32 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/lit.local.cfg b/test/CodeGen/R600/lit.local.cfg
new file mode 100644
index 0000000000..36ee493e59
--- /dev/null
+++ b/test/CodeGen/R600/lit.local.cfg
@@ -0,0 +1,13 @@
+config.suffixes = ['.ll', '.c', '.cpp']
+
+def getRoot(config):
+    if not config.parent:
+        return config
+    return getRoot(config.parent)
+
+root = getRoot(config)
+
+targets = set(root.targets_to_build.split())
+if not 'R600' in targets:
+    config.unsupported = True
+
diff --git a/test/CodeGen/R600/literals.ll b/test/CodeGen/R600/literals.ll
new file mode 100644
index 0000000000..4c731b25ec
--- /dev/null
+++ b/test/CodeGen/R600/literals.ll
@@ -0,0 +1,30 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+; Test using an integer literal constant.
+; Generated ASM should be:
+; ADD_INT REG literal.x, 5
+; or
+; ADD_INT literal.x REG, 5
+
+; CHECK: ADD_INT {{[A-Z0-9,. ]*}}literal.x,{{[A-Z0-9,. ]*}} 5
+define void @i32_literal(i32 addrspace(1)* %out, i32 %in) {
+entry:
+ %0 = add i32 5, %in
+ store i32 %0, i32 addrspace(1)* %out
+ ret void
+}
+
+; Test using a float literal constant.
+; Generated ASM should be:
+; ADD REG literal.x, 5.0
+; or
+; ADD literal.x REG, 5.0
+
+; CHECK: ADD {{[A-Z0-9,. ]*}}literal.x,{{[A-Z0-9,. ]*}} {{[0-9]+}}(5.0
+define void @float_literal(float addrspace(1)* %out, float %in) {
+entry:
+ %0 = fadd float 5.0, %in
+ store float %0, float addrspace(1)* %out
+ ret void
+}
+
diff --git a/test/CodeGen/R600/llvm.AMDGPU.mul.ll b/test/CodeGen/R600/llvm.AMDGPU.mul.ll
new file mode 100644
index 0000000000..693eb27457
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.mul.ll
@@ -0,0 +1,17 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+;CHECK: MUL NON-IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @test() {
+ %r0 = call float @llvm.R600.load.input(i32 0)
+ %r1 = call float @llvm.R600.load.input(i32 1)
+ %r2 = call float @llvm.AMDGPU.mul( float %r0, float %r1)
+ call void @llvm.AMDGPU.store.output(float %r2, i32 0)
+ ret void
+}
+
+declare float @llvm.R600.load.input(i32) readnone
+
+declare void @llvm.AMDGPU.store.output(float, i32)
+
+declare float @llvm.AMDGPU.mul(float ,float ) readnone
diff --git a/test/CodeGen/R600/llvm.AMDGPU.trunc.ll b/test/CodeGen/R600/llvm.AMDGPU.trunc.ll
new file mode 100644
index 0000000000..fac957f7ee
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.trunc.ll
@@ -0,0 +1,16 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+;CHECK: TRUNC T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @test() {
+ %r0 = call float @llvm.R600.load.input(i32 0)
+ %r1 = call float @llvm.AMDGPU.trunc( float %r0)
+ call void @llvm.AMDGPU.store.output(float %r1, i32 0)
+ ret void
+}
+
+declare float @llvm.R600.load.input(i32) readnone
+
+declare void @llvm.AMDGPU.store.output(float, i32)
+
+declare float @llvm.AMDGPU.trunc(float ) readnone
diff --git a/test/CodeGen/R600/llvm.cos.ll b/test/CodeGen/R600/llvm.cos.ll
new file mode 100644
index 0000000000..dc120bfb00
--- /dev/null
+++ b/test/CodeGen/R600/llvm.cos.ll
@@ -0,0 +1,16 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+;CHECK: COS T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @test() {
+ %r0 = call float @llvm.R600.load.input(i32 0)
+ %r1 = call float @llvm.cos.f32(float %r0)
+ call void @llvm.AMDGPU.store.output(float %r1, i32 0)
+ ret void
+}
+
+declare float @llvm.cos.f32(float) readnone
+
+declare float @llvm.R600.load.input(i32) readnone
+
+declare void @llvm.AMDGPU.store.output(float, i32)
diff --git a/test/CodeGen/R600/llvm.pow.ll b/test/CodeGen/R600/llvm.pow.ll
new file mode 100644
index 0000000000..0ae9172579
--- /dev/null
+++ b/test/CodeGen/R600/llvm.pow.ll
@@ -0,0 +1,19 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+;CHECK: LOG_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK-NEXT: MUL_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK-NEXT: EXP_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @test() {
+ %r0 = call float @llvm.R600.load.input(i32 0)
+ %r1 = call float @llvm.R600.load.input(i32 1)
+ %r2 = call float @llvm.pow.f32( float %r0, float %r1)
+ call void @llvm.AMDGPU.store.output(float %r2, i32 0)
+ ret void
+}
+
+declare float @llvm.R600.load.input(i32) readnone
+
+declare void @llvm.AMDGPU.store.output(float, i32)
+
+declare float @llvm.pow.f32(float ,float ) readonly
diff --git a/test/CodeGen/R600/llvm.sin.ll b/test/CodeGen/R600/llvm.sin.ll
new file mode 100644
index 0000000000..5cd6998c93
--- /dev/null
+++ b/test/CodeGen/R600/llvm.sin.ll
@@ -0,0 +1,16 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+;CHECK: SIN T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @test() {
+ %r0 = call float @llvm.R600.load.input(i32 0)
+ %r1 = call float @llvm.sin.f32( float %r0)
+ call void @llvm.AMDGPU.store.output(float %r1, i32 0)
+ ret void
+}
+
+declare float @llvm.sin.f32(float) readnone
+
+declare float @llvm.R600.load.input(i32) readnone
+
+declare void @llvm.AMDGPU.store.output(float, i32)
diff --git a/test/CodeGen/R600/load.constant_addrspace.f32.ll b/test/CodeGen/R600/load.constant_addrspace.f32.ll
new file mode 100644
index 0000000000..93627283bb
--- /dev/null
+++ b/test/CodeGen/R600/load.constant_addrspace.f32.ll
@@ -0,0 +1,9 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+;CHECK: VTX_READ_32 T{{[0-9]+\.X, T[0-9]+\.X}}
+
+define void @test(float addrspace(1)* %out, float addrspace(2)* %in) {
+ %1 = load float addrspace(2)* %in
+ store float %1, float addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/load.i8.ll b/test/CodeGen/R600/load.i8.ll
new file mode 100644
index 0000000000..b070dcd520
--- /dev/null
+++ b/test/CodeGen/R600/load.i8.ll
@@ -0,0 +1,10 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+;CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
+
+define void @test(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
+ %1 = load i8 addrspace(1)* %in
+ %2 = zext i8 %1 to i32
+ store i32 %2, i32 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/reciprocal.ll b/test/CodeGen/R600/reciprocal.ll
new file mode 100644
index 0000000000..6838c1ae36
--- /dev/null
+++ b/test/CodeGen/R600/reciprocal.ll
@@ -0,0 +1,16 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+;CHECK: RECIP_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @test() {
+ %r0 = call float @llvm.R600.load.input(i32 0)
+ %r1 = fdiv float 1.0, %r0
+ call void @llvm.AMDGPU.store.output(float %r1, i32 0)
+ ret void
+}
+
+declare float @llvm.R600.load.input(i32) readnone
+
+declare void @llvm.AMDGPU.store.output(float, i32)
+
+declare float @llvm.AMDGPU.rcp(float ) readnone
diff --git a/test/CodeGen/R600/sdiv.ll b/test/CodeGen/R600/sdiv.ll
new file mode 100644
index 0000000000..3556facfba
--- /dev/null
+++ b/test/CodeGen/R600/sdiv.ll
@@ -0,0 +1,21 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+; The code generated by sdiv is long and complex and may frequently change.
+; The goal of this test is to make sure the ISel doesn't fail.
+;
+; This program was previously failing to compile when one of the selectcc
+; opcodes generated by the sdiv lowering was being legalized and optimized to:
+; selectcc Remainder -1, 0, -1, SETGT
+; This was fixed by adding an additional pattern in R600Instructions.td to
+; match this pattern with a CNDGE_INT.
+
+; CHECK: RETURN
+
+define void @test(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+ %den_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %num = load i32 addrspace(1) * %in
+ %den = load i32 addrspace(1) * %den_ptr
+ %result = sdiv i32 %num, %den
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/selectcc-icmp-select-float.ll b/test/CodeGen/R600/selectcc-icmp-select-float.ll
new file mode 100644
index 0000000000..f65a30086e
--- /dev/null
+++ b/test/CodeGen/R600/selectcc-icmp-select-float.ll
@@ -0,0 +1,15 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+; Note additional optimizations may cause this SGT to be replaced with a
+; CND* instruction.
+; CHECK: SGT_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], literal.x, -1}}
+; Test a selectcc with i32 LHS/RHS and float True/False
+
+define void @test(float addrspace(1)* %out, i32 addrspace(1)* %in) {
+entry:
+ %0 = load i32 addrspace(1)* %in
+ %1 = icmp sge i32 %0, 0
+ %2 = select i1 %1, float 1.0, float 0.0
+ store float %2, float addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/selectcc_cnde.ll b/test/CodeGen/R600/selectcc_cnde.ll
new file mode 100644
index 0000000000..f0a0f512ba
--- /dev/null
+++ b/test/CodeGen/R600/selectcc_cnde.ll
@@ -0,0 +1,11 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+;CHECK-NOT: SETE
+;CHECK: CNDE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], 1.0, literal.x, [-0-9]+\(2.0}}
+define void @test(float addrspace(1)* %out, float addrspace(1)* %in) {
+ %1 = load float addrspace(1)* %in
+ %2 = fcmp oeq float %1, 0.0
+ %3 = select i1 %2, float 1.0, float 2.0
+ store float %3, float addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/selectcc_cnde_int.ll b/test/CodeGen/R600/selectcc_cnde_int.ll
new file mode 100644
index 0000000000..b38078e26d
--- /dev/null
+++ b/test/CodeGen/R600/selectcc_cnde_int.ll
@@ -0,0 +1,11 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+;CHECK-NOT: SETE_INT
+;CHECK: CNDE_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], 1, literal.x, 2}}
+define void @test(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+ %1 = load i32 addrspace(1)* %in
+ %2 = icmp eq i32 %1, 0
+ %3 = select i1 %2, i32 1, i32 2
+ store i32 %3, i32 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/setcc.v4i32.ll b/test/CodeGen/R600/setcc.v4i32.ll
new file mode 100644
index 0000000000..0752f2e63d
--- /dev/null
+++ b/test/CodeGen/R600/setcc.v4i32.ll
@@ -0,0 +1,12 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+;CHECK: SETE_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @test(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+ %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
+ %a = load <4 x i32> addrspace(1) * %in
+ %b = load <4 x i32> addrspace(1) * %b_ptr
+ %result = icmp eq <4 x i32> %a, %b
+ %sext = sext <4 x i1> %result to <4 x i32>
+ store <4 x i32> %sext, <4 x i32> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/short-args.ll b/test/CodeGen/R600/short-args.ll
new file mode 100644
index 0000000000..107025045c
--- /dev/null
+++ b/test/CodeGen/R600/short-args.ll
@@ -0,0 +1,37 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+; CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
+
+define void @i8_arg(i32 addrspace(1)* nocapture %out, i8 %in) nounwind {
+entry:
+ %0 = zext i8 %in to i32
+ store i32 %0, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
+
+define void @i8_zext_arg(i32 addrspace(1)* nocapture %out, i8 zeroext %in) nounwind {
+entry:
+ %0 = zext i8 %in to i32
+ store i32 %0, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; CHECK: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
+
+define void @i16_arg(i32 addrspace(1)* nocapture %out, i16 %in) nounwind {
+entry:
+ %0 = zext i16 %in to i32
+ store i32 %0, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; CHECK: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
+
+define void @i16_zext_arg(i32 addrspace(1)* nocapture %out, i16 zeroext %in) nounwind {
+entry:
+ %0 = zext i16 %in to i32
+ store i32 %0, i32 addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/store.v4f32.ll b/test/CodeGen/R600/store.v4f32.ll
new file mode 100644
index 0000000000..8b0d244459
--- /dev/null
+++ b/test/CodeGen/R600/store.v4f32.ll
@@ -0,0 +1,9 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+;CHECK: RAT_WRITE_CACHELESS_128 T{{[0-9]+\.XYZW, T[0-9]+\.X}}, 1
+
+define void @test(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
+ %1 = load <4 x float> addrspace(1) * %in
+ store <4 x float> %1, <4 x float> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/store.v4i32.ll b/test/CodeGen/R600/store.v4i32.ll
new file mode 100644
index 0000000000..a659815dde
--- /dev/null
+++ b/test/CodeGen/R600/store.v4i32.ll
@@ -0,0 +1,9 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+;CHECK: RAT_WRITE_CACHELESS_128 T{{[0-9]+\.XYZW, T[0-9]+\.X}}, 1
+
+define void @test(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+ %1 = load <4 x i32> addrspace(1) * %in
+ store <4 x i32> %1, <4 x i32> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/udiv.v4i32.ll b/test/CodeGen/R600/udiv.v4i32.ll
new file mode 100644
index 0000000000..47657a6be7
--- /dev/null
+++ b/test/CodeGen/R600/udiv.v4i32.ll
@@ -0,0 +1,15 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+;The code generated by udiv is long and complex and may frequently change.
+;The goal of this test is to make sure the ISel doesn't fail when it gets
+;a v4i32 udiv
+;CHECK: RETURN
+
+define void @test(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+ %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
+ %a = load <4 x i32> addrspace(1) * %in
+ %b = load <4 x i32> addrspace(1) * %b_ptr
+ %result = udiv <4 x i32> %a, %b
+ store <4 x i32> %result, <4 x i32> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/urem.v4i32.ll b/test/CodeGen/R600/urem.v4i32.ll
new file mode 100644
index 0000000000..2e7388caa6
--- /dev/null
+++ b/test/CodeGen/R600/urem.v4i32.ll
@@ -0,0 +1,15 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+;The code generated by urem is long and complex and may frequently change.
+;The goal of this test is to make sure the ISel doesn't fail when it gets
+;a v4i32 urem
+;CHECK: RETURN
+
+define void @test(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+ %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
+ %a = load <4 x i32> addrspace(1) * %in
+ %b = load <4 x i32> addrspace(1) * %b_ptr
+ %result = urem <4 x i32> %a, %b
+ store <4 x i32> %result, <4 x i32> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/vec4-expand.ll b/test/CodeGen/R600/vec4-expand.ll
new file mode 100644
index 0000000000..c61f6e25b5
--- /dev/null
+++ b/test/CodeGen/R600/vec4-expand.ll
@@ -0,0 +1,49 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+; CHECK: FLT_TO_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: FLT_TO_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: FLT_TO_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: FLT_TO_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @fp_to_sint(<4 x i32> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
+ %value = load <4 x float> addrspace(1) * %in
+ %result = fptosi <4 x float> %value to <4 x i32>
+ store <4 x i32> %result, <4 x i32> addrspace(1)* %out
+ ret void
+}
+
+; CHECK: FLT_TO_UINT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: FLT_TO_UINT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: FLT_TO_UINT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: FLT_TO_UINT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @fp_to_uint(<4 x i32> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
+ %value = load <4 x float> addrspace(1) * %in
+ %result = fptoui <4 x float> %value to <4 x i32>
+ store <4 x i32> %result, <4 x i32> addrspace(1)* %out
+ ret void
+}
+
+; CHECK: INT_TO_FLT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: INT_TO_FLT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: INT_TO_FLT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: INT_TO_FLT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @sint_to_fp(<4 x float> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+ %value = load <4 x i32> addrspace(1) * %in
+ %result = sitofp <4 x i32> %value to <4 x float>
+ store <4 x float> %result, <4 x float> addrspace(1)* %out
+ ret void
+}
+
+; CHECK: UINT_TO_FLT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: UINT_TO_FLT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: UINT_TO_FLT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: UINT_TO_FLT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @uint_to_fp(<4 x float> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+ %value = load <4 x i32> addrspace(1) * %in
+ %result = uitofp <4 x i32> %value to <4 x float>
+ store <4 x float> %result, <4 x float> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/SI/sanity.ll b/test/CodeGen/SI/sanity.ll
new file mode 100644
index 0000000000..62cdcf5eca
--- /dev/null
+++ b/test/CodeGen/SI/sanity.ll
@@ -0,0 +1,37 @@
+;RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s
+
+; CHECK: S_ENDPGM
+
+define void @main() {
+main_body:
+ call void @llvm.AMDGPU.shader.type(i32 1)
+ %0 = load <4 x i32> addrspace(2)* addrspace(8)* inttoptr (i32 6 to <4 x i32> addrspace(2)* addrspace(8)*)
+ %1 = getelementptr <4 x i32> addrspace(2)* %0, i32 0
+ %2 = load <4 x i32> addrspace(2)* %1
+ %3 = call i32 @llvm.SI.vs.load.buffer.index()
+ %4 = call <4 x float> @llvm.SI.vs.load.input(<4 x i32> %2, i32 0, i32 %3)
+ %5 = extractelement <4 x float> %4, i32 0
+ %6 = extractelement <4 x float> %4, i32 1
+ %7 = extractelement <4 x float> %4, i32 2
+ %8 = extractelement <4 x float> %4, i32 3
+ %9 = load <4 x i32> addrspace(2)* addrspace(8)* inttoptr (i32 6 to <4 x i32> addrspace(2)* addrspace(8)*)
+ %10 = getelementptr <4 x i32> addrspace(2)* %9, i32 1
+ %11 = load <4 x i32> addrspace(2)* %10
+ %12 = call i32 @llvm.SI.vs.load.buffer.index()
+ %13 = call <4 x float> @llvm.SI.vs.load.input(<4 x i32> %11, i32 0, i32 %12)
+ %14 = extractelement <4 x float> %13, i32 0
+ %15 = extractelement <4 x float> %13, i32 1
+ %16 = extractelement <4 x float> %13, i32 2
+ %17 = extractelement <4 x float> %13, i32 3
+ call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 32, i32 0, float %14, float %15, float %16, float %17)
+ call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 12, i32 0, float %5, float %6, float %7, float %8)
+ ret void
+}
+
+declare void @llvm.AMDGPU.shader.type(i32)
+
+declare i32 @llvm.SI.vs.load.buffer.index() readnone
+
+declare <4 x float> @llvm.SI.vs.load.input(<4 x i32>, i32, i32)
+
+declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
diff --git a/test/CodeGen/Thumb2/thumb2-shifter.ll b/test/CodeGen/Thumb2/thumb2-shifter.ll
index 98854a1205..05dd90cfbf 100644
--- a/test/CodeGen/Thumb2/thumb2-shifter.ll
+++ b/test/CodeGen/Thumb2/thumb2-shifter.ll
@@ -1,24 +1,27 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2,+t2xtpk | FileCheck %s
+; RUN: llc < %s -march=thumb -mcpu=cortex-a8 | FileCheck %s --check-prefix=A8
+; RUN: llc < %s -march=thumb -mcpu=swift | FileCheck %s --check-prefix=SWIFT
+
+; rdar://12892707
define i32 @t2ADDrs_lsl(i32 %X, i32 %Y) {
-; CHECK: t2ADDrs_lsl
-; CHECK: add.w r0, r0, r1, lsl #16
+; A8: t2ADDrs_lsl
+; A8: add.w r0, r0, r1, lsl #16
 %A = shl i32 %Y, 16
 %B = add i32 %X, %A
 ret i32 %B
}
define i32 @t2ADDrs_lsr(i32 %X, i32 %Y) {
-; CHECK: t2ADDrs_lsr
-; CHECK: add.w r0, r0, r1, lsr #16
+; A8: t2ADDrs_lsr
+; A8: add.w r0, r0, r1, lsr #16
 %A = lshr i32 %Y, 16
 %B = add i32 %X, %A
 ret i32 %B
}
define i32 @t2ADDrs_asr(i32 %X, i32 %Y) {
-; CHECK: t2ADDrs_asr
-; CHECK: add.w r0, r0, r1, asr #16
+; A8: t2ADDrs_asr
+; A8: add.w r0, r0, r1, asr #16
 %A = ashr i32 %Y, 16
 %B = add i32 %X, %A
 ret i32 %B
@@ -26,8 +29,8 @@ define i32 @t2ADDrs_asr(i32 %X, i32 %Y) {
; i32 ror(n) = (x >> n) | (x << (32 - n))
define i32 @t2ADDrs_ror(i32 %X, i32 %Y) {
-; CHECK: t2ADDrs_ror
-; CHECK: add.w r0, r0, r1, ror #16
+; A8: t2ADDrs_ror
+; A8: add.w r0, r0, r1, ror #16
 %A = lshr i32 %Y, 16
 %B = shl i32 %Y, 16
 %C = or i32 %B, %A
@@ -36,13 +39,66 @@ define i32 @t2ADDrs_ror(i32 %X, i32 %Y) {
define i32 @t2ADDrs_noRegShift(i32 %X, i32 %Y, i8 %sh) {
-; CHECK: t2ADDrs_noRegShift
-; CHECK: uxtb r2, r2
-; CHECK: lsls r1, r2
-; CHECK: add r0, r1
+; A8: t2ADDrs_noRegShift
+; A8: uxtb r2, r2
+; A8: lsls r1, r2
+; A8: add r0, r1
+
+; SWIFT: t2ADDrs_noRegShift
+; SWIFT-NOT: lsls
+; SWIFT: lsl.w
+ %shift.upgrd.1 = zext i8 %sh to i32
+ %A = shl i32 %Y, %shift.upgrd.1
+ %B = add i32 %X, %A
+ ret i32 %B
+}
+
+define i32 @t2ADDrs_noRegShift2(i32 %X, i32 %Y, i8 %sh) {
+; A8: t2ADDrs_noRegShift2
+; A8: uxtb r2, r2
+; A8: lsrs r1, r2
+; A8: add r0, r1
+
+; SWIFT: t2ADDrs_noRegShift2
+; SWIFT-NOT: lsrs
+; SWIFT: lsr.w
+ %shift.upgrd.1 = zext i8 %sh to i32
+ %A = lshr i32 %Y, %shift.upgrd.1
+ %B = add i32 %X, %A
+ ret i32 %B
+}
+
+define i32 @t2ADDrs_noRegShift3(i32 %X, i32 %Y, i8 %sh) {
+; A8: t2ADDrs_noRegShift3
+; A8: uxtb r2, r2
+; A8: asrs r1, r2
+; A8: add r0, r1
+
+; SWIFT: t2ADDrs_noRegShift3
+; SWIFT-NOT: asrs
+; SWIFT: asr.w
+ %shift.upgrd.1 = zext i8 %sh to i32
+ %A = ashr i32 %Y, %shift.upgrd.1
+ %B = add i32 %X, %A
+ ret i32 %B
+}
+
+define i32 @t2ADDrs_optsize(i32 %X, i32 %Y, i8 %sh) optsize {
+; SWIFT: t2ADDrs_optsize
+; SWIFT-NOT: lsl.w
+; SWIFT: lsls
 %shift.upgrd.1 = zext i8 %sh to i32
 %A = shl i32 %Y, %shift.upgrd.1
 %B = add i32 %X, %A
 ret i32 %B
}
+define i32 @t2ADDrs_minsize(i32 %X, i32 %Y, i8 %sh) minsize {
+; SWIFT: t2ADDrs_minsize
+; SWIFT-NOT: lsr.w
+; SWIFT: lsrs
+ %shift.upgrd.1 = zext i8 %sh to i32
+ %A = lshr i32 %Y, %shift.upgrd.1
+ %B = add i32 %X, %A
+ ret i32 %B
+}
diff --git a/test/CodeGen/X86/2008-05-22-FoldUnalignedLoad.ll b/test/CodeGen/X86/2008-05-22-FoldUnalignedLoad.ll
index 19a73543c6..fc38135032 100644
--- a/test/CodeGen/X86/2008-05-22-FoldUnalignedLoad.ll
+++ b/test/CodeGen/X86/2008-05-22-FoldUnalignedLoad.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movups | count 2
+; RUN: llc < %s -march=x86 -mcpu=penryn | FileCheck %s
define void @a(<4 x float>* %x) nounwind {
entry:
@@ -8,4 +8,10 @@ entry:
 ret void
}
+; CHECK: a:
+; CHECK: movups
+; CHECK: movups
+; CHECK-NOT: movups
+; CHECK: ret
+
declare <4 x float> @llvm.x86.sse.rcp.ps(<4 x float>)
diff --git a/test/CodeGen/X86/2010-08-10-DbgConstant.ll b/test/CodeGen/X86/2010-08-10-DbgConstant.ll
deleted file mode 100644
index b3cc35d723..0000000000
--- a/test/CodeGen/X86/2010-08-10-DbgConstant.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc -mtriple=i686-linux -O0 < %s | FileCheck %s
-; CHECK: DW_TAG_constant
-; CHECK-NEXT: .long .Lstring3 #{{#?}} DW_AT_name
-
-define void @foo() nounwind ssp {
-entry:
- call void @bar(i32 201), !dbg !8
- ret void, !dbg !8
-}
-
-declare void @bar(i32)
-
-!llvm.dbg.sp = !{!0}
-!llvm.dbg.gv = !{!5}
-
-!0 = metadata !{i32 524334, i32 0, metadata !1, metadata !"foo", metadata !"foo", metadata !"foo", metadata !1, i32 3, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i1 false, i1 false, void ()* @foo} ; [ DW_TAG_subprogram ]
-!1 = metadata !{i32 524329, metadata !"/tmp/l.c", metadata !"/Volumes/Lalgate/clean/D", metadata !2} ; [ DW_TAG_file_type ]
-!2 = metadata !{i32 524305, i32 0, i32 12, metadata !"/tmp/l.c", metadata !"/Volumes/Lalgate/clean/D", metadata !"clang 2.8", i1 true, i1 false, metadata !"", i32 0} ; [ DW_TAG_compile_unit ]
-!3 = metadata !{i32 524309, metadata !1, metadata !"", metadata !1, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !4, i32 0, null} ; [ DW_TAG_subroutine_type ]
-!4 = metadata !{null}
-!5 = metadata !{i32 524327, i32 0, metadata !1, metadata !"ro", metadata !"ro", metadata !"ro", metadata !1, i32 1, metadata !6, i1 true, i1 true, i32 201} ; [ DW_TAG_constant ]
-!6 = metadata !{i32 524326, metadata !1, metadata !"", metadata !1, i32 0, i64 0, i64 0, i64 0, i32 0, metadata !7} ; [ DW_TAG_const_type ]
-!7 = metadata !{i32 524324, metadata !1, metadata !"unsigned int", metadata !1, i32 0, i64 32, i64 32, i64 0, i32 0, i32 7} ; [ DW_TAG_base_type ]
-!8 = metadata !{i32 3, i32 14, metadata !9, null}
-!9 = metadata !{i32 524299, metadata !0, i32 3, i32 12, metadata !1, i32 0} ; [ DW_TAG_lexical_block ]
diff --git a/test/CodeGen/X86/2011-10-19-LegelizeLoad.ll b/test/CodeGen/X86/2011-10-19-LegelizeLoad.ll
index a7207537de..da734d4b64 100644
--- a/test/CodeGen/X86/2011-10-19-LegelizeLoad.ll
+++ b/test/CodeGen/X86/2011-10-19-LegelizeLoad.ll
@@ -16,8 +16,8 @@ target triple = "x86_64-unknown-linux-gnu"
; CHECK: main
define i32 @main() nounwind uwtable {
entry:
-; CHECK: movsbq j(%rip), %
-; CHECK: movsbq i(%rip), %
+; CHECK: pmovsxbq j(%rip), %
+; CHECK: pmovsxbq i(%rip), %
 %0 = load <2 x i8>* @i, align 8
 %1 = load <2 x i8>* @j, align 8
 %div = sdiv <2 x i8> %1, %0
diff --git a/test/CodeGen/X86/2012-12-12-DAGCombineCrash.ll b/test/CodeGen/X86/2012-12-12-DAGCombineCrash.ll
new file mode 100644
index 0000000000..8cef2c8201
--- /dev/null
+++ b/test/CodeGen/X86/2012-12-12-DAGCombineCrash.ll
@@ -0,0 +1,46 @@
+; RUN: llc -march=x86 -mtriple=i686-apple-ios -mcpu=yonah < %s
+; rdar://12868039
+
+define void @t() nounwind ssp {
+ %1 = alloca i32
+ %2 = ptrtoint i32* %1 to i32
+ br label %3
+
+; <label>:3 ; preds = %5, %3, %0
+ switch i32 undef, label %3 [
+ i32 611946160, label %5
+ i32 954117870, label %4
+ ]
+
+; <label>:4 ; preds = %3
+ ret void
+
+; <label>:5 ; preds = %5, %3
+ %6 = add i32 0, 148
+ %7 = and i32 %6, 48
+ %8 = add i32 %7, 0
+ %9 = or i32 %2, %8
+ %10 = xor i32 -1, %2
+ %11 = or i32 %8, %10
+ %12 = or i32 %9, %11
+ %13 = xor i32 %9, %11
+ %14 = sub i32 %12, %13
+ %15 = xor i32 2044674005, %14
+ %16 = xor i32 %15, 0
+ %17 = shl nuw nsw i32 %16, 1
+ %18 = sub i32 0, %17
+ %19 = and i32 %18, 2051242402
+ %20 = sub i32 0, %19
+ %21 = xor i32 %20, 0
+ %22 = xor i32 %21, 0
+ %23 = add i32 0, %22
+ %24 = shl i32 %23, 1
+ %25 = or i32 1, %24
+ %26 = add i32 0, %25
+ %27 = trunc i32 %26 to i8
+ %28 = xor i8 %27, 125
+ %29 = add i8 %28, -16
+ %30 = add i8 0, %29
+ store i8 %30, i8* null
+ br i1 undef, label %5, label %3
+}
diff --git a/test/CodeGen/X86/2012-12-14-v8fp80-crash.ll b/test/CodeGen/X86/2012-12-14-v8fp80-crash.ll
new file mode 100644
index 0000000000..c465527bd8
--- /dev/null
+++ b/test/CodeGen/X86/2012-12-14-v8fp80-crash.ll
@@ -0,0 +1,22 @@
+; RUN: llc < %s -march=x86 -mcpu=corei7 -mtriple=i686-pc-win32
+
+; Make sure we don't crash on this testcase.
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+define void @_ZN6VectorIfE3equIeEEvfRKS_IT_E() nounwind uwtable ssp align 2 {
+entry:
+ br i1 undef, label %while.end, label %while.body.lr.ph
+
+while.body.lr.ph: ; preds = %entry
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %while.body.lr.ph
+ %0 = fptrunc <8 x x86_fp80> undef to <8 x float>
+ store <8 x float> %0, <8 x float>* undef, align 4
+ br label %vector.body
+
+while.end: ; preds = %entry
+ ret void
+}
diff --git a/test/CodeGen/X86/2012-12-19-NoImplicitFloat.ll b/test/CodeGen/X86/2012-12-19-NoImplicitFloat.ll
new file mode 100644
index 0000000000..3025665206
--- /dev/null
+++ b/test/CodeGen/X86/2012-12-19-NoImplicitFloat.ll
@@ -0,0 +1,17 @@
+; RUN: llc -mtriple=x86_64-apple-macosx10.8.0 -mcpu=core2 < %s | FileCheck %s
+; Test that we do not introduce vector operations with noimplicitfloat.
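; [Editor's note - illustrative sketch, not part of the committed test: with
; noimplicitfloat the backend must not invent vector-register traffic, so two
; adjacent 8-byte pointer stores such as
;   store i32* null, i32** %p, align 8
;   store i32* null, i32** %q, align 8
; may not be merged into a single 16-byte xmm store; the CHECK-NOT: xmm below
; guards against exactly that. %p and %q are placeholder names.]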
+; rdar://12879313
+
+%struct1 = type { i32*, i32* }
+
+define void @test() nounwind noimplicitfloat {
+entry:
+; CHECK-NOT: xmm
+; CHECK: ret
+ %0 = load %struct1** undef, align 8
+ %1 = getelementptr inbounds %struct1* %0, i64 0, i32 0
+ store i32* null, i32** %1, align 8
+ %2 = getelementptr inbounds %struct1* %0, i64 0, i32 1
+ store i32* null, i32** %2, align 8
+ ret void
+}
diff --git a/test/CodeGen/X86/2013-01-09-DAGCombineBug.ll b/test/CodeGen/X86/2013-01-09-DAGCombineBug.ll
new file mode 100644
index 0000000000..db7ec8ae26
--- /dev/null
+++ b/test/CodeGen/X86/2013-01-09-DAGCombineBug.ll
@@ -0,0 +1,41 @@
+; RUN: llc -mtriple=x86_64-apple-macosx10.5.0 < %s
+
+; rdar://12968664
+
+define void @t() nounwind uwtable ssp {
+ br label %4
+
+; <label>:1 ; preds = %4, %2
+ ret void
+
+; <label>:2 ; preds = %6, %5, %3, %2
+ switch i32 undef, label %2 [
+ i32 1090573978, label %1
+ i32 1090573938, label %3
+ i32 1090573957, label %5
+ ]
+
+; <label>:3 ; preds = %4, %2
+ br i1 undef, label %2, label %4
+
+; <label>:4 ; preds = %6, %5, %3, %0
+ switch i32 undef, label %11 [
+ i32 1090573938, label %3
+ i32 1090573957, label %5
+ i32 1090573978, label %1
+ i32 165205179, label %6
+ ]
+
+; <label>:5 ; preds = %4, %2
+ br i1 undef, label %2, label %4
+
+; <label>:6 ; preds = %4
+ %7 = icmp eq i32 undef, 590901838
+ %8 = or i1 false, %7
+ %9 = or i1 true, %8
+ %10 = xor i1 %8, %9
+ br i1 %10, label %4, label %2
+
+; <label>:11 ; preds = %11, %4
+ br label %11
+}
diff --git a/test/CodeGen/X86/WidenArith.ll b/test/CodeGen/X86/WidenArith.ll
new file mode 100644
index 0000000000..0383bd665b
--- /dev/null
+++ b/test/CodeGen/X86/WidenArith.ll
@@ -0,0 +1,23 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx | FileCheck %s
+
+;CHECK: test
+;CHECK: vaddps
+;CHECK: vmulps
+;CHECK: vsubps
+;CHECK: vcmpltps
+;CHECK: vcmpltps
+;CHECK: vandps
+;CHECK: vandps
+;CHECK: ret
+define <8 x i32> @test(<8 x float> %a, <8 x float> %b) {
+ %c1 = fadd <8 x float> %a, %b
+ %b1 = fmul <8 x float> %b, %a
+ %d = fsub <8 x float> %b1, %c1
+ %res1 = fcmp olt <8 x float> %a, %b1
+ %res2 = fcmp olt <8 x float> %c1, %d
+ %andr = and <8 x i1>%res1, %res2
+ %ex = zext <8 x i1> %andr to <8 x i32>
+ ret <8 x i32>%ex
+}
+
+
diff --git a/test/CodeGen/X86/atom-bypass-slow-division.ll b/test/CodeGen/X86/atom-bypass-slow-division.ll
index e7c9605d3e..453e72672b 100644
--- a/test/CodeGen/X86/atom-bypass-slow-division.ll
+++ b/test/CodeGen/X86/atom-bypass-slow-division.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -mcpu=atom -mtriple=i686-linux | FileCheck %s
-define i32 @test_get_quotient(i32 %a, i32 %b) nounwind {
-; CHECK: test_get_quotient
+define i32 @Test_get_quotient(i32 %a, i32 %b) nounwind {
+; CHECK: Test_get_quotient:
; CHECK: orl %ecx, %edx
; CHECK-NEXT: testl $-256, %edx
; CHECK-NEXT: je
@@ -13,8 +13,8 @@ define i32 @test_get_quotient(i32 %a, i32 %b) nounwind {
 ret i32 %result
}
-define i32 @test_get_remainder(i32 %a, i32 %b) nounwind {
-; CHECK: test_get_remainder
+define i32 @Test_get_remainder(i32 %a, i32 %b) nounwind {
+; CHECK: Test_get_remainder:
; CHECK: orl %ecx, %edx
; CHECK-NEXT: testl $-256, %edx
; CHECK-NEXT: je
@@ -26,8 +26,8 @@ define i32 @test_get_remainder(i32 %a, i32 %b) nounwind {
 ret i32 %result
}
-define i32 @test_get_quotient_and_remainder(i32 %a, i32 %b) nounwind {
-; CHECK: test_get_quotient_and_remainder
+define i32 @Test_get_quotient_and_remainder(i32 %a, i32 %b) nounwind {
+; CHECK: Test_get_quotient_and_remainder:
; CHECK: orl %ecx, %edx
; CHECK-NEXT: testl $-256, %edx
; CHECK-NEXT: je
@@ -35,7 +35,7 @@ define i32 @test_get_quotient_and_remainder(i32 %a, i32 %b) nounwind {
; CHECK: divb
; CHECK: addl
; CHECK: ret
-; CEECK-NOT: idivl
+; CHECK-NOT: idivl
; CHECK-NOT: divb
 %resultdiv = sdiv i32 %a, %b
 %resultrem = srem i32 %a, %b
@@ -43,8 +43,8 @@ define i32 @test_get_quotient_and_remainder(i32 %a, i32 %b) nounwind {
 ret i32 %result
}
-define i32 @test_use_div_and_idiv(i32 %a, i32 %b) nounwind {
-; CHECK: test_use_div_and_idiv
+define i32 @Test_use_div_and_idiv(i32 %a, i32 %b) nounwind {
+; CHECK: Test_use_div_and_idiv:
; CHECK: idivl
; CHECK: divb
; CHECK: divl
@@ -57,34 +57,34 @@ define i32 @test_use_div_and_idiv(i32 %a, i32 %b) nounwind {
 ret i32 %result
}
-define i32 @test_use_div_imm_imm() nounwind {
-; CHECK: test_use_div_imm_imm
+define i32 @Test_use_div_imm_imm() nounwind {
+; CHECK: Test_use_div_imm_imm:
; CHECK: movl $64
 %resultdiv = sdiv i32 256, 4
 ret i32 %resultdiv
}
-define i32 @test_use_div_reg_imm(i32 %a) nounwind {
-; CHECK: test_use_div_reg_imm
-; CEHCK-NOT: test
+define i32 @Test_use_div_reg_imm(i32 %a) nounwind {
+; CHECK: Test_use_div_reg_imm:
+; CHECK-NOT: test
; CHECK-NOT: idiv
; CHECK-NOT: divb
 %resultdiv = sdiv i32 %a, 33
 ret i32 %resultdiv
}
-define i32 @test_use_rem_reg_imm(i32 %a) nounwind {
-; CHECK: test_use_rem_reg_imm
-; CEHCK-NOT: test
+define i32 @Test_use_rem_reg_imm(i32 %a) nounwind {
+; CHECK: Test_use_rem_reg_imm:
+; CHECK-NOT: test
; CHECK-NOT: idiv
; CHECK-NOT: divb
 %resultrem = srem i32 %a, 33
 ret i32 %resultrem
}
-define i32 @test_use_divrem_reg_imm(i32 %a) nounwind {
-; CHECK: test_use_divrem_reg_imm
-; CEHCK-NOT: test
+define i32 @Test_use_divrem_reg_imm(i32 %a) nounwind {
+; CHECK: Test_use_divrem_reg_imm:
+; CHECK-NOT: test
; CHECK-NOT: idiv
; CHECK-NOT: divb
 %resultdiv = sdiv i32 %a, 33
@@ -93,8 +93,8 @@ define i32 @test_use_divrem_reg_imm(i32 %a) nounwind {
 ret i32 %result
}
-define i32 @test_use_div_imm_reg(i32 %a) nounwind {
-; CHECK: test_use_div_imm_reg
+define i32 @Test_use_div_imm_reg(i32 %a) nounwind {
+; CHECK: Test_use_div_imm_reg:
; CHECK: test
; CHECK: idiv
; CHECK: divb
@@ -102,8 +102,8 @@ define i32 @test_use_div_imm_reg(i32 %a) nounwind {
 ret i32 %resultdiv
}
-define i32 @test_use_rem_imm_reg(i32 %a) nounwind {
-; CHECK: test_use_rem_imm_reg
+define i32 @Test_use_rem_imm_reg(i32 %a) nounwind {
+; CHECK: Test_use_rem_imm_reg:
; CHECK: test
; CHECK: idiv
; CHECK: divb
diff --git a/test/CodeGen/X86/atom-pad-short-functions.ll b/test/CodeGen/X86/atom-pad-short-functions.ll
new file mode 100644
index 0000000000..b9a39e08cb
--- /dev/null
+++ b/test/CodeGen/X86/atom-pad-short-functions.ll
@@ -0,0 +1,103 @@
+; RUN: llc < %s -O1 -mcpu=atom -mtriple=i686-linux | FileCheck %s
+
+declare void @external_function(...)
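; [Editor's note - illustrative sketch, not part of the committed test: on
; Atom the pad-short-functions pass inserts nops so that a ret does not
; issue too soon after function entry. A trivial function such as
;   define i32 @id(i32 %a) nounwind { ret i32 %a }
; is therefore expected to carry nop padding before its ret at -O1, while
; optsize/minsize suppress the padding, as the tests below verify. @id is a
; placeholder name.]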
+
+define i32 @test_return_val(i32 %a) nounwind {
+; CHECK: test_return_val
+; CHECK: movl
+; CHECK: nop
+; CHECK: nop
+; CHECK: nop
+; CHECK: nop
+; CHECK: nop
+; CHECK: nop
+; CHECK: ret
+ ret i32 %a
+}
+
+define i32 @test_optsize(i32 %a) nounwind optsize {
+; CHECK: test_optsize
+; CHECK: movl
+; CHECK-NEXT: ret
+ ret i32 %a
+}
+
+define i32 @test_minsize(i32 %a) nounwind minsize {
+; CHECK: test_minsize
+; CHECK: movl
+; CHECK-NEXT: ret
+ ret i32 %a
+}
+
+define i32 @test_add(i32 %a, i32 %b) nounwind {
+; CHECK: test_add
+; CHECK: addl
+; CHECK: nop
+; CHECK: nop
+; CHECK: nop
+; CHECK: nop
+; CHECK: ret
+ %result = add i32 %a, %b
+ ret i32 %result
+}
+
+define i32 @test_multiple_ret(i32 %a, i32 %b, i1 %c) nounwind {
+; CHECK: @test_multiple_ret
+; CHECK: je
+
+; CHECK: nop
+; CHECK: nop
+; CHECK: ret
+
+; CHECK: nop
+; CHECK: nop
+; CHECK: ret
+
+ br i1 %c, label %bb1, label %bb2
+
+bb1:
+ ret i32 %a
+
+bb2:
+ ret i32 %b
+}
+
+define void @test_call_others(i32 %x) nounwind
+{
+; CHECK: test_call_others
+; CHECK: je
+ %tobool = icmp eq i32 %x, 0
+ br i1 %tobool, label %if.end, label %true.case
+
+; CHECK: jmp external_function
+true.case:
+ tail call void bitcast (void (...)* @external_function to void ()*)() nounwind
+ br label %if.end
+
+; CHECK: nop
+; CHECK: nop
+; CHECK: nop
+; CHECK: nop
+; CHECK: ret
+if.end:
+ ret void
+
+}
+
+define void @test_branch_to_same_bb(i32 %x, i32 %y) nounwind {
+; CHECK: @test_branch_to_same_bb
+ %cmp = icmp sgt i32 %x, 0
+ br i1 %cmp, label %while.cond, label %while.end
+
+while.cond:
+ br label %while.cond
+
+; CHECK: nop
+; CHECK: nop
+; CHECK: nop
+; CHECK: nop
+; CHECK: ret
+while.end:
+ ret void
+}
+
diff --git a/test/CodeGen/X86/avx-cvt.ll b/test/CodeGen/X86/avx-cvt.ll
index d0a7fe0100..62bdea2b49 100644
--- a/test/CodeGen/X86/avx-cvt.ll
+++ b/test/CodeGen/X86/avx-cvt.ll
@@ -46,7 +46,7 @@ entry:
 ret double %conv
}
-; CHECK: vcvtsi2sd (%
+; CHECK: vcvtsi2sdl (%
define double @funcB(i32* nocapture %e) nounwind uwtable readonly ssp {
entry:
 %tmp1 = load i32* %e, align 4
@@ -54,7 +54,7 @@ entry:
 ret double %conv
}
-; CHECK: vcvtsi2ss (%
+; CHECK: vcvtsi2ssl (%
define float @funcC(i32* nocapture %e) nounwind uwtable readonly ssp {
entry:
 %tmp1 = load i32* %e, align 4
diff --git a/test/CodeGen/X86/avx-sext.ll b/test/CodeGen/X86/avx-sext.ll
index 3713a8c377..8d7d79db7d 100755
--- a/test/CodeGen/X86/avx-sext.ll
+++ b/test/CodeGen/X86/avx-sext.ll
@@ -1,17 +1,144 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx | FileCheck %s -check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core2 | FileCheck %s -check-prefix=SSSE3
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=pentium4 | FileCheck %s -check-prefix=SSE2
define <8 x i32> @sext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp {
-;CHECK: sext_8i16_to_8i32
-;CHECK: vpmovsxwd
+; AVX: sext_8i16_to_8i32
+; AVX: vpmovsxwd
 %B = sext <8 x i16> %A to <8 x i32>
 ret <8 x i32>%B
}
define <4 x i64> @sext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp {
-;CHECK: sext_4i32_to_4i64
-;CHECK: vpmovsxdq
+; AVX: sext_4i32_to_4i64
+; AVX: vpmovsxdq
 %B = sext <4 x i32> %A to <4 x i64>
 ret <4 x i64>%B
}
+
+; AVX: load_sext_test1
+; AVX: vpmovsxwd (%r{{[^,]*}}), %xmm{{.*}}
+; AVX: ret
+
+; SSSE3: load_sext_test1
+; SSSE3: movq
+; SSSE3: punpcklwd %xmm{{.*}}, %xmm{{.*}}
+; SSSE3: psrad $16
+; SSSE3: ret
+
+; SSE2: load_sext_test1
+; SSE2: movq
+; SSE2: punpcklwd %xmm{{.*}}, %xmm{{.*}}
+; SSE2: psrad $16
+; SSE2: ret
+define <4 x i32> @load_sext_test1(<4 x i16> *%ptr) {
+ %X = load <4 x i16>* %ptr
+ %Y = sext <4 x i16> %X to <4 x i32>
+ ret <4 x i32>%Y
+}
+
+; AVX: load_sext_test2
+; AVX: vpmovsxbd (%r{{[^,]*}}), %xmm{{.*}}
+; AVX: ret
+
+; SSSE3: load_sext_test2
+; SSSE3: movd
+; SSSE3: pshufb
+; SSSE3: psrad $24
+; SSSE3: ret
+
+; SSE2: load_sext_test2
+; SSE2: movl
+; SSE2: psrad $24
+; SSE2: ret
+define <4 x i32> @load_sext_test2(<4 x i8> *%ptr) {
+ %X = load <4 x i8>* %ptr
+ %Y = sext <4 x i8> %X to <4 x i32>
+ ret <4 x i32>%Y
+}
+
+; AVX: load_sext_test3
+; AVX: vpmovsxbq (%r{{[^,]*}}), %xmm{{.*}}
+; AVX: ret
+
+; SSSE3: load_sext_test3
+; SSSE3: movsbq
+; SSSE3: movsbq
+; SSSE3: punpcklqdq
+; SSSE3: ret
+
+; SSE2: load_sext_test3
+; SSE2: movsbq
+; SSE2: movsbq
+; SSE2: punpcklqdq
+; SSE2: ret
+define <2 x i64> @load_sext_test3(<2 x i8> *%ptr) {
+ %X = load <2 x i8>* %ptr
+ %Y = sext <2 x i8> %X to <2 x i64>
+ ret <2 x i64>%Y
+}
+
+; AVX: load_sext_test4
+; AVX: vpmovsxwq (%r{{[^,]*}}), %xmm{{.*}}
+; AVX: ret
+
+; SSSE3: load_sext_test4
+; SSSE3: movswq
+; SSSE3: movswq
+; SSSE3: punpcklqdq
+; SSSE3: ret
+
+; SSE2: load_sext_test4
+; SSE2: movswq
+; SSE2: movswq
+; SSE2: punpcklqdq
+; SSE2: ret
+define <2 x i64> @load_sext_test4(<2 x i16> *%ptr) {
+ %X = load <2 x i16>* %ptr
+ %Y = sext <2 x i16> %X to <2 x i64>
+ ret <2 x i64>%Y
+}
+
+; AVX: load_sext_test5
+; AVX: vpmovsxdq (%r{{[^,]*}}), %xmm{{.*}}
+; AVX: ret
+
+; SSSE3: load_sext_test5
+; SSSE3: movslq
+; SSSE3: movslq
+; SSSE3: punpcklqdq
+; SSSE3: ret
+
+; SSE2: load_sext_test5
+; SSE2: movslq
+; SSE2: movslq
+; SSE2: punpcklqdq
+; SSE2: ret
+define <2 x i64> @load_sext_test5(<2 x i32> *%ptr) {
+ %X = load <2 x i32>* %ptr
+ %Y = sext <2 x i32> %X to <2 x i64>
+ ret <2 x i64>%Y
+}
+
+; AVX: load_sext_test6
+; AVX: vpmovsxbw (%r{{[^,]*}}), %xmm{{.*}}
+; AVX: ret
+
+; SSSE3: load_sext_test6
+; SSSE3: movq
+; SSSE3: punpcklbw
+; SSSE3: psraw $8
+; SSSE3: ret
+
+; SSE2: load_sext_test6
+; SSE2: movq
+; SSE2: punpcklbw
+; SSE2: psraw $8
+; SSE2: ret
+define <8 x i16> @load_sext_test6(<8 x i8> *%ptr) {
+ %X = load <8 x i8>* %ptr
+ %Y = sext <8 x i8> %X to <8 x i16>
+ ret <8 x i16>%Y
+}
diff --git a/test/CodeGen/X86/avx-zext.ll b/test/CodeGen/X86/avx-zext.ll
index b630e9d146..582537ea90 100755
--- a/test/CodeGen/X86/avx-zext.ll
+++ b/test/CodeGen/X86/avx-zext.ll
@@ -18,11 +18,10 @@ define <4 x i64> @zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp
 ret <4 x i64>%B
}
-
define <8 x i32> @zext_8i8_to_8i32(<8 x i8> %z) {
;CHECK: zext_8i8_to_8i32
;CHECK: vpunpckhwd
-;CHECK: vpunpcklwd
+;CHECK: vpmovzxwd
;CHECK: vinsertf128
;CHECK: ret
 %t = zext <8 x i8> %z to <8 x i32>
diff --git a/test/CodeGen/X86/avx2-conversions.ll b/test/CodeGen/X86/avx2-conversions.ll
index b47491335a..3ce08dcc73 100755
--- a/test/CodeGen/X86/avx2-conversions.ll
+++ b/test/CodeGen/X86/avx2-conversions.ll
@@ -63,6 +63,47 @@ define <8 x i32> @zext_8i8_8i32(<8 x i8> %A) nounwind {
 ret <8 x i32>%B
}
+; CHECK: load_sext_test1
+; CHECK: vpmovsxdq (%r{{[^,]*}}), %ymm{{.*}}
+; CHECK: ret
+define <4 x i64> @load_sext_test1(<4 x i32> *%ptr) {
+ %X = load <4 x i32>* %ptr
+ %Y = sext <4 x i32> %X to <4 x i64>
+ ret <4 x i64>%Y
+}
+
+; CHECK: load_sext_test2
+; CHECK: vpmovsxbq (%r{{[^,]*}}), %ymm{{.*}}
+; CHECK: ret
+define <4 x i64> @load_sext_test2(<4 x i8> *%ptr) {
+ %X = load <4 x i8>* %ptr
+ %Y = sext <4 x i8> %X to <4 x i64>
+ ret <4 x i64>%Y
+}
+; CHECK: load_sext_test3
+; CHECK: vpmovsxwq (%r{{[^,]*}}), %ymm{{.*}}
+; CHECK: ret
+define <4 x i64> @load_sext_test3(<4 x i16> *%ptr) {
+ %X = load <4 x i16>* %ptr
+ %Y = sext <4 x i16> %X to <4 x i64>
+ ret <4 x i64>%Y
+}
+; CHECK: load_sext_test4
+; CHECK: vpmovsxwd (%r{{[^,]*}}), %ymm{{.*}}
+; CHECK: ret
+define <8 x i32> @load_sext_test4(<8 x i16> *%ptr) {
+ %X = load <8 x i16>* %ptr
+ %Y = sext <8 x i16> %X to <8 x i32>
+ ret <8 x i32>%Y
+}
+; CHECK: load_sext_test5
+; CHECK: vpmovsxbd (%r{{[^,]*}}), %ymm{{.*}}
+; CHECK: ret
+define <8 x i32> @load_sext_test5(<8 x i8> *%ptr) {
+ %X = load <8 x i8>* %ptr
+ %Y = sext <8 x i8> %X to <8 x i32>
+ ret <8 x i32>%Y
+}
diff --git a/test/CodeGen/X86/avx2-logic.ll b/test/CodeGen/X86/avx2-logic.ll
index 13ebaa6f87..a5bb1a8f8e 100644
--- a/test/CodeGen/X86/avx2-logic.ll
+++ b/test/CodeGen/X86/avx2-logic.ll
@@ -48,9 +48,8 @@ entry:
; CHECK: vpblendvb
; CHECK: vpblendvb %ymm
; CHECK: ret
-define <32 x i8> @vpblendvb(<32 x i8> %x, <32 x i8> %y) {
- %min_is_x = icmp ult <32 x i8> %x, %y
- %min = select <32 x i1> %min_is_x, <32 x i8> %x, <32 x i8> %y
+define <32 x i8> @vpblendvb(<32 x i1> %cond, <32 x i8> %x, <32 x i8> %y) {
+ %min = select <32 x i1> %cond, <32 x i8> %x, <32 x i8> %y
 ret <32 x i8> %min
}
diff --git a/test/CodeGen/X86/bmi.ll b/test/CodeGen/X86/bmi.ll
index 43c47c0fa8..b89e648c52 100644
--- a/test/CodeGen/X86/bmi.ll
+++ b/test/CodeGen/X86/bmi.ll
@@ -26,6 +26,14 @@ define i32 @t3(i32 %x) nounwind {
; CHECK: tzcntl
}
+define i32 @tzcnt32_load(i32* %x) nounwind {
+ %x1 = load i32* %x
+ %tmp = tail call i32 @llvm.cttz.i32(i32 %x1, i1 false )
+ ret i32 %tmp
+; CHECK: tzcnt32_load:
+; CHECK: tzcntl ({{.*}})
+}
+
define i64 @t4(i64 %x) nounwind {
 %tmp = tail call i64 @llvm.cttz.i64( i64 %x, i1 false )
 ret i64 %tmp
@@ -69,6 +77,15 @@ define i32 @andn32(i32 %x, i32 %y) nounwind readnone {
; CHECK: andnl
}
+define i32 @andn32_load(i32 %x, i32* %y) nounwind readnone {
+ %y1 = load i32* %y
+ %tmp1 = xor i32 %x, -1
+ %tmp2 = and i32 %y1, %tmp1
+ ret i32 %tmp2
+; CHECK: andn32_load:
+; CHECK: andnl ({{.*}})
+}
+
define i64 @andn64(i64 %x, i64 %y) nounwind readnone {
 %tmp1 = xor i64 %x, -1
 %tmp2 = and i64 %tmp1, %y
@@ -84,6 +101,14 @@ define i32 @bextr32(i32 %x, i32 %y) nounwind readnone {
; CHECK: bextrl
}
+define i32 @bextr32_load(i32* %x, i32 %y) nounwind readnone {
+ %x1 = load i32* %x
+ %tmp = tail call i32 @llvm.x86.bmi.bextr.32(i32 %x1, i32 %y)
+ ret i32 %tmp
+; CHECK: bextr32_load:
+; CHECK: bextrl {{.*}}, ({{.*}}), {{.*}}
+}
+
declare i32 @llvm.x86.bmi.bextr.32(i32, i32) nounwind readnone
define i64 @bextr64(i64 %x, i64 %y) nounwind readnone {
@@ -102,6 +127,14 @@ define i32 @bzhi32(i32 %x, i32 %y) nounwind readnone {
; CHECK: bzhil
}
+define i32 @bzhi32_load(i32* %x, i32 %y) nounwind readnone {
+ %x1 = load i32* %x
+ %tmp = tail call i32 @llvm.x86.bmi.bzhi.32(i32 %x1, i32 %y)
+ ret i32 %tmp
+; CHECK: bzhi32_load:
+; CHECK: bzhil {{.*}}, ({{.*}}), {{.*}}
+}
+
declare i32 @llvm.x86.bmi.bzhi.32(i32, i32) nounwind readnone
define i64 @bzhi64(i64 %x, i64 %y) nounwind readnone {
@@ -121,6 +154,15 @@ define i32 @blsi32(i32 %x) nounwind readnone {
; CHECK: blsil
}
+define i32 @blsi32_load(i32* %x) nounwind readnone {
+ %x1 = load i32* %x
+ %tmp = sub i32 0, %x1
+ %tmp2 = and i32 %x1, %tmp
+ ret i32 %tmp2
+; CHECK: blsi32_load:
+; CHECK: blsil ({{.*}})
+}
+
define i64 @blsi64(i64 %x) nounwind readnone {
 %tmp = sub i64 0, %x
 %tmp2 = and i64 %tmp, %x
@@ -137,6 +179,15 @@ define i32 @blsmsk32(i32 %x) nounwind readnone {
; CHECK: blsmskl
}
+define i32 @blsmsk32_load(i32* %x) nounwind readnone {
+ %x1 = load i32* %x
+ %tmp = sub i32 %x1, 1
+ %tmp2 = xor i32 %x1, %tmp
+ ret i32 %tmp2
+; CHECK: blsmsk32_load:
+; CHECK: blsmskl ({{.*}})
+}
+
define i64 @blsmsk64(i64 %x) nounwind readnone {
 %tmp = sub i64 %x, 1
 %tmp2 = xor i64 %tmp, %x
@@ -153,6 +204,15 @@ define i32 @blsr32(i32 %x) nounwind readnone {
; CHECK: blsrl
}
+define i32 @blsr32_load(i32* %x) nounwind readnone {
+ %x1 = load i32* %x
+ %tmp = sub i32 %x1, 1
+ %tmp2 = and i32 %x1, %tmp
+ ret i32 %tmp2
+; CHECK: blsr32_load:
+; CHECK: blsrl ({{.*}})
+}
+
define i64 @blsr64(i64 %x) nounwind readnone {
 %tmp = sub i64 %x, 1
 %tmp2 = and i64 %tmp, %x
@@ -168,6 +228,14 @@ define i32 @pdep32(i32 %x, i32 %y) nounwind readnone {
; CHECK: pdepl
}
+define i32 @pdep32_load(i32 %x, i32* %y) nounwind readnone {
+ %y1 = load i32* %y
+ %tmp = tail call i32 @llvm.x86.bmi.pdep.32(i32 %x, i32 %y1)
+ ret i32 %tmp
+; CHECK: pdep32_load:
+; CHECK: pdepl ({{.*}})
+}
+
declare i32 @llvm.x86.bmi.pdep.32(i32, i32) nounwind readnone
define i64 @pdep64(i64 %x, i64 %y) nounwind readnone {
@@ -186,6 +254,14 @@ define i32 @pext32(i32 %x, i32 %y) nounwind readnone {
; CHECK: pextl
}
+define i32 @pext32_load(i32 %x, i32* %y) nounwind readnone {
+ %y1 = load i32* %y
+ %tmp = tail call i32 @llvm.x86.bmi.pext.32(i32 %x, i32 %y1)
+ ret i32 %tmp
+; CHECK: pext32_load:
+; CHECK: pextl ({{.*}})
+}
+
declare i32 @llvm.x86.bmi.pext.32(i32, i32) nounwind readnone
define i64 @pext64(i64 %x, i64 %y) nounwind readnone {
diff --git a/test/CodeGen/X86/clobber-fi0.ll b/test/CodeGen/X86/clobber-fi0.ll
new file mode 100644
index 0000000000..38a42dbf1a
--- /dev/null
+++ b/test/CodeGen/X86/clobber-fi0.ll
@@ -0,0 +1,37 @@
+; RUN: llc < %s -mcpu=generic -mtriple=x86_64-linux | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.7.0"
+
+; In the code below we need to copy the EFLAGS because of scheduling constraints.
+; When copying the EFLAGS we need to write to the stack with push/pop. This forces
+; us to emit the prolog.
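; [Editor's note - illustrative sketch, not part of the committed test:
; copying EFLAGS without clobbering a register goes through the stack,
; roughly
;   pushfq        ; spill the flags
;   popq %rax     ; materialize the copy in a GPR
; and because that sequence touches the stack, the function needs a frame,
; which is why the CHECK lines below insist on a subq against %rsp.]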
+
+; CHECK: main
+; CHECK: subq{{.*}}rsp
+; CHECK: ret
+define i32 @main(i32 %arg, i8** %arg1) nounwind {
+bb:
+ %tmp = alloca i32, align 4 ; [#uses=3 type=i32*]
+ %tmp2 = alloca i32, align 4 ; [#uses=3 type=i32*]
+ %tmp3 = alloca i32 ; [#uses=1 type=i32*]
+ store i32 1, i32* %tmp, align 4
+ store i32 1, i32* %tmp2, align 4
+ br label %bb4
+
+bb4: ; preds = %bb4, %bb
+ %tmp6 = load i32* %tmp2, align 4 ; [#uses=1 type=i32]
+ %tmp7 = add i32 %tmp6, -1 ; [#uses=2 type=i32]
+ store i32 %tmp7, i32* %tmp2, align 4
+ %tmp8 = icmp eq i32 %tmp7, 0 ; [#uses=1 type=i1]
+ %tmp9 = load i32* %tmp ; [#uses=1 type=i32]
+ %tmp10 = add i32 %tmp9, -1 ; [#uses=1 type=i32]
+ store i32 %tmp10, i32* %tmp3
+ br i1 %tmp8, label %bb11, label %bb4
+
+bb11: ; preds = %bb4
+ %tmp12 = load i32* %tmp, align 4 ; [#uses=1 type=i32]
+ ret i32 %tmp12
+}
+
+
diff --git a/test/CodeGen/X86/cmp.ll b/test/CodeGen/X86/cmp.ll
index eb06327f55..1855fe2fb8 100644
--- a/test/CodeGen/X86/cmp.ll
+++ b/test/CodeGen/X86/cmp.ll
@@ -151,3 +151,18 @@ entry:
 %conv = zext i1 %cmp to i32
 ret i32 %conv
}
+
+define i32 @test12() uwtable ssp {
+; CHECK: test12:
+; CHECK: testb
+ %1 = call zeroext i1 @test12b()
+ br i1 %1, label %2, label %3
+
+; <label>:2 ; preds = %0
+ ret i32 1
+
+; <label>:3 ; preds = %0
+ ret i32 2
+}
+
+declare zeroext i1 @test12b()
diff --git a/test/CodeGen/X86/coalesce-implicitdef.ll b/test/CodeGen/X86/coalesce-implicitdef.ll
new file mode 100644
index 0000000000..19cd08cf37
--- /dev/null
+++ b/test/CodeGen/X86/coalesce-implicitdef.ll
@@ -0,0 +1,130 @@
+; RUN: llc < %s -verify-coalescing
+; PR14732
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10"
+
+@c = common global i32 0, align 4
+@b = common global i32 0, align 4
+@a = common global i32 0, align 4
+@d = common global i32 0, align 4
+
+; This function creates an IMPLICIT_DEF with a long live range, even after
+; ProcessImplicitDefs.
+;
+; The coalescer should be able to deal with all kinds of IMPLICIT_DEF live
+; ranges, even if they are not common.
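; [Editor's note - illustrative sketch, not part of the committed test: the
; undef incoming value in the first phi of @f below,
;   %i.0.load44 = phi i32 [ %inc35, %for.inc34 ], [ undef, %entry ]
; becomes an IMPLICIT_DEF during instruction selection, and the RUN line's
; -verify-coalescing flag turns any mishandling of that live range into a
; hard test failure instead of a silent miscompile.]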
+
+define void @f() nounwind uwtable ssp {
+entry:
+ %i = alloca i32, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc34, %entry
+ %i.0.load44 = phi i32 [ %inc35, %for.inc34 ], [ undef, %entry ]
+ %pi.0 = phi i32* [ %pi.4, %for.inc34 ], [ undef, %entry ]
+ %tobool = icmp eq i32 %i.0.load44, 0
+ br i1 %tobool, label %for.end36, label %for.body
+
+for.body: ; preds = %for.cond
+ store i32 0, i32* @c, align 4, !tbaa !0
+ br label %for.body2
+
+for.body2: ; preds = %for.body, %for.inc
+ %i.0.load45 = phi i32 [ %i.0.load44, %for.body ], [ 0, %for.inc ]
+ %tobool3 = icmp eq i32 %i.0.load45, 0
+ br i1 %tobool3, label %if.then10, label %if.then
+
+if.then: ; preds = %for.body2
+ store i32 0, i32* %i, align 4, !tbaa !0
+ br label %for.body6
+
+for.body6: ; preds = %if.then, %for.body6
+ store i32 0, i32* %i, align 4
+ br i1 true, label %for.body6, label %for.inc
+
+if.then10: ; preds = %for.body2
+ store i32 1, i32* @b, align 4, !tbaa !0
+ ret void
+
+for.inc: ; preds = %for.body6
+ br i1 undef, label %for.body2, label %if.end30
+
+while.condthread-pre-split: ; preds = %label.loopexit, %while.condthread-pre-split.lr.ph.lr.ph, %for.inc27.backedge
+ %0 = phi i32 [ %inc28, %for.inc27.backedge ], [ %inc285863, %while.condthread-pre-split.lr.ph.lr.ph ], [ %inc2858, %label.loopexit ]
+ %inc2060 = phi i32 [ %inc20, %for.inc27.backedge ], [ %a.promoted.pre, %while.condthread-pre-split.lr.ph.lr.ph ], [ %inc20, %label.loopexit ]
+ br label %while.cond
+
+while.cond: ; preds = %while.condthread-pre-split, %while.cond
+ %p2.1.in = phi i32* [ %pi.3.ph, %while.cond ], [ %i, %while.condthread-pre-split ]
+ %p2.1 = bitcast i32* %p2.1.in to i16*
+ br i1 %tobool19, label %while.end, label %while.cond
+
+while.end: ; preds = %while.cond
+ %inc20 = add nsw i32 %inc2060, 1
+ %tobool21 = icmp eq i32 %inc2060, 0
+ br i1 %tobool21, label %for.inc27.backedge, label %if.then22
+
+for.inc27.backedge: ; preds = %while.end, %if.then22
+ %inc28 = add nsw i32 %0, 1
+ store i32 %inc28, i32* @b, align 4, !tbaa !0
+ %tobool17 = icmp eq i32 %inc28, 0
+ br i1 %tobool17, label %for.inc27.if.end30.loopexit56_crit_edge, label %while.condthread-pre-split
+
+if.then22: ; preds = %while.end
+ %1 = load i16* %p2.1, align 2, !tbaa !3
+ %tobool23 = icmp eq i16 %1, 0
+ br i1 %tobool23, label %for.inc27.backedge, label %label.loopexit
+
+label.loopexit: ; preds = %if.then22
+ store i32 %inc20, i32* @a, align 4, !tbaa !0
+ %inc2858 = add nsw i32 %0, 1
+ store i32 %inc2858, i32* @b, align 4, !tbaa !0
+ %tobool1759 = icmp eq i32 %inc2858, 0
+ br i1 %tobool1759, label %if.end30, label %while.condthread-pre-split
+
+for.inc27.if.end30.loopexit56_crit_edge: ; preds = %for.inc27.backedge
+ store i32 %inc20, i32* @a, align 4, !tbaa !0
+ br label %if.end30
+
+if.end30: ; preds = %for.inc27.if.end30.loopexit56_crit_edge, %label.loopexit, %label.preheader, %for.inc
+ %i.0.load46 = phi i32 [ 0, %for.inc ], [ %i.0.load4669, %label.preheader ], [ %i.0.load4669, %label.loopexit ], [ %i.0.load4669, %for.inc27.if.end30.loopexit56_crit_edge ]
+ %pi.4 = phi i32* [ %i, %for.inc ], [ %pi.3.ph, %label.preheader ], [ %pi.3.ph, %label.loopexit ], [ %pi.3.ph, %for.inc27.if.end30.loopexit56_crit_edge ]
+ %2 = load i32* %pi.4, align 4, !tbaa !0
+ %tobool31 = icmp eq i32 %2, 0
+ br i1 %tobool31, label %for.inc34, label %label.preheader
+
+for.inc34: ; preds = %if.end30
+ %inc35 = add nsw i32 %i.0.load46, 1
+ store i32 %inc35, i32* %i, align 4
+ br label %for.cond
+
+for.end36: ; preds = %for.cond
+ store i32 1, i32* %i, align 4
+ %3 = load i32* @c, align 4, !tbaa !0
+ %tobool37 = icmp eq i32 %3, 0
+ br i1 %tobool37, label %label.preheader, label %land.rhs
+
+land.rhs: ; preds = %for.end36
+ store i32 0, i32* @a, align 4, !tbaa !0
+ br label %label.preheader
+
+label.preheader: ; preds = %for.end36, %if.end30, %land.rhs
+ %i.0.load4669 = phi i32 [ 1, %land.rhs ], [ %i.0.load46, %if.end30 ], [ 1, %for.end36 ]
+ %pi.3.ph = phi i32* [ %pi.0, %land.rhs ], [ %pi.4, %if.end30 ], [ %pi.0, %for.end36 ]
+ %4 = load i32* @b, align 4, !tbaa !0
+ %inc285863 = add nsw i32 %4, 1
+ store i32 %inc285863, i32* @b, align 4, !tbaa !0
+ %tobool175964 = icmp eq i32 %inc285863, 0
+ br i1 %tobool175964, label %if.end30, label %while.condthread-pre-split.lr.ph.lr.ph
+
+while.condthread-pre-split.lr.ph.lr.ph: ; preds = %label.preheader
+ %.pr50 = load i32* @d, align 4, !tbaa !0
+ %tobool19 = icmp eq i32 %.pr50, 0
+ %a.promoted.pre = load i32* @a, align 4, !tbaa !0
+ br label %while.condthread-pre-split
+}
+
+!0 = metadata !{metadata !"int", metadata !1}
+!1 = metadata !{metadata !"omnipotent char", metadata !2}
+!2 = metadata !{metadata !"Simple C/C++ TBAA"}
+!3 = metadata !{metadata !"short", metadata !1}
diff --git a/test/CodeGen/X86/cvtv2f32.ll b/test/CodeGen/X86/cvtv2f32.ll
index 466b096067..d11bb9ee3e 100644
--- a/test/CodeGen/X86/cvtv2f32.ll
+++ b/test/CodeGen/X86/cvtv2f32.ll
@@ -1,3 +1,7 @@
+; A bug fix in the DAGCombiner made this test fail, so marking as xfail
+; until this can be investigated further.
+; XFAIL: *
+
; RUN: llc < %s -mtriple=i686-linux-pc -mcpu=corei7 | FileCheck %s
define <2 x float> @foo(i32 %x, i32 %y, <2 x float> %v) {
diff --git a/test/CodeGen/X86/dbg-value-inlined-parameter.ll b/test/CodeGen/X86/dbg-value-inlined-parameter.ll
deleted file mode 100644
index d248a41303..0000000000
--- a/test/CodeGen/X86/dbg-value-inlined-parameter.ll
+++ /dev/null
@@ -1,87 +0,0 @@
-; RUN: llc -mtriple=x86_64-apple-darwin < %s | FileCheck %s
-; RUN: llc -mtriple=x86_64-apple-darwin -regalloc=basic < %s | FileCheck %s
-
-;CHECK: DW_TAG_inlined_subroutine
-;CHECK-NEXT: DW_AT_abstract_origin
-;CHECK-NEXT: DW_AT_low_pc
-;CHECK-NEXT: DW_AT_high_pc
-;CHECK-NEXT: DW_AT_call_file
-;CHECK-NEXT: DW_AT_call_line
-;CHECK-NEXT: DW_TAG_formal_parameter
-;CHECK-NEXT: Lstring11-Lsection_str ## DW_AT_name
-
-%struct.S1 = type { float*, i32 }
-
-@p = common global %struct.S1 zeroinitializer, align 8
-
-define i32 @foo(%struct.S1* nocapture %sp, i32 %nums) nounwind optsize ssp {
-entry:
- tail call void @llvm.dbg.value(metadata !{%struct.S1* %sp}, i64 0, metadata !9), !dbg !20
- tail call void @llvm.dbg.value(metadata !{i32 %nums}, i64 0, metadata !18), !dbg !21
- %tmp2 = getelementptr inbounds %struct.S1* %sp, i64 0, i32 1, !dbg !22
- store i32 %nums, i32* %tmp2, align 4, !dbg !22, !tbaa !24
- %call = tail call float* @bar(i32 %nums) nounwind optsize, !dbg !27
- %tmp5 = getelementptr inbounds %struct.S1* %sp, i64 0, i32 0, !dbg !27
- store float* %call, float** %tmp5, align 8, !dbg !27, !tbaa !28
- %cmp = icmp ne float* %call, null, !dbg !29
- %cond = zext i1 %cmp to i32, !dbg !29
- ret i32 %cond, !dbg !29
-}
-
-declare float* @bar(i32) optsize
-
-define void @foobar() nounwind optsize ssp {
-entry:
- tail call void @llvm.dbg.value(metadata !30, i64 0, metadata !9) nounwind, !dbg !31
- tail call void @llvm.dbg.value(metadata !34, i64 0, metadata !18) nounwind, !dbg !35
- store i32 1, i32* getelementptr inbounds (%struct.S1* @p, i64 0, i32 1), align 8, !dbg !36, !tbaa !24
- %call.i = tail call float* @bar(i32 1) nounwind optsize, !dbg !37
- store float* %call.i, float** getelementptr inbounds (%struct.S1* @p, i64 0, i32 0), align 8, !dbg !37, !tbaa !28
- ret void, !dbg !38
-}
-
-declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
-
-!llvm.dbg.sp = !{!0, !6}
-!llvm.dbg.lv.foo = !{!9, !18}
-!llvm.dbg.gv = !{!19}
-
-!0 = metadata !{i32 589870, i32 0, metadata !1, metadata !"foo", metadata !"foo", metadata !"", metadata !1, i32 8, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i32 (%struct.S1*, i32)* @foo} ; [ DW_TAG_subprogram ]
-!1 = metadata !{i32 589865, metadata !"nm2.c", metadata !"/private/tmp", metadata !2} ; [ DW_TAG_file_type ]
-!2 = metadata !{i32 589841, i32 0, i32 12, metadata !"nm2.c", metadata !"/private/tmp", metadata !"clang version 2.9 (trunk 125693)", i1 true, i1 true, metadata !"", i32 0} ; [ DW_TAG_compile_unit ]
-!3 = metadata !{i32 589845, metadata !1, metadata !"", metadata !1, i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !4, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
-!4 = metadata !{metadata !5}
-!5 = metadata !{i32 589860, metadata !2, metadata !"int", null, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
-!6 = metadata !{i32 589870, i32 0, metadata !1, metadata !"foobar", metadata !"foobar", metadata !"", metadata !1, i32 15, metadata !7, i1 false, i1 true, i32 0, i32 0, i32 0, i32 0, i1 true, void ()* @foobar} ; [ DW_TAG_subprogram ]
-!7 = metadata !{i32 589845, metadata !1, metadata !"", metadata !1, i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !8, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
-!8 = metadata !{null}
-!9 = metadata !{i32 590081, metadata !0, metadata !"sp", metadata !1, i32 7, metadata !10, i32 0} ; [ DW_TAG_arg_variable ]
-!10 = metadata !{i32 589839, metadata !2, metadata !"", null, i32 0, i64 64, i64 64, i64 0, i32 0, metadata !11} ; [ DW_TAG_pointer_type ]
-!11 = metadata !{i32 589846, metadata !2, metadata !"S1", metadata !1, i32 4, i64 0, i64 0, i64 0, i32 0, metadata !12} ; [ DW_TAG_typedef ]
-!12 = metadata !{i32 589843, metadata !2, metadata !"S1", metadata !1, i32 1, i64 128, i64 64, i32 0, i32 0, i32 0, metadata !13, i32 0, i32 0} ; [ DW_TAG_structure_type ]
-!13 = metadata !{metadata !14, metadata !17}
-!14 = metadata !{i32 589837, metadata !1, metadata !"m", metadata !1, i32 2, i64 64, i64 64, i64 0, i32 0, metadata !15} ; [ DW_TAG_member ]
-!15 = metadata !{i32 589839, metadata !2, metadata !"", null, i32 0, i64 64, i64 64, i64 0, i32 0, metadata !16} ; [ DW_TAG_pointer_type ]
-!16 = metadata !{i32 589860, metadata !2, metadata !"float", null, i32 0, i64 32, i64 32, i64 0, i32 0, i32 4} ; [ DW_TAG_base_type ]
-!17 = metadata !{i32 589837, metadata !1, metadata !"nums", metadata !1, i32 3, i64 32, i64 32, i64 64, i32 0, metadata !5} ; [ DW_TAG_member ]
-!18 = metadata !{i32 590081, metadata !0, metadata !"nums", metadata !1, i32 7, metadata !5, i32 0} ; [ DW_TAG_arg_variable ]
-!19 = metadata !{i32 589876, i32 0, metadata !2, metadata !"p", metadata !"p", metadata !"", metadata !1, i32 14, metadata !11, i32 0, i32 1, %struct.S1* @p} ; [ DW_TAG_variable ]
-!20 = metadata !{i32 7, i32 13, metadata !0, null}
-!21 = metadata !{i32 7, i32 21, metadata !0, null}
-!22 = metadata !{i32 9, i32 3, metadata !23, null}
-!23 = metadata !{i32 589835, metadata !0, i32 8, i32 1, metadata !1, i32 0} ; [ DW_TAG_lexical_block ]
-!24 = metadata !{metadata !"int", metadata !25}
-!25 = metadata !{metadata !"omnipotent char", metadata !26}
-!26 = metadata !{metadata !"Simple C/C++ TBAA", null}
-!27 = metadata !{i32 10, i32 3, metadata !23, null}
-!28 =
metadata !{metadata !"any pointer", metadata !25} -!29 = metadata !{i32 11, i32 3, metadata !23, null} -!30 = metadata !{%struct.S1* @p} -!31 = metadata !{i32 7, i32 13, metadata !0, metadata !32} -!32 = metadata !{i32 16, i32 3, metadata !33, null} -!33 = metadata !{i32 589835, metadata !6, i32 15, i32 15, metadata !1, i32 1} ; [ DW_TAG_lexical_block ] -!34 = metadata !{i32 1} -!35 = metadata !{i32 7, i32 21, metadata !0, metadata !32} -!36 = metadata !{i32 9, i32 3, metadata !23, metadata !32} -!37 = metadata !{i32 10, i32 3, metadata !23, metadata !32} -!38 = metadata !{i32 17, i32 1, metadata !33, null} diff --git a/test/CodeGen/X86/early-ifcvt.ll b/test/CodeGen/X86/early-ifcvt.ll index 2e1852d3e3..2606bd28d5 100644 --- a/test/CodeGen/X86/early-ifcvt.ll +++ b/test/CodeGen/X86/early-ifcvt.ll @@ -142,3 +142,34 @@ save_state_and_return: } declare void @BZ2_bz__AssertH__fail() + +; Make sure we don't speculate on div/idiv instructions +; CHECK: test_idiv +; CHECK-NOT: cmov +define i32 @test_idiv(i32 %a, i32 %b) nounwind uwtable readnone ssp { + %1 = icmp eq i32 %b, 0 + br i1 %1, label %4, label %2 + +; <label>:2 ; preds = %0 + %3 = sdiv i32 %a, %b + br label %4 + +; <label>:4 ; preds = %0, %2 + %5 = phi i32 [ %3, %2 ], [ %a, %0 ] + ret i32 %5 +} + +; CHECK: test_div +; CHECK-NOT: cmov +define i32 @test_div(i32 %a, i32 %b) nounwind uwtable readnone ssp { + %1 = icmp eq i32 %b, 0 + br i1 %1, label %4, label %2 + +; <label>:2 ; preds = %0 + %3 = udiv i32 %a, %b + br label %4 + +; <label>:4 ; preds = %0, %2 + %5 = phi i32 [ %3, %2 ], [ %a, %0 ] + ret i32 %5 +} diff --git a/test/CodeGen/X86/fast-isel-x86-64.ll b/test/CodeGen/X86/fast-isel-x86-64.ll index 86b6606779..acfa64582c 100644 --- a/test/CodeGen/X86/fast-isel-x86-64.ll +++ b/test/CodeGen/X86/fast-isel-x86-64.ll @@ -1,5 +1,5 @@ -; RUN: llc < %s -mattr=-avx -fast-isel -O0 -regalloc=fast -asm-verbose=0 -fast-isel-abort | FileCheck %s -; RUN: llc < %s -mattr=+avx -fast-isel -O0 -regalloc=fast -asm-verbose=0 -fast-isel-abort | FileCheck %s --check-prefix=AVX +; RUN: llc < %s -mattr=-avx -fast-isel -mcpu=core2 -O0 -regalloc=fast -asm-verbose=0 -fast-isel-abort | FileCheck %s +; RUN: llc < %s -mattr=+avx -fast-isel -mcpu=core2 -O0 -regalloc=fast -asm-verbose=0 -fast-isel-abort | FileCheck %s --check-prefix=AVX ; RUN: llc < %s -fast-isel -O0 -regalloc=fast -asm-verbose=0 -fast-isel-abort -mtriple=x86_64-none-nacl | FileCheck %s --check-prefix=NACL64 ; RUN: llc < %s -fast-isel -O0 -regalloc=fast -asm-verbose=0 -fast-isel-abort -mtriple=x86_64-none-nacl -relocation-model=pic | FileCheck %s --check-prefix=NACL64_PIC diff --git a/test/CodeGen/X86/float-asmprint.ll b/test/CodeGen/X86/float-asmprint.ll new file mode 100644 index 0000000000..4aeae7fe04 --- /dev/null +++ b/test/CodeGen/X86/float-asmprint.ll @@ -0,0 +1,40 @@ +; RUN: llc -mtriple=x86_64-none-linux < %s | FileCheck %s + +; Check that all current floating-point types are correctly emitted to assembly +; on a little-endian target. 
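+;
+; Negative zero sets only the sign bit, so the expected patterns follow
+; directly from IEEE-754: double -0.0 is 0x8000000000000000 (the signed .quad
+; -9223372036854775808 below), float -0.0 is 0x80000000 (.long 2147483648),
+; and half -0.0 is 0x8000 (.short 32768); the wider types spread the same
+; sign-bit-only pattern across several words, least significant bytes first.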
+
+@var128 = global fp128 0xL00000000000000008000000000000000, align 16
+@varppc128 = global ppc_fp128 0xM80000000000000000000000000000000, align 16
+@var80 = global x86_fp80 0xK80000000000000000000, align 16
+@var64 = global double -0.0, align 8
+@var32 = global float -0.0, align 4
+@var16 = global half -0.0, align 2
+
+; CHECK: var128:
+; CHECK-NEXT: .quad 0 # fp128 -0
+; CHECK-NEXT: .quad -9223372036854775808
+; CHECK-NEXT: .size
+
+; CHECK: varppc128:
+; CHECK-NEXT: .quad 0 # ppc_fp128 -0
+; CHECK-NEXT: .quad -9223372036854775808
+; CHECK-NEXT: .size
+
+; CHECK: var80:
+; CHECK-NEXT: .quad 0 # x86_fp80 -0
+; CHECK-NEXT: .short 32768
+; CHECK-NEXT: .zero 6
+; CHECK-NEXT: .size
+
+; CHECK: var64:
+; CHECK-NEXT: .quad -9223372036854775808 # double -0
+; CHECK-NEXT: .size
+
+; CHECK: var32:
+; CHECK-NEXT: .long 2147483648 # float -0
+; CHECK-NEXT: .size
+
+; CHECK: var16:
+; CHECK-NEXT: .short 32768 # half -0
+; CHECK-NEXT: .size
+
diff --git a/test/CodeGen/X86/fold-call.ll b/test/CodeGen/X86/fold-call.ll
index 603e9ad66c..35327faa64 100644
--- a/test/CodeGen/X86/fold-call.ll
+++ b/test/CodeGen/X86/fold-call.ll
@@ -1,10 +1,27 @@
-; RUN: llc < %s -march=x86 | not grep mov
-; RUN: llc < %s -march=x86-64 | not grep mov
+; RUN: llc < %s -march=x86 | FileCheck %s
+; RUN: llc < %s -march=x86-64 | FileCheck %s
-declare void @bar()
+; CHECK: test1
+; CHECK-NOT: mov
-define void @foo(i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, void()* %arg) nounwind {
+declare void @bar()
+define void @test1(i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, void()* %arg) nounwind {
 call void @bar()
 call void %arg()
 ret void
}
+
+; PR14739
+; CHECK: test2
+; CHECK: mov{{.*}} $0, ([[REGISTER:%[a-z]+]])
+; CHECK-NOT: jmp{{.*}} *([[REGISTER]])
+
+%struct.X = type { void ()* }
+define void @test2(%struct.X* nocapture %x) {
+entry:
+  %f = getelementptr inbounds %struct.X* %x, i64 0, i32 0
+  %0 = load void ()** %f
+  store void ()* null, void ()** %f
+  tail call void %0()
+  ret void
+}
diff --git a/test/CodeGen/X86/fold-vex.ll b/test/CodeGen/X86/fold-vex.ll
new file mode 100644
index 0000000000..2bb5b441c7
--- /dev/null
+++ b/test/CodeGen/X86/fold-vex.ll
@@ -0,0 +1,16 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7-avx | FileCheck %s
+
+;CHECK: @test
+; No need to load from memory. The operand will be loaded as part of the AND instr.
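+; (Sketch of the expected code, with the exact registers assumed rather than
+; checked: the load folds into the VEX op itself, e.g.
+;   vandps (%rdi), %ymm0, %ymm0
+; instead of a vmovaps load followed by a register-register vandps.)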
+;CHECK-NOT: vmovaps +;CHECK: vandps +;CHECK: ret + +define void @test1(<8 x i32>* %p0, <8 x i32> %in1) nounwind { +entry: + %in0 = load <8 x i32>* %p0, align 2 + %a = and <8 x i32> %in0, %in1 + store <8 x i32> %a, <8 x i32>* undef + ret void +} + diff --git a/test/CodeGen/X86/memcpy-2.ll b/test/CodeGen/X86/memcpy-2.ll index dcc8f0d268..949d6a4293 100644 --- a/test/CodeGen/X86/memcpy-2.ll +++ b/test/CodeGen/X86/memcpy-2.ll @@ -17,11 +17,11 @@ entry: ; SSE2: movb $0, 24(%esp) ; SSE1: t1: -; SSE1: fldl _.str+16 -; SSE1: fstpl 16(%esp) ; SSE1: movaps _.str, %xmm0 ; SSE1: movaps %xmm0 ; SSE1: movb $0, 24(%esp) +; SSE1: movl $0, 20(%esp) +; SSE1: movl $0, 16(%esp) ; NOSSE: t1: ; NOSSE: movb $0 diff --git a/test/CodeGen/X86/memcpy.ll b/test/CodeGen/X86/memcpy.ll index 39c7fbafd4..2e02e45c8d 100644 --- a/test/CodeGen/X86/memcpy.ll +++ b/test/CodeGen/X86/memcpy.ll @@ -87,8 +87,21 @@ entry: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([30 x i8]* @.str, i64 0, i64 0), i64 16, i32 1, i1 false) ret void +; DARWIN: test5: ; DARWIN: movabsq $7016996765293437281 ; DARWIN: movabsq $7016996765293437184 } +; PR14896 +@.str2 = private unnamed_addr constant [2 x i8] c"x\00", align 1 + +define void @test6() nounwind uwtable { +entry: +; DARWIN: test6 +; DARWIN: movw $0, 8 +; DARWIN: movq $120, 0 + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* null, i8* getelementptr inbounds ([2 x i8]* @.str2, i64 0, i64 0), i64 10, i32 1, i1 false) + ret void +} + diff --git a/test/CodeGen/X86/ms-inline-asm.ll b/test/CodeGen/X86/ms-inline-asm.ll index 24d28adda8..68e332eed4 100644 --- a/test/CodeGen/X86/ms-inline-asm.ll +++ b/test/CodeGen/X86/ms-inline-asm.ll @@ -61,3 +61,21 @@ entry: ; CHECK: .att_syntax ; CHECK: {{## InlineAsm End|#NO_APP}} } + +define void @t19_helper() nounwind { +entry: + ret void +} + +define void @t19() nounwind { +entry: + call void asm sideeffect inteldialect "call $0", "r,~{dirflag},~{fpsr},~{flags}"(void ()* @t19_helper) nounwind + ret void +; CHECK: t19: +; CHECK: movl ${{_?}}t19_helper, %eax +; CHECK: {{## InlineAsm Start|#APP}} +; CHECK: .intel_syntax +; CHECK: call eax +; CHECK: .att_syntax +; CHECK: {{## InlineAsm End|#NO_APP}} +} diff --git a/test/CodeGen/X86/pmovsx-inreg.ll b/test/CodeGen/X86/pmovsx-inreg.ll new file mode 100644 index 0000000000..d8c27f2504 --- /dev/null +++ b/test/CodeGen/X86/pmovsx-inreg.ll @@ -0,0 +1,176 @@ +; RUN: llc < %s -march=x86-64 -mcpu=penryn | FileCheck -check-prefix=SSE41 %s +; RUN: llc < %s -march=x86-64 -mcpu=corei7-avx | FileCheck -check-prefix=AVX1 %s +; RUN: llc < %s -march=x86-64 -mcpu=core-avx2 | FileCheck -check-prefix=AVX2 %s + +; PR14887 +; These tests inject a store into the chain to test the inreg versions of pmovsx + +define void @test1(<2 x i8>* %in, <2 x i64>* %out) nounwind { + %wide.load35 = load <2 x i8>* %in, align 1 + %sext = sext <2 x i8> %wide.load35 to <2 x i64> + store <2 x i64> zeroinitializer, <2 x i64>* undef, align 8 + store <2 x i64> %sext, <2 x i64>* %out, align 8 + ret void + +; SSE41: test1: +; SSE41: pmovsxbq + +; AVX1: test1: +; AVX1: vpmovsxbq + +; AVX2: test1: +; AVX2: vpmovsxbq +} + +define void @test2(<4 x i8>* %in, <4 x i64>* %out) nounwind { + %wide.load35 = load <4 x i8>* %in, align 1 + %sext = sext <4 x i8> %wide.load35 to <4 x i64> + store <4 x i64> zeroinitializer, <4 x i64>* undef, align 8 + store <4 x i64> %sext, <4 x i64>* %out, align 8 + ret void + +; AVX2: test2: +; AVX2: vpmovsxbq +} + +define void @test3(<4 x i8>* %in, <4 x i32>* %out) nounwind { + %wide.load35 = load <4 x i8>* 
%in, align 1 + %sext = sext <4 x i8> %wide.load35 to <4 x i32> + store <4 x i32> zeroinitializer, <4 x i32>* undef, align 8 + store <4 x i32> %sext, <4 x i32>* %out, align 8 + ret void + +; SSE41: test3: +; SSE41: pmovsxbd + +; AVX1: test3: +; AVX1: vpmovsxbd + +; AVX2: test3: +; AVX2: vpmovsxbd +} + +define void @test4(<8 x i8>* %in, <8 x i32>* %out) nounwind { + %wide.load35 = load <8 x i8>* %in, align 1 + %sext = sext <8 x i8> %wide.load35 to <8 x i32> + store <8 x i32> zeroinitializer, <8 x i32>* undef, align 8 + store <8 x i32> %sext, <8 x i32>* %out, align 8 + ret void + +; AVX2: test4: +; AVX2: vpmovsxbd +} + +define void @test5(<8 x i8>* %in, <8 x i16>* %out) nounwind { + %wide.load35 = load <8 x i8>* %in, align 1 + %sext = sext <8 x i8> %wide.load35 to <8 x i16> + store <8 x i16> zeroinitializer, <8 x i16>* undef, align 8 + store <8 x i16> %sext, <8 x i16>* %out, align 8 + ret void + +; SSE41: test5: +; SSE41: pmovsxbw + +; AVX1: test5: +; AVX1: vpmovsxbw + +; AVX2: test5: +; AVX2: vpmovsxbw +} + +define void @test6(<16 x i8>* %in, <16 x i16>* %out) nounwind { + %wide.load35 = load <16 x i8>* %in, align 1 + %sext = sext <16 x i8> %wide.load35 to <16 x i16> + store <16 x i16> zeroinitializer, <16 x i16>* undef, align 8 + store <16 x i16> %sext, <16 x i16>* %out, align 8 + ret void + +; AVX2: test6: +; FIXME: v16i8 -> v16i16 is scalarized. +; AVX2-NOT: pmovsx +} + +define void @test7(<2 x i16>* %in, <2 x i64>* %out) nounwind { + %wide.load35 = load <2 x i16>* %in, align 1 + %sext = sext <2 x i16> %wide.load35 to <2 x i64> + store <2 x i64> zeroinitializer, <2 x i64>* undef, align 8 + store <2 x i64> %sext, <2 x i64>* %out, align 8 + ret void + + +; SSE41: test7: +; SSE41: pmovsxwq + +; AVX1: test7: +; AVX1: vpmovsxwq + +; AVX2: test7: +; AVX2: vpmovsxwq +} + +define void @test8(<4 x i16>* %in, <4 x i64>* %out) nounwind { + %wide.load35 = load <4 x i16>* %in, align 1 + %sext = sext <4 x i16> %wide.load35 to <4 x i64> + store <4 x i64> zeroinitializer, <4 x i64>* undef, align 8 + store <4 x i64> %sext, <4 x i64>* %out, align 8 + ret void + +; AVX2: test8: +; AVX2: vpmovsxwq +} + +define void @test9(<4 x i16>* %in, <4 x i32>* %out) nounwind { + %wide.load35 = load <4 x i16>* %in, align 1 + %sext = sext <4 x i16> %wide.load35 to <4 x i32> + store <4 x i32> zeroinitializer, <4 x i32>* undef, align 8 + store <4 x i32> %sext, <4 x i32>* %out, align 8 + ret void + +; SSE41: test9: +; SSE41: pmovsxwd + +; AVX1: test9: +; AVX1: vpmovsxwd + +; AVX2: test9: +; AVX2: vpmovsxwd +} + +define void @test10(<8 x i16>* %in, <8 x i32>* %out) nounwind { + %wide.load35 = load <8 x i16>* %in, align 1 + %sext = sext <8 x i16> %wide.load35 to <8 x i32> + store <8 x i32> zeroinitializer, <8 x i32>* undef, align 8 + store <8 x i32> %sext, <8 x i32>* %out, align 8 + ret void + +; AVX2: test10: +; AVX2: vpmovsxwd +} + +define void @test11(<2 x i32>* %in, <2 x i64>* %out) nounwind { + %wide.load35 = load <2 x i32>* %in, align 1 + %sext = sext <2 x i32> %wide.load35 to <2 x i64> + store <2 x i64> zeroinitializer, <2 x i64>* undef, align 8 + store <2 x i64> %sext, <2 x i64>* %out, align 8 + ret void + +; SSE41: test11: +; SSE41: pmovsxdq + +; AVX1: test11: +; AVX1: vpmovsxdq + +; AVX2: test11: +; AVX2: vpmovsxdq +} + +define void @test12(<4 x i32>* %in, <4 x i64>* %out) nounwind { + %wide.load35 = load <4 x i32>* %in, align 1 + %sext = sext <4 x i32> %wide.load35 to <4 x i64> + store <4 x i64> zeroinitializer, <4 x i64>* undef, align 8 + store <4 x i64> %sext, <4 x i64>* %out, align 8 + ret void + +; AVX2: test12: 
+; AVX2: vpmovsxdq +} diff --git a/test/CodeGen/X86/pointer-vector.ll b/test/CodeGen/X86/pointer-vector.ll index 58423d1959..0ee9987526 100644 --- a/test/CodeGen/X86/pointer-vector.ll +++ b/test/CodeGen/X86/pointer-vector.ll @@ -1,5 +1,5 @@ ; RUN: llc < %s -mtriple=i686-linux -mcpu=corei7 | FileCheck %s -; RUN: opt -instsimplify %s -disable-output +; RUN: opt -instsimplify -disable-output < %s ;CHECK: SHUFF0 define <8 x i32*> @SHUFF0(<4 x i32*> %ptrv) nounwind { diff --git a/test/CodeGen/X86/psubus.ll b/test/CodeGen/X86/psubus.ll new file mode 100644 index 0000000000..aff4afbd2e --- /dev/null +++ b/test/CodeGen/X86/psubus.ll @@ -0,0 +1,340 @@ +; RUN: llc -mcpu=core2 < %s | FileCheck %s -check-prefix=SSE2 +; RUN: llc -mcpu=corei7-avx < %s | FileCheck %s -check-prefix=AVX1 +; RUN: llc -mcpu=core-avx2 < %s | FileCheck %s -check-prefix=AVX2 + +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.8.0" + +define void @test1(i16* nocapture %head) nounwind { +vector.ph: + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %0 = getelementptr inbounds i16* %head, i64 %index + %1 = bitcast i16* %0 to <8 x i16>* + %2 = load <8 x i16>* %1, align 2 + %3 = icmp slt <8 x i16> %2, zeroinitializer + %4 = xor <8 x i16> %2, <i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768> + %5 = select <8 x i1> %3, <8 x i16> %4, <8 x i16> zeroinitializer + store <8 x i16> %5, <8 x i16>* %1, align 2 + %index.next = add i64 %index, 8 + %6 = icmp eq i64 %index.next, 16384 + br i1 %6, label %for.end, label %vector.body + +for.end: ; preds = %vector.body + ret void + +; SSE2: @test1 +; SSE2: psubusw LCPI0_0(%rip), %xmm0 + +; AVX1: @test1 +; AVX1: vpsubusw LCPI0_0(%rip), %xmm0, %xmm0 + +; AVX2: @test1 +; AVX2: vpsubusw LCPI0_0(%rip), %xmm0, %xmm0 +} + +define void @test2(i16* nocapture %head) nounwind { +vector.ph: + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %0 = getelementptr inbounds i16* %head, i64 %index + %1 = bitcast i16* %0 to <8 x i16>* + %2 = load <8 x i16>* %1, align 2 + %3 = icmp ugt <8 x i16> %2, <i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766> + %4 = add <8 x i16> %2, <i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767> + %5 = select <8 x i1> %3, <8 x i16> %4, <8 x i16> zeroinitializer + store <8 x i16> %5, <8 x i16>* %1, align 2 + %index.next = add i64 %index, 8 + %6 = icmp eq i64 %index.next, 16384 + br i1 %6, label %for.end, label %vector.body + +for.end: ; preds = %vector.body + ret void + +; SSE2: @test2 +; SSE2: psubusw LCPI1_0(%rip), %xmm0 + +; AVX1: @test2 +; AVX1: vpsubusw LCPI1_0(%rip), %xmm0, %xmm0 + +; AVX2: @test2 +; AVX2: vpsubusw LCPI1_0(%rip), %xmm0, %xmm0 +} + +define void @test3(i16* nocapture %head, i16 zeroext %w) nounwind { +vector.ph: + %0 = insertelement <8 x i16> undef, i16 %w, i32 0 + %broadcast15 = shufflevector <8 x i16> %0, <8 x i16> undef, <8 x i32> zeroinitializer + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %1 = getelementptr inbounds i16* %head, i64 %index + %2 = bitcast i16* %1 to <8 x i16>* + %3 = load <8 x i16>* %2, 
align 2 + %4 = icmp ult <8 x i16> %3, %broadcast15 + %5 = sub <8 x i16> %3, %broadcast15 + %6 = select <8 x i1> %4, <8 x i16> zeroinitializer, <8 x i16> %5 + store <8 x i16> %6, <8 x i16>* %2, align 2 + %index.next = add i64 %index, 8 + %7 = icmp eq i64 %index.next, 16384 + br i1 %7, label %for.end, label %vector.body + +for.end: ; preds = %vector.body + ret void + +; SSE2: @test3 +; SSE2: psubusw %xmm0, %xmm1 + +; AVX1: @test3 +; AVX1: vpsubusw %xmm0, %xmm1, %xmm1 + +; AVX2: @test3 +; AVX2: vpsubusw %xmm0, %xmm1, %xmm1 +} + +define void @test4(i8* nocapture %head) nounwind { +vector.ph: + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %0 = getelementptr inbounds i8* %head, i64 %index + %1 = bitcast i8* %0 to <16 x i8>* + %2 = load <16 x i8>* %1, align 1 + %3 = icmp slt <16 x i8> %2, zeroinitializer + %4 = xor <16 x i8> %2, <i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128> + %5 = select <16 x i1> %3, <16 x i8> %4, <16 x i8> zeroinitializer + store <16 x i8> %5, <16 x i8>* %1, align 1 + %index.next = add i64 %index, 16 + %6 = icmp eq i64 %index.next, 16384 + br i1 %6, label %for.end, label %vector.body + +for.end: ; preds = %vector.body + ret void + +; SSE2: @test4 +; SSE2: psubusb LCPI3_0(%rip), %xmm0 + +; AVX1: @test4 +; AVX1: vpsubusb LCPI3_0(%rip), %xmm0, %xmm0 + +; AVX2: @test4 +; AVX2: vpsubusb LCPI3_0(%rip), %xmm0, %xmm0 +} + +define void @test5(i8* nocapture %head) nounwind { +vector.ph: + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %0 = getelementptr inbounds i8* %head, i64 %index + %1 = bitcast i8* %0 to <16 x i8>* + %2 = load <16 x i8>* %1, align 1 + %3 = icmp ugt <16 x i8> %2, <i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126> + %4 = add <16 x i8> %2, <i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127> + %5 = select <16 x i1> %3, <16 x i8> %4, <16 x i8> zeroinitializer + store <16 x i8> %5, <16 x i8>* %1, align 1 + %index.next = add i64 %index, 16 + %6 = icmp eq i64 %index.next, 16384 + br i1 %6, label %for.end, label %vector.body + +for.end: ; preds = %vector.body + ret void + +; SSE2: @test5 +; SSE2: psubusb LCPI4_0(%rip), %xmm0 + +; AVX1: @test5 +; AVX1: vpsubusb LCPI4_0(%rip), %xmm0, %xmm0 + +; AVX2: @test5 +; AVX2: vpsubusb LCPI4_0(%rip), %xmm0, %xmm0 +} + +define void @test6(i8* nocapture %head, i8 zeroext %w) nounwind { +vector.ph: + %0 = insertelement <16 x i8> undef, i8 %w, i32 0 + %broadcast15 = shufflevector <16 x i8> %0, <16 x i8> undef, <16 x i32> zeroinitializer + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %1 = getelementptr inbounds i8* %head, i64 %index + %2 = bitcast i8* %1 to <16 x i8>* + %3 = load <16 x i8>* %2, align 1 + %4 = icmp ult <16 x i8> %3, %broadcast15 + %5 = sub <16 x i8> %3, %broadcast15 + %6 = select <16 x i1> %4, <16 x i8> zeroinitializer, <16 x i8> %5 + store <16 x i8> %6, <16 x i8>* %2, align 1 + %index.next = add i64 %index, 16 + %7 = icmp eq i64 %index.next, 16384 + br i1 %7, label %for.end, label %vector.body + +for.end: ; preds = %vector.body + ret void + +; SSE2: @test6 +; SSE2: psubusb 
%xmm0, %xmm1 + +; AVX1: @test6 +; AVX1: vpsubusb %xmm0, %xmm1, %xmm1 + +; AVX2: @test6 +; AVX2: vpsubusb %xmm0, %xmm1, %xmm1 +} + +define void @test7(i16* nocapture %head) nounwind { +vector.ph: + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %0 = getelementptr inbounds i16* %head, i64 %index + %1 = bitcast i16* %0 to <16 x i16>* + %2 = load <16 x i16>* %1, align 2 + %3 = icmp slt <16 x i16> %2, zeroinitializer + %4 = xor <16 x i16> %2, <i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768> + %5 = select <16 x i1> %3, <16 x i16> %4, <16 x i16> zeroinitializer + store <16 x i16> %5, <16 x i16>* %1, align 2 + %index.next = add i64 %index, 8 + %6 = icmp eq i64 %index.next, 16384 + br i1 %6, label %for.end, label %vector.body + +for.end: ; preds = %vector.body + ret void + +; AVX2: @test7 +; AVX2: vpsubusw LCPI6_0(%rip), %ymm0, %ymm0 +} + +define void @test8(i16* nocapture %head) nounwind { +vector.ph: + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %0 = getelementptr inbounds i16* %head, i64 %index + %1 = bitcast i16* %0 to <16 x i16>* + %2 = load <16 x i16>* %1, align 2 + %3 = icmp ugt <16 x i16> %2, <i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766> + %4 = add <16 x i16> %2, <i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767> + %5 = select <16 x i1> %3, <16 x i16> %4, <16 x i16> zeroinitializer + store <16 x i16> %5, <16 x i16>* %1, align 2 + %index.next = add i64 %index, 8 + %6 = icmp eq i64 %index.next, 16384 + br i1 %6, label %for.end, label %vector.body + +for.end: ; preds = %vector.body + ret void + +; AVX2: @test8 +; AVX2: vpsubusw LCPI7_0(%rip), %ymm0, %ymm0 +} + +define void @test9(i16* nocapture %head, i16 zeroext %w) nounwind { +vector.ph: + %0 = insertelement <16 x i16> undef, i16 %w, i32 0 + %broadcast15 = shufflevector <16 x i16> %0, <16 x i16> undef, <16 x i32> zeroinitializer + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %1 = getelementptr inbounds i16* %head, i64 %index + %2 = bitcast i16* %1 to <16 x i16>* + %3 = load <16 x i16>* %2, align 2 + %4 = icmp ult <16 x i16> %3, %broadcast15 + %5 = sub <16 x i16> %3, %broadcast15 + %6 = select <16 x i1> %4, <16 x i16> zeroinitializer, <16 x i16> %5 + store <16 x i16> %6, <16 x i16>* %2, align 2 + %index.next = add i64 %index, 8 + %7 = icmp eq i64 %index.next, 16384 + br i1 %7, label %for.end, label %vector.body + +for.end: ; preds = %vector.body + ret void + + +; AVX2: @test9 +; AVX2: vpsubusw %ymm0, %ymm1, %ymm1 +} + +define void @test10(i8* nocapture %head) nounwind { +vector.ph: + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %0 = getelementptr inbounds i8* %head, i64 %index + %1 = bitcast i8* %0 to <32 x i8>* + %2 = load <32 x i8>* %1, align 1 + %3 = icmp slt <32 x i8> %2, zeroinitializer + %4 = xor <32 x i8> %2, <i8 -128, i8 -128, i8 -128, i8 
-128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128> + %5 = select <32 x i1> %3, <32 x i8> %4, <32 x i8> zeroinitializer + store <32 x i8> %5, <32 x i8>* %1, align 1 + %index.next = add i64 %index, 16 + %6 = icmp eq i64 %index.next, 16384 + br i1 %6, label %for.end, label %vector.body + +for.end: ; preds = %vector.body + ret void + + +; AVX2: @test10 +; AVX2: vpsubusb LCPI9_0(%rip), %ymm0, %ymm0 +} + +define void @test11(i8* nocapture %head) nounwind { +vector.ph: + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %0 = getelementptr inbounds i8* %head, i64 %index + %1 = bitcast i8* %0 to <32 x i8>* + %2 = load <32 x i8>* %1, align 1 + %3 = icmp ugt <32 x i8> %2, <i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126> + %4 = add <32 x i8> %2, <i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127> + %5 = select <32 x i1> %3, <32 x i8> %4, <32 x i8> zeroinitializer + store <32 x i8> %5, <32 x i8>* %1, align 1 + %index.next = add i64 %index, 16 + %6 = icmp eq i64 %index.next, 16384 + br i1 %6, label %for.end, label %vector.body + +for.end: ; preds = %vector.body + ret void + +; AVX2: @test11 +; AVX2: vpsubusb LCPI10_0(%rip), %ymm0, %ymm0 +} + +define void @test12(i8* nocapture %head, i8 zeroext %w) nounwind { +vector.ph: + %0 = insertelement <32 x i8> undef, i8 %w, i32 0 + %broadcast15 = shufflevector <32 x i8> %0, <32 x i8> undef, <32 x i32> zeroinitializer + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %1 = getelementptr inbounds i8* %head, i64 %index + %2 = bitcast i8* %1 to <32 x i8>* + %3 = load <32 x i8>* %2, align 1 + %4 = icmp ult <32 x i8> %3, %broadcast15 + %5 = sub <32 x i8> %3, %broadcast15 + %6 = select <32 x i1> %4, <32 x i8> zeroinitializer, <32 x i8> %5 + store <32 x i8> %6, <32 x i8>* %2, align 1 + %index.next = add i64 %index, 16 + %7 = icmp eq i64 %index.next, 16384 + br i1 %7, label %for.end, label %vector.body + +for.end: ; preds = %vector.body + ret void + +; AVX2: @test12 +; AVX2: vpsubusb %ymm0, %ymm1, %ymm1 +} diff --git a/test/CodeGen/X86/ret-mmx.ll b/test/CodeGen/X86/ret-mmx.ll index 865e147a4a..778e4722cd 100644 --- a/test/CodeGen/X86/ret-mmx.ll +++ b/test/CodeGen/X86/ret-mmx.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -mtriple=x86_64-apple-darwin11 -mattr=+mmx,+sse2 | FileCheck %s +; RUN: llc < %s -mtriple=x86_64-apple-darwin11 -mcpu=core2 -mattr=+mmx,+sse2 | FileCheck %s ; rdar://6602459 @g_v1di = external global <1 x i64> diff --git a/test/CodeGen/X86/select.ll b/test/CodeGen/X86/select.ll index 3bec3acdbf..09ca07b31a 100644 --- a/test/CodeGen/X86/select.ll +++ b/test/CodeGen/X86/select.ll @@ -282,7 +282,7 @@ define i32 @test13(i32 %a, i32 %b) nounwind { ; ATOM: test13: ; ATOM: cmpl ; ATOM-NEXT: sbbl -; ATOM-NEXT: ret +; ATOM: ret } define i32 @test14(i32 
%a, i32 %b) nounwind {
@@ -299,7 +299,7 @@ define i32 @test14(i32 %a, i32 %b) nounwind {
 ; ATOM: cmpl
 ; ATOM-NEXT: sbbl
 ; ATOM-NEXT: notl
-; ATOM-NEXT: ret
+; ATOM: ret
 }
 
 ; rdar://10961709
diff --git a/test/CodeGen/X86/sse-align-2.ll b/test/CodeGen/X86/sse-align-2.ll
index 102c3fb06c..22cd772306 100644
--- a/test/CodeGen/X86/sse-align-2.ll
+++ b/test/CodeGen/X86/sse-align-2.ll
@@ -1,12 +1,21 @@
-; RUN: llc < %s -march=x86-64 | grep movup | count 2
+; RUN: llc < %s -march=x86-64 -mcpu=penryn | FileCheck %s
 
 define <4 x float> @foo(<4 x float>* %p, <4 x float> %x) nounwind {
   %t = load <4 x float>* %p, align 4
   %z = fmul <4 x float> %t, %x
   ret <4 x float> %z
 }
+
+; CHECK: foo:
+; CHECK: movups
+; CHECK: ret
+
 define <2 x double> @bar(<2 x double>* %p, <2 x double> %x) nounwind {
   %t = load <2 x double>* %p, align 8
   %z = fmul <2 x double> %t, %x
   ret <2 x double> %z
 }
+
+; CHECK: bar:
+; CHECK: movupd
+; CHECK: ret
diff --git a/test/CodeGen/X86/sse-domains.ll b/test/CodeGen/X86/sse-domains.ll
index c99287bdfb..168959a5d6 100644
--- a/test/CodeGen/X86/sse-domains.ll
+++ b/test/CodeGen/X86/sse-domains.ll
@@ -55,10 +55,10 @@ while.end:
 ; instructions, they are still dependent on themselves.
 ; CHECK: xorps [[XMM1:%xmm[0-9]+]]
 ; CHECK: , [[XMM1]]
-; CHECK: cvtsi2ss %{{.*}}, [[XMM1]]
+; CHECK: cvtsi2ssl %{{.*}}, [[XMM1]]
 ; CHECK: xorps [[XMM2:%xmm[0-9]+]]
 ; CHECK: , [[XMM2]]
-; CHECK: cvtsi2ss %{{.*}}, [[XMM2]]
+; CHECK: cvtsi2ssl %{{.*}}, [[XMM2]]
 ;
 define float @f2(i32 %m) nounwind uwtable readnone ssp {
 entry:
diff --git a/test/CodeGen/X86/sse2-mul.ll b/test/CodeGen/X86/sse2-mul.ll
new file mode 100644
index 0000000000..0466d60ec3
--- /dev/null
+++ b/test/CodeGen/X86/sse2-mul.ll
@@ -0,0 +1,14 @@
+; RUN: llc < %s -march=x86-64 -mcpu=core2 | FileCheck %s
+
+define <4 x i32> @test1(<4 x i32> %x, <4 x i32> %y) {
+  %m = mul <4 x i32> %x, %y
+  ret <4 x i32> %m
+; CHECK: test1:
+; CHECK: pshufd $49
+; CHECK: pmuludq
+; CHECK: pshufd $49
+; CHECK: pmuludq
+; CHECK: shufps $-120
+; CHECK: pshufd $-40
+; CHECK: ret
+}
diff --git a/test/CodeGen/X86/store_op_load_fold.ll b/test/CodeGen/X86/store_op_load_fold.ll
index 6e47eb397d..070cccdb87 100644
--- a/test/CodeGen/X86/store_op_load_fold.ll
+++ b/test/CodeGen/X86/store_op_load_fold.ll
@@ -1,13 +1,30 @@
-; RUN: llc < %s -march=x86 | not grep mov
+; RUN: llc < %s -mtriple=i686-darwin | FileCheck %s
 ;
 ; Test that the add and the load are folded into the store instruction.
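+; On x86 the load, add, and store collapse into one read-modify-write
+; instruction, roughly (Darwin symbol name assumed):
+;   addw $329, _X
+; which is why the CHECKs below accept a single add and no mov.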
@X = internal global i16 0 ; <i16*> [#uses=2] define void @foo() nounwind { +; CHECK: foo: +; CHECK-NOT: mov +; CHECK: add +; CHECK-NEXT: ret %tmp.0 = load i16* @X ; <i16> [#uses=1] %tmp.3 = add i16 %tmp.0, 329 ; <i16> [#uses=1] store i16 %tmp.3, i16* @X ret void } +; rdar://12838504 +%struct.S2 = type { i64, i16, [2 x i8], i8, [3 x i8], [7 x i8], i8, [8 x i8] } +@s2 = external global %struct.S2, align 16 +define void @test2() nounwind uwtable ssp { +; CHECK: test2: +; CHECK: mov +; CHECK-NEXT: and +; CHECK-NEXT: ret + %bf.load35 = load i56* bitcast ([7 x i8]* getelementptr inbounds (%struct.S2* @s2, i32 0, i32 5) to i56*), align 16 + %bf.clear36 = and i56 %bf.load35, -1125895611875329 + store i56 %bf.clear36, i56* bitcast ([7 x i8]* getelementptr inbounds (%struct.S2* @s2, i32 0, i32 5) to i56*), align 16 + ret void +} diff --git a/test/CodeGen/X86/v8i1-masks.ll b/test/CodeGen/X86/v8i1-masks.ll new file mode 100644 index 0000000000..abb4b39bd6 --- /dev/null +++ b/test/CodeGen/X86/v8i1-masks.ll @@ -0,0 +1,39 @@ +; RUN: llc -march=x86-64 -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -o - < %s | FileCheck %s + +;CHECK: and_masks +;CHECK: vmovups +;CHECK: vcmpltp +;CHECK: vcmpltp +;CHECK: vandps +;CHECK: vandps +;CHECK: vmovups +;CHECK: ret + +define void @and_masks(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwind uwtable noinline ssp { + %v0 = load <8 x float>* %a, align 16 + %v1 = load <8 x float>* %b, align 16 + %m0 = fcmp olt <8 x float> %v1, %v0 + %v2 = load <8 x float>* %c, align 16 + %m1 = fcmp olt <8 x float> %v2, %v0 + %mand = and <8 x i1> %m1, %m0 + %r = zext <8 x i1> %mand to <8 x i32> + store <8 x i32> %r, <8 x i32>* undef, align 16 + ret void +} + +;CHECK: neg_mask +;CHECK: vcmpltps +;CHECK: vxorps +;CHECK: vandps +;CHECK: vmovups +;CHECK: ret +define void @neg_masks(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwind uwtable noinline ssp { + %v0 = load <8 x float>* %a, align 16 + %v1 = load <8 x float>* %b, align 16 + %m0 = fcmp olt <8 x float> %v1, %v0 + %mand = xor <8 x i1> %m0, <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1> + %r = zext <8 x i1> %mand to <8 x i32> + store <8 x i32> %r, <8 x i32>* undef, align 16 + ret void +} + diff --git a/test/CodeGen/X86/vec_compare.ll b/test/CodeGen/X86/vec_compare.ll index 367dd27f30..b6d91a3f77 100644 --- a/test/CodeGen/X86/vec_compare.ll +++ b/test/CodeGen/X86/vec_compare.ll @@ -41,3 +41,27 @@ define <4 x i32> @test4(<4 x i32> %A, <4 x i32> %B) nounwind { %D = sext <4 x i1> %C to <4 x i32> ret <4 x i32> %D } + +define <2 x i64> @test5(<2 x i64> %A, <2 x i64> %B) nounwind { +; CHECK: test5: +; CHECK: pcmpeqd +; CHECK: pshufd $-79 +; CHECK: pand +; CHECK: ret + %C = icmp eq <2 x i64> %A, %B + %D = sext <2 x i1> %C to <2 x i64> + ret <2 x i64> %D +} + +define <2 x i64> @test6(<2 x i64> %A, <2 x i64> %B) nounwind { +; CHECK: test6: +; CHECK: pcmpeqd +; CHECK: pshufd $-79 +; CHECK: pand +; CHECK: pcmpeqd +; CHECK: pxor +; CHECK: ret + %C = icmp ne <2 x i64> %A, %B + %D = sext <2 x i1> %C to <2 x i64> + ret <2 x i64> %D +} diff --git a/test/CodeGen/X86/vec_sdiv_to_shift.ll b/test/CodeGen/X86/vec_sdiv_to_shift.ll new file mode 100644 index 0000000000..35e052d97b --- /dev/null +++ b/test/CodeGen/X86/vec_sdiv_to_shift.ll @@ -0,0 +1,72 @@ +; RUN: llc < %s -march=x86-64 -mcpu=penryn -mattr=+avx2 | FileCheck %s + + +define <8 x i16> @sdiv_vec8x16(<8 x i16> %var) { +entry: +; CHECK: sdiv_vec8x16 +; CHECK: psraw $15 +; CHECK: vpsrlw $11 +; CHECK: vpaddw +; CHECK: vpsraw $5 +; CHECK: ret + %0 = sdiv <8 x i16> %var, <i16 32, i16 
32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32>
+  ret <8 x i16> %0
+}
+
+define <4 x i32> @sdiv_zero(<4 x i32> %var) {
+entry:
+; CHECK: sdiv_zero
+; CHECK-NOT: sra
+; CHECK: ret
+  %0 = sdiv <4 x i32> %var, <i32 0, i32 0, i32 0, i32 0>
+  ret <4 x i32> %0
+}
+
+define <4 x i32> @sdiv_vec4x32(<4 x i32> %var) {
+entry:
+; CHECK: sdiv_vec4x32
+; CHECK: vpsrad $31
+; CHECK: vpsrld $28
+; CHECK: vpaddd
+; CHECK: vpsrad $4
+; CHECK: ret
+%0 = sdiv <4 x i32> %var, <i32 16, i32 16, i32 16, i32 16>
+ret <4 x i32> %0
+}
+
+define <4 x i32> @sdiv_negative(<4 x i32> %var) {
+entry:
+; CHECK: sdiv_negative
+; CHECK: vpsrad $31
+; CHECK: vpsrld $28
+; CHECK: vpaddd
+; CHECK: vpsrad $4
+; CHECK: vpsubd
+; CHECK: ret
+%0 = sdiv <4 x i32> %var, <i32 -16, i32 -16, i32 -16, i32 -16>
+ret <4 x i32> %0
+}
+
+define <8 x i32> @sdiv8x32(<8 x i32> %var) {
+entry:
+; CHECK: sdiv8x32
+; CHECK: vpsrad $31
+; CHECK: vpsrld $26
+; CHECK: vpaddd
+; CHECK: vpsrad $6
+; CHECK: ret
+%0 = sdiv <8 x i32> %var, <i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64>
+ret <8 x i32> %0
+}
+
+define <16 x i16> @sdiv16x16(<16 x i16> %var) {
+entry:
+; CHECK: sdiv16x16
+; CHECK: vpsraw $15
+; CHECK: vpsrlw $14
+; CHECK: vpaddw
+; CHECK: vpsraw $2
+; CHECK: ret
+  %a0 = sdiv <16 x i16> %var, <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4>
+  ret <16 x i16> %a0
+}
diff --git a/test/CodeGen/X86/vector-gep.ll b/test/CodeGen/X86/vector-gep.ll
index 3476e36c64..d08e2a0746 100644
--- a/test/CodeGen/X86/vector-gep.ll
+++ b/test/CodeGen/X86/vector-gep.ll
@@ -1,5 +1,5 @@
 ; RUN: llc < %s -march=x86 -mcpu=corei7-avx | FileCheck %s
-; RUN: opt -instsimplify %s -disable-output
+; RUN: opt -instsimplify -disable-output < %s
 
 ;CHECK: AGEP0:
 define <4 x i32*> @AGEP0(i32* %ptr) nounwind {
diff --git a/test/CodeGen/X86/vselect-minmax.ll b/test/CodeGen/X86/vselect-minmax.ll
new file mode 100644
index 0000000000..cf654b6f20
--- /dev/null
+++ b/test/CodeGen/X86/vselect-minmax.ll
@@ -0,0 +1,2788 @@
+; RUN: llc -march=x86-64 -mcpu=core2 < %s | FileCheck %s -check-prefix=SSE2
+; RUN: llc -march=x86-64 -mcpu=corei7 < %s | FileCheck %s -check-prefix=SSE4
+; RUN: llc -march=x86-64 -mcpu=corei7-avx < %s | FileCheck %s -check-prefix=AVX1
+; RUN: llc -march=x86-64 -mcpu=core-avx2 -mattr=+avx2 < %s | FileCheck %s -check-prefix=AVX2
+
+define void @test1(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %gep.a = getelementptr inbounds i8* %a, i64 %index
+  %gep.b = getelementptr inbounds i8* %b, i64 %index
+  %ptr.a = bitcast i8* %gep.a to <16 x i8>*
+  %ptr.b = bitcast i8* %gep.b to <16 x i8>*
+  %load.a = load <16 x i8>* %ptr.a, align 2
+  %load.b = load <16 x i8>* %ptr.b, align 2
+  %cmp = icmp slt <16 x i8> %load.a, %load.b
+  %sel = select <16 x i1> %cmp, <16 x i8> %load.a, <16 x i8> %load.b
+  store <16 x i8> %sel, <16 x i8>* %ptr.a, align 2
+  %index.next = add i64 %index, 16
+  %loop = icmp eq i64 %index.next, 16384
+  br i1 %loop, label %for.end, label %vector.body
+
+for.end:                                          ; preds = %vector.body
+  ret void
+
+; SSE4: test1:
+; SSE4: pminsb
+
+; AVX1: test1:
+; AVX1: vpminsb
+
+; AVX2: test1:
+; AVX2: vpminsb
+}
+
+define void @test2(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+
%gep.a = getelementptr inbounds i8* %a, i64 %index + %gep.b = getelementptr inbounds i8* %b, i64 %index + %ptr.a = bitcast i8* %gep.a to <16 x i8>* + %ptr.b = bitcast i8* %gep.b to <16 x i8>* + %load.a = load <16 x i8>* %ptr.a, align 2 + %load.b = load <16 x i8>* %ptr.b, align 2 + %cmp = icmp sle <16 x i8> %load.a, %load.b + %sel = select <16 x i1> %cmp, <16 x i8> %load.a, <16 x i8> %load.b + store <16 x i8> %sel, <16 x i8>* %ptr.a, align 2 + %index.next = add i64 %index, 16 + %loop = icmp eq i64 %index.next, 16384 + br i1 %loop, label %for.end, label %vector.body + +for.end: ; preds = %vector.body + ret void + +; SSE4: test2: +; SSE4: pminsb + +; AVX1: test2: +; AVX1: vpminsb + +; AVX2: test2: +; AVX2: vpminsb +} + +define void @test3(i8* nocapture %a, i8* nocapture %b) nounwind { +vector.ph: + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %gep.a = getelementptr inbounds i8* %a, i64 %index + %gep.b = getelementptr inbounds i8* %b, i64 %index + %ptr.a = bitcast i8* %gep.a to <16 x i8>* + %ptr.b = bitcast i8* %gep.b to <16 x i8>* + %load.a = load <16 x i8>* %ptr.a, align 2 + %load.b = load <16 x i8>* %ptr.b, align 2 + %cmp = icmp sgt <16 x i8> %load.a, %load.b + %sel = select <16 x i1> %cmp, <16 x i8> %load.a, <16 x i8> %load.b + store <16 x i8> %sel, <16 x i8>* %ptr.a, align 2 + %index.next = add i64 %index, 16 + %loop = icmp eq i64 %index.next, 16384 + br i1 %loop, label %for.end, label %vector.body + +for.end: ; preds = %vector.body + ret void + +; SSE4: test3: +; SSE4: pmaxsb + +; AVX1: test3: +; AVX1: vpmaxsb + +; AVX2: test3: +; AVX2: vpmaxsb +} + +define void @test4(i8* nocapture %a, i8* nocapture %b) nounwind { +vector.ph: + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %gep.a = getelementptr inbounds i8* %a, i64 %index + %gep.b = getelementptr inbounds i8* %b, i64 %index + %ptr.a = bitcast i8* %gep.a to <16 x i8>* + %ptr.b = bitcast i8* %gep.b to <16 x i8>* + %load.a = load <16 x i8>* %ptr.a, align 2 + %load.b = load <16 x i8>* %ptr.b, align 2 + %cmp = icmp sge <16 x i8> %load.a, %load.b + %sel = select <16 x i1> %cmp, <16 x i8> %load.a, <16 x i8> %load.b + store <16 x i8> %sel, <16 x i8>* %ptr.a, align 2 + %index.next = add i64 %index, 16 + %loop = icmp eq i64 %index.next, 16384 + br i1 %loop, label %for.end, label %vector.body + +for.end: ; preds = %vector.body + ret void + +; SSE4: test4: +; SSE4: pmaxsb + +; AVX1: test4: +; AVX1: vpmaxsb + +; AVX2: test4: +; AVX2: vpmaxsb +} + +define void @test5(i8* nocapture %a, i8* nocapture %b) nounwind { +vector.ph: + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %gep.a = getelementptr inbounds i8* %a, i64 %index + %gep.b = getelementptr inbounds i8* %b, i64 %index + %ptr.a = bitcast i8* %gep.a to <16 x i8>* + %ptr.b = bitcast i8* %gep.b to <16 x i8>* + %load.a = load <16 x i8>* %ptr.a, align 2 + %load.b = load <16 x i8>* %ptr.b, align 2 + %cmp = icmp ult <16 x i8> %load.a, %load.b + %sel = select <16 x i1> %cmp, <16 x i8> %load.a, <16 x i8> %load.b + store <16 x i8> %sel, <16 x i8>* %ptr.a, align 2 + %index.next = add i64 %index, 16 + %loop = icmp eq i64 %index.next, 16384 + br i1 %loop, label %for.end, label %vector.body + +for.end: ; preds = %vector.body + ret void + +; SSE2: test5: +; SSE2: pminub + +; AVX1: test5: +; AVX1: vpminub + +; AVX2: test5: 
+; AVX2: vpminub +} + +define void @test6(i8* nocapture %a, i8* nocapture %b) nounwind { +vector.ph: + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %gep.a = getelementptr inbounds i8* %a, i64 %index + %gep.b = getelementptr inbounds i8* %b, i64 %index + %ptr.a = bitcast i8* %gep.a to <16 x i8>* + %ptr.b = bitcast i8* %gep.b to <16 x i8>* + %load.a = load <16 x i8>* %ptr.a, align 2 + %load.b = load <16 x i8>* %ptr.b, align 2 + %cmp = icmp ule <16 x i8> %load.a, %load.b + %sel = select <16 x i1> %cmp, <16 x i8> %load.a, <16 x i8> %load.b + store <16 x i8> %sel, <16 x i8>* %ptr.a, align 2 + %index.next = add i64 %index, 16 + %loop = icmp eq i64 %index.next, 16384 + br i1 %loop, label %for.end, label %vector.body + +for.end: ; preds = %vector.body + ret void + +; SSE2: test6: +; SSE2: pminub + +; AVX1: test6: +; AVX1: vpminub + +; AVX2: test6: +; AVX2: vpminub +} + +define void @test7(i8* nocapture %a, i8* nocapture %b) nounwind { +vector.ph: + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %gep.a = getelementptr inbounds i8* %a, i64 %index + %gep.b = getelementptr inbounds i8* %b, i64 %index + %ptr.a = bitcast i8* %gep.a to <16 x i8>* + %ptr.b = bitcast i8* %gep.b to <16 x i8>* + %load.a = load <16 x i8>* %ptr.a, align 2 + %load.b = load <16 x i8>* %ptr.b, align 2 + %cmp = icmp ugt <16 x i8> %load.a, %load.b + %sel = select <16 x i1> %cmp, <16 x i8> %load.a, <16 x i8> %load.b + store <16 x i8> %sel, <16 x i8>* %ptr.a, align 2 + %index.next = add i64 %index, 16 + %loop = icmp eq i64 %index.next, 16384 + br i1 %loop, label %for.end, label %vector.body + +for.end: ; preds = %vector.body + ret void + +; SSE2: test7: +; SSE2: pmaxub + +; AVX1: test7: +; AVX1: vpmaxub + +; AVX2: test7: +; AVX2: vpmaxub +} + +define void @test8(i8* nocapture %a, i8* nocapture %b) nounwind { +vector.ph: + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %gep.a = getelementptr inbounds i8* %a, i64 %index + %gep.b = getelementptr inbounds i8* %b, i64 %index + %ptr.a = bitcast i8* %gep.a to <16 x i8>* + %ptr.b = bitcast i8* %gep.b to <16 x i8>* + %load.a = load <16 x i8>* %ptr.a, align 2 + %load.b = load <16 x i8>* %ptr.b, align 2 + %cmp = icmp uge <16 x i8> %load.a, %load.b + %sel = select <16 x i1> %cmp, <16 x i8> %load.a, <16 x i8> %load.b + store <16 x i8> %sel, <16 x i8>* %ptr.a, align 2 + %index.next = add i64 %index, 16 + %loop = icmp eq i64 %index.next, 16384 + br i1 %loop, label %for.end, label %vector.body + +for.end: ; preds = %vector.body + ret void + +; SSE2: test8: +; SSE2: pmaxub + +; AVX1: test8: +; AVX1: vpmaxub + +; AVX2: test8: +; AVX2: vpmaxub +} + +define void @test9(i16* nocapture %a, i16* nocapture %b) nounwind { +vector.ph: + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %gep.a = getelementptr inbounds i16* %a, i64 %index + %gep.b = getelementptr inbounds i16* %b, i64 %index + %ptr.a = bitcast i16* %gep.a to <8 x i16>* + %ptr.b = bitcast i16* %gep.b to <8 x i16>* + %load.a = load <8 x i16>* %ptr.a, align 2 + %load.b = load <8 x i16>* %ptr.b, align 2 + %cmp = icmp slt <8 x i16> %load.a, %load.b + %sel = select <8 x i1> %cmp, <8 x i16> %load.a, <8 x i16> %load.b + store <8 x i16> %sel, <8 x i16>* %ptr.a, align 2 + 
%index.next = add i64 %index, 8 + %loop = icmp eq i64 %index.next, 16384 + br i1 %loop, label %for.end, label %vector.body + +for.end: ; preds = %vector.body + ret void + +; SSE2: test9: +; SSE2: pminsw + +; AVX1: test9: +; AVX1: vpminsw + +; AVX2: test9: +; AVX2: vpminsw +} + +define void @test10(i16* nocapture %a, i16* nocapture %b) nounwind { +vector.ph: + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %gep.a = getelementptr inbounds i16* %a, i64 %index + %gep.b = getelementptr inbounds i16* %b, i64 %index + %ptr.a = bitcast i16* %gep.a to <8 x i16>* + %ptr.b = bitcast i16* %gep.b to <8 x i16>* + %load.a = load <8 x i16>* %ptr.a, align 2 + %load.b = load <8 x i16>* %ptr.b, align 2 + %cmp = icmp sle <8 x i16> %load.a, %load.b + %sel = select <8 x i1> %cmp, <8 x i16> %load.a, <8 x i16> %load.b + store <8 x i16> %sel, <8 x i16>* %ptr.a, align 2 + %index.next = add i64 %index, 8 + %loop = icmp eq i64 %index.next, 16384 + br i1 %loop, label %for.end, label %vector.body + +for.end: ; preds = %vector.body + ret void + +; SSE2: test10: +; SSE2: pminsw + +; AVX1: test10: +; AVX1: vpminsw + +; AVX2: test10: +; AVX2: vpminsw +} + +define void @test11(i16* nocapture %a, i16* nocapture %b) nounwind { +vector.ph: + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %gep.a = getelementptr inbounds i16* %a, i64 %index + %gep.b = getelementptr inbounds i16* %b, i64 %index + %ptr.a = bitcast i16* %gep.a to <8 x i16>* + %ptr.b = bitcast i16* %gep.b to <8 x i16>* + %load.a = load <8 x i16>* %ptr.a, align 2 + %load.b = load <8 x i16>* %ptr.b, align 2 + %cmp = icmp sgt <8 x i16> %load.a, %load.b + %sel = select <8 x i1> %cmp, <8 x i16> %load.a, <8 x i16> %load.b + store <8 x i16> %sel, <8 x i16>* %ptr.a, align 2 + %index.next = add i64 %index, 8 + %loop = icmp eq i64 %index.next, 16384 + br i1 %loop, label %for.end, label %vector.body + +for.end: ; preds = %vector.body + ret void + +; SSE2: test11: +; SSE2: pmaxsw + +; AVX1: test11: +; AVX1: vpmaxsw + +; AVX2: test11: +; AVX2: vpmaxsw +} + +define void @test12(i16* nocapture %a, i16* nocapture %b) nounwind { +vector.ph: + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %gep.a = getelementptr inbounds i16* %a, i64 %index + %gep.b = getelementptr inbounds i16* %b, i64 %index + %ptr.a = bitcast i16* %gep.a to <8 x i16>* + %ptr.b = bitcast i16* %gep.b to <8 x i16>* + %load.a = load <8 x i16>* %ptr.a, align 2 + %load.b = load <8 x i16>* %ptr.b, align 2 + %cmp = icmp sge <8 x i16> %load.a, %load.b + %sel = select <8 x i1> %cmp, <8 x i16> %load.a, <8 x i16> %load.b + store <8 x i16> %sel, <8 x i16>* %ptr.a, align 2 + %index.next = add i64 %index, 8 + %loop = icmp eq i64 %index.next, 16384 + br i1 %loop, label %for.end, label %vector.body + +for.end: ; preds = %vector.body + ret void + +; SSE2: test12: +; SSE2: pmaxsw + +; AVX1: test12: +; AVX1: vpmaxsw + +; AVX2: test12: +; AVX2: vpmaxsw +} + +define void @test13(i16* nocapture %a, i16* nocapture %b) nounwind { +vector.ph: + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %gep.a = getelementptr inbounds i16* %a, i64 %index + %gep.b = getelementptr inbounds i16* %b, i64 %index + %ptr.a = bitcast i16* %gep.a to <8 x i16>* + %ptr.b = bitcast i16* 
%gep.b to <8 x i16>*
+ %load.a = load <8 x i16>* %ptr.a, align 2
+ %load.b = load <8 x i16>* %ptr.b, align 2
+ %cmp = icmp ult <8 x i16> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i16> %load.a, <8 x i16> %load.b
+ store <8 x i16> %sel, <8 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE4: test13:
+; SSE4: pminuw
+
+; AVX1: test13:
+; AVX1: vpminuw
+
+; AVX2: test13:
+; AVX2: vpminuw
+}
+
+define void @test14(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <8 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <8 x i16>*
+ %load.a = load <8 x i16>* %ptr.a, align 2
+ %load.b = load <8 x i16>* %ptr.b, align 2
+ %cmp = icmp ule <8 x i16> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i16> %load.a, <8 x i16> %load.b
+ store <8 x i16> %sel, <8 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE4: test14:
+; SSE4: pminuw
+
+; AVX1: test14:
+; AVX1: vpminuw
+
+; AVX2: test14:
+; AVX2: vpminuw
+}
+
+define void @test15(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <8 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <8 x i16>*
+ %load.a = load <8 x i16>* %ptr.a, align 2
+ %load.b = load <8 x i16>* %ptr.b, align 2
+ %cmp = icmp ugt <8 x i16> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i16> %load.a, <8 x i16> %load.b
+ store <8 x i16> %sel, <8 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE4: test15:
+; SSE4: pmaxuw
+
+; AVX1: test15:
+; AVX1: vpmaxuw
+
+; AVX2: test15:
+; AVX2: vpmaxuw
+}
+
+define void @test16(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <8 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <8 x i16>*
+ %load.a = load <8 x i16>* %ptr.a, align 2
+ %load.b = load <8 x i16>* %ptr.b, align 2
+ %cmp = icmp uge <8 x i16> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i16> %load.a, <8 x i16> %load.b
+ store <8 x i16> %sel, <8 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE4: test16:
+; SSE4: pmaxuw
+
+; AVX1: test16:
+; AVX1: vpmaxuw
+
+; AVX2: test16:
+; AVX2: vpmaxuw
+}
+
+define void @test17(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <4 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <4 x i32>*
+ %load.a = load <4 x i32>* %ptr.a, align 2
+ %load.b = load <4 x i32>* %ptr.b, align 2
+ %cmp = icmp slt <4 x i32> %load.a, %load.b
+ %sel = select <4 x i1> %cmp, <4 x i32> %load.a, <4 x i32> %load.b
+ store <4 x i32> %sel, <4 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 4
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE4: test17:
+; SSE4: pminsd
+
+; AVX1: test17:
+; AVX1: vpminsd
+
+; AVX2: test17:
+; AVX2: vpminsd
+}
+
+define void @test18(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <4 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <4 x i32>*
+ %load.a = load <4 x i32>* %ptr.a, align 2
+ %load.b = load <4 x i32>* %ptr.b, align 2
+ %cmp = icmp sle <4 x i32> %load.a, %load.b
+ %sel = select <4 x i1> %cmp, <4 x i32> %load.a, <4 x i32> %load.b
+ store <4 x i32> %sel, <4 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 4
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE4: test18:
+; SSE4: pminsd
+
+; AVX1: test18:
+; AVX1: vpminsd
+
+; AVX2: test18:
+; AVX2: vpminsd
+}
+
+define void @test19(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <4 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <4 x i32>*
+ %load.a = load <4 x i32>* %ptr.a, align 2
+ %load.b = load <4 x i32>* %ptr.b, align 2
+ %cmp = icmp sgt <4 x i32> %load.a, %load.b
+ %sel = select <4 x i1> %cmp, <4 x i32> %load.a, <4 x i32> %load.b
+ store <4 x i32> %sel, <4 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 4
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE4: test19:
+; SSE4: pmaxsd
+
+; AVX1: test19:
+; AVX1: vpmaxsd
+
+; AVX2: test19:
+; AVX2: vpmaxsd
+}
+
+define void @test20(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <4 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <4 x i32>*
+ %load.a = load <4 x i32>* %ptr.a, align 2
+ %load.b = load <4 x i32>* %ptr.b, align 2
+ %cmp = icmp sge <4 x i32> %load.a, %load.b
+ %sel = select <4 x i1> %cmp, <4 x i32> %load.a, <4 x i32> %load.b
+ store <4 x i32> %sel, <4 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 4
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE4: test20:
+; SSE4: pmaxsd
+
+; AVX1: test20:
+; AVX1: vpmaxsd
+
+; AVX2: test20:
+; AVX2: vpmaxsd
+}
+
+define void @test21(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <4 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <4 x i32>*
+ %load.a = load <4 x i32>* %ptr.a, align 2
+ %load.b = load <4 x i32>* %ptr.b, align 2
+ %cmp = icmp ult <4 x i32> %load.a, %load.b
+ %sel = select <4 x i1> %cmp, <4 x i32> %load.a, <4 x i32> %load.b
+ store <4 x i32> %sel, <4 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 4
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE4: test21:
+; SSE4: pminud
+
+; AVX1: test21:
+; AVX1: vpminud
+
+; AVX2: test21:
+; AVX2: vpminud
+}
+
+define void @test22(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <4 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <4 x i32>*
+ %load.a = load <4 x i32>* %ptr.a, align 2
+ %load.b = load <4 x i32>* %ptr.b, align 2
+ %cmp = icmp ule <4 x i32> %load.a, %load.b
+ %sel = select <4 x i1> %cmp, <4 x i32> %load.a, <4 x i32> %load.b
+ store <4 x i32> %sel, <4 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 4
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE4: test22:
+; SSE4: pminud
+
+; AVX1: test22:
+; AVX1: vpminud
+
+; AVX2: test22:
+; AVX2: vpminud
+}
+
+define void @test23(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <4 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <4 x i32>*
+ %load.a = load <4 x i32>* %ptr.a, align 2
+ %load.b = load <4 x i32>* %ptr.b, align 2
+ %cmp = icmp ugt <4 x i32> %load.a, %load.b
+ %sel = select <4 x i1> %cmp, <4 x i32> %load.a, <4 x i32> %load.b
+ store <4 x i32> %sel, <4 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 4
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE4: test23:
+; SSE4: pmaxud
+
+; AVX1: test23:
+; AVX1: vpmaxud
+
+; AVX2: test23:
+; AVX2: vpmaxud
+}
+
+define void @test24(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <4 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <4 x i32>*
+ %load.a = load <4 x i32>* %ptr.a, align 2
+ %load.b = load <4 x i32>* %ptr.b, align 2
+ %cmp = icmp uge <4 x i32> %load.a, %load.b
+ %sel = select <4 x i1> %cmp, <4 x i32> %load.a, <4 x i32> %load.b
+ store <4 x i32> %sel, <4 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 4
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE4: test24:
+; SSE4: pmaxud
+
+; AVX1: test24:
+; AVX1: vpmaxud
+
+; AVX2: test24:
+; AVX2: vpmaxud
+}
+
+define void @test25(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <32 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <32 x i8>*
+ %load.a = load <32 x i8>* %ptr.a, align 2
+ %load.b = load <32 x i8>* %ptr.b, align 2
+ %cmp = icmp slt <32 x i8> %load.a, %load.b
+ %sel = select <32 x i1> %cmp, <32 x i8> %load.a, <32 x i8> %load.b
+ store <32 x i8> %sel, <32 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 32
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test25:
+; AVX2: vpminsb
+}
+
+define void @test26(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <32 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <32 x i8>*
+ %load.a = load <32 x i8>* %ptr.a, align 2
+ %load.b = load <32 x i8>* %ptr.b, align 2
+ %cmp = icmp sle <32 x i8> %load.a, %load.b
+ %sel = select <32 x i1> %cmp, <32 x i8> %load.a, <32 x i8> %load.b
+ store <32 x i8> %sel, <32 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 32
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test26:
+; AVX2: vpminsb
+}
+
+define void @test27(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <32 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <32 x i8>*
+ %load.a = load <32 x i8>* %ptr.a, align 2
+ %load.b = load <32 x i8>* %ptr.b, align 2
+ %cmp = icmp sgt <32 x i8> %load.a, %load.b
+ %sel = select <32 x i1> %cmp, <32 x i8> %load.a, <32 x i8> %load.b
+ store <32 x i8> %sel, <32 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 32
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test27:
+; AVX2: vpmaxsb
+}
+
+define void @test28(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <32 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <32 x i8>*
+ %load.a = load <32 x i8>* %ptr.a, align 2
+ %load.b = load <32 x i8>* %ptr.b, align 2
+ %cmp = icmp sge <32 x i8> %load.a, %load.b
+ %sel = select <32 x i1> %cmp, <32 x i8> %load.a, <32 x i8> %load.b
+ store <32 x i8> %sel, <32 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 32
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test28:
+; AVX2: vpmaxsb
+}
+
+define void @test29(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <32 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <32 x i8>*
+ %load.a = load <32 x i8>* %ptr.a, align 2
+ %load.b = load <32 x i8>* %ptr.b, align 2
+ %cmp = icmp ult <32 x i8> %load.a, %load.b
+ %sel = select <32 x i1> %cmp, <32 x i8> %load.a, <32 x i8> %load.b
+ store <32 x i8> %sel, <32 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 32
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test29:
+; AVX2: vpminub
+}
+
+define void @test30(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <32 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <32 x i8>*
+ %load.a = load <32 x i8>* %ptr.a, align 2
+ %load.b = load <32 x i8>* %ptr.b, align 2
+ %cmp = icmp ule <32 x i8> %load.a, %load.b
+ %sel = select <32 x i1> %cmp, <32 x i8> %load.a, <32 x i8> %load.b
+ store <32 x i8> %sel, <32 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 32
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test30:
+; AVX2: vpminub
+}
+
+define void @test31(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <32 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <32 x i8>*
+ %load.a = load <32 x i8>* %ptr.a, align 2
+ %load.b = load <32 x i8>* %ptr.b, align 2
+ %cmp = icmp ugt <32 x i8> %load.a, %load.b
+ %sel = select <32 x i1> %cmp, <32 x i8> %load.a, <32 x i8> %load.b
+ store <32 x i8> %sel, <32 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 32
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test31:
+; AVX2: vpmaxub
+}
+
+define void @test32(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <32 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <32 x i8>*
+ %load.a = load <32 x i8>* %ptr.a, align 2
+ %load.b = load <32 x i8>* %ptr.b, align 2
+ %cmp = icmp uge <32 x i8> %load.a, %load.b
+ %sel = select <32 x i1> %cmp, <32 x i8> %load.a, <32 x i8> %load.b
+ store <32 x i8> %sel, <32 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 32
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test32:
+; AVX2: vpmaxub
+}
+
+define void @test33(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <16 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <16 x i16>*
+ %load.a = load <16 x i16>* %ptr.a, align 2
+ %load.b = load <16 x i16>* %ptr.b, align 2
+ %cmp = icmp slt <16 x i16> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i16> %load.a, <16 x i16> %load.b
+ store <16 x i16> %sel, <16 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test33:
+; AVX2: vpminsw
+}
+
+define void @test34(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <16 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <16 x i16>*
+ %load.a = load <16 x i16>* %ptr.a, align 2
+ %load.b = load <16 x i16>* %ptr.b, align 2
+ %cmp = icmp sle <16 x i16> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i16> %load.a, <16 x i16> %load.b
+ store <16 x i16> %sel, <16 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test34:
+; AVX2: vpminsw
+}
+
+define void @test35(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <16 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <16 x i16>*
+ %load.a = load <16 x i16>* %ptr.a, align 2
+ %load.b = load <16 x i16>* %ptr.b, align 2
+ %cmp = icmp sgt <16 x i16> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i16> %load.a, <16 x i16> %load.b
+ store <16 x i16> %sel, <16 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test35:
+; AVX2: vpmaxsw
+}
+
+define void @test36(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <16 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <16 x i16>*
+ %load.a = load <16 x i16>* %ptr.a, align 2
+ %load.b = load <16 x i16>* %ptr.b, align 2
+ %cmp = icmp sge <16 x i16> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i16> %load.a, <16 x i16> %load.b
+ store <16 x i16> %sel, <16 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test36:
+; AVX2: vpmaxsw
+}
+
+define void @test37(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <16 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <16 x i16>*
+ %load.a = load <16 x i16>* %ptr.a, align 2
+ %load.b = load <16 x i16>* %ptr.b, align 2
+ %cmp = icmp ult <16 x i16> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i16> %load.a, <16 x i16> %load.b
+ store <16 x i16> %sel, <16 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test37:
+; AVX2: vpminuw
+}
+
+define void @test38(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <16 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <16 x i16>*
+ %load.a = load <16 x i16>* %ptr.a, align 2
+ %load.b = load <16 x i16>* %ptr.b, align 2
+ %cmp = icmp ule <16 x i16> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i16> %load.a, <16 x i16> %load.b
+ store <16 x i16> %sel, <16 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test38:
+; AVX2: vpminuw
+}
+
+define void @test39(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <16 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <16 x i16>*
+ %load.a = load <16 x i16>* %ptr.a, align 2
+ %load.b = load <16 x i16>* %ptr.b, align 2
+ %cmp = icmp ugt <16 x i16> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i16> %load.a, <16 x i16> %load.b
+ store <16 x i16> %sel, <16 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test39:
+; AVX2: vpmaxuw
+}
+
+define void @test40(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <16 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <16 x i16>*
+ %load.a = load <16 x i16>* %ptr.a, align 2
+ %load.b = load <16 x i16>* %ptr.b, align 2
+ %cmp = icmp uge <16 x i16> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i16> %load.a, <16 x i16> %load.b
+ store <16 x i16> %sel, <16 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test40:
+; AVX2: vpmaxuw
+}
+
+define void @test41(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <8 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <8 x i32>*
+ %load.a = load <8 x i32>* %ptr.a, align 2
+ %load.b = load <8 x i32>* %ptr.b, align 2
+ %cmp = icmp slt <8 x i32> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i32> %load.a, <8 x i32> %load.b
+ store <8 x i32> %sel, <8 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test41:
+; AVX2: vpminsd
+}
+
+define void @test42(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <8 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <8 x i32>*
+ %load.a = load <8 x i32>* %ptr.a, align 2
+ %load.b = load <8 x i32>* %ptr.b, align 2
+ %cmp = icmp sle <8 x i32> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i32> %load.a, <8 x i32> %load.b
+ store <8 x i32> %sel, <8 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test42:
+; AVX2: vpminsd
+}
+
+define void @test43(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <8 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <8 x i32>*
+ %load.a = load <8 x i32>* %ptr.a, align 2
+ %load.b = load <8 x i32>* %ptr.b, align 2
+ %cmp = icmp sgt <8 x i32> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i32> %load.a, <8 x i32> %load.b
+ store <8 x i32> %sel, <8 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test43:
+; AVX2: vpmaxsd
+}
+
+define void @test44(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <8 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <8 x i32>*
+ %load.a = load <8 x i32>* %ptr.a, align 2
+ %load.b = load <8 x i32>* %ptr.b, align 2
+ %cmp = icmp sge <8 x i32> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i32> %load.a, <8 x i32> %load.b
+ store <8 x i32> %sel, <8 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test44:
+; AVX2: vpmaxsd
+}
+
+define void @test45(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <8 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <8 x i32>*
+ %load.a = load <8 x i32>* %ptr.a, align 2
+ %load.b = load <8 x i32>* %ptr.b, align 2
+ %cmp = icmp ult <8 x i32> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i32> %load.a, <8 x i32> %load.b
+ store <8 x i32> %sel, <8 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test45:
+; AVX2: vpminud
+}
+
+define void @test46(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <8 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <8 x i32>*
+ %load.a = load <8 x i32>* %ptr.a, align 2
+ %load.b = load <8 x i32>* %ptr.b, align 2
+ %cmp = icmp ule <8 x i32> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i32> %load.a, <8 x i32> %load.b
+ store <8 x i32> %sel, <8 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test46:
+; AVX2: vpminud
+}
+
+define void @test47(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <8 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <8 x i32>*
+ %load.a = load <8 x i32>* %ptr.a, align 2
+ %load.b = load <8 x i32>* %ptr.b, align 2
+ %cmp = icmp ugt <8 x i32> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i32> %load.a, <8 x i32> %load.b
+ store <8 x i32> %sel, <8 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test47:
+; AVX2: vpmaxud
+}
+
+define void @test48(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <8 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <8 x i32>*
+ %load.a = load <8 x i32>* %ptr.a, align 2
+ %load.b = load <8 x i32>* %ptr.b, align 2
+ %cmp = icmp uge <8 x i32> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i32> %load.a, <8 x i32> %load.b
+ store <8 x i32> %sel, <8 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test48:
+; AVX2: vpmaxud
+}
+
+define void @test49(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <16 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <16 x i8>*
+ %load.a = load <16 x i8>* %ptr.a, align 2
+ %load.b = load <16 x i8>* %ptr.b, align 2
+ %cmp = icmp slt <16 x i8> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i8> %load.b, <16 x i8> %load.a
+ store <16 x i8> %sel, <16 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE4: test49:
+; SSE4: pmaxsb
+
+; AVX1: test49:
+; AVX1: vpmaxsb
+
+; AVX2: test49:
+; AVX2: vpmaxsb
+}
+
+define void @test50(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <16 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <16 x i8>*
+ %load.a = load <16 x i8>* %ptr.a, align 2
+ %load.b = load <16 x i8>* %ptr.b, align 2
+ %cmp = icmp sle <16 x i8> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i8> %load.b, <16 x i8> %load.a
+ store <16 x i8> %sel, <16 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE4: test50:
+; SSE4: pmaxsb
+
+; AVX1: test50:
+; AVX1: vpmaxsb
+
+; AVX2: test50:
+; AVX2: vpmaxsb
+}
+
+define void @test51(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <16 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <16 x i8>*
+ %load.a = load <16 x i8>* %ptr.a, align 2
+ %load.b = load <16 x i8>* %ptr.b, align 2
+ %cmp = icmp sgt <16 x i8> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i8> %load.b, <16 x i8> %load.a
+ store <16 x i8> %sel, <16 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE4: test51:
+; SSE4: pminsb
+
+; AVX1: test51:
+; AVX1: vpminsb
+
+; AVX2: test51:
+; AVX2: vpminsb
+}
+
+define void @test52(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <16 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <16 x i8>*
+ %load.a = load <16 x i8>* %ptr.a, align 2
+ %load.b = load <16 x i8>* %ptr.b, align 2
+ %cmp = icmp sge <16 x i8> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i8> %load.b, <16 x i8> %load.a
+ store <16 x i8> %sel, <16 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE4: test52:
+; SSE4: pminsb
+
+; AVX1: test52:
+; AVX1: vpminsb
+
+; AVX2: test52:
+; AVX2: vpminsb
+}
+
+define void @test53(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <16 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <16 x i8>*
+ %load.a = load <16 x i8>* %ptr.a, align 2
+ %load.b = load <16 x i8>* %ptr.b, align 2
+ %cmp = icmp ult <16 x i8> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i8> %load.b, <16 x i8> %load.a
+ store <16 x i8> %sel, <16 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE2: test53:
+; SSE2: pmaxub
+
+; AVX1: test53:
+; AVX1: vpmaxub
+
+; AVX2: test53:
+; AVX2: vpmaxub
+}
+
+define void @test54(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <16 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <16 x i8>*
+ %load.a = load <16 x i8>* %ptr.a, align 2
+ %load.b = load <16 x i8>* %ptr.b, align 2
+ %cmp = icmp ule <16 x i8> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i8> %load.b, <16 x i8> %load.a
+ store <16 x i8> %sel, <16 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE2: test54:
+; SSE2: pmaxub
+
+; AVX1: test54:
+; AVX1: vpmaxub
+
+; AVX2: test54:
+; AVX2: vpmaxub
+}
+
+define void @test55(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <16 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <16 x i8>*
+ %load.a = load <16 x i8>* %ptr.a, align 2
+ %load.b = load <16 x i8>* %ptr.b, align 2
+ %cmp = icmp ugt <16 x i8> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i8> %load.b, <16 x i8> %load.a
+ store <16 x i8> %sel, <16 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE2: test55:
+; SSE2: pminub
+
+; AVX1: test55:
+; AVX1: vpminub
+
+; AVX2: test55:
+; AVX2: vpminub
+}
+
+define void @test56(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <16 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <16 x i8>*
+ %load.a = load <16 x i8>* %ptr.a, align 2
+ %load.b = load <16 x i8>* %ptr.b, align 2
+ %cmp = icmp uge <16 x i8> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i8> %load.b, <16 x i8> %load.a
+ store <16 x i8> %sel, <16 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE2: test56:
+; SSE2: pminub
+
+; AVX1: test56:
+; AVX1: vpminub
+
+; AVX2: test56:
+; AVX2: vpminub
+}
+
+define void @test57(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <8 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <8 x i16>*
+ %load.a = load <8 x i16>* %ptr.a, align 2
+ %load.b = load <8 x i16>* %ptr.b, align 2
+ %cmp = icmp slt <8 x i16> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i16> %load.b, <8 x i16> %load.a
+ store <8 x i16> %sel, <8 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE2: test57:
+; SSE2: pmaxsw
+
+; AVX1: test57:
+; AVX1: vpmaxsw
+
+; AVX2: test57:
+; AVX2: vpmaxsw
+}
+
+define void @test58(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <8 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <8 x i16>*
+ %load.a = load <8 x i16>* %ptr.a, align 2
+ %load.b = load <8 x i16>* %ptr.b, align 2
+ %cmp = icmp sle <8 x i16> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i16> %load.b, <8 x i16> %load.a
+ store <8 x i16> %sel, <8 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE2: test58:
+; SSE2: pmaxsw
+
+; AVX1: test58:
+; AVX1: vpmaxsw
+
+; AVX2: test58:
+; AVX2: vpmaxsw
+}
+
+define void @test59(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <8 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <8 x i16>*
+ %load.a = load <8 x i16>* %ptr.a, align 2
+ %load.b = load <8 x i16>* %ptr.b, align 2
+ %cmp = icmp sgt <8 x i16> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i16> %load.b, <8 x i16> %load.a
+ store <8 x i16> %sel, <8 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE2: test59:
+; SSE2: pminsw
+
+; AVX1: test59:
+; AVX1: vpminsw
+
+; AVX2: test59:
+; AVX2: vpminsw
+}
+
+define void @test60(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <8 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <8 x i16>*
+ %load.a = load <8 x i16>* %ptr.a, align 2
+ %load.b = load <8 x i16>* %ptr.b, align 2
+ %cmp = icmp sge <8 x i16> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i16> %load.b, <8 x i16> %load.a
+ store <8 x i16> %sel, <8 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE2: test60:
+; SSE2: pminsw
+
+; AVX1: test60:
+; AVX1: vpminsw
+
+; AVX2: test60:
+; AVX2: vpminsw
+}
+
+define void @test61(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <8 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <8 x i16>*
+ %load.a = load <8 x i16>* %ptr.a, align 2
+ %load.b = load <8 x i16>* %ptr.b, align 2
+ %cmp = icmp ult <8 x i16> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i16> %load.b, <8 x i16> %load.a
+ store <8 x i16> %sel, <8 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE4: test61:
+; SSE4: pmaxuw
+
+; AVX1: test61:
+; AVX1: vpmaxuw
+
+; AVX2: test61:
+; AVX2: vpmaxuw
+}
+
+define void @test62(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <8 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <8 x i16>*
+ %load.a = load <8 x i16>* %ptr.a, align 2
+ %load.b = load <8 x i16>* %ptr.b, align 2
+ %cmp = icmp ule <8 x i16> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i16> %load.b, <8 x i16> %load.a
+ store <8 x i16> %sel, <8 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE4: test62:
+; SSE4: pmaxuw
+
+; AVX1: test62:
+; AVX1: vpmaxuw
+
+; AVX2: test62:
+; AVX2: vpmaxuw
+}
+
+define void @test63(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <8 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <8 x i16>*
+ %load.a = load <8 x i16>* %ptr.a, align 2
+ %load.b = load <8 x i16>* %ptr.b, align 2
+ %cmp = icmp ugt <8 x i16> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i16> %load.b, <8 x i16> %load.a
+ store <8 x i16> %sel, <8 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE4: test63:
+; SSE4: pminuw
+
+; AVX1: test63:
+; AVX1: vpminuw
+
+; AVX2: test63:
+; AVX2: vpminuw
+}
+
+define void @test64(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <8 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <8 x i16>*
+ %load.a = load <8 x i16>* %ptr.a, align 2
+ %load.b = load <8 x i16>* %ptr.b, align 2
+ %cmp = icmp uge <8 x i16> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i16> %load.b, <8 x i16> %load.a
+ store <8 x i16> %sel, <8 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE4: test64:
+; SSE4: pminuw
+
+; AVX1: test64:
+; AVX1: vpminuw
+
+; AVX2: test64:
+; AVX2: vpminuw
+}
+
+define void @test65(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <4 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <4 x i32>*
+ %load.a = load <4 x i32>* %ptr.a, align 2
+ %load.b = load <4 x i32>* %ptr.b, align 2
+ %cmp = icmp slt <4 x i32> %load.a, %load.b
+ %sel = select <4 x i1> %cmp, <4 x i32> %load.b, <4 x i32> %load.a
+ store <4 x i32> %sel, <4 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 4
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE4: test65:
+; SSE4: pmaxsd
+
+; AVX1: test65:
+; AVX1: vpmaxsd
+
+; AVX2: test65:
+; AVX2: vpmaxsd
+}
+
+define void @test66(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <4 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <4 x i32>*
+ %load.a = load <4 x i32>* %ptr.a, align 2
+ %load.b = load <4 x i32>* %ptr.b, align 2
+ %cmp = icmp sle <4 x i32> %load.a, %load.b
+ %sel = select <4 x i1> %cmp, <4 x i32> %load.b, <4 x i32> %load.a
+ store <4 x i32> %sel, <4 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 4
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE4: test66:
+; SSE4: pmaxsd
+
+; AVX1: test66:
+; AVX1: vpmaxsd
+
+; AVX2: test66:
+; AVX2: vpmaxsd
+}
+
+define void @test67(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <4 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <4 x i32>*
+ %load.a = load <4 x i32>* %ptr.a, align 2
+ %load.b = load <4 x i32>* %ptr.b, align 2
+ %cmp = icmp sgt <4 x i32> %load.a, %load.b
+ %sel = select <4 x i1> %cmp, <4 x i32> %load.b, <4 x i32> %load.a
+ store <4 x i32> %sel, <4 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 4
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE4: test67:
+; SSE4: pminsd
+
+; AVX1: test67:
+; AVX1: vpminsd
+
+; AVX2: test67:
+; AVX2: vpminsd
+}
+
+define void @test68(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <4 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <4 x i32>*
+ %load.a = load <4 x i32>* %ptr.a, align 2
+ %load.b = load <4 x i32>* %ptr.b, align 2
+ %cmp = icmp sge <4 x i32> %load.a, %load.b
+ %sel = select <4 x i1> %cmp, <4 x i32> %load.b, <4 x i32> %load.a
+ store <4 x i32> %sel, <4 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 4
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE4: test68:
+; SSE4: pminsd
+
+; AVX1: test68:
+; AVX1: vpminsd
+
+; AVX2: test68:
+; AVX2: vpminsd
+}
+
+define void @test69(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <4 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <4 x i32>*
+ %load.a = load <4 x i32>* %ptr.a, align 2
+ %load.b = load <4 x i32>* %ptr.b, align 2
+ %cmp = icmp ult <4 x i32> %load.a, %load.b
+ %sel = select <4 x i1> %cmp, <4 x i32> %load.b, <4 x i32> %load.a
+ store <4 x i32> %sel, <4 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 4
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE4: test69:
+; SSE4: pmaxud
+
+; AVX1: test69:
+; AVX1: vpmaxud
+
+; AVX2: test69:
+; AVX2: vpmaxud
+}
+
+define void @test70(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <4 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <4 x i32>*
+ %load.a = load <4 x i32>* %ptr.a, align 2
+ %load.b = load <4 x i32>* %ptr.b, align 2
+ %cmp = icmp ule <4 x i32> %load.a, %load.b
+ %sel = select <4 x i1> %cmp, <4 x i32> %load.b, <4 x i32> %load.a
+ store <4 x i32> %sel, <4 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 4
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE4: test70:
+; SSE4: pmaxud
+
+; AVX1: test70:
+; AVX1: vpmaxud
+
+; AVX2: test70:
+; AVX2: vpmaxud
+}
+
+define void @test71(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <4 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <4 x i32>*
+ %load.a = load <4 x i32>* %ptr.a, align 2
+ %load.b = load <4 x i32>* %ptr.b, align 2
+ %cmp = icmp ugt <4 x i32> %load.a, %load.b
+ %sel = select <4 x i1> %cmp, <4 x i32> %load.b, <4 x i32> %load.a
+ store <4 x i32> %sel, <4 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 4
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE4: test71:
+; SSE4: pminud
+
+; AVX1: test71:
+; AVX1: vpminud
+
+; AVX2: test71:
+; AVX2: vpminud
+}
+
+define void @test72(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <4 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <4 x i32>*
+ %load.a = load <4 x i32>* %ptr.a, align 2
+ %load.b = load <4 x i32>* %ptr.b, align 2
+ %cmp = icmp uge <4 x i32> %load.a, %load.b
+ %sel = select <4 x i1> %cmp, <4 x i32> %load.b, <4 x i32> %load.a
+ store <4 x i32> %sel, <4 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 4
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; SSE4: test72:
+; SSE4: pminud
+
+; AVX1: test72:
+; AVX1: vpminud
+
+; AVX2: test72:
+; AVX2: vpminud
+}
+
+define void @test73(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <32 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <32 x i8>*
+ %load.a = load <32 x i8>* %ptr.a, align 2
+ %load.b = load <32 x i8>* %ptr.b, align 2
+ %cmp = icmp slt <32 x i8> %load.a, %load.b
+ %sel = select <32 x i1> %cmp, <32 x i8> %load.b, <32 x i8> %load.a
+ store <32 x i8> %sel, <32 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 32
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test73:
+; AVX2: vpmaxsb
+}
+
+define void @test74(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <32 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <32 x i8>*
+ %load.a = load <32 x i8>* %ptr.a, align 2
+ %load.b = load <32 x i8>* %ptr.b, align 2
+ %cmp = icmp sle <32 x i8> %load.a, %load.b
+ %sel = select <32 x i1> %cmp, <32 x i8> %load.b, <32 x i8> %load.a
+ store <32 x i8> %sel, <32 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 32
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test74:
+; AVX2: vpmaxsb
+}
+
+define void @test75(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <32 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <32 x i8>*
+ %load.a = load <32 x i8>* %ptr.a, align 2
+ %load.b = load <32 x i8>* %ptr.b, align 2
+ %cmp = icmp sgt <32 x i8> %load.a, %load.b
+ %sel = select <32 x i1> %cmp, <32 x i8> %load.b, <32 x i8> %load.a
+ store <32 x i8> %sel, <32 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 32
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test75:
+; AVX2: vpminsb
+}
+
+define void @test76(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <32 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <32 x i8>*
+ %load.a = load <32 x i8>* %ptr.a, align 2
+ %load.b = load <32 x i8>* %ptr.b, align 2
+ %cmp = icmp sge <32 x i8> %load.a, %load.b
+ %sel = select <32 x i1> %cmp, <32 x i8> %load.b, <32 x i8> %load.a
+ store <32 x i8> %sel, <32 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 32
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test76:
+; AVX2: vpminsb
+}
+
+define void @test77(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <32 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <32 x i8>*
+ %load.a = load <32 x i8>* %ptr.a, align 2
+ %load.b = load <32 x i8>* %ptr.b, align 2
+ %cmp = icmp ult <32 x i8> %load.a, %load.b
+ %sel = select <32 x i1> %cmp, <32 x i8> %load.b, <32 x i8> %load.a
+ store <32 x i8> %sel, <32 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 32
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test77:
+; AVX2: vpmaxub
+}
+
+define void @test78(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <32 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <32 x i8>*
+ %load.a = load <32 x i8>* %ptr.a, align 2
+ %load.b = load <32 x i8>* %ptr.b, align 2
+ %cmp = icmp ule <32 x i8> %load.a, %load.b
+ %sel = select <32 x i1> %cmp, <32 x i8> %load.b, <32 x i8> %load.a
+ store <32 x i8> %sel, <32 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 32
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test78:
+; AVX2: vpmaxub
+}
+
+define void @test79(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <32 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <32 x i8>*
+ %load.a = load <32 x i8>* %ptr.a, align 2
+ %load.b = load <32 x i8>* %ptr.b, align 2
+ %cmp = icmp ugt <32 x i8> %load.a, %load.b
+ %sel = select <32 x i1> %cmp, <32 x i8> %load.b, <32 x i8> %load.a
+ store <32 x i8> %sel, <32 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 32
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test79:
+; AVX2: vpminub
+}
+
+define void @test80(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <32 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <32 x i8>*
+ %load.a = load <32 x i8>* %ptr.a, align 2
+ %load.b = load <32 x i8>* %ptr.b, align 2
+ %cmp = icmp uge <32 x i8> %load.a, %load.b
+ %sel = select <32 x i1> %cmp, <32 x i8> %load.b, <32 x i8> %load.a
+ store <32 x i8> %sel, <32 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 32
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test80:
+; AVX2: vpminub
+}
+
+define void @test81(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <16 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <16 x i16>*
+ %load.a = load <16 x i16>* %ptr.a, align 2
+ %load.b = load <16 x i16>* %ptr.b, align 2
+ %cmp = icmp slt <16 x i16> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i16> %load.b, <16 x i16> %load.a
+ store <16 x i16> %sel, <16 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test81:
+; AVX2: vpmaxsw
+}
+
+define void @test82(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <16 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <16 x i16>*
+ %load.a = load <16 x i16>* %ptr.a, align 2
+ %load.b = load <16 x i16>* %ptr.b, align 2
+ %cmp = icmp sle <16 x i16> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i16> %load.b, <16 x i16> %load.a
+ store <16 x i16> %sel, <16 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test82:
+; AVX2: vpmaxsw
+}
+
+define void @test83(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <16 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <16 x i16>*
+ %load.a = load <16 x i16>* %ptr.a, align 2
+ %load.b = load <16 x i16>* %ptr.b, align 2
+ %cmp = icmp sgt <16 x i16> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i16> %load.b, <16 x i16> %load.a
+ store <16 x i16> %sel, <16 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test83:
+; AVX2: vpminsw
+}
+
+define void @test84(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <16 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <16 x i16>*
+ %load.a = load <16 x i16>* %ptr.a, align 2
+ %load.b = load <16 x i16>* %ptr.b, align 2
+ %cmp = icmp sge <16 x i16> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i16> %load.b, <16 x i16> %load.a
+ store <16 x i16> %sel, <16 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test84:
+; AVX2: vpminsw
+}
+
+define void @test85(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <16 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <16 x i16>*
+ %load.a = load <16 x i16>* %ptr.a, align 2
+ %load.b = load <16 x i16>* %ptr.b, align 2
+ %cmp = icmp ult <16 x i16> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i16> %load.b, <16 x i16> %load.a
+ store <16 x i16> %sel, <16 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test85:
+; AVX2: vpmaxuw
+}
+
+define void @test86(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <16 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <16 x i16>*
+ %load.a = load <16 x i16>* %ptr.a, align 2
+ %load.b = load <16 x i16>* %ptr.b, align 2
+ %cmp = icmp ule <16 x i16> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i16> %load.b, <16 x i16> %load.a
+ store <16 x i16> %sel, <16 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test86:
+; AVX2: vpmaxuw
+}
+
+define void @test87(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <16 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <16 x i16>*
+ %load.a = load <16 x i16>* %ptr.a, align 2
+ %load.b = load <16 x i16>* %ptr.b, align 2
+ %cmp = icmp ugt <16 x i16> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i16> %load.b, <16 x i16> %load.a
+ store <16 x i16> %sel, <16 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test87:
+; AVX2: vpminuw
+}
+
+define void @test88(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <16 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <16 x i16>*
+ %load.a = load <16 x i16>* %ptr.a, align 2
+ %load.b = load <16 x i16>* %ptr.b, align 2
+ %cmp = icmp uge <16 x i16> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i16> %load.b, <16 x i16> %load.a
+ store <16 x i16> %sel, <16 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test88:
+; AVX2: vpminuw
+}
+
+define void @test89(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <8 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <8 x i32>*
+ %load.a = load <8 x i32>* %ptr.a, align 2
+ %load.b = load <8 x i32>* %ptr.b, align 2
+ %cmp = icmp slt <8 x i32> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i32> %load.b, <8 x i32> %load.a
+ store <8 x i32> %sel, <8 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX2: test89:
+; AVX2: vpmaxsd
+}
+
+define void @test90(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <8 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <8 x i32>*
+ %load.a = load <8 x i32>* %ptr.a, align 2
+ %load.b = load <8 x i32>* %ptr.b, align 2
+ %cmp = icmp
sle <8 x i32> %load.a, %load.b + %sel = select <8 x i1> %cmp, <8 x i32> %load.b, <8 x i32> %load.a + store <8 x i32> %sel, <8 x i32>* %ptr.a, align 2 + %index.next = add i64 %index, 8 + %loop = icmp eq i64 %index.next, 16384 + br i1 %loop, label %for.end, label %vector.body + +for.end: ; preds = %vector.body + ret void + +; AVX2: test90: +; AVX2: vpmaxsd +} + +define void @test91(i32* nocapture %a, i32* nocapture %b) nounwind { +vector.ph: + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %gep.a = getelementptr inbounds i32* %a, i64 %index + %gep.b = getelementptr inbounds i32* %b, i64 %index + %ptr.a = bitcast i32* %gep.a to <8 x i32>* + %ptr.b = bitcast i32* %gep.b to <8 x i32>* + %load.a = load <8 x i32>* %ptr.a, align 2 + %load.b = load <8 x i32>* %ptr.b, align 2 + %cmp = icmp sgt <8 x i32> %load.a, %load.b + %sel = select <8 x i1> %cmp, <8 x i32> %load.b, <8 x i32> %load.a + store <8 x i32> %sel, <8 x i32>* %ptr.a, align 2 + %index.next = add i64 %index, 8 + %loop = icmp eq i64 %index.next, 16384 + br i1 %loop, label %for.end, label %vector.body + +for.end: ; preds = %vector.body + ret void + +; AVX2: test91: +; AVX2: vpminsd +} + +define void @test92(i32* nocapture %a, i32* nocapture %b) nounwind { +vector.ph: + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %gep.a = getelementptr inbounds i32* %a, i64 %index + %gep.b = getelementptr inbounds i32* %b, i64 %index + %ptr.a = bitcast i32* %gep.a to <8 x i32>* + %ptr.b = bitcast i32* %gep.b to <8 x i32>* + %load.a = load <8 x i32>* %ptr.a, align 2 + %load.b = load <8 x i32>* %ptr.b, align 2 + %cmp = icmp sge <8 x i32> %load.a, %load.b + %sel = select <8 x i1> %cmp, <8 x i32> %load.b, <8 x i32> %load.a + store <8 x i32> %sel, <8 x i32>* %ptr.a, align 2 + %index.next = add i64 %index, 8 + %loop = icmp eq i64 %index.next, 16384 + br i1 %loop, label %for.end, label %vector.body + +for.end: ; preds = %vector.body + ret void + +; AVX2: test92: +; AVX2: vpminsd +} + +define void @test93(i32* nocapture %a, i32* nocapture %b) nounwind { +vector.ph: + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %gep.a = getelementptr inbounds i32* %a, i64 %index + %gep.b = getelementptr inbounds i32* %b, i64 %index + %ptr.a = bitcast i32* %gep.a to <8 x i32>* + %ptr.b = bitcast i32* %gep.b to <8 x i32>* + %load.a = load <8 x i32>* %ptr.a, align 2 + %load.b = load <8 x i32>* %ptr.b, align 2 + %cmp = icmp ult <8 x i32> %load.a, %load.b + %sel = select <8 x i1> %cmp, <8 x i32> %load.b, <8 x i32> %load.a + store <8 x i32> %sel, <8 x i32>* %ptr.a, align 2 + %index.next = add i64 %index, 8 + %loop = icmp eq i64 %index.next, 16384 + br i1 %loop, label %for.end, label %vector.body + +for.end: ; preds = %vector.body + ret void + +; AVX2: test93: +; AVX2: vpmaxud +} + +define void @test94(i32* nocapture %a, i32* nocapture %b) nounwind { +vector.ph: + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %gep.a = getelementptr inbounds i32* %a, i64 %index + %gep.b = getelementptr inbounds i32* %b, i64 %index + %ptr.a = bitcast i32* %gep.a to <8 x i32>* + %ptr.b = bitcast i32* %gep.b to <8 x i32>* + %load.a = load <8 x i32>* %ptr.a, align 2 + %load.b = load <8 x i32>* %ptr.b, align 2 + %cmp = icmp ule <8 
x i32> %load.a, %load.b + %sel = select <8 x i1> %cmp, <8 x i32> %load.b, <8 x i32> %load.a + store <8 x i32> %sel, <8 x i32>* %ptr.a, align 2 + %index.next = add i64 %index, 8 + %loop = icmp eq i64 %index.next, 16384 + br i1 %loop, label %for.end, label %vector.body + +for.end: ; preds = %vector.body + ret void + +; AVX2: test94: +; AVX2: vpmaxud +} + +define void @test95(i32* nocapture %a, i32* nocapture %b) nounwind { +vector.ph: + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %gep.a = getelementptr inbounds i32* %a, i64 %index + %gep.b = getelementptr inbounds i32* %b, i64 %index + %ptr.a = bitcast i32* %gep.a to <8 x i32>* + %ptr.b = bitcast i32* %gep.b to <8 x i32>* + %load.a = load <8 x i32>* %ptr.a, align 2 + %load.b = load <8 x i32>* %ptr.b, align 2 + %cmp = icmp ugt <8 x i32> %load.a, %load.b + %sel = select <8 x i1> %cmp, <8 x i32> %load.b, <8 x i32> %load.a + store <8 x i32> %sel, <8 x i32>* %ptr.a, align 2 + %index.next = add i64 %index, 8 + %loop = icmp eq i64 %index.next, 16384 + br i1 %loop, label %for.end, label %vector.body + +for.end: ; preds = %vector.body + ret void + +; AVX2: test95: +; AVX2: vpminud +} + +define void @test96(i32* nocapture %a, i32* nocapture %b) nounwind { +vector.ph: + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %gep.a = getelementptr inbounds i32* %a, i64 %index + %gep.b = getelementptr inbounds i32* %b, i64 %index + %ptr.a = bitcast i32* %gep.a to <8 x i32>* + %ptr.b = bitcast i32* %gep.b to <8 x i32>* + %load.a = load <8 x i32>* %ptr.a, align 2 + %load.b = load <8 x i32>* %ptr.b, align 2 + %cmp = icmp uge <8 x i32> %load.a, %load.b + %sel = select <8 x i1> %cmp, <8 x i32> %load.b, <8 x i32> %load.a + store <8 x i32> %sel, <8 x i32>* %ptr.a, align 2 + %index.next = add i64 %index, 8 + %loop = icmp eq i64 %index.next, 16384 + br i1 %loop, label %for.end, label %vector.body + +for.end: ; preds = %vector.body + ret void + +; AVX2: test96: +; AVX2: vpminud +} diff --git a/test/CodeGen/X86/vsplit-and.ll b/test/CodeGen/X86/vsplit-and.ll index ee98806c0f..3b7fdff84e 100644 --- a/test/CodeGen/X86/vsplit-and.ll +++ b/test/CodeGen/X86/vsplit-and.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s +; RUN: llc < %s -mtriple=x86_64-linux -mcpu=penryn | FileCheck %s define void @t0(<2 x i64>* %dst, <2 x i64> %src1, <2 x i64> %src2) nounwind readonly { ; CHECK: t0 diff --git a/test/CodeGen/X86/x86-64-dead-stack-adjust.ll b/test/CodeGen/X86/x86-64-dead-stack-adjust.ll index 902c9d5ae0..9c01f16f24 100644 --- a/test/CodeGen/X86/x86-64-dead-stack-adjust.ll +++ b/test/CodeGen/X86/x86-64-dead-stack-adjust.ll @@ -1,5 +1,5 @@ ; RUN: llc < %s -mcpu=nehalem | not grep rsp -; RUN: llc < %s -mcpu=nehalem | grep cvttsd2siq +; RUN: llc < %s -mcpu=nehalem | grep cvttsd2si target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128" target triple = "x86_64-apple-darwin8" |
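Note on the pattern exercised by test74 through test96 above: every function repeats one shape -- load two vectors, compare element-wise, select so that one operand wins, store back -- and the AVX2 CHECK line names the single packed min/max instruction the X86 backend is expected to fold the icmp+select pair into. As a reading aid, here is a scalar C sketch of the loop behind the vpmaxsb cases (the function name and compiler flags are illustrative assumptions, not part of this commit):

    /* Sketch only: scalar analogue of the <32 x i8> signed-max tests.
     * select(icmp slt a, b; b, a) picks the larger element, i.e. smax.
     * Built with something like clang -O3 -mavx2, the loop vectorizer
     * plus the lowering these tests cover should reduce the loop body
     * to one vpmaxsb per 32-byte chunk. */
    #include <stddef.h>

    void max_i8(signed char *a, const signed char *b, size_t n) {
      for (size_t i = 0; i < n; ++i)
        a[i] = (a[i] < b[i]) ? b[i] : a[i]; /* slt + select(b, a) == max */
    }

The unsigned predicates (ult/ule/ugt/uge) map the same way onto vpmaxub/vpminub and friends, and the wider element types onto the word (vpmaxsw, vpminuw, ...) and dword (vpmaxsd, vpminud, ...) variants checked above.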
