; Test how we handle eliding ptrtoint instructions.
; TODO(kschimpf) Expand these tests as further CL's are added for issue 3544.
; RUN: llvm-as < %s | pnacl-freeze --pnacl-version=1 \
; RUN: | pnacl-bcanalyzer -dump-records \
; RUN: | FileCheck %s -check-prefix=PF1
; RUN: llvm-as < %s | pnacl-freeze --pnacl-version=1 | pnacl-thaw \
; RUN: | llvm-dis - | FileCheck %s -check-prefix=TD1
; RUN: llvm-as < %s | pnacl-freeze --pnacl-version=2 \
; RUN: | pnacl-bcanalyzer -dump-records \
; RUN: | FileCheck %s -check-prefix=PF2
; RUN: llvm-as < %s | pnacl-freeze --pnacl-version=2 | pnacl-thaw \
; RUN: | llvm-dis - | FileCheck %s -check-prefix=TD2
; ------------------------------------------------------
declare i32 @bar(i32)
@bytes = internal global [4 x i8] c"abcd"
; ------------------------------------------------------
; Show simple case where we use ptrtoint
define void @AllocCastSimple() {
; The alloca's address is converted to i32 (%2) purely so it can be stored
; as data into @bytes (reinterpreted as an i32*). Version 2 elides both casts.
%1 = alloca i8, i32 4, align 8
%2 = ptrtoint i8* %1 to i32
%3 = bitcast [4 x i8]* @bytes to i32*
store i32 %2, i32* %3, align 1
ret void
}
; TD1: define void @AllocCastSimple() {
; TD1-NEXT: %1 = alloca i8, i32 4, align 8
; TD1-NEXT: %2 = ptrtoint i8* %1 to i32
; TD1-NEXT: %3 = bitcast [4 x i8]* @bytes to i32*
; TD1-NEXT: store i32 %2, i32* %3, align 1
; TD1-NEXT: ret void
; TD1-NEXT: }
; PF1:
; PF1:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; TD2: define void @AllocCastSimple() {
; TD2-NEXT: %1 = alloca i8, i32 4, align 8
; TD2-NEXT: %2 = ptrtoint i8* %1 to i32
; TD2-NEXT: %3 = bitcast [4 x i8]* @bytes to i32*
; TD2-NEXT: store i32 %2, i32* %3, align 1
; TD2-NEXT: ret void
; TD2-NEXT: }
; PF2:
; PF2:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; ------------------------------------------------------
; Same as above, but with the cast order changed. Shows
; that we always inject casts back in a fixed order. Hence,
; in PNaCl version 2, the casts will be reversed.
define void @AllocCastSimpleReversed() {
; Same store as @AllocCastSimple, but the bitcast (%2) comes before the
; ptrtoint (%3). The thawed version-2 output re-inserts casts in its own
; fixed order, so this order is not preserved round-trip (see TD2 below).
%1 = alloca i8, i32 4, align 8
%2 = bitcast [4 x i8]* @bytes to i32*
%3 = ptrtoint i8* %1 to i32
store i32 %3, i32* %2, align 1
ret void
}
; TD1: define void @AllocCastSimpleReversed() {
; TD1-NEXT: %1 = alloca i8, i32 4, align 8
; TD1-NEXT: %2 = bitcast [4 x i8]* @bytes to i32*
; TD1-NEXT: %3 = ptrtoint i8* %1 to i32
; TD1-NEXT: store i32 %3, i32* %2, align 1
; TD1-NEXT: ret void
; TD1-NEXT: }
; PF1:
; PF1:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; TD2: define void @AllocCastSimpleReversed() {
; TD2-NEXT: %1 = alloca i8, i32 4, align 8
; TD2-NEXT: %2 = ptrtoint i8* %1 to i32
; TD2-NEXT: %3 = bitcast [4 x i8]* @bytes to i32*
; TD2-NEXT: store i32 %2, i32* %3, align 1
; TD2-NEXT: ret void
; TD2-NEXT: }
; PF2:
; PF2:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; ------------------------------------------------------
; Show case where we delete ptrtoint because they aren't used.
define void @AllocCastDelete() {
; Both ptrtoint results (%2 and %4) are dead, so the version-2 writer drops
; them entirely; only the two allocas survive the round trip (see TD2).
%1 = alloca i8, i32 4, align 8
%2 = ptrtoint i8* %1 to i32
%3 = alloca i8, i32 4, align 8
%4 = ptrtoint i8* %3 to i32
ret void
}
; TD1: define void @AllocCastDelete() {
; TD1-NEXT: %1 = alloca i8, i32 4, align 8
; TD1-NEXT: %2 = ptrtoint i8* %1 to i32
; TD1-NEXT: %3 = alloca i8, i32 4, align 8
; TD1-NEXT: %4 = ptrtoint i8* %3 to i32
; TD1-NEXT: ret void
; TD1-NEXT: }
; PF1:
; PF1:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; TD2: define void @AllocCastDelete() {
; TD2-NEXT: %1 = alloca i8, i32 4, align 8
; TD2-NEXT: %2 = alloca i8, i32 4, align 8
; TD2-NEXT: ret void
; TD2-NEXT: }
; PF2:
; PF2:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; ------------------------------------------------------
; Show case where we have optimized the ptrtoint (and bitcast) into a
; single instruction, and will only be inserted before the first use
; in the block.
define void @AllocCastOpt() {
; %3 (and the bitcast %2) feed two identical stores. On thaw the casts are
; materialized once, before the first use, and shared by both stores.
%1 = alloca i8, i32 4, align 8
%2 = bitcast [4 x i8]* @bytes to i32*
%3 = ptrtoint i8* %1 to i32
store i32 %3, i32* %2, align 1
store i32 %3, i32* %2, align 1
ret void
}
; TD1: define void @AllocCastOpt() {
; TD1-NEXT: %1 = alloca i8, i32 4, align 8
; TD1-NEXT: %2 = bitcast [4 x i8]* @bytes to i32*
; TD1-NEXT: %3 = ptrtoint i8* %1 to i32
; TD1-NEXT: store i32 %3, i32* %2, align 1
; TD1-NEXT: store i32 %3, i32* %2, align 1
; TD1-NEXT: ret void
; TD1-NEXT: }
; PF1:
; PF1:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; TD2: define void @AllocCastOpt() {
; TD2-NEXT: %1 = alloca i8, i32 4, align 8
; TD2-NEXT: %2 = ptrtoint i8* %1 to i32
; TD2-NEXT: %3 = bitcast [4 x i8]* @bytes to i32*
; TD2-NEXT: store i32 %2, i32* %3, align 1
; TD2-NEXT: store i32 %2, i32* %3, align 1
; TD2-NEXT: ret void
; TD2-NEXT: }
; PF2:
; PF2:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; ------------------------------------------------------
; Show case where the ptrtoint (and bitcast) feeding a store are not
; immediately before the store; the casts will be moved down to the store.
define void @AllocCastMove(i32) {
; The unrelated add (%5) sits between the casts and the store that uses
; them; on thaw the casts are re-inserted adjacent to the store (see TD2).
%2 = alloca i8, i32 4, align 8
%3 = bitcast [4 x i8]* @bytes to i32*
%4 = ptrtoint i8* %2 to i32
%5 = add i32 %0, 1
store i32 %4, i32* %3, align 1
ret void
}
; TD1: define void @AllocCastMove(i32) {
; TD1-NEXT: %2 = alloca i8, i32 4, align 8
; TD1-NEXT: %3 = bitcast [4 x i8]* @bytes to i32*
; TD1-NEXT: %4 = ptrtoint i8* %2 to i32
; TD1-NEXT: %5 = add i32 %0, 1
; TD1-NEXT: store i32 %4, i32* %3, align 1
; TD1-NEXT: ret void
; TD1-NEXT: }
; PF1:
; PF1:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; TD2: define void @AllocCastMove(i32) {
; TD2-NEXT: %2 = alloca i8, i32 4, align 8
; TD2-NEXT: %3 = add i32 %0, 1
; TD2-NEXT: %4 = ptrtoint i8* %2 to i32
; TD2-NEXT: %5 = bitcast [4 x i8]* @bytes to i32*
; TD2-NEXT: store i32 %4, i32* %5, align 1
; TD2-NEXT: ret void
; TD2-NEXT: }
; PF2:
; PF2:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; ------------------------------------------------------
; Show case where ptrtoint on global variable is merged in a store, and
; order is kept.
define void @StoreGlobal() {
; Here the ptrtoint is on the global (@bytes) and the bitcast is on the
; alloca; this order matches the fixed re-insertion order, so TD2 == TD1.
%1 = alloca i8, i32 4, align 8
%2 = ptrtoint [4 x i8]* @bytes to i32
%3 = bitcast i8* %1 to i32*
store i32 %2, i32* %3, align 1
ret void
}
; TD1: define void @StoreGlobal() {
; TD1-NEXT: %1 = alloca i8, i32 4, align 8
; TD1-NEXT: %2 = ptrtoint [4 x i8]* @bytes to i32
; TD1-NEXT: %3 = bitcast i8* %1 to i32*
; TD1-NEXT: store i32 %2, i32* %3, align 1
; TD1-NEXT: ret void
; TD1-NEXT: }
; PF1:
; PF1:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; TD2: define void @StoreGlobal() {
; TD2-NEXT: %1 = alloca i8, i32 4, align 8
; TD2-NEXT: %2 = ptrtoint [4 x i8]* @bytes to i32
; TD2-NEXT: %3 = bitcast i8* %1 to i32*
; TD2-NEXT: store i32 %2, i32* %3, align 1
; TD2-NEXT: ret void
; TD2-NEXT: }
; PF2:
; PF2:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; ------------------------------------------------------
; Same as above, but with cast order reversed.
define void @StoreGlobalCastsReversed() {
; Same as @StoreGlobal but with the casts swapped (bitcast first). The
; version-2 thaw normalizes back to ptrtoint-then-bitcast (see TD2).
%1 = alloca i8, i32 4, align 8
%2 = bitcast i8* %1 to i32*
%3 = ptrtoint [4 x i8]* @bytes to i32
store i32 %3, i32* %2, align 1
ret void
}
; TD1: define void @StoreGlobalCastsReversed() {
; TD1-NEXT: %1 = alloca i8, i32 4, align 8
; TD1-NEXT: %2 = bitcast i8* %1 to i32*
; TD1-NEXT: %3 = ptrtoint [4 x i8]* @bytes to i32
; TD1-NEXT: store i32 %3, i32* %2, align 1
; TD1-NEXT: ret void
; TD1-NEXT: }
; PF1:
; PF1:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; TD2: define void @StoreGlobalCastsReversed() {
; TD2-NEXT: %1 = alloca i8, i32 4, align 8
; TD2-NEXT: %2 = ptrtoint [4 x i8]* @bytes to i32
; TD2-NEXT: %3 = bitcast i8* %1 to i32*
; TD2-NEXT: store i32 %2, i32* %3, align 1
; TD2-NEXT: ret void
; TD2-NEXT: }
; PF2:
; PF2:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; ------------------------------------------------------
; Show that we will move the ptrtoint of a global to the use.
define i32 @StoreGlobalMovePtr2Int() {
; The ptrtoint of @bytes (%1) is defined before the alloca it has no
; dependence on; on thaw it is moved down next to the store that uses it.
%1 = ptrtoint [4 x i8]* @bytes to i32
%2 = alloca i8, i32 4, align 8
%3 = bitcast i8* %2 to i32*
store i32 %1, i32* %3, align 1
ret i32 0
}
; TD1: define i32 @StoreGlobalMovePtr2Int() {
; TD1-NEXT: %1 = ptrtoint [4 x i8]* @bytes to i32
; TD1-NEXT: %2 = alloca i8, i32 4, align 8
; TD1-NEXT: %3 = bitcast i8* %2 to i32*
; TD1-NEXT: store i32 %1, i32* %3, align 1
; TD1-NEXT: ret i32 0
; TD1-NEXT: }
; PF1:
; PF1:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; TD2: define i32 @StoreGlobalMovePtr2Int() {
; TD2-NEXT: %1 = alloca i8, i32 4, align 8
; TD2-NEXT: %2 = ptrtoint [4 x i8]* @bytes to i32
; TD2-NEXT: %3 = bitcast i8* %1 to i32*
; TD2-NEXT: store i32 %2, i32* %3, align 1
; TD2-NEXT: ret i32 0
; TD2-NEXT: }
; PF2:
; PF2:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; ------------------------------------------------------
; Show that we handle add instructions with pointer casts.
define void @CastAddAlloca() {
; Exercises every operand position of add with a ptrtoint'd alloca address:
; neither operand, first, second, and both.
%1 = alloca i8, i32 4, align 8
%2 = ptrtoint i8* %1 to i32
; Simple add.
%3 = add i32 1, 2
; Cast first.
%4 = add i32 %2, 2
; Cast second.
%5 = add i32 1, %2
; Cast both.
%6 = add i32 %2, %2
ret void
}
; TD1: define void @CastAddAlloca() {
; TD1-NEXT: %1 = alloca i8, i32 4, align 8
; TD1-NEXT: %2 = ptrtoint i8* %1 to i32
; TD1-NEXT: %3 = add i32 1, 2
; TD1-NEXT: %4 = add i32 %2, 2
; TD1-NEXT: %5 = add i32 1, %2
; TD1-NEXT: %6 = add i32 %2, %2
; TD1-NEXT: ret void
; TD1-NEXT: }
; PF1:
; PF1:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; TD2: define void @CastAddAlloca() {
; TD2-NEXT: %1 = alloca i8, i32 4, align 8
; TD2-NEXT: %2 = add i32 1, 2
; TD2-NEXT: %3 = ptrtoint i8* %1 to i32
; TD2-NEXT: %4 = add i32 %3, 2
; TD2-NEXT: %5 = add i32 1, %3
; TD2-NEXT: %6 = add i32 %3, %3
; TD2-NEXT: ret void
; TD2-NEXT: }
; PF2:
; PF2:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; ------------------------------------------------------
; Show that we handle add instructions with pointer casts.
define void @CastAddGlobal() {
; Same operand-position matrix as @CastAddAlloca, but the converted pointer
; is a global (@bytes) rather than an alloca.
%1 = ptrtoint [4 x i8]* @bytes to i32
; Simple Add.
%2 = add i32 1, 2
; Cast first.
%3 = add i32 %1, 2
; Cast Second.
%4 = add i32 1, %1
; Cast both.
%5 = add i32 %1, %1
ret void
}
; TD1: define void @CastAddGlobal() {
; TD1-NEXT: %1 = ptrtoint [4 x i8]* @bytes to i32
; TD1-NEXT: %2 = add i32 1, 2
; TD1-NEXT: %3 = add i32 %1, 2
; TD1-NEXT: %4 = add i32 1, %1
; TD1-NEXT: %5 = add i32 %1, %1
; TD1-NEXT: ret void
; TD1-NEXT: }
; PF1:
; PF1:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; TD2: define void @CastAddGlobal() {
; TD2-NEXT: %1 = add i32 1, 2
; TD2-NEXT: %2 = ptrtoint [4 x i8]* @bytes to i32
; TD2-NEXT: %3 = add i32 %2, 2
; TD2-NEXT: %4 = add i32 1, %2
; TD2-NEXT: %5 = add i32 %2, %2
; TD2-NEXT: ret void
; TD2-NEXT: }
; PF2:
; PF2:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; ------------------------------------------------------
; Show that we can handle pointer conversions for other scalar binary operators.
define void @CastBinop() {
; Runs one of each remaining integer binary operator (sub, mul, udiv, urem,
; srem, shl, lshr, ashr, and, or, xor) with both operands coming from
; pointer-to-int conversions (%2 from an alloca, %3 from a global).
%1 = alloca i8, i32 4, align 8
%2 = ptrtoint i8* %1 to i32
%3 = ptrtoint [4 x i8]* @bytes to i32
%4 = sub i32 %2, %3
%5 = mul i32 %2, %3
%6 = udiv i32 %2, %3
%7 = urem i32 %2, %3
%8 = srem i32 %2, %3
%9 = shl i32 %2, %3
%10 = lshr i32 %2, %3
%11 = ashr i32 %2, %3
%12 = and i32 %2, %3
%13 = or i32 %2, %3
%14 = xor i32 %2, %3
ret void
}
; TD1: define void @CastBinop() {
; TD1-NEXT: %1 = alloca i8, i32 4, align 8
; TD1-NEXT: %2 = ptrtoint i8* %1 to i32
; TD1-NEXT: %3 = ptrtoint [4 x i8]* @bytes to i32
; TD1-NEXT: %4 = sub i32 %2, %3
; TD1-NEXT: %5 = mul i32 %2, %3
; TD1-NEXT: %6 = udiv i32 %2, %3
; TD1-NEXT: %7 = urem i32 %2, %3
; TD1-NEXT: %8 = srem i32 %2, %3
; TD1-NEXT: %9 = shl i32 %2, %3
; TD1-NEXT: %10 = lshr i32 %2, %3
; TD1-NEXT: %11 = ashr i32 %2, %3
; TD1-NEXT: %12 = and i32 %2, %3
; TD1-NEXT: %13 = or i32 %2, %3
; TD1-NEXT: %14 = xor i32 %2, %3
; TD1-NEXT: ret void
; TD1-NEXT: }
; PF1:
; PF1:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; TD2: define void @CastBinop() {
; TD2-NEXT: %1 = alloca i8, i32 4, align 8
; TD2-NEXT: %2 = ptrtoint i8* %1 to i32
; TD2-NEXT: %3 = ptrtoint [4 x i8]* @bytes to i32
; TD2-NEXT: %4 = sub i32 %2, %3
; TD2-NEXT: %5 = mul i32 %2, %3
; TD2-NEXT: %6 = udiv i32 %2, %3
; TD2-NEXT: %7 = urem i32 %2, %3
; TD2-NEXT: %8 = srem i32 %2, %3
; TD2-NEXT: %9 = shl i32 %2, %3
; TD2-NEXT: %10 = lshr i32 %2, %3
; TD2-NEXT: %11 = ashr i32 %2, %3
; TD2-NEXT: %12 = and i32 %2, %3
; TD2-NEXT: %13 = or i32 %2, %3
; TD2-NEXT: %14 = xor i32 %2, %3
; TD2-NEXT: ret void
; TD2-NEXT: }
; PF2:
; PF2:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; ------------------------------------------------------
; Show that we handle (non-special) bitcasts by converting pointer
; casts to integer.
define void @TestCasts() {
; Each scalar conversion appears twice: once on a constant and once on the
; ptrtoint result %2 (trunc, zext, sext, uitofp, sitofp).
%1 = alloca i8, i32 4, align 8
%2 = ptrtoint i8* %1 to i32
%3 = trunc i32 257 to i8
%4 = trunc i32 %2 to i8
%5 = zext i32 257 to i64
%6 = zext i32 %2 to i64
%7 = sext i32 -1 to i64
%8 = sext i32 %2 to i64
%9 = uitofp i32 1 to float
%10 = uitofp i32 %2 to float
%11 = sitofp i32 -1 to float
%12 = sitofp i32 %2 to float
ret void
}
; TD1: define void @TestCasts() {
; TD1-NEXT: %1 = alloca i8, i32 4, align 8
; TD1-NEXT: %2 = ptrtoint i8* %1 to i32
; TD1-NEXT: %3 = trunc i32 257 to i8
; TD1-NEXT: %4 = trunc i32 %2 to i8
; TD1-NEXT: %5 = zext i32 257 to i64
; TD1-NEXT: %6 = zext i32 %2 to i64
; TD1-NEXT: %7 = sext i32 -1 to i64
; TD1-NEXT: %8 = sext i32 %2 to i64
; TD1-NEXT: %9 = uitofp i32 1 to float
; TD1-NEXT: %10 = uitofp i32 %2 to float
; TD1-NEXT: %11 = sitofp i32 -1 to float
; TD1-NEXT: %12 = sitofp i32 %2 to float
; TD1-NEXT: ret void
; TD1-NEXT: }
; PF1:
; PF1:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; TD2: define void @TestCasts() {
; TD2-NEXT: %1 = alloca i8, i32 4, align 8
; TD2-NEXT: %2 = trunc i32 257 to i8
; TD2-NEXT: %3 = ptrtoint i8* %1 to i32
; TD2-NEXT: %4 = trunc i32 %3 to i8
; TD2-NEXT: %5 = zext i32 257 to i64
; TD2-NEXT: %6 = zext i32 %3 to i64
; TD2-NEXT: %7 = sext i32 -1 to i64
; TD2-NEXT: %8 = sext i32 %3 to i64
; TD2-NEXT: %9 = uitofp i32 1 to float
; TD2-NEXT: %10 = uitofp i32 %3 to float
; TD2-NEXT: %11 = sitofp i32 -1 to float
; TD2-NEXT: %12 = sitofp i32 %3 to float
; TD2-NEXT: ret void
; TD2-NEXT: }
; PF2:
; PF2:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; ------------------------------------------------------
; Show that if a ptrtoint is used in something other than known scalar
; operations, it gets copied to the bitcode file.
; TODO(kschimpf): Remove this once all scalar operations have been handled.
define void @TestSavedPtrToInt() {
; %2 also flows into a call argument — not a known scalar operation — so
; the ptrtoint must be written to the bitcode file rather than elided
; (TD2 below matches TD1 exactly).
%1 = alloca i8, i32 4, align 8
%2 = ptrtoint i8* %1 to i32
%3 = add i32 %2, 0
%4 = call i32 @bar(i32 %2)
ret void
}
; TD1: define void @TestSavedPtrToInt() {
; TD1-NEXT: %1 = alloca i8, i32 4, align 8
; TD1-NEXT: %2 = ptrtoint i8* %1 to i32
; TD1-NEXT: %3 = add i32 %2, 0
; TD1-NEXT: %4 = call i32 @bar(i32 %2)
; TD1-NEXT: ret void
; TD1-NEXT: }
; PF1:
; PF1:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; TD2: define void @TestSavedPtrToInt() {
; TD2-NEXT: %1 = alloca i8, i32 4, align 8
; TD2-NEXT: %2 = ptrtoint i8* %1 to i32
; TD2-NEXT: %3 = add i32 %2, 0
; TD2-NEXT: %4 = call i32 @bar(i32 %2)
; TD2-NEXT: ret void
; TD2-NEXT: }
; PF2:
; PF2:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; ------------------------------------------------------
; Show that we can handle pointer conversions for icmp.
define void @CastIcmp() {
; icmp operand matrix: no cast, alloca cast first, global cast second,
; and both casts in each order.
%1 = alloca i8, i32 4, align 8
%2 = ptrtoint i8* %1 to i32
%3 = ptrtoint [4 x i8]* @bytes to i32
%4 = icmp eq i32 1, 2
%5 = icmp eq i32 %2, 2
%6 = icmp eq i32 1, %3
%7 = icmp eq i32 %2, %3
%8 = icmp eq i32 %3, %2
ret void
}
; TD1: define void @CastIcmp() {
; TD1-NEXT: %1 = alloca i8, i32 4, align 8
; TD1-NEXT: %2 = ptrtoint i8* %1 to i32
; TD1-NEXT: %3 = ptrtoint [4 x i8]* @bytes to i32
; TD1-NEXT: %4 = icmp eq i32 1, 2
; TD1-NEXT: %5 = icmp eq i32 %2, 2
; TD1-NEXT: %6 = icmp eq i32 1, %3
; TD1-NEXT: %7 = icmp eq i32 %2, %3
; TD1-NEXT: %8 = icmp eq i32 %3, %2
; TD1-NEXT: ret void
; TD1-NEXT: }
; PF1:
; PF1:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; TD2: define void @CastIcmp() {
; TD2-NEXT: %1 = alloca i8, i32 4, align 8
; TD2-NEXT: %2 = icmp eq i32 1, 2
; TD2-NEXT: %3 = ptrtoint i8* %1 to i32
; TD2-NEXT: %4 = icmp eq i32 %3, 2
; TD2-NEXT: %5 = ptrtoint [4 x i8]* @bytes to i32
; TD2-NEXT: %6 = icmp eq i32 1, %5
; TD2-NEXT: %7 = icmp eq i32 %3, %5
; TD2-NEXT: %8 = icmp eq i32 %5, %3
; TD2-NEXT: ret void
; TD2-NEXT: }
; PF2:
; PF2:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; ------------------------------------------------------
; Show that we can handle pointer conversions for Select.
define void @CastSelect() {
; Same operand matrix as @CastIcmp, applied to the two value operands of
; select (the i1 condition is always the constant true).
%1 = alloca i8, i32 4, align 8
%2 = ptrtoint i8* %1 to i32
%3 = ptrtoint [4 x i8]* @bytes to i32
%4 = select i1 true, i32 1, i32 2
%5 = select i1 true, i32 %2, i32 2
%6 = select i1 true, i32 1, i32 %3
%7 = select i1 true, i32 %2, i32 %3
%8 = select i1 true, i32 %3, i32 %2
ret void
}
; TD1: define void @CastSelect() {
; TD1-NEXT: %1 = alloca i8, i32 4, align 8
; TD1-NEXT: %2 = ptrtoint i8* %1 to i32
; TD1-NEXT: %3 = ptrtoint [4 x i8]* @bytes to i32
; TD1-NEXT: %4 = select i1 true, i32 1, i32 2
; TD1-NEXT: %5 = select i1 true, i32 %2, i32 2
; TD1-NEXT: %6 = select i1 true, i32 1, i32 %3
; TD1-NEXT: %7 = select i1 true, i32 %2, i32 %3
; TD1-NEXT: %8 = select i1 true, i32 %3, i32 %2
; TD1-NEXT: ret void
; TD1-NEXT: }
; PF1:
; PF1:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; TD2: define void @CastSelect() {
; TD2-NEXT: %1 = alloca i8, i32 4, align 8
; TD2-NEXT: %2 = select i1 true, i32 1, i32 2
; TD2-NEXT: %3 = ptrtoint i8* %1 to i32
; TD2-NEXT: %4 = select i1 true, i32 %3, i32 2
; TD2-NEXT: %5 = ptrtoint [4 x i8]* @bytes to i32
; TD2-NEXT: %6 = select i1 true, i32 1, i32 %5
; TD2-NEXT: %7 = select i1 true, i32 %3, i32 %5
; TD2-NEXT: %8 = select i1 true, i32 %5, i32 %3
; TD2-NEXT: ret void
; TD2-NEXT: }
; PF2:
; PF2:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; ------------------------------------------------------
; Show that if a phi node refers to a pointer cast, we add
; them at the end of the incoming block.
define void @PhiBackwardRefs(i1) {
; Phi %8 refers (backwards) to the elided ptrtoint %5; on thaw a ptrtoint
; is re-inserted at the end of each incoming block (%true and %false), and
; the load's bitcast is likewise re-created per block (see TD2).
%2 = alloca i8, i32 4, align 8
%3 = bitcast i8* %2 to i32*
%4 = alloca i8, i32 4, align 8
%5 = ptrtoint i8* %4 to i32
br i1 %0, label %true, label %false
true:
%6 = load i32* %3
br label %merge
false:
%7 = load i32* %3
br label %merge
merge:
%8 = phi i32 [%5, %true], [%5, %false]
%9 = phi i32 [%6, %true], [%7, %false]
ret void
}
; TD1: define void @PhiBackwardRefs(i1) {
; TD1-NEXT: %2 = alloca i8, i32 4, align 8
; TD1-NEXT: %3 = bitcast i8* %2 to i32*
; TD1-NEXT: %4 = alloca i8, i32 4, align 8
; TD1-NEXT: %5 = ptrtoint i8* %4 to i32
; TD1-NEXT: br i1 %0, label %true, label %false
; TD1: true:
; TD1-NEXT: %6 = load i32* %3
; TD1-NEXT: br label %merge
; TD1: false:
; TD1-NEXT: %7 = load i32* %3
; TD1-NEXT: br label %merge
; TD1: merge:
; TD1-NEXT: %8 = phi i32 [ %5, %true ], [ %5, %false ]
; TD1-NEXT: %9 = phi i32 [ %6, %true ], [ %7, %false ]
; TD1-NEXT: ret void
; TD1-NEXT: }
; PF1:
; PF1:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1:
; TD2: define void @PhiBackwardRefs(i1) {
; TD2-NEXT: %2 = alloca i8, i32 4, align 8
; TD2-NEXT: %3 = alloca i8, i32 4, align 8
; TD2-NEXT: br i1 %0, label %true, label %false
; TD2: true:
; TD2-NEXT: %4 = bitcast i8* %2 to i32*
; TD2-NEXT: %5 = load i32* %4
; TD2-NEXT: %6 = ptrtoint i8* %3 to i32
; TD2-NEXT: br label %merge
; TD2: false:
; TD2-NEXT: %7 = bitcast i8* %2 to i32*
; TD2-NEXT: %8 = load i32* %7
; TD2-NEXT: %9 = ptrtoint i8* %3 to i32
; TD2-NEXT: br label %merge
; TD2: merge:
; TD2-NEXT: %10 = phi i32 [ %6, %true ], [ %9, %false ]
; TD2-NEXT: %11 = phi i32 [ %5, %true ], [ %8, %false ]
; TD2-NEXT: ret void
; TD2-NEXT: }
; PF2:
; PF2:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2:
; ------------------------------------------------------
; Like PhiBackwardRefs except the phi nodes forward reference
; instructions instead of backwards references.
define void @PhiForwardRefs(i1) {
; Same shape as @PhiBackwardRefs, but the blocks are laid out so the phi
; nodes in %merge forward-reference %9, %4 and %5, which are defined in
; blocks that appear later in the function body.
br label %start
merge:
%2 = phi i32 [%9, %true], [%9, %false]
%3 = phi i32 [%4, %true], [%5, %false]
ret void
true:
%4 = load i32* %7
br label %merge
false:
%5 = load i32* %7
br label %merge
start:
%6 = alloca i8, i32 4, align 8
%7 = bitcast i8* %6 to i32*
%8 = alloca i8, i32 4, align 8
%9 = ptrtoint i8* %8 to i32
br i1 %0, label %true, label %false
}
; TD1: define void @PhiForwardRefs(i1) {
; TD1-NEXT: br label %start
; TD1: merge:
; TD1-NEXT: %2 = phi i32 [ %9, %true ], [ %9, %false ]
; TD1-NEXT: %3 = phi i32 [ %4, %true ], [ %5, %false ]
; TD1-NEXT: ret void
; TD1: true:
; TD1-NEXT: %4 = load i32* %7
; TD1-NEXT: br label %merge
; TD1: false:
; TD1-NEXT: %5 = load i32* %7
; TD1-NEXT: br label %merge
; TD1: start:
; TD1-NEXT: %6 = alloca i8, i32 4, align 8
; TD1-NEXT: %7 = bitcast i8* %6 to i32*
; TD1-NEXT: %8 = alloca i8, i32 4, align 8
; TD1-NEXT: %9 = ptrtoint i8* %8 to i32
; TD1-NEXT: br i1 %0, label %true, label %false
; TD1-NEXT: }
; PF1:
; PF1:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1:
; TD2: define void @PhiForwardRefs(i1) {
; TD2-NEXT: br label %start
; TD2: merge
; TD2-NEXT: %2 = phi i32 [ %6, %true ], [ %9, %false ]
; TD2-NEXT: %3 = phi i32 [ %5, %true ], [ %8, %false ]
; TD2-NEXT: ret void
; TD2: true:
; TD2-NEXT: %4 = bitcast i8* %10 to i32*
; TD2-NEXT: %5 = load i32* %4
; TD2-NEXT: %6 = ptrtoint i8* %11 to i32
; TD2-NEXT: br label %merge
; TD2: false:
; TD2-NEXT: %7 = bitcast i8* %10 to i32*
; TD2-NEXT: %8 = load i32* %7
; TD2-NEXT: %9 = ptrtoint i8* %11 to i32
; TD2-NEXT: br label %merge
; TD2: start:
; TD2-NEXT: %10 = alloca i8, i32 4, align 8
; TD2-NEXT: %11 = alloca i8, i32 4, align 8
; TD2-NEXT: br i1 %0, label %true, label %false
; TD2-NEXT: }
; PF2:
; PF2:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2:
; ------------------------------------------------------
; Show that if a phi node incoming block already has a pointer cast,
; we use it instead of adding one at the end of the block. In this
; example, we reuse instruction %7 in block true for phi node %10.
define void @PhiMergeCast(i1) {
; Block %true already contains a ptrtoint of %4 (%7); the thawed phi in
; %merge reuses that existing cast instead of inserting a duplicate at the
; end of the block (see TD2, where %6 serves both the add and the phi).
%2 = alloca i8, i32 4, align 8
%3 = bitcast i8* %2 to i32*
%4 = alloca i8, i32 4, align 8
%5 = ptrtoint i8* %4 to i32
br i1 %0, label %true, label %false
true:
%6 = load i32* %3
%7 = ptrtoint i8* %4 to i32
%8 = add i32 %6, %7
br label %merge
false:
%9 = load i32* %3
br label %merge
merge:
%10 = phi i32 [%5, %true], [%5, %false]
%11 = phi i32 [%6, %true], [%9, %false]
ret void
}
; TD1: define void @PhiMergeCast(i1) {
; TD1-NEXT: %2 = alloca i8, i32 4, align 8
; TD1-NEXT: %3 = bitcast i8* %2 to i32*
; TD1-NEXT: %4 = alloca i8, i32 4, align 8
; TD1-NEXT: %5 = ptrtoint i8* %4 to i32
; TD1-NEXT: br i1 %0, label %true, label %false
; TD1: true:
; TD1-NEXT: %6 = load i32* %3
; TD1-NEXT: %7 = ptrtoint i8* %4 to i32
; TD1-NEXT: %8 = add i32 %6, %7
; TD1-NEXT: br label %merge
; TD1: false:
; TD1-NEXT: %9 = load i32* %3
; TD1-NEXT: br label %merge
; TD1: merge:
; TD1-NEXT: %10 = phi i32 [ %5, %true ], [ %5, %false ]
; TD1-NEXT: %11 = phi i32 [ %6, %true ], [ %9, %false ]
; TD1-NEXT: ret void
; TD1-NEXT: }
; PF1:
; PF1:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1:
; TD2: define void @PhiMergeCast(i1) {
; TD2-NEXT: %2 = alloca i8, i32 4, align 8
; TD2-NEXT: %3 = alloca i8, i32 4, align 8
; TD2-NEXT: br i1 %0, label %true, label %false
; TD2: true:
; TD2-NEXT: %4 = bitcast i8* %2 to i32*
; TD2-NEXT: %5 = load i32* %4
; TD2-NEXT: %6 = ptrtoint i8* %3 to i32
; TD2-NEXT: %7 = add i32 %5, %6
; TD2-NEXT: br label %merge
; TD2: false:
; TD2-NEXT: %8 = bitcast i8* %2 to i32*
; TD2-NEXT: %9 = load i32* %8
; TD2-NEXT: %10 = ptrtoint i8* %3 to i32
; TD2-NEXT: br label %merge
; TD2: merge:
; TD2-NEXT: %11 = phi i32 [ %6, %true ], [ %10, %false ]
; TD2-NEXT: %12 = phi i32 [ %5, %true ], [ %9, %false ]
; TD2-NEXT: ret void
; TD2-NEXT: }
; PF2:
; PF2:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2:
; ------------------------------------------------------
; Show that we must introduce a cast reference for each
; reachable block, but one is sufficient.
define void @LongReachingCasts(i1) {
; The casts %3/%4 are used in four leaf blocks reached through two levels
; of branching. On thaw, each reachable using block gets its own single
; pair of re-inserted casts, shared by both stores in that block (see TD2).
%2 = alloca i8, i32 4, align 8
%3 = ptrtoint i8* %2 to i32
%4 = bitcast [4 x i8]* @bytes to i32*
br i1 %0, label %Split1, label %Split2
Split1:
br i1 %0, label %b1, label %b2
Split2:
br i1 %0, label %b3, label %b4
b1:
store i32 %3, i32* %4, align 1
store i32 %3, i32* %4, align 1
ret void
b2:
store i32 %3, i32* %4, align 1
store i32 %3, i32* %4, align 1
ret void
b3:
store i32 %3, i32* %4, align 1
store i32 %3, i32* %4, align 1
ret void
b4:
store i32 %3, i32* %4, align 1
store i32 %3, i32* %4, align 1
ret void
}
; TD1: define void @LongReachingCasts(i1) {
; TD1-NEXT: %2 = alloca i8, i32 4, align 8
; TD1-NEXT: %3 = ptrtoint i8* %2 to i32
; TD1-NEXT: %4 = bitcast [4 x i8]* @bytes to i32*
; TD1-NEXT: br i1 %0, label %Split1, label %Split2
; TD1: Split1:
; TD1-NEXT: br i1 %0, label %b1, label %b2
; TD1: Split2:
; TD1-NEXT: br i1 %0, label %b3, label %b4
; TD1: b1:
; TD1-NEXT: store i32 %3, i32* %4, align 1
; TD1-NEXT: store i32 %3, i32* %4, align 1
; TD1-NEXT: ret void
; TD1: b2:
; TD1-NEXT: store i32 %3, i32* %4, align 1
; TD1-NEXT: store i32 %3, i32* %4, align 1
; TD1-NEXT: ret void
; TD1: b3:
; TD1-NEXT: store i32 %3, i32* %4, align 1
; TD1-NEXT: store i32 %3, i32* %4, align 1
; TD1-NEXT: ret void
; TD1: b4:
; TD1-NEXT: store i32 %3, i32* %4, align 1
; TD1-NEXT: store i32 %3, i32* %4, align 1
; TD1-NEXT: ret void
; TD1-NEXT: }
; PF1:
; PF1:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1-NEXT:
; PF1:
; TD2: define void @LongReachingCasts(i1) {
; TD2-NEXT: %2 = alloca i8, i32 4, align 8
; TD2-NEXT: br i1 %0, label %Split1, label %Split2
; TD2: Split1:
; TD2-NEXT: br i1 %0, label %b1, label %b2
; TD2: Split2:
; TD2-NEXT: br i1 %0, label %b3, label %b4
; TD2: b1:
; TD2-NEXT: %3 = ptrtoint i8* %2 to i32
; TD2-NEXT: %4 = bitcast [4 x i8]* @bytes to i32*
; TD2-NEXT: store i32 %3, i32* %4, align 1
; TD2-NEXT: store i32 %3, i32* %4, align 1
; TD2-NEXT: ret void
; TD2: b2:
; TD2-NEXT: %5 = ptrtoint i8* %2 to i32
; TD2-NEXT: %6 = bitcast [4 x i8]* @bytes to i32*
; TD2-NEXT: store i32 %5, i32* %6, align 1
; TD2-NEXT: store i32 %5, i32* %6, align 1
; TD2-NEXT: ret void
; TD2: b3:
; TD2-NEXT: %7 = ptrtoint i8* %2 to i32
; TD2-NEXT: %8 = bitcast [4 x i8]* @bytes to i32*
; TD2-NEXT: store i32 %7, i32* %8, align 1
; TD2-NEXT: store i32 %7, i32* %8, align 1
; TD2-NEXT: ret void
; TD2: b4:
; TD2-NEXT: %9 = ptrtoint i8* %2 to i32
; TD2-NEXT: %10 = bitcast [4 x i8]* @bytes to i32*
; TD2-NEXT: store i32 %9, i32* %10, align 1
; TD2-NEXT: store i32 %9, i32* %10, align 1
; TD2-NEXT: ret void
; TD2-NEXT: }
; PF2:
; PF2:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2-NEXT:
; PF2: