; RUN: llc < %s -march=ppc64 -verify-machineinstrs
;
; This test is disabled until PPCISelLowering learns to insert proper 64-bit
; code for ATOMIC_CMP_SWAP. Currently, it is inserting 32-bit instructions with
; 64-bit operands, which causes the machine code verifier to throw a tantrum.
;
; XFAIL: *
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
target triple = "powerpc64-apple-darwin9"
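; Globals of each integer width (8/16/32/64-bit, signed and unsigned flavors) used as atomic operands.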
@sc = common global i8 0
@uc = common global i8 0
@ss = common global i16 0
@us = common global i16 0
@si = common global i32 0
@ui = common global i32 0
@sl = common global i64 0, align 8
@ul = common global i64 0, align 8
@sll = common global i64 0, align 8
@ull = common global i64 0, align 8
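; Performs monotonic atomicrmw operations on each global and ignores the returned values.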
define void @test_op_ignore() nounwind {
entry:
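; atomicrmw add with operand 1, i8 through i64.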
%0 = atomicrmw add i8* @sc, i8 1 monotonic
%1 = atomicrmw add i8* @uc, i8 1 monotonic
%2 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
%3 = atomicrmw add i16* %2, i16 1 monotonic
%4 = bitcast i8* bitcast (i16* @us to i8*) to i16*
%5 = atomicrmw add i16* %4, i16 1 monotonic
%6 = bitcast i8* bitcast (i32* @si to i8*) to i32*
%7 = atomicrmw add i32* %6, i32 1 monotonic
%8 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
%9 = atomicrmw add i32* %8, i32 1 monotonic
%10 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
%11 = atomicrmw add i64* %10, i64 1 monotonic
%12 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
%13 = atomicrmw add i64* %12, i64 1 monotonic
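; atomicrmw sub with operand 1, i8 through i64.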
%14 = atomicrmw sub i8* @sc, i8 1 monotonic
%15 = atomicrmw sub i8* @uc, i8 1 monotonic
%16 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
%17 = atomicrmw sub i16* %16, i16 1 monotonic
%18 = bitcast i8* bitcast (i16* @us to i8*) to i16*
%19 = atomicrmw sub i16* %18, i16 1 monotonic
%20 = bitcast i8* bitcast (i32* @si to i8*) to i32*
%21 = atomicrmw sub i32* %20, i32 1 monotonic
%22 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
%23 = atomicrmw sub i32* %22, i32 1 monotonic
%24 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
%25 = atomicrmw sub i64* %24, i64 1 monotonic
%26 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
%27 = atomicrmw sub i64* %26, i64 1 monotonic
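; atomicrmw or with operand 1, i8 through i64.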
%28 = atomicrmw or i8* @sc, i8 1 monotonic
%29 = atomicrmw or i8* @uc, i8 1 monotonic
%30 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
%31 = atomicrmw or i16* %30, i16 1 monotonic
%32 = bitcast i8* bitcast (i16* @us to i8*) to i16*
%33 = atomicrmw or i16* %32, i16 1 monotonic
%34 = bitcast i8* bitcast (i32* @si to i8*) to i32*
%35 = atomicrmw or i32* %34, i32 1 monotonic
%36 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
%37 = atomicrmw or i32* %36, i32 1 monotonic
%38 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
%39 = atomicrmw or i64* %38, i64 1 monotonic
%40 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
%41 = atomicrmw or i64* %40, i64 1 monotonic
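; atomicrmw xor with operand 1.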
%42 = atomicrmw xor i8* @sc, i8 1 monotonic
%43 = atomicrmw xor i8* @uc, i8 1 monotonic
%44 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
%45 = atomicrmw xor i16* %44, i16 1 monotonic