-rw-r--r--   lib/Target/X86/X86InstrInfo.cpp     |  45
-rw-r--r--   lib/Target/X86/X86InstrInfo.td      |   7
-rw-r--r--   lib/Target/X86/X86InstrSSE.td       |   4
-rw-r--r--   lib/Target/X86/X86RegisterInfo.cpp  |  66
-rw-r--r--   lib/Target/X86/X86RegisterInfo.td   | 105
-rw-r--r--   test/CodeGen/X86/coalesce-esp.ll    |  36
6 files changed, 225 insertions, 38 deletions
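
Background note on the encoding rule this change works around: in the x86 ModRM/SIB scheme, the SIB index field value 0b100 (the register number of ESP/RSP) is reserved to mean "no index register", so the stack pointer can serve as the base of an address but never as the scaled index. The standalone C++ sketch below is only an illustration of that rule; it is not LLVM code and the names are made up for the example. It packs a few SIB bytes by hand to show why an address like [eax + esp*1] has no encoding.

// sib_demo.cpp - illustration only, not part of the patch.
// SIB byte layout: bits 7-6 scale (log2), bits 5-3 index reg, bits 2-0 base reg.
// Index field 0b100 (ESP's register number) is reserved to mean "no index".
#include <cstdint>
#include <cstdio>

enum Reg : uint8_t { EAX = 0, ECX, EDX, EBX, ESP, EBP, ESI, EDI };

static uint8_t encodeSIB(uint8_t scaleLog2, Reg index, Reg base) {
  return uint8_t((scaleLog2 << 6) | (index << 3) | base);
}

int main() {
  // [eax + ebx*1]: fine, the index field names EBX.
  printf("index=ebx base=eax -> SIB 0x%02x\n", (unsigned)encodeSIB(0, EBX, EAX));
  // [eax + esp*1]: the index field would be 0b100, which the CPU decodes as
  // "no index", so this address is not expressible; ESP may only be the base.
  printf("index=esp base=eax -> SIB 0x%02x (index bits mean 'none')\n",
         (unsigned)encodeSIB(0, ESP, EAX));
  return 0;
}
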
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 4b29305086..afdb0f0f77 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -1695,14 +1695,22 @@ bool X86InstrInfo::copyRegToReg(MachineBasicBlock &MBB,
     /* Source and destination have the same register class. */;
   else if (CommonRC->hasSuperClass(SrcRC))
     CommonRC = SrcRC;
-  else if (!DestRC->hasSubClass(SrcRC))
-    CommonRC = 0;
+  else if (!DestRC->hasSubClass(SrcRC)) {
+    // Neither GR64_NOREX nor GR64_NOSP is a superclass of the other,
+    // but we want to copy them as GR64.
+    if (SrcRC->hasSuperClass(&X86::GR64RegClass) &&
+        DestRC->hasSuperClass(&X86::GR64RegClass))
+      CommonRC = &X86::GR64RegClass;
+    else
+      CommonRC = 0;
+  }
 
   if (CommonRC) {
     unsigned Opc;
-    if (CommonRC == &X86::GR64RegClass) {
+    if (CommonRC == &X86::GR64RegClass || CommonRC == &X86::GR64_NOSPRegClass) {
       Opc = X86::MOV64rr;
-    } else if (CommonRC == &X86::GR32RegClass) {
+    } else if (CommonRC == &X86::GR32RegClass ||
+               CommonRC == &X86::GR32_NOSPRegClass) {
       Opc = X86::MOV32rr;
     } else if (CommonRC == &X86::GR16RegClass) {
       Opc = X86::MOV16rr;
@@ -1727,7 +1735,8 @@ bool X86InstrInfo::copyRegToReg(MachineBasicBlock &MBB,
         Opc = X86::MOV8rr_NOREX;
       else
         Opc = X86::MOV8rr;
-    } else if (CommonRC == &X86::GR64_NOREXRegClass) {
+    } else if (CommonRC == &X86::GR64_NOREXRegClass ||
+               CommonRC == &X86::GR64_NOREX_NOSPRegClass) {
       Opc = X86::MOV64rr;
     } else if (CommonRC == &X86::GR32_NOREXRegClass) {
       Opc = X86::MOV32rr;
@@ -1755,16 +1764,17 @@ bool X86InstrInfo::copyRegToReg(MachineBasicBlock &MBB,
     BuildMI(MBB, MI, DL, get(Opc), DestReg).addReg(SrcReg);
     return true;
   }
-  
+
   // Moving EFLAGS to / from another register requires a push and a pop.
   if (SrcRC == &X86::CCRRegClass) {
     if (SrcReg != X86::EFLAGS)
       return false;
-    if (DestRC == &X86::GR64RegClass) {
+    if (DestRC == &X86::GR64RegClass || DestRC == &X86::GR64_NOSPRegClass) {
      BuildMI(MBB, MI, DL, get(X86::PUSHFQ));
      BuildMI(MBB, MI, DL, get(X86::POP64r), DestReg);
      return true;
-    } else if (DestRC == &X86::GR32RegClass) {
+    } else if (DestRC == &X86::GR32RegClass ||
+               DestRC == &X86::GR32_NOSPRegClass) {
       BuildMI(MBB, MI, DL, get(X86::PUSHFD));
       BuildMI(MBB, MI, DL, get(X86::POP32r), DestReg);
       return true;
@@ -1772,11 +1782,12 @@ bool X86InstrInfo::copyRegToReg(MachineBasicBlock &MBB,
   } else if (DestRC == &X86::CCRRegClass) {
     if (DestReg != X86::EFLAGS)
       return false;
-    if (SrcRC == &X86::GR64RegClass) {
+    if (SrcRC == &X86::GR64RegClass || SrcRC == &X86::GR64_NOSPRegClass) {
       BuildMI(MBB, MI, DL, get(X86::PUSH64r)).addReg(SrcReg);
       BuildMI(MBB, MI, DL, get(X86::POPFQ));
       return true;
-    } else if (SrcRC == &X86::GR32RegClass) {
+    } else if (SrcRC == &X86::GR32RegClass ||
+               SrcRC == &X86::GR32_NOSPRegClass) {
       BuildMI(MBB, MI, DL, get(X86::PUSH32r)).addReg(SrcReg);
       BuildMI(MBB, MI, DL, get(X86::POPFD));
       return true;
@@ -1834,9 +1845,9 @@ static unsigned getStoreRegOpcode(unsigned SrcReg,
                                   bool isStackAligned,
                                   TargetMachine &TM) {
   unsigned Opc = 0;
-  if (RC == &X86::GR64RegClass) {
+  if (RC == &X86::GR64RegClass || RC == &X86::GR64_NOSPRegClass) {
     Opc = X86::MOV64mr;
-  } else if (RC == &X86::GR32RegClass) {
+  } else if (RC == &X86::GR32RegClass || RC == &X86::GR32_NOSPRegClass) {
     Opc = X86::MOV32mr;
   } else if (RC == &X86::GR16RegClass) {
     Opc = X86::MOV16mr;
@@ -1861,7 +1872,8 @@ static unsigned getStoreRegOpcode(unsigned SrcReg,
       Opc = X86::MOV8mr_NOREX;
     else
       Opc = X86::MOV8mr;
-  } else if (RC == &X86::GR64_NOREXRegClass) {
+  } else if (RC == &X86::GR64_NOREXRegClass ||
+             RC == &X86::GR64_NOREX_NOSPRegClass) {
     Opc = X86::MOV64mr;
   } else if (RC == &X86::GR32_NOREXRegClass) {
     Opc = X86::MOV32mr;
@@ -1926,9 +1938,9 @@ static unsigned getLoadRegOpcode(unsigned DestReg,
                                  bool isStackAligned,
                                  const TargetMachine &TM) {
   unsigned Opc = 0;
-  if (RC == &X86::GR64RegClass) {
+  if (RC == &X86::GR64RegClass || RC == &X86::GR64_NOSPRegClass) {
     Opc = X86::MOV64rm;
-  } else if (RC == &X86::GR32RegClass) {
+  } else if (RC == &X86::GR32RegClass || RC == &X86::GR32_NOSPRegClass) {
     Opc = X86::MOV32rm;
   } else if (RC == &X86::GR16RegClass) {
     Opc = X86::MOV16rm;
@@ -1953,7 +1965,8 @@ static unsigned getLoadRegOpcode(unsigned DestReg,
       Opc = X86::MOV8rm_NOREX;
     else
       Opc = X86::MOV8rm;
-  } else if (RC == &X86::GR64_NOREXRegClass) {
+  } else if (RC == &X86::GR64_NOREXRegClass ||
+             RC == &X86::GR64_NOREX_NOSPRegClass) {
     Opc = X86::MOV64rm;
   } else if (RC == &X86::GR32_NOREXRegClass) {
     Opc = X86::MOV32rm;
diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index 0e40965a4c..c547c76ec1 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -167,12 +167,15 @@ def i32imm_pcrel : Operand<i32> {
   let PrintMethod = "print_pcrel_imm";
 }
 
+// A version of ptr_rc which excludes SP, ESP, and RSP. This is used for
+// the index operand of an address, to conform to x86 encoding restrictions.
+def ptr_rc_nosp : PointerLikeRegClass<1>;
 
 // *mem - Operand definitions for the funky X86 addressing mode operands.
 //
 class X86MemOperand<string printMethod> : Operand<iPTR> {
   let PrintMethod = printMethod;
-  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc, i32imm, i8imm);
+  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
 }
 
 def i8mem   : X86MemOperand<"printi8mem">;
@@ -191,7 +194,7 @@ def f256mem : X86MemOperand<"printf256mem">;
 // plain GR64, so that it doesn't potentially require a REX prefix.
 def i8mem_NOREX : Operand<i64> {
   let PrintMethod = "printi8mem";
-  let MIOperandInfo = (ops GR64_NOREX, i8imm, GR64_NOREX, i32imm, i8imm);
+  let MIOperandInfo = (ops GR64_NOREX, i8imm, GR64_NOREX_NOSP, i32imm, i8imm);
 }
 
 def lea32mem : Operand<i32> {
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index 853f88ed19..2440aecf5f 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -86,11 +86,11 @@ def sse_load_f64 : ComplexPattern<v2f64, 5, "SelectScalarSSELoad", [],
 
 def ssmem : Operand<v4f32> {
   let PrintMethod = "printf32mem";
-  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc, i32imm, i8imm);
+  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
 }
 def sdmem : Operand<v2f64> {
   let PrintMethod = "printf64mem";
-  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc, i32imm, i8imm);
+  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
 }
 
 //===----------------------------------------------------------------------===//
diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp
index e5d563c9ad..864e33b224 100644
--- a/lib/Target/X86/X86RegisterInfo.cpp
+++ b/lib/Target/X86/X86RegisterInfo.cpp
@@ -37,10 +37,16 @@
 #include "llvm/Target/TargetOptions.h"
 #include "llvm/ADT/BitVector.h"
 #include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Compiler.h"
 #include "llvm/Support/ErrorHandling.h"
 using namespace llvm;
 
+static cl::opt<bool>
+StrictIndexRegclass("strict-index-regclass",
+                    cl::desc("Use a special register class to avoid letting SP "
+                             "be used as an index"));
+
 X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
                                  const TargetInstrInfo &tii)
   : X86GenRegisterInfo(tm.getSubtarget<X86Subtarget>().is64Bit() ?
@@ -165,20 +171,25 @@ X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
         return A;
     } else if (B == &X86::GR8_ABCD_LRegClass || B == &X86::GR8_ABCD_HRegClass) {
       if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
-          A == &X86::GR64_NOREXRegClass)
+          A == &X86::GR64_NOREXRegClass ||
+          A == &X86::GR64_NOSPRegClass ||
+          A == &X86::GR64_NOREX_NOSPRegClass)
         return &X86::GR64_ABCDRegClass;
       else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
-               A == &X86::GR32_NOREXRegClass)
+               A == &X86::GR32_NOREXRegClass ||
+               A == &X86::GR32_NOSPRegClass)
         return &X86::GR32_ABCDRegClass;
       else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
                A == &X86::GR16_NOREXRegClass)
         return &X86::GR16_ABCDRegClass;
     } else if (B == &X86::GR8_NOREXRegClass) {
-      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass)
+      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
+          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
         return &X86::GR64_NOREXRegClass;
       else if (A == &X86::GR64_ABCDRegClass)
         return &X86::GR64_ABCDRegClass;
-      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass)
+      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
+               A == &X86::GR32_NOSPRegClass)
         return &X86::GR32_NOREXRegClass;
       else if (A == &X86::GR32_ABCDRegClass)
         return &X86::GR32_ABCDRegClass;
@@ -192,10 +203,12 @@ X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
     // 8-bit hi
     if (B == &X86::GR8_ABCD_HRegClass) {
       if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
-          A == &X86::GR64_NOREXRegClass)
+          A == &X86::GR64_NOREXRegClass ||
+          A == &X86::GR64_NOSPRegClass ||
+          A == &X86::GR64_NOREX_NOSPRegClass)
         return &X86::GR64_ABCDRegClass;
       else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
-               A == &X86::GR32_NOREXRegClass)
+               A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
         return &X86::GR32_ABCDRegClass;
       else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
                A == &X86::GR16_NOREXRegClass)
@@ -209,17 +222,21 @@ X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
         return A;
     } else if (B == &X86::GR16_ABCDRegClass) {
       if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
-          A == &X86::GR64_NOREXRegClass)
+          A == &X86::GR64_NOREXRegClass ||
+          A == &X86::GR64_NOSPRegClass ||
+          A == &X86::GR64_NOREX_NOSPRegClass)
         return &X86::GR64_ABCDRegClass;
       else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
-               A == &X86::GR32_NOREXRegClass)
+               A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
         return &X86::GR32_ABCDRegClass;
     } else if (B == &X86::GR16_NOREXRegClass) {
-      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass)
+      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
+          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
         return &X86::GR64_NOREXRegClass;
       else if (A == &X86::GR64_ABCDRegClass)
         return &X86::GR64_ABCDRegClass;
-      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass)
+      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
+               A == &X86::GR32_NOSPRegClass)
         return &X86::GR32_NOREXRegClass;
       else if (A == &X86::GR32_ABCDRegClass)
         return &X86::GR64_ABCDRegClass;
@@ -227,15 +244,18 @@ X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
     break;
   case 4:
     // 32-bit
-    if (B == &X86::GR32RegClass) {
+    if (B == &X86::GR32RegClass || B == &X86::GR32_NOSPRegClass) {
       if (A->getSize() == 8)
         return A;
     } else if (B == &X86::GR32_ABCDRegClass) {
       if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
-          A == &X86::GR64_NOREXRegClass)
+          A == &X86::GR64_NOREXRegClass ||
+          A == &X86::GR64_NOSPRegClass ||
+          A == &X86::GR64_NOREX_NOSPRegClass)
         return &X86::GR64_ABCDRegClass;
     } else if (B == &X86::GR32_NOREXRegClass) {
-      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass)
+      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
+          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
         return &X86::GR64_NOREXRegClass;
       else if (A == &X86::GR64_ABCDRegClass)
         return &X86::GR64_ABCDRegClass;
@@ -247,9 +267,23 @@ X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
 
 const TargetRegisterClass *X86RegisterInfo::
 getPointerRegClass(unsigned Kind) const {
-  if (TM.getSubtarget<X86Subtarget>().is64Bit())
-    return &X86::GR64RegClass;
-  return &X86::GR32RegClass;
+  switch (Kind) {
+  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
+  case 0: // Normal GPRs.
+    if (TM.getSubtarget<X86Subtarget>().is64Bit())
+      return &X86::GR64RegClass;
+    return &X86::GR32RegClass;
+  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
+    if (!StrictIndexRegclass) {
+      if (TM.getSubtarget<X86Subtarget>().is64Bit())
+        return &X86::GR64RegClass;
+      return &X86::GR32RegClass;
+    } else {
+      if (TM.getSubtarget<X86Subtarget>().is64Bit())
+        return &X86::GR64_NOSPRegClass;
+      return &X86::GR32_NOSPRegClass;
+    }
+  }
 }
 
 const TargetRegisterClass *
diff --git a/lib/Target/X86/X86RegisterInfo.td b/lib/Target/X86/X86RegisterInfo.td
index bfffee7b72..d2197b2415 100644
--- a/lib/Target/X86/X86RegisterInfo.td
+++ b/lib/Target/X86/X86RegisterInfo.td
@@ -416,7 +416,9 @@ def GR32 : RegisterClass<"X86", [i32], 32,
   }];
 }
 
-
+// GR64 - 64-bit GPRs. This oddly includes RIP, which isn't accurate, since
+// RIP isn't really a register and it can't be used anywhere except in an
+// address, but it doesn't cause trouble.
 def GR64 : RegisterClass<"X86", [i64], 64,
                          [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
                           RBX, R14, R15, R12, R13, RBP, RSP, RIP]> {
@@ -561,7 +563,7 @@ def GR64_NOREX : RegisterClass<"X86", [i64], 64,
     GR64_NOREXClass::allocation_order_end(const MachineFunction &MF) const {
       const TargetMachine &TM = MF.getTarget();
       const TargetRegisterInfo *RI = TM.getRegisterInfo();
-      // Does the function dedicate RBP / EBP to being a frame ptr?
+      // Does the function dedicate RBP to being a frame ptr?
       if (RI->hasFP(MF))
         // If so, don't allocate RSP or RBP.
         return end() - 2;
@@ -572,6 +574,105 @@ def GR64_NOREX : RegisterClass<"X86", [i64], 64,
   }];
 }
 
+// GR32_NOSP - GR32 registers except ESP.
+def GR32_NOSP : RegisterClass<"X86", [i32], 32,
+                              [EAX, ECX, EDX, ESI, EDI, EBX, EBP,
+                               R8D, R9D, R10D, R11D, R14D, R15D, R12D, R13D]> {
+  let SubRegClassList = [GR8, GR8, GR16];
+  let MethodProtos = [{
+    iterator allocation_order_begin(const MachineFunction &MF) const;
+    iterator allocation_order_end(const MachineFunction &MF) const;
+  }];
+  let MethodBodies = [{
+    static const unsigned X86_GR32_NOSP_AO_64[] = {
+      X86::EAX, X86::ECX, X86::EDX, X86::ESI, X86::EDI,
+      X86::R8D, X86::R9D, X86::R10D, X86::R11D,
+      X86::EBX, X86::R14D, X86::R15D, X86::R12D, X86::R13D, X86::EBP
+    };
+
+    GR32_NOSPClass::iterator
+    GR32_NOSPClass::allocation_order_begin(const MachineFunction &MF) const {
+      const TargetMachine &TM = MF.getTarget();
+      const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
+      if (Subtarget.is64Bit())
+        return X86_GR32_NOSP_AO_64;
+      else
+        return begin();
+    }
+
+    GR32_NOSPClass::iterator
+    GR32_NOSPClass::allocation_order_end(const MachineFunction &MF) const {
+      const TargetMachine &TM = MF.getTarget();
+      const TargetRegisterInfo *RI = TM.getRegisterInfo();
+      const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
+      if (Subtarget.is64Bit()) {
+        // Does the function dedicate RBP to being a frame ptr?
+        if (RI->hasFP(MF))
+          // If so, don't allocate EBP.
+          return array_endof(X86_GR32_NOSP_AO_64) - 1;
+        else
+          // If not, any reg in this class is ok.
+          return array_endof(X86_GR32_NOSP_AO_64);
+      } else {
+        // Does the function dedicate EBP to being a frame ptr?
+        if (RI->hasFP(MF))
+          // If so, don't allocate EBP.
+          return begin() + 6;
+        else
+          // If not, any reg in this class is ok.
+          return begin() + 7;
+      }
+    }
+  }];
+}
+
+// GR64_NOSP - GR64 registers except RSP (and RIP).
+def GR64_NOSP : RegisterClass<"X86", [i64], 64,
+                              [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
+                               RBX, R14, R15, R12, R13, RBP]> {
+  let SubRegClassList = [GR8, GR8, GR16, GR32_NOSP];
+  let MethodProtos = [{
+    iterator allocation_order_end(const MachineFunction &MF) const;
+  }];
+  let MethodBodies = [{
+    GR64_NOSPClass::iterator
+    GR64_NOSPClass::allocation_order_end(const MachineFunction &MF) const {
+      const TargetMachine &TM = MF.getTarget();
+      const TargetRegisterInfo *RI = TM.getRegisterInfo();
+      const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
+      if (!Subtarget.is64Bit())
+        return begin();  // None of these are allocatable in 32-bit.
+      if (RI->hasFP(MF)) // Does the function dedicate RBP to being a frame ptr?
+        return end()-1;  // If so, don't allocate RBP.
+      else
+        return end();    // If not, any reg in this class is ok.
+    }
+  }];
+}
+
+// GR64_NOREX_NOSP - GR64_NOREX registers except RSP.
+def GR64_NOREX_NOSP : RegisterClass<"X86", [i64], 64,
+                                    [RAX, RCX, RDX, RSI, RDI, RBX, RBP]> {
+  let SubRegClassList = [GR8_NOREX, GR8_NOREX, GR16_NOREX, GR32_NOREX];
+  let MethodProtos = [{
+    iterator allocation_order_end(const MachineFunction &MF) const;
+  }];
+  let MethodBodies = [{
+    GR64_NOREX_NOSPClass::iterator
+    GR64_NOREX_NOSPClass::allocation_order_end(const MachineFunction &MF) const {
+      const TargetMachine &TM = MF.getTarget();
+      const TargetRegisterInfo *RI = TM.getRegisterInfo();
+      // Does the function dedicate RBP to being a frame ptr?
+      if (RI->hasFP(MF))
+        // If so, don't allocate RBP.
+        return end() - 1;
+      else
+        // If not, any reg in this class is ok.
+        return end();
+    }
+  }];
+}
+
 // A class to support the 'A' assembler constraint: EAX then EDX.
 def GRAD : RegisterClass<"X86", [i32], 32, [EAX, EDX]>;
 
diff --git a/test/CodeGen/X86/coalesce-esp.ll b/test/CodeGen/X86/coalesce-esp.ll
new file mode 100644
index 0000000000..8a1597870d
--- /dev/null
+++ b/test/CodeGen/X86/coalesce-esp.ll
@@ -0,0 +1,36 @@
+; RUN: llvm-as < %s | llc -strict-index-regclass | grep {movl %esp, %eax}
+; PR4572
+
+; Don't coalesce with %esp if it would end up putting %esp in
+; the index position of an address, because that can't be
+; encoded on x86. It would actually be slightly better to
+; swap the address operands though, since there's no scale.
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
+target triple = "i386-pc-mingw32"
+  %"struct.std::valarray<unsigned int>" = type { i32, i32* }
+
+define void @_ZSt17__gslice_to_indexjRKSt8valarrayIjES2_RS0_(i32 %__o, %"struct.std::valarray<unsigned int>"* nocapture %__l, %"struct.std::valarray<unsigned int>"* nocapture %__s, %"struct.std::valarray<unsigned int>"* nocapture %__i) nounwind {
+entry:
+  %0 = alloca i32, i32 undef, align 4             ; <i32*> [#uses=1]
+  br i1 undef, label %return, label %bb4
+
+bb4:            ; preds = %bb7.backedge, %entry
+  %indvar = phi i32 [ %indvar.next, %bb7.backedge ], [ 0, %entry ]              ; <i32> [#uses=2]
+  %scevgep24.sum = sub i32 undef, %indvar         ; <i32> [#uses=2]
+  %scevgep25 = getelementptr i32* %0, i32 %scevgep24.sum          ; <i32*> [#uses=1]
+  %scevgep27 = getelementptr i32* undef, i32 %scevgep24.sum               ; <i32*> [#uses=1]
+  %1 = load i32* %scevgep27, align 4              ; <i32> [#uses=0]
+  br i1 undef, label %bb7.backedge, label %bb5
+
+bb5:            ; preds = %bb4
+  store i32 0, i32* %scevgep25, align 4
+  br label %bb7.backedge
+
+bb7.backedge:           ; preds = %bb5, %bb4
+  %indvar.next = add i32 %indvar, 1               ; <i32> [#uses=1]
+  br label %bb4
+
+return:         ; preds = %entry
+  ret void
+}
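
A side note on the "swap the address operands" remark in the test: when the scale is 1, [base + index] and [index + base] name the same location, so an %esp that lands in the index slot could instead be moved to the base slot, which is encodable. The rough sketch below illustrates that rewrite; the MemAddr struct and the ESP_ID constant are made up for illustration and do not correspond to LLVM's actual machine-operand representation. With a scale larger than 1 no such rewrite exists, which is why the register classes introduced above simply keep the stack pointer out of the index operand.

// Hypothetical 5-field address mirroring the (base, scale, index, disp,
// segment) operand order used by the X86 memory operands in this patch.
struct MemAddr {
  unsigned Base;    // register id, 0 meaning "none"
  unsigned Scale;   // 1, 2, 4, or 8
  unsigned Index;   // register id, 0 meaning "none"
  int Disp;
  unsigned Segment;
};

static const unsigned ESP_ID = 4; // placeholder id for %esp, illustration only

// Returns true if the address is encodable after the attempted rewrite.
static bool rewriteIndexAwayFromESP(MemAddr &A) {
  if (A.Index != ESP_ID)
    return true;               // nothing to do
  if (A.Scale == 1) {          // base + index*1 == index + base*1
    unsigned OldBase = A.Base;
    A.Base = A.Index;
    A.Index = OldBase;
    return true;
  }
  return false;                // esp*2, esp*4, esp*8 have no encoding at all
}
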