author | Misha Brukman <brukman+llvm@gmail.com> | 2004-08-11 23:42:15 +0000
---|---|---
committer | Misha Brukman <brukman+llvm@gmail.com> | 2004-08-11 23:42:15 +0000
commit | ca9309f22e626db9019de371cf7b61a7e35d08e2 (patch) |
tree | e5d9582707f35a5589d32395bff60242a9d7994d /lib/Target/PowerPC/PPC64ISelSimple.cpp |
parent | 55eee3dc7aa74164e5e223420855e7740141da56 (diff) |
64-bit instruction selector and AIX-specific 64-bit asm printer
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@15669 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/Target/PowerPC/PPC64ISelSimple.cpp')
-rw-r--r-- | lib/Target/PowerPC/PPC64ISelSimple.cpp | 3199
1 files changed, 3199 insertions, 0 deletions
diff --git a/lib/Target/PowerPC/PPC64ISelSimple.cpp b/lib/Target/PowerPC/PPC64ISelSimple.cpp
new file mode 100644
index 0000000000..9695c7d698
--- /dev/null
+++ b/lib/Target/PowerPC/PPC64ISelSimple.cpp
@@ -0,0 +1,3199 @@
+//===-- PPC64ISelSimple.cpp - A simple instruction selector for PowerPC ---===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file was developed by the LLVM research group and is distributed under
+// the University of Illinois Open Source License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "isel"
+#include "PowerPC.h"
+#include "PowerPCInstrBuilder.h"
+#include "PowerPCInstrInfo.h"
+#include "PPC64TargetMachine.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Function.h"
+#include "llvm/Instructions.h"
+#include "llvm/Pass.h"
+#include "llvm/CodeGen/IntrinsicLowering.h"
+#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/SSARegMap.h"
+#include "llvm/Target/MRegisterInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Support/GetElementPtrTypeIterator.h"
+#include "llvm/Support/InstVisitor.h"
+#include "Support/Debug.h"
+#include "Support/Statistic.h"
+#include <vector>
+using namespace llvm;
+
+namespace {
+  Statistic<> GEPFolds("ppc64-codegen", "Number of GEPs folded");
+
+  /// TypeClass - Used by the PowerPC backend to group LLVM types by their basic
+  /// PPC Representation.
+  ///
+  enum TypeClass {
+    cByte, cShort, cInt, cFP32, cFP64, cLong
+  };
+}
+
+/// getClass - Turn a primitive type into a "class" number which is based on the
+/// size of the type, and whether or not it is floating point.
+///
+static inline TypeClass getClass(const Type *Ty) {
+  switch (Ty->getTypeID()) {
+  case Type::SByteTyID:
+  case Type::UByteTyID:   return cByte;      // Byte operands are class #0
+  case Type::ShortTyID:
+  case Type::UShortTyID:  return cShort;     // Short operands are class #1
+  case Type::IntTyID:
+  case Type::UIntTyID:
+  case Type::PointerTyID: return cInt;       // Ints and pointers are class #2
+
+  case Type::FloatTyID:   return cFP32;      // Single float is #3
+  case Type::DoubleTyID:  return cFP64;      // Double Point is #4
+
+  case Type::LongTyID:
+  case Type::ULongTyID:   return cLong;      // Longs are class #5
+  default:
+    assert(0 && "Invalid type to getClass!");
+    return cByte;  // not reached
+  }
+}
+
+// getClassB - Just like getClass, but treat boolean values as ints.
+static inline TypeClass getClassB(const Type *Ty) {
+  if (Ty == Type::BoolTy) return cInt;
+  return getClass(Ty);
+}
+
+namespace {
+  struct ISel : public FunctionPass, InstVisitor<ISel> {
+    PPC64TargetMachine &TM;
+    MachineFunction *F;                 // The function we are compiling into
+    MachineBasicBlock *BB;              // The current MBB we are compiling
+    int VarArgsFrameIndex;              // FrameIndex for start of varargs area
+
+    std::map<Value*, unsigned> RegMap;  // Mapping between Values and SSA Regs
+
+    // External functions used in the Module
+    Function *fmodfFn, *fmodFn, *__cmpdi2Fn, *__moddi3Fn, *__divdi3Fn,
+      *__umoddi3Fn, *__udivdi3Fn, *__fixsfdiFn, *__fixdfdiFn, *__fixunssfdiFn,
+      *__fixunsdfdiFn, *__floatdisfFn, *__floatdidfFn, *mallocFn, *freeFn;
+
+    // MBBMap - Mapping between LLVM BB -> Machine BB
+    std::map<const BasicBlock*, MachineBasicBlock*> MBBMap;
+
+    // AllocaMap - Mapping from fixed sized alloca instructions to the
+    // FrameIndex for the alloca.
+    std::map<AllocaInst*, unsigned> AllocaMap;
+
+    // A Reg to hold the base address used for global loads and stores, and a
+    // flag to set whether or not we need to emit it for this function.
+    unsigned GlobalBaseReg;
+    bool GlobalBaseInitialized;
+
+    ISel(TargetMachine &tm) : TM(reinterpret_cast<PPC64TargetMachine&>(tm)),
+      F(0), BB(0) {}
+
+    bool doInitialization(Module &M) {
+      // Add external functions that we may call
+      Type *i = Type::IntTy;
+      Type *d = Type::DoubleTy;
+      Type *f = Type::FloatTy;
+      Type *l = Type::LongTy;
+      Type *ul = Type::ULongTy;
+      Type *voidPtr = PointerType::get(Type::SByteTy);
+      // float fmodf(float, float);
+      fmodfFn = M.getOrInsertFunction("fmodf", f, f, f, 0);
+      // double fmod(double, double);
+      fmodFn = M.getOrInsertFunction("fmod", d, d, d, 0);
+      // int __cmpdi2(long, long);
+      __cmpdi2Fn = M.getOrInsertFunction("__cmpdi2", i, l, l, 0);
+      // long __moddi3(long, long);
+      __moddi3Fn = M.getOrInsertFunction("__moddi3", l, l, l, 0);
+      // long __divdi3(long, long);
+      __divdi3Fn = M.getOrInsertFunction("__divdi3", l, l, l, 0);
+      // unsigned long __umoddi3(unsigned long, unsigned long);
+      __umoddi3Fn = M.getOrInsertFunction("__umoddi3", ul, ul, ul, 0);
+      // unsigned long __udivdi3(unsigned long, unsigned long);
+      __udivdi3Fn = M.getOrInsertFunction("__udivdi3", ul, ul, ul, 0);
+      // long __fixsfdi(float)
+      __fixsfdiFn = M.getOrInsertFunction("__fixsfdi", l, f, 0);
+      // long __fixdfdi(double)
+      __fixdfdiFn = M.getOrInsertFunction("__fixdfdi", l, d, 0);
+      // unsigned long __fixunssfdi(float)
+      __fixunssfdiFn = M.getOrInsertFunction("__fixunssfdi", ul, f, 0);
+      // unsigned long __fixunsdfdi(double)
+      __fixunsdfdiFn = M.getOrInsertFunction("__fixunsdfdi", ul, d, 0);
+      // float __floatdisf(long)
+      __floatdisfFn = M.getOrInsertFunction("__floatdisf", f, l, 0);
+      // double __floatdidf(long)
+      __floatdidfFn = M.getOrInsertFunction("__floatdidf", d, l, 0);
+      // void* malloc(size_t)
+      mallocFn = M.getOrInsertFunction("malloc", voidPtr, Type::UIntTy, 0);
+      // void free(void*)
+      freeFn = M.getOrInsertFunction("free", Type::VoidTy, voidPtr, 0);
+      return false;
+    }
+
+    /// runOnFunction - Top level implementation of instruction selection for
+    /// the entire function.
+    ///
+    bool runOnFunction(Function &Fn) {
+      // First pass over the function, lower any unknown intrinsic functions
+      // with the IntrinsicLowering class.
+      LowerUnknownIntrinsicFunctionCalls(Fn);
+
+      F = &MachineFunction::construct(&Fn, TM);
+
+      // Create all of the machine basic blocks for the function...
+      for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
+        F->getBasicBlockList().push_back(MBBMap[I] = new MachineBasicBlock(I));
+
+      BB = &F->front();
+
+      // Make sure we re-emit a set of the global base reg if necessary
+      GlobalBaseInitialized = false;
+
+      // Copy incoming arguments off of the stack...
+      LoadArgumentsToVirtualRegs(Fn);
+
+      // Instruction select everything except PHI nodes
+      visit(Fn);
+
+      // Select the PHI nodes
+      SelectPHINodes();
+
+      RegMap.clear();
+      MBBMap.clear();
+      AllocaMap.clear();
+      F = 0;
+      // We always build a machine code representation for the function
+      return true;
+    }
+
+    virtual const char *getPassName() const {
+      return "PowerPC Simple Instruction Selection";
+    }
+
+    /// visitBasicBlock - This method is called when we are visiting a new basic
+    /// block.  This simply creates a new MachineBasicBlock to emit code into
+    /// and adds it to the current MachineFunction.  Subsequent visit* for
+    /// instructions will be invoked for all instructions in the basic block.
+    ///
+    void visitBasicBlock(BasicBlock &LLVM_BB) {
+      BB = MBBMap[&LLVM_BB];
+    }
+
+    /// LowerUnknownIntrinsicFunctionCalls - This performs a prepass over the
+    /// function, lowering any calls to unknown intrinsic functions into the
+    /// equivalent LLVM code.
+    ///
+    void LowerUnknownIntrinsicFunctionCalls(Function &F);
+
+    /// LoadArgumentsToVirtualRegs - Load all of the arguments to this function
+    /// from the stack into virtual registers.
+    ///
+    void LoadArgumentsToVirtualRegs(Function &F);
+
+    /// SelectPHINodes - Insert machine code to generate phis.  This is tricky
+    /// because we have to generate our sources into the source basic blocks,
+    /// not the current one.
+    ///
+    void SelectPHINodes();
+
+    // Visitation methods for various instructions.  These methods simply emit
+    // fixed PowerPC code for each instruction.
+
+    // Control flow operators
+    void visitReturnInst(ReturnInst &RI);
+    void visitBranchInst(BranchInst &BI);
+
+    struct ValueRecord {
+      Value *Val;
+      unsigned Reg;
+      const Type *Ty;
+      ValueRecord(unsigned R, const Type *T) : Val(0), Reg(R), Ty(T) {}
+      ValueRecord(Value *V) : Val(V), Reg(0), Ty(V->getType()) {}
+    };
+
+    // This struct is for recording the necessary operations to emit the GEP
+    struct CollapsedGepOp {
+      bool isMul;
+      Value *index;
+      ConstantSInt *size;
+      CollapsedGepOp(bool mul, Value *i, ConstantSInt *s) :
+        isMul(mul), index(i), size(s) {}
+    };
+
+    void doCall(const ValueRecord &Ret, MachineInstr *CallMI,
+                const std::vector<ValueRecord> &Args, bool isVarArg);
+    void visitCallInst(CallInst &I);
+    void visitIntrinsicCall(Intrinsic::ID ID, CallInst &I);
+
+    // Arithmetic operators
+    void visitSimpleBinary(BinaryOperator &B, unsigned OpcodeClass);
+    void visitAdd(BinaryOperator &B) { visitSimpleBinary(B, 0); }
+    void visitSub(BinaryOperator &B) { visitSimpleBinary(B, 1); }
+    void visitMul(BinaryOperator &B);
+
+    void visitDiv(BinaryOperator &B) { visitDivRem(B); }
+    void visitRem(BinaryOperator &B) { visitDivRem(B); }
+    void visitDivRem(BinaryOperator &B);
+
+    // Bitwise operators
+    void visitAnd(BinaryOperator &B) { visitSimpleBinary(B, 2); }
+    void visitOr (BinaryOperator &B) { visitSimpleBinary(B, 3); }
+    void visitXor(BinaryOperator &B) { visitSimpleBinary(B, 4); }
+
+    // Comparison operators...
+    void visitSetCondInst(SetCondInst &I);
+    unsigned EmitComparison(unsigned OpNum, Value *Op0, Value *Op1,
+                            MachineBasicBlock *MBB,
+                            MachineBasicBlock::iterator MBBI);
+    void visitSelectInst(SelectInst &SI);
+
+
+    // Memory Instructions
+    void visitLoadInst(LoadInst &I);
+    void visitStoreInst(StoreInst &I);
+    void visitGetElementPtrInst(GetElementPtrInst &I);
+    void visitAllocaInst(AllocaInst &I);
+    void visitMallocInst(MallocInst &I);
+    void visitFreeInst(FreeInst &I);
+
+    // Other operators
+    void visitShiftInst(ShiftInst &I);
+    void visitPHINode(PHINode &I) {}      // PHI nodes handled by second pass
+    void visitCastInst(CastInst &I);
+    void visitVANextInst(VANextInst &I);
+    void visitVAArgInst(VAArgInst &I);
+
+    void visitInstruction(Instruction &I) {
+      std::cerr << "Cannot instruction select: " << I;
+      abort();
+    }
+
+    /// promote32 - Make a value 32-bits wide, and put it somewhere.
+    ///
+    void promote32(unsigned targetReg, const ValueRecord &VR);
+
+    /// emitGEPOperation - Common code shared between visitGetElementPtrInst and
+    /// constant expression GEP support.
+    ///
+    void emitGEPOperation(MachineBasicBlock *BB, MachineBasicBlock::iterator IP,
+                          Value *Src, User::op_iterator IdxBegin,
+                          User::op_iterator IdxEnd, unsigned TargetReg,
+                          bool CollapseRemainder, ConstantSInt **Remainder,
+                          unsigned *PendingAddReg);
+
+    /// emitCastOperation - Common code shared between visitCastInst and
+    /// constant expression cast support.
+    ///
+    void emitCastOperation(MachineBasicBlock *BB,MachineBasicBlock::iterator IP,
+                           Value *Src, const Type *DestTy, unsigned TargetReg);
+
+    /// emitSimpleBinaryOperation - Common code shared between visitSimpleBinary
+    /// and constant expression support.
+    ///
+    void emitSimpleBinaryOperation(MachineBasicBlock *BB,
+                                   MachineBasicBlock::iterator IP,
+                                   Value *Op0, Value *Op1,
+                                   unsigned OperatorClass, unsigned TargetReg);
+
+    /// emitBinaryFPOperation - This method handles emission of floating point
+    /// Add (0), Sub (1), Mul (2), and Div (3) operations.
+    void emitBinaryFPOperation(MachineBasicBlock *BB,
+                               MachineBasicBlock::iterator IP,
+                               Value *Op0, Value *Op1,
+                               unsigned OperatorClass, unsigned TargetReg);
+
+    void emitMultiply(MachineBasicBlock *BB, MachineBasicBlock::iterator IP,
+                      Value *Op0, Value *Op1, unsigned TargetReg);
+
+    void doMultiply(MachineBasicBlock *MBB,
+                    MachineBasicBlock::iterator IP,
+                    unsigned DestReg, Value *Op0, Value *Op1);
+
+    /// doMultiplyConst - This method will multiply the value in Op0Reg by the
+    /// value of the ContantInt *CI
+    void doMultiplyConst(MachineBasicBlock *MBB,
+                         MachineBasicBlock::iterator IP,
+                         unsigned DestReg, Value *Op0, ConstantInt *CI);
+
+    void emitDivRemOperation(MachineBasicBlock *BB,
+                             MachineBasicBlock::iterator IP,
+                             Value *Op0, Value *Op1, bool isDiv,
+                             unsigned TargetReg);
+
+    /// emitSetCCOperation - Common code shared between visitSetCondInst and
+    /// constant expression support.
+    ///
+    void emitSetCCOperation(MachineBasicBlock *BB,
+                            MachineBasicBlock::iterator IP,
+                            Value *Op0, Value *Op1, unsigned Opcode,
+                            unsigned TargetReg);
+
+    /// emitShiftOperation - Common code shared between visitShiftInst and
+    /// constant expression support.
+    ///
+    void emitShiftOperation(MachineBasicBlock *MBB,
+                            MachineBasicBlock::iterator IP,
+                            Value *Op, Value *ShiftAmount, bool isLeftShift,
+                            const Type *ResultTy, unsigned DestReg);
+
+    /// emitSelectOperation - Common code shared between visitSelectInst and the
+    /// constant expression support.
+    ///
+    void emitSelectOperation(MachineBasicBlock *MBB,
+                             MachineBasicBlock::iterator IP,
+                             Value *Cond, Value *TrueVal, Value *FalseVal,
+                             unsigned DestReg);
+
+    /// copyGlobalBaseToRegister - Output the instructions required to put the
+    /// base address to use for accessing globals into a register.
+    ///
+    void ISel::copyGlobalBaseToRegister(MachineBasicBlock *MBB,
+                                        MachineBasicBlock::iterator IP,
+                                        unsigned R);
+
+    /// copyConstantToRegister - Output the instructions required to put the
+    /// specified constant into the specified register.
+    ///
+    void copyConstantToRegister(MachineBasicBlock *MBB,
+                                MachineBasicBlock::iterator MBBI,
+                                Constant *C, unsigned Reg);
+
+    void emitUCOM(MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI,
+                  unsigned LHS, unsigned RHS);
+
+    /// makeAnotherReg - This method returns the next register number we haven't
+    /// yet used.
+    ///
+    unsigned makeAnotherReg(const Type *Ty) {
+      assert(dynamic_cast<const PowerPCRegisterInfo*>(TM.getRegisterInfo()) &&
+             "Current target doesn't have PPC reg info??");
+      const PowerPCRegisterInfo *PPCRI =
+        static_cast<const PowerPCRegisterInfo*>(TM.getRegisterInfo());
+      // Add the mapping of regnumber => reg class to MachineFunction
+      const TargetRegisterClass *RC = PPCRI->getRegClassForType(Ty);
+      return F->getSSARegMap()->createVirtualRegister(RC);
+    }
+
+    /// getReg - This method turns an LLVM value into a register number.
+    ///
+    unsigned getReg(Value &V) { return getReg(&V); }  // Allow references
+    unsigned getReg(Value *V) {
+      // Just append to the end of the current bb.
+      MachineBasicBlock::iterator It = BB->end();
+      return getReg(V, BB, It);
+    }
+    unsigned getReg(Value *V, MachineBasicBlock *MBB,
+                    MachineBasicBlock::iterator IPt);
+
+    /// canUseAsImmediateForOpcode - This method returns whether a ConstantInt
+    /// is okay to use as an immediate argument to a certain binary operation
+    bool canUseAsImmediateForOpcode(ConstantInt *CI, unsigned Opcode);
+
+    /// getFixedSizedAllocaFI - Return the frame index for a fixed sized alloca
+    /// that is to be statically allocated with the initial stack frame
+    /// adjustment.
+    unsigned getFixedSizedAllocaFI(AllocaInst *AI);
+  };
+}
+
+/// dyn_castFixedAlloca - If the specified value is a fixed size alloca
+/// instruction in the entry block, return it.  Otherwise, return a null
+/// pointer.
+static AllocaInst *dyn_castFixedAlloca(Value *V) {
+  if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
+    BasicBlock *BB = AI->getParent();
+    if (isa<ConstantUInt>(AI->getArraySize()) && BB == &BB->getParent()->front())
+      return AI;
+  }
+  return 0;
+}
+
+/// getReg - This method turns an LLVM value into a register number.
+///
+unsigned ISel::getReg(Value *V, MachineBasicBlock *MBB,
+                      MachineBasicBlock::iterator IPt) {
+  if (Constant *C = dyn_cast<Constant>(V)) {
+    unsigned Reg = makeAnotherReg(V->getType());
+    copyConstantToRegister(MBB, IPt, C, Reg);
+    return Reg;
+  } else if (AllocaInst *AI = dyn_castFixedAlloca(V)) {
+    unsigned Reg = makeAnotherReg(V->getType());
+    unsigned FI = getFixedSizedAllocaFI(AI);
+    addFrameReference(BuildMI(*MBB, IPt, PPC::ADDI, 2, Reg), FI, 0, false);
+    return Reg;
+  }
+
+  unsigned &Reg = RegMap[V];
+  if (Reg == 0) {
+    Reg = makeAnotherReg(V->getType());
+    RegMap[V] = Reg;
+  }
+
+  return Reg;
+}
+
+/// canUseAsImmediateForOpcode - This method returns whether a ConstantInt
+/// is okay to use as an immediate argument to a certain binary operator.
+///
+/// Operator is one of: 0 for Add, 1 for Sub, 2 for And, 3 for Or, 4 for Xor.
+bool ISel::canUseAsImmediateForOpcode(ConstantInt *CI, unsigned Operator) {
+  ConstantSInt *Op1Cs;
+  ConstantUInt *Op1Cu;
+
+  // ADDI, Compare, and non-indexed Load take SIMM
+  bool cond1 = (Operator == 0)
+    && (Op1Cs = dyn_cast<ConstantSInt>(CI))
+    && (Op1Cs->getValue() <= 32767)
+    && (Op1Cs->getValue() >= -32768);
+
+  // SUBI takes -SIMM since it is a mnemonic for ADDI
+  bool cond2 = (Operator == 1)
+    && (Op1Cs = dyn_cast<ConstantSInt>(CI))
+    && (Op1Cs->getValue() <= 32768)
+    && (Op1Cs->getValue() >= -32767);
+
+  // ANDIo, ORI, and XORI take unsigned values
+  bool cond3 = (Operator >= 2)
+    && (Op1Cs = dyn_cast<ConstantSInt>(CI))
+    && (Op1Cs->getValue() >= 0)
+    && (Op1Cs->getValue() <= 32767);
+
+  // ADDI and SUBI take SIMMs, so we have to make sure the UInt would fit
+  bool cond4 = (Operator < 2)
+    && (Op1Cu = dyn_cast<ConstantUInt>(CI))
+    && (Op1Cu->getValue() <= 32767);
+
+  // ANDIo, ORI, and XORI take UIMMs, so they can be larger
+  bool cond5 = (Operator >= 2)
+    && (Op1Cu = dyn_cast<ConstantUInt>(CI))
+    && (Op1Cu->getValue() <= 65535);
+
+  if (cond1 || cond2 || cond3 || cond4 || cond5)
+    return true;
+
+  return false;
+}
+
+/// getFixedSizedAllocaFI - Return the frame index for a fixed sized alloca
+/// that is to be statically allocated with the initial stack frame
+/// adjustment.
+unsigned ISel::getFixedSizedAllocaFI(AllocaInst *AI) {
+  // Already computed this?
+  std::map<AllocaInst*, unsigned>::iterator I = AllocaMap.lower_bound(AI);
+  if (I != AllocaMap.end() && I->first == AI) return I->second;
+
+  const Type *Ty = AI->getAllocatedType();
+  ConstantUInt *CUI = cast<ConstantUInt>(AI->getArraySize());
+  unsigned TySize = TM.getTargetData().getTypeSize(Ty);
+  TySize *= CUI->getValue();   // Get total allocated size...
+  unsigned Alignment = TM.getTargetData().getTypeAlignment(Ty);
+
+  // Create a new stack object using the frame manager...
+  int FrameIdx = F->getFrameInfo()->CreateStackObject(TySize, Alignment);
+  AllocaMap.insert(I, std::make_pair(AI, FrameIdx));
+  return FrameIdx;
+}
+
+
+/// copyGlobalBaseToRegister - Output the instructions required to put the
+/// base address to use for accessing globals into a register.
+///
+void ISel::copyGlobalBaseToRegister(MachineBasicBlock *MBB,
+                                    MachineBasicBlock::iterator IP,
+                                    unsigned R) {
+  if (!GlobalBaseInitialized) {
+    // Insert the set of GlobalBaseReg into the first MBB of the function
+    MachineBasicBlock &FirstMBB = F->front();
+    MachineBasicBlock::iterator MBBI = FirstMBB.begin();
+    GlobalBaseReg = makeAnotherReg(Type::IntTy);
+    BuildMI(FirstMBB, MBBI, PPC::IMPLICIT_DEF, 0, PPC::LR);
+    BuildMI(FirstMBB, MBBI, PPC::MovePCtoLR, 0, GlobalBaseReg);
+    GlobalBaseInitialized = true;
+  }
+  // Emit our copy of GlobalBaseReg to the destination register in the
+  // current MBB
+  BuildMI(*MBB, IP, PPC::OR, 2, R).addReg(GlobalBaseReg)
+    .addReg(GlobalBaseReg);
+}
+
+/// copyConstantToRegister - Output the instructions required to put the
+/// specified constant into the specified register.
+///
+void ISel::copyConstantToRegister(MachineBasicBlock *MBB,
+                                  MachineBasicBlock::iterator IP,
+                                  Constant *C, unsigned R) {
+  if (C->getType()->isIntegral()) {
+    unsigned Class = getClassB(C->getType());
+
+    if (Class == cLong) {
+      if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(C)) {
+        uint64_t uval = CUI->getValue();
+        if (uval < (1LL << 32)) {
+          ConstantUInt *CU = ConstantUInt::get(Type::UIntTy, uval);
+          copyConstantToRegister(MBB, IP, CU, R);
+          return;
+        }
+      } else if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(C)) {
+        int64_t val = CUI->getValue();
+        if (val < (1LL << 31)) {
+          ConstantUInt *CU = ConstantUInt::get(Type::UIntTy, val);
+          copyConstantToRegister(MBB, IP, CU, R);
+          return;
+        }
+      } else {
+        std::cerr << "Unhandled long constant type!\n";
+        abort();
+      }
+      // Spill long to the constant pool and load it
+      MachineConstantPool *CP = F->getConstantPool();
+      unsigned CPI = CP->getConstantPoolIndex(C);
+      BuildMI(*MBB, IP, PPC::LD, 1, R)
+        .addReg(PPC::R2).addConstantPoolIndex(CPI);
+    }
+
+    assert(Class <= cInt && "Type not handled yet!");
+
+    // Handle bool
+    if (C->getType() == Type::BoolTy) {
+      BuildMI(*MBB, IP, PPC::LI, 1, R).addSImm(C == ConstantBool::True);
+      return;
+    }
+
+    // Handle int
+    if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(C)) {
+      unsigned uval = CUI->getValue();
+      if (uval < 32768) {
+        BuildMI(*MBB, IP, PPC::LI, 1, R).addSImm(uval);
+      } else {
+        unsigned Temp = makeAnotherReg(Type::IntTy);
+        BuildMI(*MBB, IP, PPC::LIS, 1, Temp).addSImm(uval >> 16);
+        BuildMI(*MBB, IP, PPC::ORI, 2, R).addReg(Temp).addImm(uval);
+      }
+      return;
+    } else if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(C)) {
+      int sval = CSI->getValue();
+      if (sval < 32768 && sval >= -32768) {
+        BuildMI(*MBB, IP, PPC::LI, 1, R).addSImm(sval);
+      } else {
+        unsigned Temp = makeAnotherReg(Type::IntTy);
+        BuildMI(*MBB, IP, PPC::LIS, 1, Temp).addSImm(sval >> 16);
+        BuildMI(*MBB, IP, PPC::ORI, 2, R).addReg(Temp).addImm(sval);
+      }
+      return;
+    }
+    std::cerr << "Unhandled integer constant!\n";
+    abort();
+  } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
+    // We need to spill the constant to memory...
+    MachineConstantPool *CP = F->getConstantPool();
+    unsigned CPI = CP->getConstantPoolIndex(CFP);
+    const Type *Ty = CFP->getType();
+    unsigned LoadOpcode = (Ty == Type::FloatTy) ? PPC::LFS : PPC::LFD;
+    BuildMI(*MBB,IP,LoadOpcode,2,R).addConstantPoolIndex(CPI).addReg(PPC::R2);
+  } else if (isa<ConstantPointerNull>(C)) {
+    // Copy zero (null pointer) to the register.
+    BuildMI(*MBB, IP, PPC::LI, 1, R).addSImm(0);
+  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
+    unsigned TmpReg = makeAnotherReg(GV->getType());
+    BuildMI(*MBB, IP, PPC::LD, 2, TmpReg).addGlobalAddress(GV).addReg(PPC::R2);
+    BuildMI(*MBB, IP, PPC::LWA, 2, R).addSImm(0).addReg(TmpReg);
+  } else {
+    std::cerr << "Offending constant: " << *C << "\n";
+    assert(0 && "Type not handled yet!");
+  }
+}
+
+/// LoadArgumentsToVirtualRegs - Load all of the arguments to this function from
+/// the stack into virtual registers.
+void ISel::LoadArgumentsToVirtualRegs(Function &Fn) {
+  unsigned ArgOffset = 24;
+  unsigned GPR_remaining = 8;
+  unsigned FPR_remaining = 13;
+  unsigned GPR_idx = 0, FPR_idx = 0;
+  static const unsigned GPR[] = {
+    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
+    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
+  };
+  static const unsigned FPR[] = {
+    PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
+    PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13
+  };
+
+  MachineFrameInfo *MFI = F->getFrameInfo();
+
+  for (Function::aiterator I = Fn.abegin(), E = Fn.aend(); I != E; ++I) {
+    bool ArgLive = !I->use_empty();
+    unsigned Reg = ArgLive ? getReg(*I) : 0;
+    int FI;          // Frame object index
+
+    switch (getClassB(I->getType())) {
+    case cByte:
+      if (ArgLive) {
+        FI = MFI->CreateFixedObject(4, ArgOffset);
+        if (GPR_remaining > 0) {
+          BuildMI(BB, PPC::IMPLICIT_DEF, 0, GPR[GPR_idx]);
+          BuildMI(BB, PPC::OR, 2, Reg).addReg(GPR[GPR_idx])
+            .addReg(GPR[GPR_idx]);
+        } else {
+          addFrameReference(BuildMI(BB, PPC::LBZ, 2, Reg), FI);
+        }
+      }
+      break;
+    case cShort:
+      if (ArgLive) {
+        FI = MFI->CreateFixedObject(4, ArgOffset);
+        if (GPR_remaining > 0) {
+          BuildMI(BB, PPC::IMPLICIT_DEF, 0, GPR[GPR_idx]);
+          BuildMI(BB, PPC::OR, 2, Reg).addReg(GPR[GPR_idx])
+            .addReg(GPR[GPR_idx]);
+        } else {
+          addFrameReference(BuildMI(BB, PPC::LHZ, 2, Reg), FI);
+        }
+      }
+      break;
+    case cInt:
+      if (ArgLive) {
+        FI = MFI->CreateFixedObject(4, ArgOffset);
+        if (GPR_remaining > 0) {
+          BuildMI(BB, PPC::IMPLICIT_DEF, 0, GPR[GPR_idx]);
+          BuildMI(BB, PPC::OR, 2, Reg).addReg(GPR[GPR_idx])
+            .addReg(GPR[GPR_idx]);
+        } else {
+          addFrameReference(BuildMI(BB, PPC::LWZ, 2, Reg), FI);
+        }
+      }
+      break;
+    case cLong:
+      if (ArgLive) {
+        FI = MFI->CreateFixedObject(8, ArgOffset);
+        if (GPR_remaining > 1) {
+          BuildMI(BB, PPC::IMPLICIT_DEF, 0, GPR[GPR_idx]);
+          BuildMI(BB, PPC::OR, 2, Reg).addReg(GPR[GPR_idx])
+            .addReg(GPR[GPR_idx]);
+        } else {
+          addFrameReference(BuildMI(BB, PPC::LD, 2, Reg), FI);
+        }
+      }
+      // longs require 4 additional bytes
+      ArgOffset += 4;
+      break;
+    case cFP32:
+      if (ArgLive) {
+        FI = MFI->CreateFixedObject(4, ArgOffset);
+
+        if (FPR_remaining > 0) {
+          BuildMI(BB, PPC::IMPLICIT_DEF, 0, FPR[FPR_idx]);
+          BuildMI(BB, PPC::FMR, 1, Reg).addReg(FPR[FPR_idx]);
+          FPR_remaining--;
+          FPR_idx++;
+        } else {
+          addFrameReference(BuildMI(BB, PPC::LFS, 2, Reg), FI);
+        }
+      }
+      break;
+    case cFP64:
+      if (ArgLive) {
+        FI = MFI->CreateFixedObject(8, ArgOffset);
+
+        if (FPR_remaining > 0) {
+          BuildMI(BB, PPC::IMPLICIT_DEF, 0, FPR[FPR_idx]);
+          BuildMI(BB, PPC::FMR, 1, Reg).addReg(FPR[FPR_idx]);
+          FPR_remaining--;
+          FPR_idx++;
+        } else {
+          addFrameReference(BuildMI(BB, PPC::LFD, 2, Reg), FI);
+        }
+      }
+
+      // doubles require 4 additional bytes and use 2 GPRs of param space
+      ArgOffset += 4;
+      if (GPR_remaining > 0) {
+        GPR_remaining--;
+        GPR_idx++;
+      }
+      break;
+    default:
+      assert(0 && "Unhandled argument type!");
+    }
+    ArgOffset += 4;  // Each argument takes at least 4 bytes on the stack...
+    if (GPR_remaining > 0) {
+      GPR_remaining--;    // uses up 2 GPRs
+      GPR_idx++;
+    }
+  }
+
+  // If the function takes variable number of arguments, add a frame offset for
+  // the start of the first vararg value... this is used to expand
+  // llvm.va_start.
+  if (Fn.getFunctionType()->isVarArg())
+    VarArgsFrameIndex = MFI->CreateFixedObject(4, ArgOffset);
+}
+
+
+/// SelectPHINodes - Insert machine code to generate phis.  This is tricky
+/// because we have to generate our sources into the source basic blocks, not
+/// the current one.
+/// +void ISel::SelectPHINodes() { + const TargetInstrInfo &TII = *TM.getInstrInfo(); + const Function &LF = *F->getFunction(); // The LLVM function... + for (Function::const_iterator I = LF.begin(), E = LF.end(); I != E; ++I) { + const BasicBlock *BB = I; + MachineBasicBlock &MBB = *MBBMap[I]; + + // Loop over all of the PHI nodes in the LLVM basic block... + MachineBasicBlock::iterator PHIInsertPoint = MBB.begin(); + for (BasicBlock::const_iterator I = BB->begin(); + PHINode *PN = const_cast<PHINode*>(dyn_cast<PHINode>(I)); ++I) { + + // Create a new machine instr PHI node, and insert it. + unsigned PHIReg = getReg(*PN); + MachineInstr *PhiMI = BuildMI(MBB, PHIInsertPoint, + PPC::PHI, PN->getNumOperands(), PHIReg); + + // PHIValues - Map of blocks to incoming virtual registers. We use this + // so that we only initialize one incoming value for a particular block, + // even if the block has multiple entries in the PHI node. + // + std::map<MachineBasicBlock*, unsigned> PHIValues; + + for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { + MachineBasicBlock *PredMBB = 0; + for (MachineBasicBlock::pred_iterator PI = MBB.pred_begin (), + PE = MBB.pred_end (); PI != PE; ++PI) + if (PN->getIncomingBlock(i) == (*PI)->getBasicBlock()) { + PredMBB = *PI; + break; + } + assert (PredMBB && "Couldn't find incoming machine-cfg edge for phi"); + + unsigned ValReg; + std::map<MachineBasicBlock*, unsigned>::iterator EntryIt = + PHIValues.lower_bound(PredMBB); + + if (EntryIt != PHIValues.end() && EntryIt->first == PredMBB) { + // We already inserted an initialization of the register for this + // predecessor. Recycle it. + ValReg = EntryIt->second; + } else { + // Get the incoming value into a virtual register. + // + Value *Val = PN->getIncomingValue(i); + + // If this is a constant or GlobalValue, we may have to insert code + // into the basic block to compute it into a virtual register. + if ((isa<Constant>(Val) && !isa<ConstantExpr>(Val)) || + isa<GlobalValue>(Val)) { + // Simple constants get emitted at the end of the basic block, + // before any terminator instructions. We "know" that the code to + // move a constant into a register will never clobber any flags. + ValReg = getReg(Val, PredMBB, PredMBB->getFirstTerminator()); + } else { + // Because we don't want to clobber any values which might be in + // physical registers with the computation of this constant (which + // might be arbitrarily complex if it is a constant expression), + // just insert the computation at the top of the basic block. + MachineBasicBlock::iterator PI = PredMBB->begin(); + + // Skip over any PHI nodes though! + while (PI != PredMBB->end() && PI->getOpcode() == PPC::PHI) + ++PI; + + ValReg = getReg(Val, PredMBB, PI); + } + + // Remember that we inserted a value for this PHI for this predecessor + PHIValues.insert(EntryIt, std::make_pair(PredMBB, ValReg)); + } + + PhiMI->addRegOperand(ValReg); + PhiMI->addMachineBasicBlockOperand(PredMBB); + } + + // Now that we emitted all of the incoming values for the PHI node, make + // sure to reposition the InsertPoint after the PHI that we just added. + // This is needed because we might have inserted a constant into this + // block, right after the PHI's which is before the old insert point! + PHIInsertPoint = PhiMI; + ++PHIInsertPoint; + } + } +} + + +// canFoldSetCCIntoBranchOrSelect - Return the setcc instruction if we can fold +// it into the conditional branch or select instruction which is the only user +// of the cc instruction. 
This is the case if the conditional branch is the +// only user of the setcc, and if the setcc is in the same basic block as the +// conditional branch. +// +static SetCondInst *canFoldSetCCIntoBranchOrSelect(Value *V) { + if (SetCondInst *SCI = dyn_cast<SetCondInst>(V)) + if (SCI->hasOneUse()) { + Instruction *User = cast<Instruction>(SCI->use_back()); + if ((isa<BranchInst>(User) || isa<SelectInst>(User)) && + SCI->getParent() == User->getParent()) + return SCI; + } + return 0; +} + + +// canFoldGEPIntoLoadOrStore - Return the GEP instruction if we can fold it into +// the load or store instruction that is the only user of the GEP. +// +static GetElementPtrInst *canFoldGEPIntoLoadOrStore(Value *V) { + if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(V)) + if (GEPI->hasOneUse()) { + Instruction *User = cast<Instruction>(GEPI->use_back()); + if (isa<StoreInst>(User) && + GEPI->getParent() == User->getParent() && + User->getOperand(0) != GEPI && + User->getOperand(1) == GEPI) { + ++GEPFolds; + return GEPI; + } + if (isa<LoadInst>(User) && + GEPI->getParent() == User->getParent() && + User->getOperand(0) == GEPI) { + ++GEPFolds; + return GEPI; + } + } + return 0; +} + + +// Return a fixed numbering for setcc instructions which does not depend on the +// order of the opcodes. +// +static unsigned getSetCCNumber(unsigned Opcode) { + switch (Opcode) { + default: assert(0 && "Unknown setcc instruction!"); + case Instruction::SetEQ: return 0; + case Instruction::SetNE: return 1; + case Instruction::SetLT: return 2; + case Instruction::SetGE: return 3; + case Instruction::SetGT: return 4; + case Instruction::SetLE: return 5; + } +} + +static unsigned getPPCOpcodeForSetCCNumber(unsigned Opcode) { + switch (Opcode) { + default: assert(0 && "Unknown setcc instruction!"); + case Instruction::SetEQ: return PPC::BEQ; + case Instruction::SetNE: return PPC::BNE; + case Instruction::SetLT: return PPC::BLT; + case Instruction::SetGE: return PPC::BGE; + case Instruction::SetGT: return PPC::BGT; + case Instruction::SetLE: return PPC::BLE; + } +} |
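The immediate-range checks in canUseAsImmediateForOpcode above come down to whether a constant fits PowerPC's 16-bit signed (SIMM) or unsigned (UIMM) immediate fields: D-form arithmetic such as ADDI, compares, and non-indexed loads take SIMM, while ANDIo/ORI/XORI take UIMM. The following is a minimal standalone sketch of those range tests, not part of the commit; the helper names fitsSImm16 and fitsUImm16 are illustrative and are not LLVM APIs.

// Illustrative sketch of the SIMM/UIMM fit tests used by the selector above.
// fitsSImm16/fitsUImm16 are hypothetical helpers, not part of this commit.
#include <cstdint>
#include <cstdio>

static bool fitsSImm16(int64_t V)  { return V >= -32768 && V <= 32767; }  // ADDI, compares, loads
static bool fitsUImm16(uint64_t V) { return V <= 65535; }                 // ANDIo, ORI, XORI

int main() {
  // 32767 fits the signed field; 40000 only fits the unsigned field, so it
  // works as an ORI immediate but not an ADDI immediate; 70000 fits neither
  // and has to be materialized with LIS (high halfword) + ORI (low halfword).
  std::printf("32767: simm=%d uimm=%d\n", fitsSImm16(32767), fitsUImm16(32767));
  std::printf("40000: simm=%d uimm=%d\n", fitsSImm16(40000), fitsUImm16(40000));
  std::printf("70000: simm=%d uimm=%d\n", fitsSImm16(70000), fitsUImm16(70000));
  return 0;
}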