| author | Eric Christopher <echristo@apple.com> | 2010-04-15 01:43:08 +0000 |
|---|---|---|
| committer | Eric Christopher <echristo@apple.com> | 2010-04-15 01:43:08 +0000 |
| commit | e57aa9e367af227214140e1a174cc561aabaa0b9 | |
| tree | 60b0408a4193c80a2fd5906dd44a2a66ef6268af /lib/CodeGen/CGBuiltin.cpp | |
| parent | b13c170a280673f4cf4d7d11ec818392407254d4 | |
Rewrite handling of 64-bit palignr intrinsics to be vector shuffles.
Stop multiplying constant by 8 accordingly in the header and change
intrinsic definition for what types we expect.
Add to existing palignr test to check that we're emitting the correct things.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@101332 91177308-0d34-0410-b5e6-96231b3b80d8
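The message above is terse about what "be vector shuffles" means in practice, so here is a minimal standalone reference model (my own sketch; the name `ref_palignr64` and the scalar byte loop are illustrative, not Clang code): a 64-bit palignr takes an 8-byte window, starting at the immediate's byte offset, out of the 16-byte concatenation {second operand, first operand}. That is why the immediate can now be passed straight through as a byte count (no more `*8` in the header) and why shifts of at most 8 bytes map directly onto a shufflevector with indices [shift, shift+8).

```cpp
// Minimal standalone sketch (not Clang code) of the byte-level semantics of a
// 64-bit palignr; the helper name and types are made up for illustration.
#include <array>
#include <cstdint>
#include <cstdio>

// Reference model: concatenate {b, a} into a 16-byte buffer and take 8 bytes
// starting at byte offset `shift` -- exactly the indices [shift, shift+8) that
// the new code-gen path feeds to shufflevector when shift <= 8.
std::array<uint8_t, 8> ref_palignr64(std::array<uint8_t, 8> a,
                                     std::array<uint8_t, 8> b,
                                     unsigned shift) {
  std::array<uint8_t, 16> concat{};
  for (unsigned i = 0; i != 8; ++i) {
    concat[i] = b[i];      // low half: second operand
    concat[i + 8] = a[i];  // high half: first operand
  }
  std::array<uint8_t, 8> result{};
  for (unsigned i = 0; i != 8; ++i) {
    unsigned idx = shift + i;
    result[i] = idx < 16 ? concat[idx] : 0;  // bytes past the pair read as zero
  }
  return result;
}

int main() {
  std::array<uint8_t, 8> a{10, 11, 12, 13, 14, 15, 16, 17};
  std::array<uint8_t, 8> b{0, 1, 2, 3, 4, 5, 6, 7};
  auto r = ref_palignr64(a, b, 3);            // shift counted in bytes, not bits
  for (uint8_t v : r) std::printf("%d ", v);  // prints: 3 4 5 6 7 10 11 12
  std::printf("\n");
}
```

For shifts of 9 to 15 bytes the window only overlaps the first operand, which is what the psrl.q path in the diff below implements; at 16 bytes or more the result is all zeros.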
Diffstat (limited to 'lib/CodeGen/CGBuiltin.cpp')
-rw-r--r-- | lib/CodeGen/CGBuiltin.cpp | 34 |
1 file changed, 32 insertions, 2 deletions
```diff
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 85ab1dceba..95c41db86e 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -982,8 +982,38 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
     return Builder.CreateStore(Ops[1], Ops[0]);
   }
   case X86::BI__builtin_ia32_palignr: {
-    Function *F = CGM.getIntrinsic(Intrinsic::x86_ssse3_palign_r);
-    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size());
+    unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
+
+    // If palignr is shifting the pair of input vectors less than 9 bytes,
+    // emit a shuffle instruction.
+    if (shiftVal <= 8) {
+      const llvm::Type *IntTy = llvm::Type::getInt32Ty(VMContext);
+
+      llvm::SmallVector<llvm::Constant*, 8> Indices;
+      for (unsigned i = 0; i != 8; ++i)
+        Indices.push_back(llvm::ConstantInt::get(IntTy, shiftVal + i));
+
+      Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+      return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
+    }
+
+    // If palignr is shifting the pair of input vectors more than 8 but less
+    // than 16 bytes, emit a logical right shift of the destination.
+    if (shiftVal < 16) {
+      // MMX has these as 1 x i64 vectors for some odd optimization reasons.
+      const llvm::Type *EltTy = llvm::Type::getInt64Ty(VMContext);
+      const llvm::Type *VecTy = llvm::VectorType::get(EltTy, 1);
+
+      Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
+      Ops[1] = llvm::ConstantInt::get(VecTy, (shiftVal-8) * 8);
+
+      // Call the MMX psrl.q intrinsic with the shift amount in bits.
+      llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_mmx_psrl_q);
+      return Builder.CreateCall(F, &Ops[0], &Ops[0] + 2, "palignr");
+    }
+
+    // If palignr is shifting the pair of vectors 16 bytes or more, emit zero.
+    return llvm::Constant::getNullValue(ConvertType(E->getType()));
   }
   case X86::BI__builtin_ia32_palignr128: {
     unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
```
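The commit message mentions extending the existing palignr test; the snippet below is not that test file, just an illustrative sketch (assuming SSSE3 is enabled, e.g. with -mssse3, and using the `_mm_alignr_pi8` wrapper from tmmintrin.h) of three immediates that would exercise each path of the new lowering above.

```cpp
// Illustrative sketch only, not the test added by this commit. Assumes SSSE3
// (compile with e.g. -mssse3); _mm_alignr_pi8 wraps __builtin_ia32_palignr,
// whose immediate is now a byte count rather than a bit count.
#include <tmmintrin.h>

// shift <= 8: expected to lower to a shufflevector of the two 8-byte vectors.
__m64 align3(__m64 a, __m64 b)  { return _mm_alignr_pi8(a, b, 3); }

// 8 < shift < 16: expected to lower to a bitcast plus llvm.x86.mmx.psrl.q,
// shifting the destination right by (shift - 8) * 8 bits.
__m64 align12(__m64 a, __m64 b) { return _mm_alignr_pi8(a, b, 12); }

// shift >= 16: expected to fold to a zero vector.
__m64 align17(__m64 a, __m64 b) { return _mm_alignr_pi8(a, b, 17); }
```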