diff options
author | Nate Begeman <natebegeman@mac.com> | 2010-06-10 00:17:56 +0000 |
---|---|---|
committer | Nate Begeman <natebegeman@mac.com> | 2010-06-10 00:17:56 +0000 |
commit | d075c01c359b9cc120c3accc7166990f9f4ac423 (patch) | |
tree | 07d3885554dc6f769d80156bea8511920421668b /lib/CodeGen/CGBuiltin.cpp | |
parent | c3926645d70842eae22641df1bf69da457a0ff11 (diff) |
support _lane ops, and multiplies by scalar.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@105770 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/CodeGen/CGBuiltin.cpp')
-rw-r--r-- | lib/CodeGen/CGBuiltin.cpp | 28 |
1 file changed, 22 insertions, 6 deletions
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp index 3e8fec5b35..dbf5352028 100644 --- a/lib/CodeGen/CGBuiltin.cpp +++ b/lib/CodeGen/CGBuiltin.cpp @@ -894,13 +894,24 @@ const llvm::Type *GetNeonType(LLVMContext &Ctx, unsigned type, bool q) { return 0; } +Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) { + unsigned nElts = cast<llvm::VectorType>(V->getType())->getNumElements(); + SmallVector<Constant*, 16> Indices(nElts, C); + Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size()); + return Builder.CreateShuffleVector(V, V, SV, "lane"); +} + Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops, - const char *name) { + const char *name, bool splat) { unsigned j = 0; for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end(); ai != ae; ++ai, ++j) Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name); + if (splat) { + Ops[j-1] = EmitNeonSplat(Ops[j-1], cast<Constant>(Ops[j])); + Ops.resize(j); + } return Builder.CreateCall(F, Ops.begin(), Ops.end(), name); } @@ -917,9 +928,10 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID, a, b); } - llvm::SmallVector<Value*, 4> Ops; // Determine the type of this overloaded NEON intrinsic. 
assert(BuiltinID > ARM::BI__builtin_thread_pointer); + + llvm::SmallVector<Value*, 4> Ops; for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) Ops.push_back(EmitScalarExpr(E->getArg(i))); @@ -931,11 +943,16 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID, unsigned type = Result.getZExtValue(); bool usgn = type & 0x08; bool quad = type & 0x10; + bool splat = false; const llvm::Type *Ty = GetNeonType(VMContext, type & 0x7, quad); if (!Ty) return 0; + // FIXME: multiplies by scalar do not currently match their patterns because + // they are implemented via mul(splat(scalar_to_vector)) rather than + // mul(dup(scalar)) + unsigned Int; switch (BuiltinID) { default: return 0; @@ -1087,12 +1104,11 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID, case ARM::BI__builtin_neon_vminq_v: Int = usgn ? Intrinsic::arm_neon_vminu : Intrinsic::arm_neon_vmins; return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmin"); - // FIXME: vmlal_lane -> splat, drop imm + case ARM::BI__builtin_neon_vmlal_lane_v: + splat = true; case ARM::BI__builtin_neon_vmlal_v: Int = usgn ? Intrinsic::arm_neon_vmlalu : Intrinsic::arm_neon_vmlals; - return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmlal"); - // FIXME: vmlal_n, vmla_n, vmlsl_n, vmls_n, vmull_n, vmul_n, - // vqdmlal_n, vqdmlsl_n, vqdmulh_n, vqdmull_n, vqrdmulh_n -> splat,-_n + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmlal", splat); case ARM::BI__builtin_neon_vmovl_v: Int = usgn ? Intrinsic::arm_neon_vmovlu : Intrinsic::arm_neon_vmovls; return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmovl"); |