diff options
author | Anton Yartsev <anton.yartsev@gmail.com> | 2011-03-27 09:32:40 +0000 |
---|---|---|
committer | Anton Yartsev <anton.yartsev@gmail.com> | 2011-03-27 09:32:40 +0000 |
commit | d06fea8580658470f92fb5d0d3d7ab5b475728dc (patch) | |
tree | 9ee9d5c46327ce3f931ba10af09aabf844a85bc6 | |
parent | bda0d6bda0f1a08a9fdf3ee4cf550b6b10d454ec (diff) |
Supported: AltiVec vector initialization with a single literal, per PIM section 2.5.1 — after initialization, all elements have the value specified by the literal.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@128375 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r-- | include/clang/Basic/DiagnosticSemaKinds.td | 2 | ||||
-rw-r--r-- | lib/CodeGen/CGExprConstant.cpp | 29 | ||||
-rw-r--r-- | lib/Sema/SemaCXXCast.cpp | 10 | ||||
-rw-r--r-- | lib/Sema/SemaExpr.cpp | 55 | ||||
-rw-r--r-- | test/CodeGen/altivec.c | 31 | ||||
-rw-r--r-- | test/CodeGen/builtins-ppc-altivec.c | 325 | ||||
-rw-r--r-- | test/Sema/altivec-init.c | 12 |
7 files changed, 278 insertions, 186 deletions
diff --git a/include/clang/Basic/DiagnosticSemaKinds.td b/include/clang/Basic/DiagnosticSemaKinds.td index 8ab9ff1e0b..55223ac134 100644 --- a/include/clang/Basic/DiagnosticSemaKinds.td +++ b/include/clang/Basic/DiagnosticSemaKinds.td @@ -2226,6 +2226,8 @@ def err_bitfield_width_exceeds_type_size : Error< "size of bit-field %0 (%1 bits) exceeds size of its type (%2 bits)">; def err_anon_bitfield_width_exceeds_type_size : Error< "size of anonymous bit-field (%0 bits) exceeds size of its type (%1 bits)">; +def err_incorrect_number_of_vector_initializers : Error< + "number of elements must be either one or match the size of the vector">; // Used by C++ which allows bit-fields that are wider than the type. def warn_bitfield_width_exceeds_type_size: Warning< diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp index ce25dc7ac5..3a2fb9bd9d 100644 --- a/lib/CodeGen/CGExprConstant.cpp +++ b/lib/CodeGen/CGExprConstant.cpp @@ -979,12 +979,29 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E, llvm::SmallVector<llvm::Constant *, 4> Inits; unsigned NumElts = Result.Val.getVectorLength(); - for (unsigned i = 0; i != NumElts; ++i) { - APValue &Elt = Result.Val.getVectorElt(i); - if (Elt.isInt()) - Inits.push_back(llvm::ConstantInt::get(VMContext, Elt.getInt())); - else - Inits.push_back(llvm::ConstantFP::get(VMContext, Elt.getFloat())); + if (Context.getLangOptions().AltiVec && + isa<CastExpr>(E) && + cast<CastExpr>(E)->getCastKind() == CK_VectorSplat) { + // AltiVec vector initialization with a single literal + APValue &Elt = Result.Val.getVectorElt(0); + + llvm::Constant* InitValue = Elt.isInt() + ? 
cast<llvm::Constant> + (llvm::ConstantInt::get(VMContext, Elt.getInt())) + : cast<llvm::Constant> + (llvm::ConstantFP::get(VMContext, Elt.getFloat())); + + for (unsigned i = 0; i != NumElts; ++i) + Inits.push_back(InitValue); + + } else { + for (unsigned i = 0; i != NumElts; ++i) { + APValue &Elt = Result.Val.getVectorElt(i); + if (Elt.isInt()) + Inits.push_back(llvm::ConstantInt::get(VMContext, Elt.getInt())); + else + Inits.push_back(llvm::ConstantFP::get(VMContext, Elt.getFloat())); + } } return llvm::ConstantVector::get(Inits); } diff --git a/lib/Sema/SemaCXXCast.cpp b/lib/Sema/SemaCXXCast.cpp index 557d27a943..d10042ad03 100644 --- a/lib/Sema/SemaCXXCast.cpp +++ b/lib/Sema/SemaCXXCast.cpp @@ -1517,6 +1517,16 @@ Sema::CXXCheckCStyleCast(SourceRange R, QualType CastTy, ExprValueKind &VK, return ret; } + // Case of AltiVec vector initialization with a single literal + if (CastTy->isVectorType() + && CastTy->getAs<VectorType>()->getVectorKind() == + VectorType::AltiVecVector + && (CastExpr->getType()->isIntegerType() + || CastExpr->getType()->isFloatingType())) { + Kind = CK_VectorSplat; + return false; + } + // Make sure we determine the value kind before we bail out for // dependent types. 
VK = Expr::getValueKindForType(CastTy); diff --git a/lib/Sema/SemaExpr.cpp b/lib/Sema/SemaExpr.cpp index dd81b38932..b5c67b8fe6 100644 --- a/lib/Sema/SemaExpr.cpp +++ b/lib/Sema/SemaExpr.cpp @@ -5105,8 +5105,16 @@ bool Sema::CheckCastTypes(SourceRange TyR, QualType castType, if (castType->isExtVectorType()) return CheckExtVectorCast(TyR, castType, castExpr, Kind); - if (castType->isVectorType()) - return CheckVectorCast(TyR, castType, castExpr->getType(), Kind); + if (castType->isVectorType()) { + if (castType->getAs<VectorType>()->getVectorKind() == + VectorType::AltiVecVector && + (castExpr->getType()->isIntegerType() || + castExpr->getType()->isFloatingType())) { + Kind = CK_VectorSplat; + return false; + } else + return CheckVectorCast(TyR, castType, castExpr->getType(), Kind); + } if (castExpr->getType()->isVectorType()) return CheckVectorCast(TyR, castExpr->getType(), castType, Kind); @@ -5254,9 +5262,9 @@ Sema::ActOnCastOfParenListExpr(Scope *S, SourceLocation LParenLoc, TypeSourceInfo *TInfo) { ParenListExpr *PE = cast<ParenListExpr>(Op); QualType Ty = TInfo->getType(); - bool isAltiVecLiteral = false; + bool isVectorLiteral = false; - // Check for an altivec literal, + // Check for an altivec or OpenCL literal, // i.e. all the elements are integer constants. if (getLangOptions().AltiVec && Ty->isVectorType()) { if (PE->getNumExprs() == 0) { @@ -5265,18 +5273,45 @@ Sema::ActOnCastOfParenListExpr(Scope *S, SourceLocation LParenLoc, } if (PE->getNumExprs() == 1) { if (!PE->getExpr(0)->getType()->isVectorType()) - isAltiVecLiteral = true; + isVectorLiteral = true; } else - isAltiVecLiteral = true; + isVectorLiteral = true; } - // If this is an altivec initializer, '(' type ')' '(' init, ..., init ')' + // If this is a vector initializer, '(' type ')' '(' init, ..., init ')' // then handle it as such. 
- if (isAltiVecLiteral) { + if (isVectorLiteral) { llvm::SmallVector<Expr *, 8> initExprs; - for (unsigned i = 0, e = PE->getNumExprs(); i != e; ++i) - initExprs.push_back(PE->getExpr(i)); + // '(...)' form of vector initialization in AltiVec: the number of + // initializers must be one or must match the size of the vector. + // If a single value is specified in the initializer then it will be + // replicated to all the components of the vector + if (Ty->getAs<VectorType>()->getVectorKind() == + VectorType::AltiVecVector) { + unsigned numElems = Ty->getAs<VectorType>()->getNumElements(); + // The number of initializers must be one or must match the size of the + // vector. If a single value is specified in the initializer then it will + // be replicated to all the components of the vector + if (PE->getNumExprs() == 1) { + QualType ElemTy = Ty->getAs<VectorType>()->getElementType(); + Expr *Literal = PE->getExpr(0); + ImpCastExprToType(Literal, ElemTy, + PrepareScalarCast(*this, Literal, ElemTy)); + return BuildCStyleCastExpr(LParenLoc, TInfo, RParenLoc, Literal); + } + else if (PE->getNumExprs() < numElems) { + Diag(PE->getExprLoc(), + diag::err_incorrect_number_of_vector_initializers); + return ExprError(); + } + else + for (unsigned i = 0, e = PE->getNumExprs(); i != e; ++i) + initExprs.push_back(PE->getExpr(i)); + } + else + for (unsigned i = 0, e = PE->getNumExprs(); i != e; ++i) + initExprs.push_back(PE->getExpr(i)); // FIXME: This means that pretty-printing the final AST will produce curly // braces instead of the original commas. 
diff --git a/test/CodeGen/altivec.c b/test/CodeGen/altivec.c index 9e38df5093..ec1efd9ba1 100644 --- a/test/CodeGen/altivec.c +++ b/test/CodeGen/altivec.c @@ -1,4 +1,31 @@ // RUN: %clang_cc1 -faltivec -triple powerpc-unknown-unknown -emit-llvm %s -o - | FileCheck %s -// CHECK: @test0 = global <4 x i32> <i32 1, i32 1, i32 1, i32 1> -vector int test0 = (vector int)(1); +// Check initialization + +vector int test0 = (vector int)(1); // CHECK: @test0 = global <4 x i32> <i32 1, i32 1, i32 1, i32 1> +vector float test1 = (vector float)(1.0); // CHECK: @test1 = global <4 x float> <float 1.000000e+{{0+}}, float 1.000000e+{{0+}}, float 1.000000e+{{0+}}, float 1.000000e+{{0+}}> + +void test2() +{ + vector int vi; + vector float vf; + vi = (vector int)(1); // CHECK: <i32 1, i32 1, i32 1, i32 1> + vf = (vector float)(1.0); // CHECK: <float 1.000000e+{{0+}}, float 1.000000e+{{0+}}, float 1.000000e+{{0+}}, float 1.000000e+{{0+}}> + vi = (vector int)(1, 2, 3, 4); // CHECK: <i32 1, i32 2, i32 3, i32 4> + vi = (vector int)(1, 2, 3, 4, 5); // CHECK: <i32 1, i32 2, i32 3, i32 4> + + vi = (vector int){1}; // CHECK: <i32 1, i32 0, i32 0, i32 0> + vi = (vector int){1, 2}; // CHECK: <i32 1, i32 2, i32 0, i32 0> + vi = (vector int){1, 2, 3, 4}; // CHECK: <i32 1, i32 2, i32 3, i32 4> + +} + +// Check pre/post increment/decrement +void test3() { + vector int vi; + vi++; // CHECK: add nsw <4 x i32> {{.*}} <i32 1, i32 1, i32 1, i32 1> + vector unsigned int vui; + --vui; // CHECK: add <4 x i32> {{.*}} <i32 -1, i32 -1, i32 -1, i32 -1> + vector float vf; + vf++; // CHECK: fadd <4 x float> {{.*}} <float 1.000000e+{{0+}}, float 1.000000e+{{0+}}, float 1.000000e+{{0+}}, float 1.000000e+{{0+}}> +} diff --git a/test/CodeGen/builtins-ppc-altivec.c b/test/CodeGen/builtins-ppc-altivec.c index e03e69c28c..586f1133a8 100644 --- a/test/CodeGen/builtins-ppc-altivec.c +++ b/test/CodeGen/builtins-ppc-altivec.c @@ -1789,23 +1789,23 @@ void test6() { /* vec_lvlx */ res_vsc = vec_lvlx(0, ¶m_sc); // CHECK: 
@llvm.ppc.altivec.lvx - // CHECK: store <16 x i8> zeroinitializer // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <16 x i8> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm res_vsc = vec_lvlx(0, &vsc); // CHECK: @llvm.ppc.altivec.lvx - // CHECK: store <16 x i8> zeroinitializer // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <16 x i8> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm res_vuc = vec_lvlx(0, ¶m_uc); // CHECK: @llvm.ppc.altivec.lvx - // CHECK: store <16 x i8> zeroinitializer // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <16 x i8> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm res_vuc = vec_lvlx(0, &vuc); // CHECK: @llvm.ppc.altivec.lvx - // CHECK: store <16 x i8> zeroinitializer // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <16 x i8> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm res_vbc = vec_lvlx(0, &vbc); // CHECK: @llvm.ppc.altivec.lvx @@ -1814,23 +1814,23 @@ void test6() { // CHECK: @llvm.ppc.altivec.vperm res_vs = vec_lvlx(0, ¶m_s); // CHECK: @llvm.ppc.altivec.lvx - // CHECK: store <8 x i16> zeroinitializer // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <8 x i16> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm res_vs = vec_lvlx(0, &vs); // CHECK: @llvm.ppc.altivec.lvx - // CHECK: store <8 x i16> zeroinitializer // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <8 x i16> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm res_vus = vec_lvlx(0, ¶m_us); // CHECK: @llvm.ppc.altivec.lvx - // CHECK: store <8 x i16> zeroinitializer // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <8 x i16> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm res_vus = vec_lvlx(0, &vus); // CHECK: @llvm.ppc.altivec.lvx - // CHECK: store <8 x i16> zeroinitializer // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <8 x i16> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm res_vbs = vec_lvlx(0, &vbs); // CHECK: @llvm.ppc.altivec.lvx @@ -1844,23 +1844,23 @@ void test6() { // CHECK: @llvm.ppc.altivec.vperm res_vi = vec_lvlx(0, ¶m_i); // CHECK: 
@llvm.ppc.altivec.lvx - // CHECK: store <4 x i32> zeroinitializer // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <4 x i32> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm res_vi = vec_lvlx(0, &vi); // CHECK: @llvm.ppc.altivec.lvx - // CHECK: store <4 x i32> zeroinitializer // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <4 x i32> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm res_vui = vec_lvlx(0, ¶m_ui); // CHECK: @llvm.ppc.altivec.lvx - // CHECK: store <4 x i32> zeroinitializer // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <4 x i32> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm res_vui = vec_lvlx(0, &vui); // CHECK: @llvm.ppc.altivec.lvx - // CHECK: store <4 x i32> zeroinitializer // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <4 x i32> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm res_vbi = vec_lvlx(0, &vbi); // CHECK: @llvm.ppc.altivec.lvx @@ -1869,29 +1869,29 @@ void test6() { // CHECK: @llvm.ppc.altivec.vperm res_vf = vec_lvlx(0, &vf); // CHECK: @llvm.ppc.altivec.lvx - // CHECK: store <4 x float> zeroinitializer // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <4 x float> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm /* vec_lvlxl */ res_vsc = vec_lvlxl(0, ¶m_sc); // CHECK: @llvm.ppc.altivec.lvxl - // CHECK: store <16 x i8> zeroinitializer // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <16 x i8> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm res_vsc = vec_lvlxl(0, &vsc); // CHECK: @llvm.ppc.altivec.lvxl - // CHECK: store <16 x i8> zeroinitializer // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <16 x i8> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm res_vuc = vec_lvlxl(0, ¶m_uc); // CHECK: @llvm.ppc.altivec.lvxl - // CHECK: store <16 x i8> zeroinitializer // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <16 x i8> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm res_vuc = vec_lvlxl(0, &vuc); // CHECK: @llvm.ppc.altivec.lvxl - // CHECK: store <16 x i8> zeroinitializer // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: 
store <16 x i8> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm res_vbc = vec_lvlxl(0, &vbc); // CHECK: @llvm.ppc.altivec.lvxl @@ -1900,23 +1900,23 @@ void test6() { // CHECK: @llvm.ppc.altivec.vperm res_vs = vec_lvlxl(0, ¶m_s); // CHECK: @llvm.ppc.altivec.lvxl - // CHECK: store <8 x i16> zeroinitializer // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <8 x i16> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm res_vs = vec_lvlxl(0, &vs); // CHECK: @llvm.ppc.altivec.lvxl - // CHECK: store <8 x i16> zeroinitializer // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <8 x i16> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm res_vus = vec_lvlxl(0, ¶m_us); // CHECK: @llvm.ppc.altivec.lvxl - // CHECK: store <8 x i16> zeroinitializer // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <8 x i16> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm res_vus = vec_lvlxl(0, &vus); // CHECK: @llvm.ppc.altivec.lvxl - // CHECK: store <8 x i16> zeroinitializer // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <8 x i16> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm res_vbs = vec_lvlxl(0, &vbs); // CHECK: @llvm.ppc.altivec.lvxl @@ -1930,23 +1930,23 @@ void test6() { // CHECK: @llvm.ppc.altivec.vperm res_vi = vec_lvlxl(0, ¶m_i); // CHECK: @llvm.ppc.altivec.lvxl - // CHECK: store <4 x i32> zeroinitializer // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <4 x i32> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm res_vi = vec_lvlxl(0, &vi); // CHECK: @llvm.ppc.altivec.lvxl - // CHECK: store <4 x i32> zeroinitializer // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <4 x i32> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm res_vui = vec_lvlxl(0, ¶m_ui); // CHECK: @llvm.ppc.altivec.lvxl - // CHECK: store <4 x i32> zeroinitializer // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <4 x i32> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm res_vui = vec_lvlxl(0, &vui); // CHECK: @llvm.ppc.altivec.lvxl - // CHECK: store <4 x i32> zeroinitializer // CHECK: @llvm.ppc.altivec.lvsl 
+ // CHECK: store <4 x i32> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm res_vbi = vec_lvlxl(0, &vbi); // CHECK: @llvm.ppc.altivec.lvxl @@ -1955,29 +1955,29 @@ void test6() { // CHECK: @llvm.ppc.altivec.vperm res_vf = vec_lvlxl(0, &vf); // CHECK: @llvm.ppc.altivec.lvxl - // CHECK: store <4 x float> zeroinitializer // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <4 x float> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm /* vec_lvrx */ - res_vsc = vec_lvrx(0, ¶m_sc); // CHECK: store <16 x i8> zeroinitializer - // CHECK: @llvm.ppc.altivec.lvx + res_vsc = vec_lvrx(0, ¶m_sc); // CHECK: @llvm.ppc.altivec.lvx // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <16 x i8> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm - res_vsc = vec_lvrx(0, &vsc); // CHECK: store <16 x i8> zeroinitializer - // CHECK: @llvm.ppc.altivec.lvx + res_vsc = vec_lvrx(0, &vsc); // CHECK: @llvm.ppc.altivec.lvx // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <16 x i8> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm - res_vuc = vec_lvrx(0, ¶m_uc); // CHECK: store <16 x i8> zeroinitializer - // CHECK: @llvm.ppc.altivec.lvx + res_vuc = vec_lvrx(0, ¶m_uc); // CHECK: @llvm.ppc.altivec.lvx // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <16 x i8> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm - res_vuc = vec_lvrx(0, &vuc); // CHECK: store <16 x i8> zeroinitializer - // CHECK: @llvm.ppc.altivec.lvx + res_vuc = vec_lvrx(0, &vuc); // CHECK: @llvm.ppc.altivec.lvx // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <16 x i8> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm res_vbc = vec_lvrx(0, &vbc); // CHECK: store <16 x i8> zeroinitializer @@ -1985,24 +1985,24 @@ void test6() { // CHECK: @llvm.ppc.altivec.lvsl // CHECK: @llvm.ppc.altivec.vperm - res_vs = vec_lvrx(0, ¶m_s); // CHECK: store <8 x i16> zeroinitializer - // CHECK: @llvm.ppc.altivec.lvx + res_vs = vec_lvrx(0, ¶m_s); // CHECK: @llvm.ppc.altivec.lvx // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <8 x i16> zeroinitializer 
// CHECK: @llvm.ppc.altivec.vperm - res_vs = vec_lvrx(0, &vs); // CHECK: store <8 x i16> zeroinitializer - // CHECK: @llvm.ppc.altivec.lvx + res_vs = vec_lvrx(0, &vs); // CHECK: @llvm.ppc.altivec.lvx // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <8 x i16> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm - res_vus = vec_lvrx(0, ¶m_us); // CHECK: store <8 x i16> zeroinitializer - // CHECK: @llvm.ppc.altivec.lvx + res_vus = vec_lvrx(0, ¶m_us); // CHECK: @llvm.ppc.altivec.lvx // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <8 x i16> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm - res_vus = vec_lvrx(0, &vus); // CHECK: store <8 x i16> zeroinitializer - // CHECK: @llvm.ppc.altivec.lvx + res_vus = vec_lvrx(0, &vus); // CHECK: @llvm.ppc.altivec.lvx // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <8 x i16> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm res_vbs = vec_lvrx(0, &vbs); // CHECK: store <8 x i16> zeroinitializer @@ -2015,24 +2015,24 @@ void test6() { // CHECK: @llvm.ppc.altivec.lvsl // CHECK: @llvm.ppc.altivec.vperm - res_vi = vec_lvrx(0, ¶m_i); // CHECK: store <4 x i32> zeroinitializer - // CHECK: @llvm.ppc.altivec.lvx + res_vi = vec_lvrx(0, ¶m_i); // CHECK: @llvm.ppc.altivec.lvx // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <4 x i32> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm - res_vi = vec_lvrx(0, &vi); // CHECK: store <4 x i32> zeroinitializer - // CHECK: @llvm.ppc.altivec.lvx + res_vi = vec_lvrx(0, &vi); // CHECK: @llvm.ppc.altivec.lvx // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <4 x i32> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm - res_vui = vec_lvrx(0, ¶m_ui); // CHECK: store <4 x i32> zeroinitializer - // CHECK: @llvm.ppc.altivec.lvx + res_vui = vec_lvrx(0, ¶m_ui); // CHECK: @llvm.ppc.altivec.lvx // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <4 x i32> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm - res_vui = vec_lvrx(0, &vui); // CHECK: store <4 x i32> zeroinitializer - // CHECK: @llvm.ppc.altivec.lvx + 
res_vui = vec_lvrx(0, &vui); // CHECK: @llvm.ppc.altivec.lvx // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <4 x i32> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm res_vbi = vec_lvrx(0, &vbi); // CHECK: store <4 x i32> zeroinitializer @@ -2040,30 +2040,30 @@ void test6() { // CHECK: @llvm.ppc.altivec.lvsl // CHECK: @llvm.ppc.altivec.vperm - res_vf = vec_lvrx(0, &vf); // CHECK: store <4 x float> zeroinitializer - // CHECK: @llvm.ppc.altivec.lvx + res_vf = vec_lvrx(0, &vf); // CHECK: @llvm.ppc.altivec.lvx // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <4 x float> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm /* vec_lvrxl */ - res_vsc = vec_lvrxl(0, ¶m_sc); // CHECK: store <16 x i8> zeroinitializer - // CHECK: @llvm.ppc.altivec.lvxl + res_vsc = vec_lvrxl(0, ¶m_sc); // CHECK: @llvm.ppc.altivec.lvxl // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <16 x i8> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm - res_vsc = vec_lvrxl(0, &vsc); // CHECK: store <16 x i8> zeroinitializer - // CHECK: @llvm.ppc.altivec.lvxl + res_vsc = vec_lvrxl(0, &vsc); // CHECK: @llvm.ppc.altivec.lvxl // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <16 x i8> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm - res_vuc = vec_lvrxl(0, ¶m_uc); // CHECK: store <16 x i8> zeroinitializer - // CHECK: @llvm.ppc.altivec.lvxl + res_vuc = vec_lvrxl(0, ¶m_uc); // CHECK: @llvm.ppc.altivec.lvxl // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <16 x i8> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm - res_vuc = vec_lvrxl(0, &vuc); // CHECK: store <16 x i8> zeroinitializer - // CHECK: @llvm.ppc.altivec.lvxl + res_vuc = vec_lvrxl(0, &vuc); // CHECK: @llvm.ppc.altivec.lvxl // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <16 x i8> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm res_vbc = vec_lvrxl(0, &vbc); // CHECK: store <16 x i8> zeroinitializer @@ -2071,24 +2071,24 @@ void test6() { // CHECK: @llvm.ppc.altivec.lvsl // CHECK: @llvm.ppc.altivec.vperm - res_vs = vec_lvrxl(0, ¶m_s); 
// CHECK: store <8 x i16> zeroinitializer - // CHECK: @llvm.ppc.altivec.lvxl + res_vs = vec_lvrxl(0, ¶m_s); // CHECK: @llvm.ppc.altivec.lvxl // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <8 x i16> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm - res_vs = vec_lvrxl(0, &vs); // CHECK: store <8 x i16> zeroinitializer - // CHECK: @llvm.ppc.altivec.lvxl + res_vs = vec_lvrxl(0, &vs); // CHECK: @llvm.ppc.altivec.lvxl // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <8 x i16> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm - res_vus = vec_lvrxl(0, ¶m_us); // CHECK: store <8 x i16> zeroinitializer - // CHECK: @llvm.ppc.altivec.lvxl + res_vus = vec_lvrxl(0, ¶m_us); // CHECK: @llvm.ppc.altivec.lvxl // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <8 x i16> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm - res_vus = vec_lvrxl(0, &vus); // CHECK: store <8 x i16> zeroinitializer - // CHECK: @llvm.ppc.altivec.lvxl + res_vus = vec_lvrxl(0, &vus); // CHECK: @llvm.ppc.altivec.lvxl // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <8 x i16> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm res_vbs = vec_lvrxl(0, &vbs); // CHECK: store <8 x i16> zeroinitializer @@ -2101,24 +2101,24 @@ void test6() { // CHECK: @llvm.ppc.altivec.lvsl // CHECK: @llvm.ppc.altivec.vperm - res_vi = vec_lvrxl(0, ¶m_i); // CHECK: store <4 x i32> zeroinitializer - // CHECK: @llvm.ppc.altivec.lvxl + res_vi = vec_lvrxl(0, ¶m_i); // CHECK: @llvm.ppc.altivec.lvxl // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <4 x i32> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm - res_vi = vec_lvrxl(0, &vi); // CHECK: store <4 x i32> zeroinitializer - // CHECK: @llvm.ppc.altivec.lvxl + res_vi = vec_lvrxl(0, &vi); // CHECK: @llvm.ppc.altivec.lvxl // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <4 x i32> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm - res_vui = vec_lvrxl(0, ¶m_ui); // CHECK: store <4 x i32> zeroinitializer - // CHECK: @llvm.ppc.altivec.lvxl + res_vui = vec_lvrxl(0, ¶m_ui); // CHECK: 
@llvm.ppc.altivec.lvxl // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <4 x i32> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm - res_vui = vec_lvrxl(0, &vui); // CHECK: store <4 x i32> zeroinitializer - // CHECK: @llvm.ppc.altivec.lvxl + res_vui = vec_lvrxl(0, &vui); // CHECK: @llvm.ppc.altivec.lvxl // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <4 x i32> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm res_vbi = vec_lvrxl(0, &vbi); // CHECK: store <4 x i32> zeroinitializer @@ -2126,39 +2126,39 @@ void test6() { // CHECK: @llvm.ppc.altivec.lvsl // CHECK: @llvm.ppc.altivec.vperm - res_vf = vec_lvrxl(0, &vf); // CHECK: store <4 x float> zeroinitializer - // CHECK: @llvm.ppc.altivec.lvxl + res_vf = vec_lvrxl(0, &vf); // CHECK: @llvm.ppc.altivec.lvxl // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <4 x float> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm /* vec_stvlx */ - vec_stvlx(vsc, 0, ¶m_sc); // CHECK: store <16 x i8> zeroinitializer - // CHECK: @llvm.ppc.altivec.lvx + vec_stvlx(vsc, 0, ¶m_sc); // CHECK: @llvm.ppc.altivec.lvx // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <16 x i8> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm // CHECK: @llvm.ppc.altivec.lvsr // CHECK: @llvm.ppc.altivec.vperm // CHECK: @llvm.ppc.altivec.stvx - vec_stvlx(vsc, 0, &vsc); // CHECK: store <16 x i8> zeroinitializer - // CHECK: @llvm.ppc.altivec.lvx + vec_stvlx(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.lvx // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <16 x i8> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm // CHECK: @llvm.ppc.altivec.lvsr // CHECK: @llvm.ppc.altivec.vperm // CHECK: @llvm.ppc.altivec.stvx - vec_stvlx(vuc, 0, ¶m_uc); // CHECK: store <16 x i8> zeroinitializer - // CHECK: @llvm.ppc.altivec.lvx + vec_stvlx(vuc, 0, ¶m_uc); // CHECK: @llvm.ppc.altivec.lvx // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <16 x i8> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm // CHECK: @llvm.ppc.altivec.lvsr // CHECK: @llvm.ppc.altivec.vperm // CHECK: 
@llvm.ppc.altivec.stvx - vec_stvlx(vuc, 0, &vuc); // CHECK: store <16 x i8> zeroinitializer - // CHECK: @llvm.ppc.altivec.lvx + vec_stvlx(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.lvx // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: store <16 x i8> zeroinitializer // CHECK: @llvm.ppc.altivec.vperm // CHECK: @llvm.ppc.altivec.lvsr // CHECK: @llvm.ppc.altivec.vperm @@ -2172,33 +2172,33 @@ void test6() { // CHECK: @llvm.ppc.altivec.vperm // CHECK: @llvm.ppc.altivec.stvx - vec_stvlx(vs, 0, ¶m_s); // CHECK: store <8 x i16> zeroinitializer - // CHECK: @llvm.ppc.altivec.lvx |