Index: ../include/llvm/Analysis/TargetTransformInfo.h
===================================================================
--- ../include/llvm/Analysis/TargetTransformInfo.h
+++ ../include/llvm/Analysis/TargetTransformInfo.h
@@ -317,6 +317,13 @@
   bool isLegalMaskedStore(Type *DataType, int Consecutive) const;
   bool isLegalMaskedLoad(Type *DataType, int Consecutive) const;
 
+  /// \brief Return true if the target supports masked gather/scatter
+  /// instructions. AVX2 provides hardware gather but no scatter; the
+  /// AVX-512 architecture has full support for masked gather and
+  /// scatter operations.
+  bool isLegalMaskedScatter(Type *DataType) const;
+  bool isLegalMaskedGather(Type *DataType) const;
+
   /// \brief Return the cost of the scaling factor used in the addressing
   /// mode represented by AM for this target, for a load/store
   /// of the specified type.
@@ -570,6 +577,8 @@
                                      unsigned AddrSpace) = 0;
   virtual bool isLegalMaskedStore(Type *DataType, int Consecutive) = 0;
   virtual bool isLegalMaskedLoad(Type *DataType, int Consecutive) = 0;
+  virtual bool isLegalMaskedScatter(Type *DataType) = 0;
+  virtual bool isLegalMaskedGather(Type *DataType) = 0;
   virtual int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                    int64_t BaseOffset, bool HasBaseReg,
                                    int64_t Scale, unsigned AddrSpace) = 0;
@@ -699,6 +708,12 @@
   bool isLegalMaskedLoad(Type *DataType, int Consecutive) override {
     return Impl.isLegalMaskedLoad(DataType, Consecutive);
   }
+  bool isLegalMaskedScatter(Type *DataType) override {
+    return Impl.isLegalMaskedScatter(DataType);
+  }
+  bool isLegalMaskedGather(Type *DataType) override {
+    return Impl.isLegalMaskedGather(DataType);
+  }
   int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                            bool HasBaseReg, int64_t Scale,
                            unsigned AddrSpace) override {

Index: ../include/llvm/Analysis/TargetTransformInfoImpl.h
===================================================================
--- ../include/llvm/Analysis/TargetTransformInfoImpl.h
+++ ../include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -213,6 +213,10 @@
   bool isLegalMaskedLoad(Type *DataType, int Consecutive) { return false; }
 
+  bool isLegalMaskedScatter(Type *DataType) { return false; }
+
+  bool isLegalMaskedGather(Type *DataType) { return false; }
+
   int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                            bool HasBaseReg, int64_t Scale,
                            unsigned AddrSpace) {
     // Guess that all legal addressing mode are free.
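For reference, the masked gather/scatter intrinsics these hooks gate have the following form; the v16i32 instantiation is illustrative, and other vector types are mangled analogously:

  declare <16 x i32> @llvm.masked.gather.v16i32(<16 x i32*>, i32, <16 x i1>, <16 x i32>)
  declare void @llvm.masked.scatter.v16i32(<16 x i32>, <16 x i32*>, i32, <16 x i1>)

Gather takes the pointer vector, alignment, mask, and a pass-through value substituted into masked-off lanes; scatter takes the data vector, pointer vector, alignment, and mask.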
Index: ../lib/Analysis/TargetTransformInfo.cpp
===================================================================
--- ../lib/Analysis/TargetTransformInfo.cpp
+++ ../lib/Analysis/TargetTransformInfo.cpp
@@ -123,6 +123,14 @@
   return TTIImpl->isLegalMaskedLoad(DataType, Consecutive);
 }
 
+bool TargetTransformInfo::isLegalMaskedGather(Type *DataType) const {
+  return TTIImpl->isLegalMaskedGather(DataType);
+}
+
+bool TargetTransformInfo::isLegalMaskedScatter(Type *DataType) const {
+  return TTIImpl->isLegalMaskedScatter(DataType);
+}
+
 int TargetTransformInfo::getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                               int64_t BaseOffset,
                                               bool HasBaseReg,
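The CodeGenPrepare changes below thread the intrinsic's alignment operand through to the scalarized loads and stores, and add a fast path for a constant all-true mask. A minimal sketch of that fast path for the existing masked load scalarization (types illustrative): a call whose mask is all ones, such as

  %res = call <4 x i32> @llvm.masked.load.v4i32(<4 x i32>* %p, i32 16,
             <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)

collapses to a single aligned vector load with no control flow:

  %res = load <4 x i32>, <4 x i32>* %p, align 16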
Index: ../lib/CodeGen/CodeGenPrepare.cpp
===================================================================
--- ../lib/CodeGen/CodeGenPrepare.cpp
+++ ../lib/CodeGen/CodeGenPrepare.cpp
@@ -1120,22 +1120,39 @@
 //
 static void ScalarizeMaskedLoad(CallInst *CI) {
   Value *Ptr = CI->getArgOperand(0);
-  Value *Src0 = CI->getArgOperand(3);
+  Value *Alignment = CI->getArgOperand(1);
   Value *Mask = CI->getArgOperand(2);
-  VectorType *VecType = dyn_cast<VectorType>(CI->getType());
-  Type *EltTy = VecType->getElementType();
+  Value *Src0 = CI->getArgOperand(3);
+  unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
+  VectorType *VecType = dyn_cast<VectorType>(CI->getType());
 
   assert(VecType && "Unexpected return type of masked load intrinsic");
 
+  Type *EltTy = VecType->getElementType();
+
   IRBuilder<> Builder(CI->getContext());
   Instruction *InsertPt = CI;
   BasicBlock *IfBlock = CI->getParent();
   BasicBlock *CondBlock = nullptr;
   BasicBlock *PrevIfBlock = CI->getParent();
+
   Builder.SetInsertPoint(InsertPt);
-
   Builder.SetCurrentDebugLocation(CI->getDebugLoc());
 
+  // Short-circuit if the mask is all-true.
+  bool IsAllOnesMask = isa<Constant>(Mask) &&
+      cast<Constant>(Mask)->isAllOnesValue();
+
+  if (IsAllOnesMask) {
+    Value *NewI = Builder.CreateAlignedLoad(Ptr, AlignVal);
+    CI->replaceAllUsesWith(NewI);
+    CI->eraseFromParent();
+    return;
+  }
+
+  // Adjust alignment for the scalar instruction (bytes, not bits).
+  AlignVal = std::max(AlignVal, VecType->getScalarSizeInBits() / 8);
   // Bitcast %addr from i8* to EltTy*
   Type *NewPtrType =
       EltTy->getPointerTo(cast<PointerType>(Ptr->getType())->getAddressSpace());
@@ -1181,7 +1198,7 @@
 
     Value *Gep =
         Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
-    LoadInst *Load = Builder.CreateLoad(Gep, false);
+    LoadInst *Load = Builder.CreateAlignedLoad(Gep, AlignVal);
     VResult = Builder.CreateInsertElement(VResult, Load, Builder.getInt32(Idx));
 
     // Create "else" block, fill it in the next iteration
@@ -1232,21 +1249,35 @@
 //   br label %else2
 //   . . .
 static void ScalarizeMaskedStore(CallInst *CI) {
-  Value *Ptr = CI->getArgOperand(1);
   Value *Src = CI->getArgOperand(0);
+  Value *Ptr = CI->getArgOperand(1);
+  Value *Alignment = CI->getArgOperand(2);
   Value *Mask = CI->getArgOperand(3);
+  unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
   VectorType *VecType = dyn_cast<VectorType>(Src->getType());
-  Type *EltTy = VecType->getElementType();
-
   assert(VecType && "Unexpected data type in masked store intrinsic");
 
+  Type *EltTy = VecType->getElementType();
+
   IRBuilder<> Builder(CI->getContext());
   Instruction *InsertPt = CI;
   BasicBlock *IfBlock = CI->getParent();
   Builder.SetInsertPoint(InsertPt);
   Builder.SetCurrentDebugLocation(CI->getDebugLoc());
 
+  // Short-circuit if the mask is all-true.
+  bool IsAllOnesMask = isa<Constant>(Mask) &&
+      cast<Constant>(Mask)->isAllOnesValue();
+
+  if (IsAllOnesMask) {
+    Builder.CreateAlignedStore(Src, Ptr, AlignVal);
+    CI->eraseFromParent();
+    return;
+  }
+
+  // Adjust alignment for the scalar instruction (bytes, not bits).
+  AlignVal = std::max(AlignVal, VecType->getScalarSizeInBits() / 8);
   // Bitcast %addr from i8* to EltTy*
   Type *NewPtrType =
       EltTy->getPointerTo(cast<PointerType>(Ptr->getType())->getAddressSpace());
@@ -1259,7 +1290,7 @@
   //
   //  %mask_1 = extractelement <16 x i1> %mask, i32 Idx
   //  %to_store = icmp eq i1 %mask_1, true
-  //  br i1 %to_load, label %cond.store, label %else
+  //  br i1 %to_store, label %cond.store, label %else
   //
   Value *Predicate = Builder.CreateExtractElement(Mask, Builder.getInt32(Idx));
   Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate,
@@ -1278,7 +1309,7 @@
     Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx));
     Value *Gep =
         Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
-    Builder.CreateStore(OneElt, Gep);
+    Builder.CreateAlignedStore(OneElt, Gep, AlignVal);
 
     // Create "else" block, fill it in the next iteration
     BasicBlock *NewIfBlock =
@@ -1292,6 +1323,247 @@
   CI->eraseFromParent();
 }
 
+// Translate a masked gather intrinsic like
+//   <16 x i32> @llvm.masked.gather.v16i32(<16 x i32*> %Ptrs, i32 4,
+//                                         <16 x i1> %Mask, <16 x i32> %Src)
+// into a chain of basic blocks that loads each element individually when
+// the corresponding mask bit is set:
+//
+//  %Ptrs = getelementptr i32, i32* %base, <16 x i64> %ind
+//  %Mask0 = extractelement <16 x i1> %Mask, i32 0
+//  %ToLoad0 = icmp eq i1 %Mask0, true
+//  br i1 %ToLoad0, label %cond.load, label %else
+//
+// cond.load:
+//  %Ptr0 = extractelement <16 x i32*> %Ptrs, i32 0
+//  %Load0 = load i32, i32* %Ptr0, align 4
+//  %Res0 = insertelement <16 x i32> undef, i32 %Load0, i32 0
+//  br label %else
+//
+// else:
+//  %res.phi.else = phi <16 x i32> [%Res0, %cond.load], [undef, %0]
+//  %Mask1 = extractelement <16 x i1> %Mask, i32 1
+//  %ToLoad1 = icmp eq i1 %Mask1, true
+//  br i1 %ToLoad1, label %cond.load1, label %else2
+//
+// cond.load1:
+//  %Ptr1 = extractelement <16 x i32*> %Ptrs, i32 1
+//  %Load1 = load i32, i32* %Ptr1, align 4
+//  %Res1 = insertelement <16 x i32> %res.phi.else, i32 %Load1, i32 1
+//  br label %else2
+//  . . .
+//  %Result = select <16 x i1> %Mask, <16 x i32> %res.phi.select, <16 x i32> %Src
+//  ret <16 x i32> %Result
+static void ScalarizeMaskedGather(CallInst *CI) {
+  Value *Ptrs = CI->getArgOperand(0);
+  Value *Alignment = CI->getArgOperand(1);
+  Value *Mask = CI->getArgOperand(2);
+  Value *Src0 = CI->getArgOperand(3);
+
+  VectorType *VecType = dyn_cast<VectorType>(CI->getType());
+
+  assert(VecType && "Unexpected return type of masked gather intrinsic");
+
+  IRBuilder<> Builder(CI->getContext());
+  Instruction *InsertPt = CI;
+  BasicBlock *IfBlock = CI->getParent();
+  BasicBlock *CondBlock = nullptr;
+  BasicBlock *PrevIfBlock = CI->getParent();
+  Builder.SetInsertPoint(InsertPt);
+  unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
+
+  Builder.SetCurrentDebugLocation(CI->getDebugLoc());
+
+  Value *UndefVal = UndefValue::get(VecType);
+
+  // The result vector.
+  Value *VResult = UndefVal;
+  unsigned VectorWidth = VecType->getNumElements();
+
+  // Short-circuit if the mask is all-true.
+  bool IsAllOnesMask = isa<Constant>(Mask) &&
+      cast<Constant>(Mask)->isAllOnesValue();
+
+  if (IsAllOnesMask) {
+    for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
+      Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx),
+                                                "Ptr" + Twine(Idx));
+      LoadInst *Load = Builder.CreateAlignedLoad(Ptr, AlignVal,
+                                                 "Load" + Twine(Idx));
+      VResult = Builder.CreateInsertElement(VResult, Load,
+                                            Builder.getInt32(Idx),
+                                            "Res" + Twine(Idx));
+    }
+    CI->replaceAllUsesWith(VResult);
+    CI->eraseFromParent();
+    return;
+  }
+
+  PHINode *Phi = nullptr;
+  Value *PrevPhi = UndefVal;
+
+  for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
+
+    // Fill the "else" block, created in the previous iteration
+    //
+    //  %Mask1 = extractelement <16 x i1> %Mask, i32 1
+    //  %ToLoad1 = icmp eq i1 %Mask1, true
+    //  br i1 %ToLoad1, label %cond.load, label %else
+    //
+    if (Idx > 0) {
+      Phi = Builder.CreatePHI(VecType, 2, "res.phi.else");
+      Phi->addIncoming(VResult, CondBlock);
+      Phi->addIncoming(PrevPhi, PrevIfBlock);
+      PrevPhi = Phi;
+      VResult = Phi;
+    }
+
+    Value *Predicate = Builder.CreateExtractElement(Mask,
+                                                    Builder.getInt32(Idx),
+                                                    "Mask" + Twine(Idx));
+    Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate,
+                                    ConstantInt::get(Predicate->getType(), 1),
+                                    "ToLoad" + Twine(Idx));
+
+    // Create "cond" block
+    //
+    //  %EltAddr = getelementptr i32* %1, i32 0
+    //  %Elt = load i32* %EltAddr
+    //  VResult = insertelement <16 x i32> VResult, i32 %Elt, i32 Idx
+    //
+    CondBlock = IfBlock->splitBasicBlock(InsertPt, "cond.load");
+    Builder.SetInsertPoint(InsertPt);
+
+    Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx),
+                                              "Ptr" + Twine(Idx));
+    LoadInst *Load = Builder.CreateAlignedLoad(Ptr, AlignVal,
+                                               "Load" + Twine(Idx));
+    VResult = Builder.CreateInsertElement(VResult, Load, Builder.getInt32(Idx),
+                                          "Res" + Twine(Idx));
+
+    // Create "else" block, fill it in the next iteration
+    BasicBlock *NewIfBlock = CondBlock->splitBasicBlock(InsertPt, "else");
+    Builder.SetInsertPoint(InsertPt);
+    Instruction *OldBr = IfBlock->getTerminator();
+    BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr);
+    OldBr->eraseFromParent();
+    PrevIfBlock = IfBlock;
+    IfBlock = NewIfBlock;
+  }
+
+  Phi = Builder.CreatePHI(VecType, 2, "res.phi.select");
+  Phi->addIncoming(VResult, CondBlock);
+  Phi->addIncoming(PrevPhi, PrevIfBlock);
+  Value *NewI = Builder.CreateSelect(Mask, Phi, Src0);
+  CI->replaceAllUsesWith(NewI);
+  CI->eraseFromParent();
+}
+
+// Translate a masked scatter intrinsic like
+//   void @llvm.masked.scatter.v16i32(<16 x i32> %Src, <16 x i32*> %Ptrs, i32 4,
+//                                    <16 x i1> %Mask)
+// into a chain of basic blocks that stores each element individually when
+// the corresponding mask bit is set:
+//
+//  %Ptrs = getelementptr i32, i32* %ptr, <16 x i64> %ind
+//  %Mask0 = extractelement <16 x i1> %Mask, i32 0
+//  %ToStore0 = icmp eq i1 %Mask0, true
+//  br i1 %ToStore0, label %cond.store, label %else
+//
+// cond.store:
+//  %Elt0 = extractelement <16 x i32> %Src, i32 0
+//  %Ptr0 = extractelement <16 x i32*> %Ptrs, i32 0
+//  store i32 %Elt0, i32* %Ptr0, align 4
+//  br label %else
+//
+// else:
+//  %Mask1 = extractelement <16 x i1> %Mask, i32 1
+//  %ToStore1 = icmp eq i1 %Mask1, true
+//  br i1 %ToStore1, label %cond.store1, label %else2
+//
+// cond.store1:
+//  %Elt1 = extractelement <16 x i32> %Src, i32 1
+//  %Ptr1 = extractelement <16 x i32*> %Ptrs, i32 1
+//  store i32 %Elt1, i32* %Ptr1, align 4
+//  br label %else2
+//  . . .
+static void ScalarizeMaskedScatter(CallInst *CI) {
+  Value *Src = CI->getArgOperand(0);
+  Value *Ptrs = CI->getArgOperand(1);
+  Value *Alignment = CI->getArgOperand(2);
+  Value *Mask = CI->getArgOperand(3);
+
+  assert(isa<VectorType>(Src->getType()) &&
+         "Unexpected data type in masked scatter intrinsic");
+  assert(isa<VectorType>(Ptrs->getType()) &&
+         isa<PointerType>(Ptrs->getType()->getVectorElementType()) &&
+         "Vector of pointers is expected in masked scatter intrinsic");
+
+  IRBuilder<> Builder(CI->getContext());
+  Instruction *InsertPt = CI;
+  BasicBlock *IfBlock = CI->getParent();
+  Builder.SetInsertPoint(InsertPt);
+  Builder.SetCurrentDebugLocation(CI->getDebugLoc());
+
+  unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
+  unsigned VectorWidth = Src->getType()->getVectorNumElements();
+  bool IsAllOnesMask = isa<Constant>(Mask) &&
+      cast<Constant>(Mask)->isAllOnesValue();
+
+  if (IsAllOnesMask) {
+    // Simple case: just store all elements from the data vector, one by one.
+    for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
+      Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx),
+                                                   "Elt" + Twine(Idx));
+      Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx),
+                                                "Ptr" + Twine(Idx));
+      Builder.CreateAlignedStore(OneElt, Ptr, AlignVal);
+    }
+    CI->eraseFromParent();
+    return;
+  }
+  for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
+    // Fill the "else" block, created in the previous iteration
+    //
+    //  %Mask1 = extractelement <16 x i1> %Mask, i32 Idx
+    //  %ToStore = icmp eq i1 %Mask1, true
+    //  br i1 %ToStore, label %cond.store, label %else
+    //
+    Value *Predicate = Builder.CreateExtractElement(Mask,
+                                                    Builder.getInt32(Idx),
+                                                    "Mask" + Twine(Idx));
+    Value *Cmp =
+        Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate,
+                           ConstantInt::get(Predicate->getType(), 1),
+                           "ToStore" + Twine(Idx));
+
+    // Create "cond" block
+    //
+    //  %Elt1 = extractelement <16 x i32> %Src, i32 1
+    //  %Ptr1 = extractelement <16 x i32*> %Ptrs, i32 1
+    //  store i32 %Elt1, i32* %Ptr1
+    //
+    BasicBlock *CondBlock = IfBlock->splitBasicBlock(InsertPt, "cond.store");
+    Builder.SetInsertPoint(InsertPt);
+
+    Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx),
+                                                 "Elt" + Twine(Idx));
+    Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx),
+                                              "Ptr" + Twine(Idx));
+    Builder.CreateAlignedStore(OneElt, Ptr, AlignVal);
+
+    // Create "else" block, fill it in the next iteration
+    BasicBlock *NewIfBlock = CondBlock->splitBasicBlock(InsertPt, "else");
+    Builder.SetInsertPoint(InsertPt);
+    Instruction *OldBr = IfBlock->getTerminator();
+    BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr);
+    OldBr->eraseFromParent();
+    IfBlock = NewIfBlock;
+  }
+  CI->eraseFromParent();
+}
+
 bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool& ModifiedDT) {
   BasicBlock *BB = CI->getParent();
 
@@ -1399,6 +1671,22 @@
     }
     return false;
   }
+  case Intrinsic::masked_gather: {
+    if (!TTI->isLegalMaskedGather(CI->getType())) {
+      ScalarizeMaskedGather(CI);
+      ModifiedDT = true;
+      return true;
+    }
+    return false;
+  }
+  case Intrinsic::masked_scatter: {
+    if (!TTI->isLegalMaskedScatter(CI->getArgOperand(0)->getType())) {
+      ScalarizeMaskedScatter(CI);
+      ModifiedDT = true;
+      return true;
+    }
+    return false;
+  }
   case Intrinsic::aarch64_stlxr:
   case Intrinsic::aarch64_stxr: {
     ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0));
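Given the X86 hooks below, gather/scatter legality on x86 reduces to two checks: the subtarget has AVX-512 and the element type is at least 32 bits wide (pointer elements count as pointer-sized via the data layout). An illustrative consequence, with hypothetical values %p, %p8, %m, %s, %s8 of matching types:

  ; Legal on an AVX-512 target, so the intrinsic survives to instruction selection:
  %g1 = call <16 x i32> @llvm.masked.gather.v16i32(<16 x i32*> %p, i32 4,
                                                   <16 x i1> %m, <16 x i32> %s)
  ; Not legal on any x86 subtarget yet (8-bit elements), so CodeGenPrepare scalarizes it:
  %g2 = call <16 x i8> @llvm.masked.gather.v16i8(<16 x i8*> %p8, i32 1,
                                                 <16 x i1> %m, <16 x i8> %s8)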
Index: ../lib/Target/X86/X86TargetTransformInfo.h
===================================================================
--- ../lib/Target/X86/X86TargetTransformInfo.h
+++ ../lib/Target/X86/X86TargetTransformInfo.h
@@ -90,6 +90,8 @@
                              Type *Ty);
   bool isLegalMaskedLoad(Type *DataType, int Consecutive);
   bool isLegalMaskedStore(Type *DataType, int Consecutive);
+  bool isLegalMaskedGather(Type *DataType);
+  bool isLegalMaskedScatter(Type *DataType);
   bool areInlineCompatible(const Function *Caller,
                            const Function *Callee) const;
 
Index: ../lib/Target/X86/X86TargetTransformInfo.cpp
===================================================================
--- ../lib/Target/X86/X86TargetTransformInfo.cpp
+++ ../lib/Target/X86/X86TargetTransformInfo.cpp
@@ -1168,7 +1168,6 @@
 
 bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, int Consecutive) {
   int DataWidth = DataTy->getPrimitiveSizeInBits();
-  // Todo: AVX512 allows gather/scatter, works with strided and random as well
   if ((DataWidth < 32) || (Consecutive == 0))
     return false;
   if (ST->hasAVX512() || ST->hasAVX2())
@@ -1180,6 +1179,21 @@
   return isLegalMaskedLoad(DataType, Consecutive);
 }
 
+bool X86TTIImpl::isLegalMaskedGather(Type *DataTy) {
+  if (DataTy->isVectorTy())
+    DataTy = DataTy->getVectorElementType();
+
+  unsigned DataWidth = DataTy->isPointerTy() ? DL.getPointerSizeInBits()
+                                             : DataTy->getPrimitiveSizeInBits();
+
+  // Masked gather/scatter requires AVX-512 and 32- or 64-bit element types.
+  return DataWidth >= 32 && ST->hasAVX512();
+}
+
+bool X86TTIImpl::isLegalMaskedScatter(Type *DataType) {
+  return isLegalMaskedGather(DataType);
+}
+
 bool X86TTIImpl::areInlineCompatible(const Function *Caller,
                                      const Function *Callee) const {
   const TargetMachine &TM = getTLI()->getTargetMachine();
Index: ../test/CodeGen/X86/masked_gather_scatter.ll
===================================================================
--- ../test/CodeGen/X86/masked_gather_scatter.ll
+++ ../test/CodeGen/X86/masked_gather_scatter.ll
@@ -1,4 +1,6 @@
 ; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=knl < %s | FileCheck %s -check-prefix=KNL
+; RUN: opt -mtriple=x86_64-apple-darwin -codegenprepare -mcpu=corei7-avx -S < %s | FileCheck %s -check-prefix=SCALAR
+
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
@@ -6,6 +8,14 @@
 ; KNL-LABEL: test1
 ; KNL: kxnorw %k1, %k1, %k1
 ; KNL: vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
+
+; SCALAR-LABEL: test1
+; SCALAR: extractelement <16 x float*>
+; SCALAR-NEXT: load float
+; SCALAR-NEXT: insertelement <16 x float>
+; SCALAR-NEXT: extractelement <16 x float*>
+; SCALAR-NEXT: load float
+
 define <16 x float> @test1(float* %base, <16 x i32> %ind) {
 
   %broadcast.splatinsert = insertelement <16 x float*> undef, float* %base, i32 0
@@ -25,6 +35,18 @@
 ; KNL-LABEL: test2
 ; KNL: kmovw %esi, %k1
 ; KNL: vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
+
+; SCALAR-LABEL: test2
+; SCALAR: extractelement <16 x float*>
+; SCALAR-NEXT: load float
+; SCALAR-NEXT: insertelement <16 x float>
+; SCALAR-NEXT: br label %else
+; SCALAR: else:
+; SCALAR-NEXT: %res.phi.else = phi
+; SCALAR-NEXT: %Mask1 = extractelement <16 x i1> %imask, i32 1
+; SCALAR-NEXT: %ToLoad1 = icmp eq i1 %Mask1, true
+; SCALAR-NEXT: br i1 %ToLoad1, label %cond.load1, label %else2
+
 define <16 x float> @test2(float* %base, <16 x i32> %ind, i16 %mask) {
 
   %broadcast.splatinsert = insertelement <16 x float*> undef, float* %base, i32 0
@@ -76,6 +98,20 @@
 ; KNL: vpscatterdd {{.*}}%k2
 ; KNL: vpscatterdd {{.*}}%k1
 
+; SCALAR-LABEL: test5
+; SCALAR: %Mask0 = extractelement <16 x i1> %imask, i32 0
+; SCALAR-NEXT: %ToStore0 = icmp eq i1 %Mask0, true
+; SCALAR-NEXT: br i1 %ToStore0, label %cond.store, label %else
+; SCALAR: cond.store:
+; SCALAR-NEXT: %Elt0 = extractelement <16 x i32> %val, i32 0
+; SCALAR-NEXT: %Ptr0 = extractelement <16 x i32*> %gep.random, i32 0
+; SCALAR-NEXT: store i32 %Elt0, i32* %Ptr0, align 4
+; SCALAR-NEXT: br label %else
+; SCALAR: else:
+; SCALAR-NEXT: %Mask1 = extractelement <16 x i1> %imask, i32 1
+; SCALAR-NEXT: %ToStore1 = icmp eq i1 %Mask1, true
+; SCALAR-NEXT: br i1 %ToStore1, label %cond.store1, label %else2
+
 define void @test5(i32* %base, <16 x i32> %ind, i16 %mask, <16 x i32>%val) {
 
   %broadcast.splatinsert = insertelement <16 x i32*> undef, i32* %base, i32 0
@@ -96,6 +132,16 @@
 ; KNL: kxnorw %k2, %k2, %k2
 ; KNL: vpgatherqd (,%zmm{{.*}}), %ymm{{.*}} {%k2}
 ; KNL: vpscatterqd %ymm{{.*}}, (,%zmm{{.*}}) {%k1}
+
+; SCALAR-LABEL: test6
+; SCALAR: store i32 %Elt0, i32* %Ptr01, align 4
+; SCALAR-NEXT: %Elt1 = extractelement <8 x i32> %a1, i32 1
+; SCALAR-NEXT: %Ptr12 = extractelement <8 x i32*> %ptr, i32 1
+; SCALAR-NEXT: store i32 %Elt1, i32* %Ptr12, align 4
+; SCALAR-NEXT: %Elt2 = extractelement <8 x i32> %a1, i32 2
+; SCALAR-NEXT: %Ptr23 = extractelement <8 x i32*> %ptr, i32 2
+; SCALAR-NEXT: store i32 %Elt2, i32* %Ptr23, align 4
+
 define <8 x i32> @test6(<8 x i32>%a1, <8 x i32*> %ptr) {
 
   %a = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %ptr, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
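A note on test6: both its gather and its scatter use a constant all-true mask, so scalarization takes the branch-free fast path. The stores matched by the SCALAR prefix are therefore preceded by a straight-line gather chain of roughly this shape (a sketch; the numeric suffixes in the CHECK lines, such as %Ptr01, come from IRBuilder uniquing the Twine-derived names when both functions use the same prefixes):

  %Ptr0 = extractelement <8 x i32*> %ptr, i32 0
  %Load0 = load i32, i32* %Ptr0, align 4
  %Res0 = insertelement <8 x i32> undef, i32 %Load0, i32 0
  %Ptr1 = extractelement <8 x i32*> %ptr, i32 1
  %Load1 = load i32, i32* %Ptr1, align 4
  %Res1 = insertelement <8 x i32> %Res0, i32 %Load1, i32 1
  ; ... and so on for the remaining lanes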