Index: lib/Transforms/InstCombine/InstCombineCalls.cpp =================================================================== --- lib/Transforms/InstCombine/InstCombineCalls.cpp +++ lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -25,6 +25,7 @@ #include "llvm/Analysis/MemoryBuiltins.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Analysis/ValueTracking.h" +#include "llvm/Analysis/VectorUtils.h" #include "llvm/IR/Attributes.h" #include "llvm/IR/BasicBlock.h" #include "llvm/IR/Constant.h" @@ -1175,6 +1176,20 @@ return true; } +/// Given a mask vector <Y x i1>, return an APInt (of bitwidth Y) for each lane +/// which may be active. TODO: This is a lot like known bits, but for +/// vectors. Is there something we can common this with? +static APInt possiblyDemandedEltsInMask(Value *Mask) { + + const unsigned VWidth = cast<VectorType>(Mask->getType())->getNumElements(); + APInt DemandedElts = APInt::getAllOnesValue(VWidth); + if (auto *CV = dyn_cast<ConstantVector>(Mask)) + for (unsigned i = 0; i < VWidth; i++) + if (CV->getAggregateElement(i)->isNullValue()) + DemandedElts.clearBit(i); + return DemandedElts; +} + static Value *simplifyMaskedLoad(const IntrinsicInst &II, InstCombiner::BuilderTy &Builder) { // If the mask is all ones or undefs, this is a plain vector load of the 1st @@ -1189,14 +1204,14 @@ return nullptr; } -static Instruction *simplifyMaskedStore(IntrinsicInst &II, InstCombiner &IC) { +Instruction *InstCombiner::simplifyMaskedStore(IntrinsicInst &II) { auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3)); if (!ConstMask) return nullptr; // If the mask is all zeros, this instruction does nothing. if (ConstMask->isNullValue()) - return IC.eraseInstFromFunction(II); + return eraseInstFromFunction(II); // If the mask is all ones, this is a plain vector store of the 1st argument. 
if (ConstMask->isAllOnesValue()) { @@ -1205,6 +1220,16 @@ return new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment); } + // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts + APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask); + const unsigned VWidth = ConstMask->getType()->getVectorNumElements(); + APInt UndefElts(VWidth, 0); + if (Value *V = SimplifyDemandedVectorElts(II.getOperand(0), + DemandedElts, UndefElts)) { + II.setOperand(0, V); + return &II; + } + return nullptr; } @@ -1251,11 +1276,29 @@ return cast<Instruction>(Result); } -static Instruction *simplifyMaskedScatter(IntrinsicInst &II, InstCombiner &IC) { +Instruction *InstCombiner::simplifyMaskedScatter(IntrinsicInst &II) { // If the mask is all zeros, a scatter does nothing. auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3)); if (ConstMask && ConstMask->isNullValue()) - return IC.eraseInstFromFunction(II); + return eraseInstFromFunction(II); + + auto *CV = dyn_cast_or_null<ConstantVector>(ConstMask); + if (!CV) + return nullptr; + + APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask); + const unsigned VWidth = ConstMask->getType()->getVectorNumElements(); + APInt UndefElts(VWidth, 0); + if (Value *V = SimplifyDemandedVectorElts(II.getOperand(0), + DemandedElts, UndefElts)) { + II.setOperand(0, V); + return &II; + } + if (Value *V = SimplifyDemandedVectorElts(II.getOperand(1), + DemandedElts, UndefElts)) { + II.setOperand(1, V); + return &II; + } return nullptr; } @@ -1955,11 +1998,11 @@ return replaceInstUsesWith(CI, SimplifiedMaskedOp); break; case Intrinsic::masked_store: - return simplifyMaskedStore(*II, *this); + return simplifyMaskedStore(*II); case Intrinsic::masked_gather: return simplifyMaskedGather(*II, *this); case Intrinsic::masked_scatter: - return simplifyMaskedScatter(*II, *this); + return simplifyMaskedScatter(*II); case Intrinsic::launder_invariant_group: case Intrinsic::strip_invariant_group: if (auto *SkippedBarrier = simplifyInvariantGroupIntrinsic(*II, *this)) 
Index: lib/Transforms/InstCombine/InstCombineInternal.h =================================================================== --- lib/Transforms/InstCombine/InstCombineInternal.h +++ lib/Transforms/InstCombine/InstCombineInternal.h @@ -474,6 +474,9 @@ Instruction *transformCallThroughTrampoline(CallBase &Call, IntrinsicInst &Tramp); + Instruction *simplifyMaskedStore(IntrinsicInst &II); + Instruction *simplifyMaskedScatter(IntrinsicInst &II); + /// Transform (zext icmp) to bitwise / integer operations in order to /// eliminate it. /// Index: test/Transforms/InstCombine/masked_intrinsics.ll =================================================================== --- test/Transforms/InstCombine/masked_intrinsics.ll +++ test/Transforms/InstCombine/masked_intrinsics.ll @@ -80,8 +80,7 @@ define void @store_demandedelts(<2 x double>* %ptr, double %val) { ; CHECK-LABEL: @store_demandedelts( -; CHECK-NEXT: [[VALVEC1:%.*]] = insertelement <2 x double> undef, double [[VAL:%.*]], i32 0 -; CHECK-NEXT: [[VALVEC2:%.*]] = shufflevector <2 x double> [[VALVEC1]], <2 x double> undef, <2 x i32> zeroinitializer +; CHECK-NEXT: [[VALVEC2:%.*]] = insertelement <2 x double> undef, double [[VAL:%.*]], i32 0 ; CHECK-NEXT: call void @llvm.masked.store.v2f64.p0v2f64(<2 x double> [[VALVEC2]], <2 x double>* [[PTR:%.*]], i32 4, <2 x i1> <i1 true, i1 false>) ; CHECK-NEXT: ret void ; @@ -137,9 +136,8 @@ define void @scatter_demandedelts(double* %ptr, double %val) { ; CHECK-LABEL: @scatter_demandedelts( -; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, double* [[PTR:%.*]], <2 x i64> <i64 0, i64 1> -; CHECK-NEXT: [[VALVEC1:%.*]] = insertelement <2 x double> undef, double [[VAL:%.*]], i32 0 -; CHECK-NEXT: [[VALVEC2:%.*]] = shufflevector <2 x double> [[VALVEC1]], <2 x double> undef, <2 x i32> zeroinitializer +; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, double* [[PTR:%.*]], <2 x i64> <i64 0, i64 undef> +; CHECK-NEXT: [[VALVEC2:%.*]] = insertelement <2 x double> undef, double [[VAL:%.*]], i32 0 ; CHECK-NEXT: call void 
@llvm.masked.scatter.v2f64.v2p0f64(<2 x double> [[VALVEC2]], <2 x double*> [[PTRS]], i32 8, <2 x i1> <i1 true, i1 false>) ; CHECK-NEXT: ret void ;