Index: llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp
===================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -25,6 +25,7 @@
 #include "llvm/Analysis/MemoryBuiltins.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Analysis/VectorUtils.h"
 #include "llvm/IR/Attributes.h"
 #include "llvm/IR/BasicBlock.h"
 #include "llvm/IR/Constant.h"
@@ -1175,6 +1176,20 @@
   return true;
 }
 
+/// Given a mask vector <Y x i1>, return an APInt (of bitwidth Y) for each lane
+/// which may be active.  TODO: This is a lot like known bits, but for
+/// vectors.  Is there something we can common this with?
+static APInt possiblyDemandedEltsInMask(Value *Mask) {
+
+  const unsigned VWidth = cast<VectorType>(Mask->getType())->getNumElements();
+  APInt DemandedElts = APInt::getAllOnesValue(VWidth);
+  if (auto *CV = dyn_cast<ConstantVector>(Mask))
+    for (unsigned i = 0; i < VWidth; i++)
+      if (CV->getAggregateElement(i)->isNullValue())
+        DemandedElts.clearBit(i);
+  return DemandedElts;
+}
+
 // TODO, Obvious Missing Transforms:
 // * Dereferenceable address -> speculative load/select
 // * Narrow width by halfs excluding zero/undef lanes
@@ -1196,14 +1211,14 @@
 // * SimplifyDemandedVectorElts
 // * Single constant active lane -> store
 // * Narrow width by halfs excluding zero/undef lanes
-static Instruction *simplifyMaskedStore(IntrinsicInst &II, InstCombiner &IC) {
+Instruction *InstCombiner::simplifyMaskedStore(IntrinsicInst &II) {
   auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
   if (!ConstMask)
     return nullptr;
 
   // If the mask is all zeros, this instruction does nothing.
   if (ConstMask->isNullValue())
-    return IC.eraseInstFromFunction(II);
+    return eraseInstFromFunction(II);
 
   // If the mask is all ones, this is a plain vector store of the 1st argument.
   if (ConstMask->isAllOnesValue()) {
@@ -1212,6 +1227,15 @@
     return new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);
   }
 
+  // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
+  APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
+  APInt UndefElts(DemandedElts.getBitWidth(), 0);
+  if (Value *V = SimplifyDemandedVectorElts(II.getOperand(0),
+                                            DemandedElts, UndefElts)) {
+    II.setOperand(0, V);
+    return &II;
+  }
+
   return nullptr;
 }
 
@@ -1268,11 +1292,28 @@
 // * Single constant active lane -> store
 // * Adjacent vector addresses -> masked.store
 // * Narrow store width by halfs excluding zero/undef lanes
-static Instruction *simplifyMaskedScatter(IntrinsicInst &II, InstCombiner &IC) {
-  // If the mask is all zeros, a scatter does nothing.
+Instruction *InstCombiner::simplifyMaskedScatter(IntrinsicInst &II) {
   auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
-  if (ConstMask && ConstMask->isNullValue())
-    return IC.eraseInstFromFunction(II);
+  if (!ConstMask)
+    return nullptr;
+
+  // If the mask is all zeros, a scatter does nothing.
+  if (ConstMask->isNullValue())
+    return eraseInstFromFunction(II);
+
+  // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
+  APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
+  APInt UndefElts(DemandedElts.getBitWidth(), 0);
+  if (Value *V = SimplifyDemandedVectorElts(II.getOperand(0),
+                                            DemandedElts, UndefElts)) {
+    II.setOperand(0, V);
+    return &II;
+  }
+  if (Value *V = SimplifyDemandedVectorElts(II.getOperand(1),
+                                            DemandedElts, UndefElts)) {
+    II.setOperand(1, V);
+    return &II;
+  }
 
   return nullptr;
 }
 
@@ -1972,11 +2013,11 @@
       return replaceInstUsesWith(CI, SimplifiedMaskedOp);
     break;
   case Intrinsic::masked_store:
-    return simplifyMaskedStore(*II, *this);
+    return simplifyMaskedStore(*II);
   case Intrinsic::masked_gather:
     return simplifyMaskedGather(*II, *this);
   case Intrinsic::masked_scatter:
-    return simplifyMaskedScatter(*II, *this);
+    return simplifyMaskedScatter(*II);
   case Intrinsic::launder_invariant_group:
   case Intrinsic::strip_invariant_group:
     if (auto *SkippedBarrier = simplifyInvariantGroupIntrinsic(*II, *this))
Index: llvm/trunk/lib/Transforms/InstCombine/InstCombineInternal.h
===================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineInternal.h
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -474,6 +474,9 @@
   Instruction *transformCallThroughTrampoline(CallBase &Call,
                                               IntrinsicInst &Tramp);
 
+  Instruction *simplifyMaskedStore(IntrinsicInst &II);
+  Instruction *simplifyMaskedScatter(IntrinsicInst &II);
+
   /// Transform (zext icmp) to bitwise / integer operations in order to
   /// eliminate it.
   ///
Index: llvm/trunk/test/Transforms/InstCombine/masked_intrinsics.ll
===================================================================
--- llvm/trunk/test/Transforms/InstCombine/masked_intrinsics.ll
+++ llvm/trunk/test/Transforms/InstCombine/masked_intrinsics.ll
@@ -80,8 +80,7 @@
 
 define void @store_demandedelts(<2 x double>* %ptr, double %val) {
 ; CHECK-LABEL: @store_demandedelts(
-; CHECK-NEXT:    [[VALVEC1:%.*]] = insertelement <2 x double> undef, double [[VAL:%.*]], i32 0
-; CHECK-NEXT:    [[VALVEC2:%.*]] = shufflevector <2 x double> [[VALVEC1]], <2 x double> undef, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[VALVEC2:%.*]] = insertelement <2 x double> undef, double [[VAL:%.*]], i32 0
 ; CHECK-NEXT:    call void @llvm.masked.store.v2f64.p0v2f64(<2 x double> [[VALVEC2]], <2 x double>* [[PTR:%.*]], i32 4, <2 x i1> <i1 true, i1 false>)
 ; CHECK-NEXT:    ret void
 ;
@@ -137,9 +136,8 @@
 
 define void @scatter_demandedelts(double* %ptr, double %val) {
 ; CHECK-LABEL: @scatter_demandedelts(
-; CHECK-NEXT:    [[PTRS:%.*]] = getelementptr double, double* [[PTR:%.*]], <2 x i64> <i64 0, i64 1>
-; CHECK-NEXT:    [[VALVEC1:%.*]] = insertelement <2 x double> undef, double [[VAL:%.*]], i32 0
-; CHECK-NEXT:    [[VALVEC2:%.*]] = shufflevector <2 x double> [[VALVEC1]], <2 x double> undef, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[PTRS:%.*]] = getelementptr double, double* [[PTR:%.*]], <2 x i64> <i64 0, i64 undef>
+; CHECK-NEXT:    [[VALVEC2:%.*]] = insertelement <2 x double> undef, double [[VAL:%.*]], i32 0
 ; CHECK-NEXT:    call void @llvm.masked.scatter.v2f64.v2p0f64(<2 x double> [[VALVEC2]], <2 x double*> [[PTRS]], i32 8, <2 x i1> <i1 true, i1 false>)
 ; CHECK-NEXT:    ret void
 ;
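
A minimal standalone sketch of the lane computation used above, under stated assumptions: it mirrors the idea behind possiblyDemandedEltsInMask but is not part of the patch; the helper name demandedLanes and the driver are invented for illustration, and it targets the LLVM C++ API of this patch's era (VectorType::getNumElements, APInt::getAllOnesValue). An InstCombine-style caller would feed the resulting APInt into SimplifyDemandedVectorElts, which is what lets the tests above drop the shufflevector splat and turn the dead GEP index lane into undef.

// Sketch only; assumes an LLVM development install, e.g. built with
//   clang++ sketch.cpp $(llvm-config --cxxflags --ldflags --libs core)
#include "llvm/ADT/APInt.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Start with every lane marked demanded; clear lanes whose mask element is a
// known-zero constant. Non-constant masks conservatively keep all lanes.
// (Name is local to this sketch, not the patch's helper.)
static APInt demandedLanes(Value *Mask) {
  const unsigned VWidth = cast<VectorType>(Mask->getType())->getNumElements();
  APInt Demanded = APInt::getAllOnesValue(VWidth);
  if (auto *CV = dyn_cast<ConstantVector>(Mask))
    for (unsigned I = 0; I != VWidth; ++I)
      if (CV->getAggregateElement(I)->isNullValue())
        Demanded.clearBit(I);
  return Demanded;
}

int main() {
  LLVMContext Ctx;
  // Mask <i1 true, i1 false, i1 true, i1 false>: lanes 0 and 2 may be active.
  Constant *T = ConstantInt::getTrue(Ctx);
  Constant *F = ConstantInt::getFalse(Ctx);
  Constant *Mask = ConstantVector::get({T, F, T, F});

  APInt Demanded = demandedLanes(Mask);
  for (unsigned I = 0, E = Demanded.getBitWidth(); I != E; ++I)
    outs() << "lane " << I << ": " << (Demanded[I] ? "demanded" : "dead") << "\n";
  // A caller inside InstCombine would pass `Demanded` to
  // SimplifyDemandedVectorElts so dead lanes of the value/pointer operands
  // can be rewritten (typically to undef), as in the updated tests.
  return 0;
}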