diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp index a96686aaf1c5..f16b04e4402a 100644 --- a/llvm/lib/Analysis/VectorUtils.cpp +++ b/llvm/lib/Analysis/VectorUtils.cpp @@ -1,1339 +1,1341 @@ //===----------- VectorUtils.cpp - Vectorizer utility functions -----------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines vectorizer utilities. // //===----------------------------------------------------------------------===// #include "llvm/Analysis/VectorUtils.h" #include "llvm/ADT/EquivalenceClasses.h" #include "llvm/Analysis/DemandedBits.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/LoopIterator.h" #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/Analysis/ScalarEvolutionExpressions.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/Constants.h" #include "llvm/IR/GetElementPtrTypeIterator.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/PatternMatch.h" #include "llvm/IR/Value.h" #include "llvm/Support/CommandLine.h" #define DEBUG_TYPE "vectorutils" using namespace llvm; using namespace llvm::PatternMatch; /// Maximum factor for an interleaved memory access. static cl::opt MaxInterleaveGroupFactor( "max-interleave-group-factor", cl::Hidden, cl::desc("Maximum factor for an interleaved access group (default = 8)"), cl::init(8)); /// Return true if all of the intrinsic's arguments and return type are scalars /// for the scalar form of the intrinsic, and vectors for the vector form of the /// intrinsic (except operands that are marked as always being scalar by /// hasVectorInstrinsicScalarOpd). bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) { switch (ID) { case Intrinsic::bswap: // Begin integer bit-manipulation. case Intrinsic::bitreverse: case Intrinsic::ctpop: case Intrinsic::ctlz: case Intrinsic::cttz: case Intrinsic::fshl: case Intrinsic::fshr: case Intrinsic::sadd_sat: case Intrinsic::ssub_sat: case Intrinsic::uadd_sat: case Intrinsic::usub_sat: case Intrinsic::smul_fix: case Intrinsic::smul_fix_sat: case Intrinsic::umul_fix: case Intrinsic::umul_fix_sat: case Intrinsic::sqrt: // Begin floating-point. case Intrinsic::sin: case Intrinsic::cos: case Intrinsic::exp: case Intrinsic::exp2: case Intrinsic::log: case Intrinsic::log10: case Intrinsic::log2: case Intrinsic::fabs: case Intrinsic::minnum: case Intrinsic::maxnum: case Intrinsic::minimum: case Intrinsic::maximum: case Intrinsic::copysign: case Intrinsic::floor: case Intrinsic::ceil: case Intrinsic::trunc: case Intrinsic::rint: case Intrinsic::nearbyint: case Intrinsic::round: case Intrinsic::pow: case Intrinsic::fma: case Intrinsic::fmuladd: case Intrinsic::powi: case Intrinsic::canonicalize: return true; default: return false; } } /// Identifies if the vector form of the intrinsic has a scalar operand. bool llvm::hasVectorInstrinsicScalarOpd(Intrinsic::ID ID, unsigned ScalarOpdIdx) { switch (ID) { case Intrinsic::ctlz: case Intrinsic::cttz: case Intrinsic::powi: return (ScalarOpdIdx == 1); case Intrinsic::smul_fix: case Intrinsic::smul_fix_sat: case Intrinsic::umul_fix: case Intrinsic::umul_fix_sat: return (ScalarOpdIdx == 2); default: return false; } } /// Returns intrinsic ID for call. 
/// For the input call instruction it finds the mapping intrinsic and returns
/// its ID; if it does not find one, it returns not_intrinsic.
Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI,
                                                const TargetLibraryInfo *TLI) {
  Intrinsic::ID ID = getIntrinsicForCallSite(*CI, TLI);
  if (ID == Intrinsic::not_intrinsic)
    return Intrinsic::not_intrinsic;

  if (isTriviallyVectorizable(ID) || ID == Intrinsic::lifetime_start ||
      ID == Intrinsic::lifetime_end || ID == Intrinsic::assume ||
      ID == Intrinsic::sideeffect)
    return ID;
  return Intrinsic::not_intrinsic;
}

/// Find the operand of the GEP that should be checked for consecutive
/// stores. This ignores trailing indices that have no effect on the final
/// pointer.
unsigned llvm::getGEPInductionOperand(const GetElementPtrInst *Gep) {
  const DataLayout &DL = Gep->getModule()->getDataLayout();
  unsigned LastOperand = Gep->getNumOperands() - 1;
  unsigned GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());

  // Walk backwards and try to peel off zeros.
  while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
    // Find the type we're currently indexing into.
    gep_type_iterator GEPTI = gep_type_begin(Gep);
    std::advance(GEPTI, LastOperand - 2);

    // If it's a type with the same allocation size as the result of the GEP we
    // can peel off the zero index.
    if (DL.getTypeAllocSize(GEPTI.getIndexedType()) != GEPAllocSize)
      break;
    --LastOperand;
  }

  return LastOperand;
}

/// If the argument is a GEP, then returns the operand identified by
/// getGEPInductionOperand. However, if there is some other non-loop-invariant
/// operand, it returns that instead.
Value *llvm::stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP)
    return Ptr;

  unsigned InductionOperand = getGEPInductionOperand(GEP);

  // Check that all of the gep indices are uniform except for our induction
  // operand.
  for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i)
    if (i != InductionOperand &&
        !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(i)), Lp))
      return Ptr;
  return GEP->getOperand(InductionOperand);
}

/// If a value has only one user that is a CastInst, return it.
Value *llvm::getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) {
  Value *UniqueCast = nullptr;
  for (User *U : Ptr->users()) {
    CastInst *CI = dyn_cast<CastInst>(U);
    if (CI && CI->getType() == Ty) {
      if (!UniqueCast)
        UniqueCast = CI;
      else
        return nullptr;
    }
  }
  return UniqueCast;
}

/// Get the stride of a pointer access in a loop. Looks for symbolic
/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
Value *llvm::getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy || PtrTy->isAggregateType())
    return nullptr;

  // Try to remove a gep instruction to make the pointer (actually the index at
  // this point) easier to analyze. If OrigPtr is equal to Ptr we are analyzing
  // the pointer, otherwise we are analyzing the index.
  Value *OrigPtr = Ptr;

  // The size of the pointer access.
  int64_t PtrAccessSize = 1;

  Ptr = stripGetElementPtr(Ptr, SE, Lp);
  const SCEV *V = SE->getSCEV(Ptr);

  if (Ptr != OrigPtr)
    // Strip off casts.
    while (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(V))
      V = C->getOperand();

  const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
  if (!S)
    return nullptr;

  V = S->getStepRecurrence(*SE);
  if (!V)
    return nullptr;

  // Strip off the size of access multiplication if we are still analyzing the
  // pointer.
  if (OrigPtr == Ptr) {
    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
      if (M->getOperand(0)->getSCEVType() != scConstant)
        return nullptr;

      const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();

      // Huge step value - give up.
      if (APStepVal.getBitWidth() > 64)
        return nullptr;

      int64_t StepVal = APStepVal.getSExtValue();
      if (PtrAccessSize != StepVal)
        return nullptr;
      V = M->getOperand(1);
    }
  }

  // Strip off casts.
  Type *StripedOffRecurrenceCast = nullptr;
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(V)) {
    StripedOffRecurrenceCast = C->getType();
    V = C->getOperand();
  }

  // Look for the loop invariant symbolic value.
  const SCEVUnknown *U = dyn_cast<SCEVUnknown>(V);
  if (!U)
    return nullptr;

  Value *Stride = U->getValue();
  if (!Lp->isLoopInvariant(Stride))
    return nullptr;

  // If we have stripped off the recurrence cast we have to make sure that we
  // return the value that is used in this loop so that we can replace it
  // later.
  if (StripedOffRecurrenceCast)
    Stride = getUniqueCastUse(Stride, Lp, StripedOffRecurrenceCast);

  return Stride;
}

/// Given a vector and an element number, see if the scalar value is
/// already around as a register, for example if it were inserted then
/// extracted from the vector.
Value *llvm::findScalarElement(Value *V, unsigned EltNo) {
  assert(V->getType()->isVectorTy() && "Not looking at a vector?");
  VectorType *VTy = cast<VectorType>(V->getType());
  // For fixed-length vector, return undef for out of range access.
  if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
    unsigned Width = FVTy->getNumElements();
    if (EltNo >= Width)
      return UndefValue::get(FVTy->getElementType());
  }

  if (Constant *C = dyn_cast<Constant>(V))
    return C->getAggregateElement(EltNo);

  if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert to a variable element, we don't know what it is.
    if (!isa<ConstantInt>(III->getOperand(2)))
      return nullptr;
    unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();

    // If this is an insert to the element we are looking for, return the
    // inserted value.
    if (EltNo == IIElt)
      return III->getOperand(1);

    // Otherwise, the insertelement doesn't modify the value, recurse on its
    // vector input.
    return findScalarElement(III->getOperand(0), EltNo);
  }

-  if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V)) {
+  ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V);
+  // Restrict the following transformation to fixed-length vector.
+  if (SVI && isa<FixedVectorType>(SVI->getType())) {
     unsigned LHSWidth =
-        cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements();
+        cast<FixedVectorType>(SVI->getOperand(0)->getType())->getNumElements();
     int InEl = SVI->getMaskValue(EltNo);
     if (InEl < 0)
       return UndefValue::get(VTy->getElementType());
     if (InEl < (int)LHSWidth)
       return findScalarElement(SVI->getOperand(0), InEl);
     return findScalarElement(SVI->getOperand(1), InEl - LHSWidth);
   }

  // Extract a value from a vector add operation with a constant zero.
  // TODO: Use getBinOpIdentity() to generalize this.
  Value *Val; Constant *C;
  if (match(V, m_Add(m_Value(Val), m_Constant(C))))
    if (Constant *Elt = C->getAggregateElement(EltNo))
      if (Elt->isNullValue())
        return findScalarElement(Val, EltNo);

  // Otherwise, we don't know.
  return nullptr;
}

int llvm::getSplatIndex(ArrayRef<int> Mask) {
  int SplatIndex = -1;
  for (int M : Mask) {
    // Ignore invalid (undefined) mask elements.
    if (M < 0)
      continue;

    // There can be only 1 non-negative mask element value if this is a splat.
    if (SplatIndex != -1 && SplatIndex != M)
      return -1;

    // Initialize the splat index to the 1st non-negative mask element.
SplatIndex = M; } assert((SplatIndex == -1 || SplatIndex >= 0) && "Negative index?"); return SplatIndex; } /// Get splat value if the input is a splat vector or return nullptr. /// This function is not fully general. It checks only 2 cases: /// the input value is (1) a splat constant vector or (2) a sequence /// of instructions that broadcasts a scalar at element 0. const llvm::Value *llvm::getSplatValue(const Value *V) { if (isa(V->getType())) if (auto *C = dyn_cast(V)) return C->getSplatValue(); // shuf (inselt ?, Splat, 0), ?, <0, undef, 0, ...> Value *Splat; if (match(V, m_ShuffleVector( m_InsertElement(m_Value(), m_Value(Splat), m_ZeroInt()), m_Value(), m_ZeroMask()))) return Splat; return nullptr; } // This setting is based on its counterpart in value tracking, but it could be // adjusted if needed. const unsigned MaxDepth = 6; bool llvm::isSplatValue(const Value *V, int Index, unsigned Depth) { assert(Depth <= MaxDepth && "Limit Search Depth"); if (isa(V->getType())) { if (isa(V)) return true; // FIXME: We can allow undefs, but if Index was specified, we may want to // check that the constant is defined at that index. if (auto *C = dyn_cast(V)) return C->getSplatValue() != nullptr; } if (auto *Shuf = dyn_cast(V)) { // FIXME: We can safely allow undefs here. If Index was specified, we will // check that the mask elt is defined at the required index. if (!is_splat(Shuf->getShuffleMask())) return false; // Match any index. if (Index == -1) return true; // Match a specific element. The mask should be defined at and match the // specified index. return Shuf->getMaskValue(Index) == Index; } // The remaining tests are all recursive, so bail out if we hit the limit. if (Depth++ == MaxDepth) return false; // If both operands of a binop are splats, the result is a splat. Value *X, *Y, *Z; if (match(V, m_BinOp(m_Value(X), m_Value(Y)))) return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth); // If all operands of a select are splats, the result is a splat. if (match(V, m_Select(m_Value(X), m_Value(Y), m_Value(Z)))) return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth) && isSplatValue(Z, Index, Depth); // TODO: Add support for unary ops (fneg), casts, intrinsics (overflow ops). return false; } void llvm::narrowShuffleMaskElts(int Scale, ArrayRef Mask, SmallVectorImpl &ScaledMask) { assert(Scale > 0 && "Unexpected scaling factor"); // Fast-path: if no scaling, then it is just a copy. if (Scale == 1) { ScaledMask.assign(Mask.begin(), Mask.end()); return; } ScaledMask.clear(); for (int MaskElt : Mask) { if (MaskElt >= 0) { assert(((uint64_t)Scale * MaskElt + (Scale - 1)) <= std::numeric_limits::max() && "Overflowed 32-bits"); } for (int SliceElt = 0; SliceElt != Scale; ++SliceElt) ScaledMask.push_back(MaskElt < 0 ? MaskElt : Scale * MaskElt + SliceElt); } } bool llvm::widenShuffleMaskElts(int Scale, ArrayRef Mask, SmallVectorImpl &ScaledMask) { assert(Scale > 0 && "Unexpected scaling factor"); // Fast-path: if no scaling, then it is just a copy. if (Scale == 1) { ScaledMask.assign(Mask.begin(), Mask.end()); return true; } // We must map the original elements down evenly to a type with less elements. int NumElts = Mask.size(); if (NumElts % Scale != 0) return false; ScaledMask.clear(); ScaledMask.reserve(NumElts / Scale); // Step through the input mask by splitting into Scale-sized slices. 
do { ArrayRef MaskSlice = Mask.take_front(Scale); assert((int)MaskSlice.size() == Scale && "Expected Scale-sized slice."); // The first element of the slice determines how we evaluate this slice. int SliceFront = MaskSlice.front(); if (SliceFront < 0) { // Negative values (undef or other "sentinel" values) must be equal across // the entire slice. if (!is_splat(MaskSlice)) return false; ScaledMask.push_back(SliceFront); } else { // A positive mask element must be cleanly divisible. if (SliceFront % Scale != 0) return false; // Elements of the slice must be consecutive. for (int i = 1; i < Scale; ++i) if (MaskSlice[i] != SliceFront + i) return false; ScaledMask.push_back(SliceFront / Scale); } Mask = Mask.drop_front(Scale); } while (!Mask.empty()); assert((int)ScaledMask.size() * Scale == NumElts && "Unexpected scaled mask"); // All elements of the original mask can be scaled down to map to the elements // of a mask with wider elements. return true; } MapVector llvm::computeMinimumValueSizes(ArrayRef Blocks, DemandedBits &DB, const TargetTransformInfo *TTI) { // DemandedBits will give us every value's live-out bits. But we want // to ensure no extra casts would need to be inserted, so every DAG // of connected values must have the same minimum bitwidth. EquivalenceClasses ECs; SmallVector Worklist; SmallPtrSet Roots; SmallPtrSet Visited; DenseMap DBits; SmallPtrSet InstructionSet; MapVector MinBWs; // Determine the roots. We work bottom-up, from truncs or icmps. bool SeenExtFromIllegalType = false; for (auto *BB : Blocks) for (auto &I : *BB) { InstructionSet.insert(&I); if (TTI && (isa(&I) || isa(&I)) && !TTI->isTypeLegal(I.getOperand(0)->getType())) SeenExtFromIllegalType = true; // Only deal with non-vector integers up to 64-bits wide. if ((isa(&I) || isa(&I)) && !I.getType()->isVectorTy() && I.getOperand(0)->getType()->getScalarSizeInBits() <= 64) { // Don't make work for ourselves. If we know the loaded type is legal, // don't add it to the worklist. if (TTI && isa(&I) && TTI->isTypeLegal(I.getType())) continue; Worklist.push_back(&I); Roots.insert(&I); } } // Early exit. if (Worklist.empty() || (TTI && !SeenExtFromIllegalType)) return MinBWs; // Now proceed breadth-first, unioning values together. while (!Worklist.empty()) { Value *Val = Worklist.pop_back_val(); Value *Leader = ECs.getOrInsertLeaderValue(Val); if (Visited.count(Val)) continue; Visited.insert(Val); // Non-instructions terminate a chain successfully. if (!isa(Val)) continue; Instruction *I = cast(Val); // If we encounter a type that is larger than 64 bits, we can't represent // it so bail out. if (DB.getDemandedBits(I).getBitWidth() > 64) return MapVector(); uint64_t V = DB.getDemandedBits(I).getZExtValue(); DBits[Leader] |= V; DBits[I] = V; // Casts, loads and instructions outside of our range terminate a chain // successfully. if (isa(I) || isa(I) || isa(I) || !InstructionSet.count(I)) continue; // Unsafe casts terminate a chain unsuccessfully. We can't do anything // useful with bitcasts, ptrtoints or inttoptrs and it'd be unsafe to // transform anything that relies on them. if (isa(I) || isa(I) || isa(I) || !I->getType()->isIntegerTy()) { DBits[Leader] |= ~0ULL; continue; } // We don't modify the types of PHIs. Reductions will already have been // truncated if possible, and inductions' sizes will have been chosen by // indvars. if (isa(I)) continue; if (DBits[Leader] == ~0ULL) // All bits demanded, no point continuing. 
continue; for (Value *O : cast(I)->operands()) { ECs.unionSets(Leader, O); Worklist.push_back(O); } } // Now we've discovered all values, walk them to see if there are // any users we didn't see. If there are, we can't optimize that // chain. for (auto &I : DBits) for (auto *U : I.first->users()) if (U->getType()->isIntegerTy() && DBits.count(U) == 0) DBits[ECs.getOrInsertLeaderValue(I.first)] |= ~0ULL; for (auto I = ECs.begin(), E = ECs.end(); I != E; ++I) { uint64_t LeaderDemandedBits = 0; for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME; ++MI) LeaderDemandedBits |= DBits[*MI]; uint64_t MinBW = (sizeof(LeaderDemandedBits) * 8) - llvm::countLeadingZeros(LeaderDemandedBits); // Round up to a power of 2 if (!isPowerOf2_64((uint64_t)MinBW)) MinBW = NextPowerOf2(MinBW); // We don't modify the types of PHIs. Reductions will already have been // truncated if possible, and inductions' sizes will have been chosen by // indvars. // If we are required to shrink a PHI, abandon this entire equivalence class. bool Abort = false; for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME; ++MI) if (isa(*MI) && MinBW < (*MI)->getType()->getScalarSizeInBits()) { Abort = true; break; } if (Abort) continue; for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME; ++MI) { if (!isa(*MI)) continue; Type *Ty = (*MI)->getType(); if (Roots.count(*MI)) Ty = cast(*MI)->getOperand(0)->getType(); if (MinBW < Ty->getScalarSizeInBits()) MinBWs[cast(*MI)] = MinBW; } } return MinBWs; } /// Add all access groups in @p AccGroups to @p List. template static void addToAccessGroupList(ListT &List, MDNode *AccGroups) { // Interpret an access group as a list containing itself. if (AccGroups->getNumOperands() == 0) { assert(isValidAsAccessGroup(AccGroups) && "Node must be an access group"); List.insert(AccGroups); return; } for (auto &AccGroupListOp : AccGroups->operands()) { auto *Item = cast(AccGroupListOp.get()); assert(isValidAsAccessGroup(Item) && "List item must be an access group"); List.insert(Item); } } MDNode *llvm::uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2) { if (!AccGroups1) return AccGroups2; if (!AccGroups2) return AccGroups1; if (AccGroups1 == AccGroups2) return AccGroups1; SmallSetVector Union; addToAccessGroupList(Union, AccGroups1); addToAccessGroupList(Union, AccGroups2); if (Union.size() == 0) return nullptr; if (Union.size() == 1) return cast(Union.front()); LLVMContext &Ctx = AccGroups1->getContext(); return MDNode::get(Ctx, Union.getArrayRef()); } MDNode *llvm::intersectAccessGroups(const Instruction *Inst1, const Instruction *Inst2) { bool MayAccessMem1 = Inst1->mayReadOrWriteMemory(); bool MayAccessMem2 = Inst2->mayReadOrWriteMemory(); if (!MayAccessMem1 && !MayAccessMem2) return nullptr; if (!MayAccessMem1) return Inst2->getMetadata(LLVMContext::MD_access_group); if (!MayAccessMem2) return Inst1->getMetadata(LLVMContext::MD_access_group); MDNode *MD1 = Inst1->getMetadata(LLVMContext::MD_access_group); MDNode *MD2 = Inst2->getMetadata(LLVMContext::MD_access_group); if (!MD1 || !MD2) return nullptr; if (MD1 == MD2) return MD1; // Use set for scalable 'contains' check. 
SmallPtrSet AccGroupSet2; addToAccessGroupList(AccGroupSet2, MD2); SmallVector Intersection; if (MD1->getNumOperands() == 0) { assert(isValidAsAccessGroup(MD1) && "Node must be an access group"); if (AccGroupSet2.count(MD1)) Intersection.push_back(MD1); } else { for (const MDOperand &Node : MD1->operands()) { auto *Item = cast(Node.get()); assert(isValidAsAccessGroup(Item) && "List item must be an access group"); if (AccGroupSet2.count(Item)) Intersection.push_back(Item); } } if (Intersection.size() == 0) return nullptr; if (Intersection.size() == 1) return cast(Intersection.front()); LLVMContext &Ctx = Inst1->getContext(); return MDNode::get(Ctx, Intersection); } /// \returns \p I after propagating metadata from \p VL. Instruction *llvm::propagateMetadata(Instruction *Inst, ArrayRef VL) { Instruction *I0 = cast(VL[0]); SmallVector, 4> Metadata; I0->getAllMetadataOtherThanDebugLoc(Metadata); for (auto Kind : {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope, LLVMContext::MD_noalias, LLVMContext::MD_fpmath, LLVMContext::MD_nontemporal, LLVMContext::MD_invariant_load, LLVMContext::MD_access_group}) { MDNode *MD = I0->getMetadata(Kind); for (int J = 1, E = VL.size(); MD && J != E; ++J) { const Instruction *IJ = cast(VL[J]); MDNode *IMD = IJ->getMetadata(Kind); switch (Kind) { case LLVMContext::MD_tbaa: MD = MDNode::getMostGenericTBAA(MD, IMD); break; case LLVMContext::MD_alias_scope: MD = MDNode::getMostGenericAliasScope(MD, IMD); break; case LLVMContext::MD_fpmath: MD = MDNode::getMostGenericFPMath(MD, IMD); break; case LLVMContext::MD_noalias: case LLVMContext::MD_nontemporal: case LLVMContext::MD_invariant_load: MD = MDNode::intersect(MD, IMD); break; case LLVMContext::MD_access_group: MD = intersectAccessGroups(Inst, IJ); break; default: llvm_unreachable("unhandled metadata"); } } Inst->setMetadata(Kind, MD); } return Inst; } Constant * llvm::createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF, const InterleaveGroup &Group) { // All 1's means mask is not needed. if (Group.getNumMembers() == Group.getFactor()) return nullptr; // TODO: support reversed access. assert(!Group.isReverse() && "Reversed group not supported."); SmallVector Mask; for (unsigned i = 0; i < VF; i++) for (unsigned j = 0; j < Group.getFactor(); ++j) { unsigned HasMember = Group.getMember(j) ? 1 : 0; Mask.push_back(Builder.getInt1(HasMember)); } return ConstantVector::get(Mask); } llvm::SmallVector llvm::createReplicatedMask(unsigned ReplicationFactor, unsigned VF) { SmallVector MaskVec; for (unsigned i = 0; i < VF; i++) for (unsigned j = 0; j < ReplicationFactor; j++) MaskVec.push_back(i); return MaskVec; } llvm::SmallVector llvm::createInterleaveMask(unsigned VF, unsigned NumVecs) { SmallVector Mask; for (unsigned i = 0; i < VF; i++) for (unsigned j = 0; j < NumVecs; j++) Mask.push_back(j * VF + i); return Mask; } llvm::SmallVector llvm::createStrideMask(unsigned Start, unsigned Stride, unsigned VF) { SmallVector Mask; for (unsigned i = 0; i < VF; i++) Mask.push_back(Start + i * Stride); return Mask; } llvm::SmallVector llvm::createSequentialMask(unsigned Start, unsigned NumInts, unsigned NumUndefs) { SmallVector Mask; for (unsigned i = 0; i < NumInts; i++) Mask.push_back(Start + i); for (unsigned i = 0; i < NumUndefs; i++) Mask.push_back(-1); return Mask; } /// A helper function for concatenating vectors. This function concatenates two /// vectors having the same element type. If the second vector has fewer /// elements than the first, it is padded with undefs. 
static Value *concatenateTwoVectors(IRBuilderBase &Builder, Value *V1, Value *V2) { VectorType *VecTy1 = dyn_cast(V1->getType()); VectorType *VecTy2 = dyn_cast(V2->getType()); assert(VecTy1 && VecTy2 && VecTy1->getScalarType() == VecTy2->getScalarType() && "Expect two vectors with the same element type"); unsigned NumElts1 = VecTy1->getNumElements(); unsigned NumElts2 = VecTy2->getNumElements(); assert(NumElts1 >= NumElts2 && "Unexpect the first vector has less elements"); if (NumElts1 > NumElts2) { // Extend with UNDEFs. V2 = Builder.CreateShuffleVector( V2, UndefValue::get(VecTy2), createSequentialMask(0, NumElts2, NumElts1 - NumElts2)); } return Builder.CreateShuffleVector( V1, V2, createSequentialMask(0, NumElts1 + NumElts2, 0)); } Value *llvm::concatenateVectors(IRBuilderBase &Builder, ArrayRef Vecs) { unsigned NumVecs = Vecs.size(); assert(NumVecs > 1 && "Should be at least two vectors"); SmallVector ResList; ResList.append(Vecs.begin(), Vecs.end()); do { SmallVector TmpList; for (unsigned i = 0; i < NumVecs - 1; i += 2) { Value *V0 = ResList[i], *V1 = ResList[i + 1]; assert((V0->getType() == V1->getType() || i == NumVecs - 2) && "Only the last vector may have a different type"); TmpList.push_back(concatenateTwoVectors(Builder, V0, V1)); } // Push the last vector if the total number of vectors is odd. if (NumVecs % 2 != 0) TmpList.push_back(ResList[NumVecs - 1]); ResList = TmpList; NumVecs = ResList.size(); } while (NumVecs > 1); return ResList[0]; } bool llvm::maskIsAllZeroOrUndef(Value *Mask) { auto *ConstMask = dyn_cast(Mask); if (!ConstMask) return false; if (ConstMask->isNullValue() || isa(ConstMask)) return true; for (unsigned I = 0, E = cast(ConstMask->getType())->getNumElements(); I != E; ++I) { if (auto *MaskElt = ConstMask->getAggregateElement(I)) if (MaskElt->isNullValue() || isa(MaskElt)) continue; return false; } return true; } bool llvm::maskIsAllOneOrUndef(Value *Mask) { auto *ConstMask = dyn_cast(Mask); if (!ConstMask) return false; if (ConstMask->isAllOnesValue() || isa(ConstMask)) return true; for (unsigned I = 0, E = cast(ConstMask->getType())->getNumElements(); I != E; ++I) { if (auto *MaskElt = ConstMask->getAggregateElement(I)) if (MaskElt->isAllOnesValue() || isa(MaskElt)) continue; return false; } return true; } /// TODO: This is a lot like known bits, but for /// vectors. Is there something we can common this with? APInt llvm::possiblyDemandedEltsInMask(Value *Mask) { const unsigned VWidth = cast(Mask->getType())->getNumElements(); APInt DemandedElts = APInt::getAllOnesValue(VWidth); if (auto *CV = dyn_cast(Mask)) for (unsigned i = 0; i < VWidth; i++) if (CV->getAggregateElement(i)->isNullValue()) DemandedElts.clearBit(i); return DemandedElts; } bool InterleavedAccessInfo::isStrided(int Stride) { unsigned Factor = std::abs(Stride); return Factor >= 2 && Factor <= MaxInterleaveGroupFactor; } void InterleavedAccessInfo::collectConstStrideAccesses( MapVector &AccessStrideInfo, const ValueToValueMap &Strides) { auto &DL = TheLoop->getHeader()->getModule()->getDataLayout(); // Since it's desired that the load/store instructions be maintained in // "program order" for the interleaved access analysis, we have to visit the // blocks in the loop in reverse postorder (i.e., in a topological order). // Such an ordering will ensure that any load/store that may be executed // before a second load/store will precede the second load/store in // AccessStrideInfo. 
LoopBlocksDFS DFS(TheLoop); DFS.perform(LI); for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) for (auto &I : *BB) { auto *LI = dyn_cast(&I); auto *SI = dyn_cast(&I); if (!LI && !SI) continue; Value *Ptr = getLoadStorePointerOperand(&I); // We don't check wrapping here because we don't know yet if Ptr will be // part of a full group or a group with gaps. Checking wrapping for all // pointers (even those that end up in groups with no gaps) will be overly // conservative. For full groups, wrapping should be ok since if we would // wrap around the address space we would do a memory access at nullptr // even without the transformation. The wrapping checks are therefore // deferred until after we've formed the interleaved groups. int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides, /*Assume=*/true, /*ShouldCheckWrap=*/false); const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr); PointerType *PtrTy = cast(Ptr->getType()); uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType()); // An alignment of 0 means target ABI alignment. MaybeAlign Alignment = MaybeAlign(getLoadStoreAlignment(&I)); if (!Alignment) Alignment = Align(DL.getABITypeAlignment(PtrTy->getElementType())); AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, *Alignment); } } // Analyze interleaved accesses and collect them into interleaved load and // store groups. // // When generating code for an interleaved load group, we effectively hoist all // loads in the group to the location of the first load in program order. When // generating code for an interleaved store group, we sink all stores to the // location of the last store. This code motion can change the order of load // and store instructions and may break dependences. // // The code generation strategy mentioned above ensures that we won't violate // any write-after-read (WAR) dependences. // // E.g., for the WAR dependence: a = A[i]; // (1) // A[i] = b; // (2) // // The store group of (2) is always inserted at or below (2), and the load // group of (1) is always inserted at or above (1). Thus, the instructions will // never be reordered. All other dependences are checked to ensure the // correctness of the instruction reordering. // // The algorithm visits all memory accesses in the loop in bottom-up program // order. Program order is established by traversing the blocks in the loop in // reverse postorder when collecting the accesses. // // We visit the memory accesses in bottom-up order because it can simplify the // construction of store groups in the presence of write-after-write (WAW) // dependences. // // E.g., for the WAW dependence: A[i] = a; // (1) // A[i] = b; // (2) // A[i + 1] = c; // (3) // // We will first create a store group with (3) and (2). (1) can't be added to // this group because it and (2) are dependent. However, (1) can be grouped // with other accesses that may precede it in program order. Note that a // bottom-up order does not imply that WAW dependences should not be checked. void InterleavedAccessInfo::analyzeInterleaving( bool EnablePredicatedInterleavedMemAccesses) { LLVM_DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n"); const ValueToValueMap &Strides = LAI->getSymbolicStrides(); // Holds all accesses with a constant stride. MapVector AccessStrideInfo; collectConstStrideAccesses(AccessStrideInfo, Strides); if (AccessStrideInfo.empty()) return; // Collect the dependences in the loop. collectDependences(); // Holds all interleaved store groups temporarily. 
SmallSetVector *, 4> StoreGroups; // Holds all interleaved load groups temporarily. SmallSetVector *, 4> LoadGroups; // Search in bottom-up program order for pairs of accesses (A and B) that can // form interleaved load or store groups. In the algorithm below, access A // precedes access B in program order. We initialize a group for B in the // outer loop of the algorithm, and then in the inner loop, we attempt to // insert each A into B's group if: // // 1. A and B have the same stride, // 2. A and B have the same memory object size, and // 3. A belongs in B's group according to its distance from B. // // Special care is taken to ensure group formation will not break any // dependences. for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend(); BI != E; ++BI) { Instruction *B = BI->first; StrideDescriptor DesB = BI->second; // Initialize a group for B if it has an allowable stride. Even if we don't // create a group for B, we continue with the bottom-up algorithm to ensure // we don't break any of B's dependences. InterleaveGroup *Group = nullptr; if (isStrided(DesB.Stride) && (!isPredicated(B->getParent()) || EnablePredicatedInterleavedMemAccesses)) { Group = getInterleaveGroup(B); if (!Group) { LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B << '\n'); Group = createInterleaveGroup(B, DesB.Stride, DesB.Alignment); } if (B->mayWriteToMemory()) StoreGroups.insert(Group); else LoadGroups.insert(Group); } for (auto AI = std::next(BI); AI != E; ++AI) { Instruction *A = AI->first; StrideDescriptor DesA = AI->second; // Our code motion strategy implies that we can't have dependences // between accesses in an interleaved group and other accesses located // between the first and last member of the group. Note that this also // means that a group can't have more than one member at a given offset. // The accesses in a group can have dependences with other accesses, but // we must ensure we don't extend the boundaries of the group such that // we encompass those dependent accesses. // // For example, assume we have the sequence of accesses shown below in a // stride-2 loop: // // (1, 2) is a group | A[i] = a; // (1) // | A[i-1] = b; // (2) | // A[i-3] = c; // (3) // A[i] = d; // (4) | (2, 4) is not a group // // Because accesses (2) and (3) are dependent, we can group (2) with (1) // but not with (4). If we did, the dependent access (3) would be within // the boundaries of the (2, 4) group. if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) { // If a dependence exists and A is already in a group, we know that A // must be a store since A precedes B and WAR dependences are allowed. // Thus, A would be sunk below B. We release A's group to prevent this // illegal code motion. A will then be free to form another group with // instructions that precede it. if (isInterleaved(A)) { InterleaveGroup *StoreGroup = getInterleaveGroup(A); LLVM_DEBUG(dbgs() << "LV: Invalidated store group due to " "dependence between " << *A << " and "<< *B << '\n'); StoreGroups.remove(StoreGroup); releaseGroup(StoreGroup); } // If a dependence exists and A is not already in a group (or it was // and we just released it), B might be hoisted above A (if B is a // load) or another store might be sunk below A (if B is a store). In // either case, we can't add additional instructions to B's group. B // will only form a group with instructions that it precedes. break; } // At this point, we've checked for illegal code motion. If either A or B // isn't strided, there's nothing left to do. 
if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride)) continue; // Ignore A if it's already in a group or isn't the same kind of memory // operation as B. // Note that mayReadFromMemory() isn't mutually exclusive to // mayWriteToMemory in the case of atomic loads. We shouldn't see those // here, canVectorizeMemory() should have returned false - except for the // case we asked for optimization remarks. if (isInterleaved(A) || (A->mayReadFromMemory() != B->mayReadFromMemory()) || (A->mayWriteToMemory() != B->mayWriteToMemory())) continue; // Check rules 1 and 2. Ignore A if its stride or size is different from // that of B. if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size) continue; // Ignore A if the memory object of A and B don't belong to the same // address space if (getLoadStoreAddressSpace(A) != getLoadStoreAddressSpace(B)) continue; // Calculate the distance from A to B. const SCEVConstant *DistToB = dyn_cast( PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev)); if (!DistToB) continue; int64_t DistanceToB = DistToB->getAPInt().getSExtValue(); // Check rule 3. Ignore A if its distance to B is not a multiple of the // size. if (DistanceToB % static_cast(DesB.Size)) continue; // All members of a predicated interleave-group must have the same predicate, // and currently must reside in the same BB. BasicBlock *BlockA = A->getParent(); BasicBlock *BlockB = B->getParent(); if ((isPredicated(BlockA) || isPredicated(BlockB)) && (!EnablePredicatedInterleavedMemAccesses || BlockA != BlockB)) continue; // The index of A is the index of B plus A's distance to B in multiples // of the size. int IndexA = Group->getIndex(B) + DistanceToB / static_cast(DesB.Size); // Try to insert A into B's group. if (Group->insertMember(A, IndexA, DesA.Alignment)) { LLVM_DEBUG(dbgs() << "LV: Inserted:" << *A << '\n' << " into the interleave group with" << *B << '\n'); InterleaveGroupMap[A] = Group; // Set the first load in program order as the insert position. if (A->mayReadFromMemory()) Group->setInsertPos(A); } } // Iteration over A accesses. } // Iteration over B accesses. // Remove interleaved store groups with gaps. for (auto *Group : StoreGroups) if (Group->getNumMembers() != Group->getFactor()) { LLVM_DEBUG( dbgs() << "LV: Invalidate candidate interleaved store group due " "to gaps.\n"); releaseGroup(Group); } // Remove interleaved groups with gaps (currently only loads) whose memory // accesses may wrap around. We have to revisit the getPtrStride analysis, // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does // not check wrapping (see documentation there). // FORNOW we use Assume=false; // TODO: Change to Assume=true but making sure we don't exceed the threshold // of runtime SCEV assumptions checks (thereby potentially failing to // vectorize altogether). // Additional optional optimizations: // TODO: If we are peeling the loop and we know that the first pointer doesn't // wrap then we can deduce that all pointers in the group don't wrap. // This means that we can forcefully peel the loop in order to only have to // check the first pointer for no-wrap. When we'll change to use Assume=true // we'll only need at most one runtime check per interleaved group. for (auto *Group : LoadGroups) { // Case 1: A full group. Can Skip the checks; For full groups, if the wide // load would wrap around the address space we would do a memory access at // nullptr even without the transformation. 
if (Group->getNumMembers() == Group->getFactor()) continue; // Case 2: If first and last members of the group don't wrap this implies // that all the pointers in the group don't wrap. // So we check only group member 0 (which is always guaranteed to exist), // and group member Factor - 1; If the latter doesn't exist we rely on // peeling (if it is a non-reversed accsess -- see Case 3). Value *FirstMemberPtr = getLoadStorePointerOperand(Group->getMember(0)); if (!getPtrStride(PSE, FirstMemberPtr, TheLoop, Strides, /*Assume=*/false, /*ShouldCheckWrap=*/true)) { LLVM_DEBUG( dbgs() << "LV: Invalidate candidate interleaved group due to " "first group member potentially pointer-wrapping.\n"); releaseGroup(Group); continue; } Instruction *LastMember = Group->getMember(Group->getFactor() - 1); if (LastMember) { Value *LastMemberPtr = getLoadStorePointerOperand(LastMember); if (!getPtrStride(PSE, LastMemberPtr, TheLoop, Strides, /*Assume=*/false, /*ShouldCheckWrap=*/true)) { LLVM_DEBUG( dbgs() << "LV: Invalidate candidate interleaved group due to " "last group member potentially pointer-wrapping.\n"); releaseGroup(Group); } } else { // Case 3: A non-reversed interleaved load group with gaps: We need // to execute at least one scalar epilogue iteration. This will ensure // we don't speculatively access memory out-of-bounds. We only need // to look for a member at index factor - 1, since every group must have // a member at index zero. if (Group->isReverse()) { LLVM_DEBUG( dbgs() << "LV: Invalidate candidate interleaved group due to " "a reverse access with gaps.\n"); releaseGroup(Group); continue; } LLVM_DEBUG( dbgs() << "LV: Interleaved group requires epilogue iteration.\n"); RequiresScalarEpilogue = true; } } } void InterleavedAccessInfo::invalidateGroupsRequiringScalarEpilogue() { // If no group had triggered the requirement to create an epilogue loop, // there is nothing to do. if (!requiresScalarEpilogue()) return; bool ReleasedGroup = false; // Release groups requiring scalar epilogues. Note that this also removes them // from InterleaveGroups. for (auto *Group : make_early_inc_range(InterleaveGroups)) { if (!Group->requiresScalarEpilogue()) continue; LLVM_DEBUG( dbgs() << "LV: Invalidate candidate interleaved group due to gaps that " "require a scalar epilogue (not allowed under optsize) and cannot " "be masked (not enabled). 
\n"); releaseGroup(Group); ReleasedGroup = true; } assert(ReleasedGroup && "At least one group must be invalidated, as a " "scalar epilogue was required"); (void)ReleasedGroup; RequiresScalarEpilogue = false; } template void InterleaveGroup::addMetadata(InstT *NewInst) const { llvm_unreachable("addMetadata can only be used for Instruction"); } namespace llvm { template <> void InterleaveGroup::addMetadata(Instruction *NewInst) const { SmallVector VL; std::transform(Members.begin(), Members.end(), std::back_inserter(VL), [](std::pair p) { return p.second; }); propagateMetadata(NewInst, VL); } } void VFABI::getVectorVariantNames( const CallInst &CI, SmallVectorImpl &VariantMappings) { const StringRef S = CI.getAttribute(AttributeList::FunctionIndex, VFABI::MappingsAttrName) .getValueAsString(); if (S.empty()) return; SmallVector ListAttr; S.split(ListAttr, ","); for (auto &S : SetVector(ListAttr.begin(), ListAttr.end())) { #ifndef NDEBUG LLVM_DEBUG(dbgs() << "VFABI: adding mapping '" << S << "'\n"); Optional Info = VFABI::tryDemangleForVFABI(S, *(CI.getModule())); assert(Info.hasValue() && "Invalid name for a VFABI variant."); assert(CI.getModule()->getFunction(Info.getValue().VectorName) && "Vector function is missing."); #endif VariantMappings.push_back(std::string(S)); } } bool VFShape::hasValidParameterList() const { for (unsigned Pos = 0, NumParams = Parameters.size(); Pos < NumParams; ++Pos) { assert(Parameters[Pos].ParamPos == Pos && "Broken parameter list."); switch (Parameters[Pos].ParamKind) { default: // Nothing to check. break; case VFParamKind::OMP_Linear: case VFParamKind::OMP_LinearRef: case VFParamKind::OMP_LinearVal: case VFParamKind::OMP_LinearUVal: // Compile time linear steps must be non-zero. if (Parameters[Pos].LinearStepOrPos == 0) return false; break; case VFParamKind::OMP_LinearPos: case VFParamKind::OMP_LinearRefPos: case VFParamKind::OMP_LinearValPos: case VFParamKind::OMP_LinearUValPos: // The runtime linear step must be referring to some other // parameters in the signature. if (Parameters[Pos].LinearStepOrPos >= int(NumParams)) return false; // The linear step parameter must be marked as uniform. if (Parameters[Parameters[Pos].LinearStepOrPos].ParamKind != VFParamKind::OMP_Uniform) return false; // The linear step parameter can't point at itself. if (Parameters[Pos].LinearStepOrPos == int(Pos)) return false; break; case VFParamKind::GlobalPredicate: // The global predicate must be the unique. Can be placed anywhere in the // signature. for (unsigned NextPos = Pos + 1; NextPos < NumParams; ++NextPos) if (Parameters[NextPos].ParamKind == VFParamKind::GlobalPredicate) return false; break; } } return true; } diff --git a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp index feb618383e74..b2dc7259e139 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp @@ -1,2326 +1,2331 @@ //===- InstCombineVectorOps.cpp -------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file implements instcombine for ExtractElement, InsertElement and // ShuffleVector. 
// //===----------------------------------------------------------------------===// #include "InstCombineInternal.h" #include "llvm/ADT/APInt.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/VectorUtils.h" #include "llvm/IR/BasicBlock.h" #include "llvm/IR/Constant.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/InstrTypes.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Operator.h" #include "llvm/IR/PatternMatch.h" #include "llvm/IR/Type.h" #include "llvm/IR/User.h" #include "llvm/IR/Value.h" #include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Transforms/InstCombine/InstCombineWorklist.h" #include #include #include #include using namespace llvm; using namespace PatternMatch; #define DEBUG_TYPE "instcombine" /// Return true if the value is cheaper to scalarize than it is to leave as a /// vector operation. IsConstantExtractIndex indicates whether we are extracting /// one known element from a vector constant. /// /// FIXME: It's possible to create more instructions than previously existed. static bool cheapToScalarize(Value *V, bool IsConstantExtractIndex) { // If we can pick a scalar constant value out of a vector, that is free. if (auto *C = dyn_cast(V)) return IsConstantExtractIndex || C->getSplatValue(); // An insertelement to the same constant index as our extract will simplify // to the scalar inserted element. An insertelement to a different constant // index is irrelevant to our extract. if (match(V, m_InsertElement(m_Value(), m_Value(), m_ConstantInt()))) return IsConstantExtractIndex; if (match(V, m_OneUse(m_Load(m_Value())))) return true; if (match(V, m_OneUse(m_UnOp()))) return true; Value *V0, *V1; if (match(V, m_OneUse(m_BinOp(m_Value(V0), m_Value(V1))))) if (cheapToScalarize(V0, IsConstantExtractIndex) || cheapToScalarize(V1, IsConstantExtractIndex)) return true; CmpInst::Predicate UnusedPred; if (match(V, m_OneUse(m_Cmp(UnusedPred, m_Value(V0), m_Value(V1))))) if (cheapToScalarize(V0, IsConstantExtractIndex) || cheapToScalarize(V1, IsConstantExtractIndex)) return true; return false; } // If we have a PHI node with a vector type that is only used to feed // itself and be an operand of extractelement at a constant location, // try to replace the PHI of the vector type with a PHI of a scalar type. Instruction *InstCombiner::scalarizePHI(ExtractElementInst &EI, PHINode *PN) { SmallVector Extracts; // The users we want the PHI to have are: // 1) The EI ExtractElement (we already know this) // 2) Possibly more ExtractElements with the same index. // 3) Another operand, which will feed back into the PHI. Instruction *PHIUser = nullptr; for (auto U : PN->users()) { if (ExtractElementInst *EU = dyn_cast(U)) { if (EI.getIndexOperand() == EU->getIndexOperand()) Extracts.push_back(EU); else return nullptr; } else if (!PHIUser) { PHIUser = cast(U); } else { return nullptr; } } if (!PHIUser) return nullptr; // Verify that this PHI user has one use, which is the PHI itself, // and that it is a binary operation which is cheap to scalarize. // otherwise return nullptr. 
if (!PHIUser->hasOneUse() || !(PHIUser->user_back() == PN) || !(isa(PHIUser)) || !cheapToScalarize(PHIUser, true)) return nullptr; // Create a scalar PHI node that will replace the vector PHI node // just before the current PHI node. PHINode *scalarPHI = cast(InsertNewInstWith( PHINode::Create(EI.getType(), PN->getNumIncomingValues(), ""), *PN)); // Scalarize each PHI operand. for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) { Value *PHIInVal = PN->getIncomingValue(i); BasicBlock *inBB = PN->getIncomingBlock(i); Value *Elt = EI.getIndexOperand(); // If the operand is the PHI induction variable: if (PHIInVal == PHIUser) { // Scalarize the binary operation. Its first operand is the // scalar PHI, and the second operand is extracted from the other // vector operand. BinaryOperator *B0 = cast(PHIUser); unsigned opId = (B0->getOperand(0) == PN) ? 1 : 0; Value *Op = InsertNewInstWith( ExtractElementInst::Create(B0->getOperand(opId), Elt, B0->getOperand(opId)->getName() + ".Elt"), *B0); Value *newPHIUser = InsertNewInstWith( BinaryOperator::CreateWithCopiedFlags(B0->getOpcode(), scalarPHI, Op, B0), *B0); scalarPHI->addIncoming(newPHIUser, inBB); } else { // Scalarize PHI input: Instruction *newEI = ExtractElementInst::Create(PHIInVal, Elt, ""); // Insert the new instruction into the predecessor basic block. Instruction *pos = dyn_cast(PHIInVal); BasicBlock::iterator InsertPos; if (pos && !isa(pos)) { InsertPos = ++pos->getIterator(); } else { InsertPos = inBB->getFirstInsertionPt(); } InsertNewInstWith(newEI, *InsertPos); scalarPHI->addIncoming(newEI, inBB); } } for (auto E : Extracts) replaceInstUsesWith(*E, scalarPHI); return &EI; } static Instruction *foldBitcastExtElt(ExtractElementInst &Ext, InstCombiner::BuilderTy &Builder, bool IsBigEndian) { Value *X; uint64_t ExtIndexC; if (!match(Ext.getVectorOperand(), m_BitCast(m_Value(X))) || !X->getType()->isVectorTy() || !match(Ext.getIndexOperand(), m_ConstantInt(ExtIndexC))) return nullptr; // If this extractelement is using a bitcast from a vector of the same number // of elements, see if we can find the source element from the source vector: // extelt (bitcast VecX), IndexC --> bitcast X[IndexC] auto *SrcTy = cast(X->getType()); Type *DestTy = Ext.getType(); unsigned NumSrcElts = SrcTy->getNumElements(); unsigned NumElts = Ext.getVectorOperandType()->getNumElements(); if (NumSrcElts == NumElts) if (Value *Elt = findScalarElement(X, ExtIndexC)) return new BitCastInst(Elt, DestTy); // If the source elements are wider than the destination, try to shift and // truncate a subset of scalar bits of an insert op. if (NumSrcElts < NumElts) { Value *Scalar; uint64_t InsIndexC; if (!match(X, m_InsertElement(m_Value(), m_Value(Scalar), m_ConstantInt(InsIndexC)))) return nullptr; // The extract must be from the subset of vector elements that we inserted // into. Example: if we inserted element 1 of a <2 x i64> and we are // extracting an i16 (narrowing ratio = 4), then this extract must be from 1 // of elements 4-7 of the bitcasted vector. unsigned NarrowingRatio = NumElts / NumSrcElts; if (ExtIndexC / NarrowingRatio != InsIndexC) return nullptr; // We are extracting part of the original scalar. How that scalar is // inserted into the vector depends on the endian-ness. Example: // Vector Byte Elt Index: 0 1 2 3 4 5 6 7 // +--+--+--+--+--+--+--+--+ // inselt <2 x i32> V, S, 1: |V0|V1|V2|V3|S0|S1|S2|S3| // extelt <4 x i16> V', 3: | |S2|S3| // +--+--+--+--+--+--+--+--+ // If this is little-endian, S2|S3 are the MSB of the 32-bit 'S' value. 
// If this is big-endian, S2|S3 are the LSB of the 32-bit 'S' value. // In this example, we must right-shift little-endian. Big-endian is just a // truncate. unsigned Chunk = ExtIndexC % NarrowingRatio; if (IsBigEndian) Chunk = NarrowingRatio - 1 - Chunk; // Bail out if this is an FP vector to FP vector sequence. That would take // more instructions than we started with unless there is no shift, and it // may not be handled as well in the backend. bool NeedSrcBitcast = SrcTy->getScalarType()->isFloatingPointTy(); bool NeedDestBitcast = DestTy->isFloatingPointTy(); if (NeedSrcBitcast && NeedDestBitcast) return nullptr; unsigned SrcWidth = SrcTy->getScalarSizeInBits(); unsigned DestWidth = DestTy->getPrimitiveSizeInBits(); unsigned ShAmt = Chunk * DestWidth; // TODO: This limitation is more strict than necessary. We could sum the // number of new instructions and subtract the number eliminated to know if // we can proceed. if (!X->hasOneUse() || !Ext.getVectorOperand()->hasOneUse()) if (NeedSrcBitcast || NeedDestBitcast) return nullptr; if (NeedSrcBitcast) { Type *SrcIntTy = IntegerType::getIntNTy(Scalar->getContext(), SrcWidth); Scalar = Builder.CreateBitCast(Scalar, SrcIntTy); } if (ShAmt) { // Bail out if we could end with more instructions than we started with. if (!Ext.getVectorOperand()->hasOneUse()) return nullptr; Scalar = Builder.CreateLShr(Scalar, ShAmt); } if (NeedDestBitcast) { Type *DestIntTy = IntegerType::getIntNTy(Scalar->getContext(), DestWidth); return new BitCastInst(Builder.CreateTrunc(Scalar, DestIntTy), DestTy); } return new TruncInst(Scalar, DestTy); } return nullptr; } /// Find elements of V demanded by UserInstr. static APInt findDemandedEltsBySingleUser(Value *V, Instruction *UserInstr) { unsigned VWidth = cast(V->getType())->getNumElements(); // Conservatively assume that all elements are needed. APInt UsedElts(APInt::getAllOnesValue(VWidth)); switch (UserInstr->getOpcode()) { case Instruction::ExtractElement: { ExtractElementInst *EEI = cast(UserInstr); assert(EEI->getVectorOperand() == V); ConstantInt *EEIIndexC = dyn_cast(EEI->getIndexOperand()); if (EEIIndexC && EEIIndexC->getValue().ult(VWidth)) { UsedElts = APInt::getOneBitSet(VWidth, EEIIndexC->getZExtValue()); } break; } case Instruction::ShuffleVector: { ShuffleVectorInst *Shuffle = cast(UserInstr); unsigned MaskNumElts = cast(UserInstr->getType())->getNumElements(); UsedElts = APInt(VWidth, 0); for (unsigned i = 0; i < MaskNumElts; i++) { unsigned MaskVal = Shuffle->getMaskValue(i); if (MaskVal == -1u || MaskVal >= 2 * VWidth) continue; if (Shuffle->getOperand(0) == V && (MaskVal < VWidth)) UsedElts.setBit(MaskVal); if (Shuffle->getOperand(1) == V && ((MaskVal >= VWidth) && (MaskVal < 2 * VWidth))) UsedElts.setBit(MaskVal - VWidth); } break; } default: break; } return UsedElts; } /// Find union of elements of V demanded by all its users. /// If it is known by querying findDemandedEltsBySingleUser that /// no user demands an element of V, then the corresponding bit /// remains unset in the returned value. 
static APInt findDemandedEltsByAllUsers(Value *V) {
  unsigned VWidth = cast<VectorType>(V->getType())->getNumElements();
  APInt UnionUsedElts(VWidth, 0);
  for (const Use &U : V->uses()) {
    if (Instruction *I = dyn_cast<Instruction>(U.getUser())) {
      UnionUsedElts |= findDemandedEltsBySingleUser(V, I);
    } else {
      UnionUsedElts = APInt::getAllOnesValue(VWidth);
      break;
    }

    if (UnionUsedElts.isAllOnesValue())
      break;
  }

  return UnionUsedElts;
}

Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
  Value *SrcVec = EI.getVectorOperand();
  Value *Index = EI.getIndexOperand();
  if (Value *V = SimplifyExtractElementInst(SrcVec, Index,
                                            SQ.getWithInstruction(&EI)))
    return replaceInstUsesWith(EI, V);

  // If extracting a specified index from the vector, see if we can recursively
  // find a previously computed scalar that was inserted into the vector.
  auto *IndexC = dyn_cast<ConstantInt>(Index);
  if (IndexC) {
-    unsigned NumElts = EI.getVectorOperandType()->getNumElements();
+    ElementCount EC = EI.getVectorOperandType()->getElementCount();
+    unsigned NumElts = EC.Min;

    // InstSimplify should handle cases where the index is invalid.
-    if (!IndexC->getValue().ule(NumElts))
+    // For fixed-length vector, it's invalid to extract out-of-range element.
+    if (!EC.Scalable && IndexC->getValue().uge(NumElts))
      return nullptr;

    // This instruction only demands the single element from the input vector.
-    if (NumElts != 1) {
+    // Skip for scalable type, the number of elements is unknown at
+    // compile-time.
+    if (!EC.Scalable && NumElts != 1) {
      // If the input vector has a single use, simplify it based on this use
      // property.
      if (SrcVec->hasOneUse()) {
        APInt UndefElts(NumElts, 0);
        APInt DemandedElts(NumElts, 0);
        DemandedElts.setBit(IndexC->getZExtValue());
        if (Value *V = SimplifyDemandedVectorElts(SrcVec, DemandedElts,
                                                  UndefElts))
          return replaceOperand(EI, 0, V);
      } else {
        // If the input vector has multiple uses, simplify it based on a union
        // of all elements used.
        APInt DemandedElts = findDemandedEltsByAllUsers(SrcVec);
        if (!DemandedElts.isAllOnesValue()) {
          APInt UndefElts(NumElts, 0);
          if (Value *V = SimplifyDemandedVectorElts(
                  SrcVec, DemandedElts, UndefElts, 0 /* Depth */,
                  true /* AllowMultipleUsers */)) {
            if (V != SrcVec) {
              SrcVec->replaceAllUsesWith(V);
              return &EI;
            }
          }
        }
      }
    }

    if (Instruction *I = foldBitcastExtElt(EI, Builder, DL.isBigEndian()))
      return I;

    // If there's a vector PHI feeding a scalar use through this extractelement
    // instruction, try to scalarize the PHI.
    if (auto *Phi = dyn_cast<PHINode>(SrcVec))
      if (Instruction *ScalarPHI = scalarizePHI(EI, Phi))
        return ScalarPHI;
  }

  // TODO come up with a n-ary matcher that subsumes both unary and
  // binary matchers.
  UnaryOperator *UO;
  if (match(SrcVec, m_UnOp(UO)) && cheapToScalarize(SrcVec, IndexC)) {
    // extelt (unop X), Index --> unop (extelt X, Index)
    Value *X = UO->getOperand(0);
    Value *E = Builder.CreateExtractElement(X, Index);
    return UnaryOperator::CreateWithCopiedFlags(UO->getOpcode(), E, UO);
  }

  BinaryOperator *BO;
  if (match(SrcVec, m_BinOp(BO)) && cheapToScalarize(SrcVec, IndexC)) {
    // extelt (binop X, Y), Index --> binop (extelt X, Index), (extelt Y, Index)
    Value *X = BO->getOperand(0), *Y = BO->getOperand(1);
    Value *E0 = Builder.CreateExtractElement(X, Index);
    Value *E1 = Builder.CreateExtractElement(Y, Index);
    return BinaryOperator::CreateWithCopiedFlags(BO->getOpcode(), E0, E1, BO);
  }

  Value *X, *Y;
  CmpInst::Predicate Pred;
  if (match(SrcVec, m_Cmp(Pred, m_Value(X), m_Value(Y))) &&
      cheapToScalarize(SrcVec, IndexC)) {
    // extelt (cmp X, Y), Index --> cmp (extelt X, Index), (extelt Y, Index)
    Value *E0 = Builder.CreateExtractElement(X, Index);
    Value *E1 = Builder.CreateExtractElement(Y, Index);
    return CmpInst::Create(cast<CmpInst>(SrcVec)->getOpcode(), Pred, E0, E1);
  }

  if (auto *I = dyn_cast<Instruction>(SrcVec)) {
    if (auto *IE = dyn_cast<InsertElementInst>(I)) {
      // Extracting the inserted element?
      if (IE->getOperand(2) == Index)
        return replaceInstUsesWith(EI, IE->getOperand(1));
      // If the inserted and extracted elements are constants, they must not
      // be the same value, extract from the pre-inserted value instead.
      if (isa<Constant>(IE->getOperand(2)) && IndexC)
        return replaceOperand(EI, 0, IE->getOperand(0));
    } else if (auto *SVI = dyn_cast<ShuffleVectorInst>(I)) {
      // If this is extracting an element from a shufflevector, figure out where
      // it came from and extract from the appropriate input element instead.
-      if (auto *Elt = dyn_cast<ConstantInt>(Index)) {
-        int SrcIdx = SVI->getMaskValue(Elt->getZExtValue());
+      // Restrict the following transformation to fixed-length vector.
+      if (isa<FixedVectorType>(SVI->getType()) && isa<ConstantInt>(Index)) {
+        int SrcIdx =
+            SVI->getMaskValue(cast<ConstantInt>(Index)->getZExtValue());
        Value *Src;
-        unsigned LHSWidth =
-            cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements();
+        unsigned LHSWidth = cast<FixedVectorType>(SVI->getOperand(0)->getType())
+                                ->getNumElements();

        if (SrcIdx < 0)
          return replaceInstUsesWith(EI, UndefValue::get(EI.getType()));
        if (SrcIdx < (int)LHSWidth)
          Src = SVI->getOperand(0);
        else {
          SrcIdx -= LHSWidth;
          Src = SVI->getOperand(1);
        }
        Type *Int32Ty = Type::getInt32Ty(EI.getContext());
-        return ExtractElementInst::Create(Src,
-                                          ConstantInt::get(Int32Ty,
-                                                           SrcIdx, false));
+        return ExtractElementInst::Create(
+            Src, ConstantInt::get(Int32Ty, SrcIdx, false));
      }
    } else if (auto *CI = dyn_cast<CastInst>(I)) {
      // Canonicalize extractelement(cast) -> cast(extractelement).
      // Bitcasts can change the number of vector elements, and they cost
      // nothing.
      if (CI->hasOneUse() && (CI->getOpcode() != Instruction::BitCast)) {
        Value *EE = Builder.CreateExtractElement(CI->getOperand(0), Index);
        return CastInst::Create(CI->getOpcode(), EE, EI.getType());
      }
    }
  }
  return nullptr;
}

/// If V is a shuffle of values that ONLY returns elements from either LHS or
/// RHS, return the shuffle mask and true. Otherwise, return false.
static bool collectSingleShuffleElements(Value *V, Value *LHS, Value *RHS, SmallVectorImpl &Mask) { assert(LHS->getType() == RHS->getType() && "Invalid CollectSingleShuffleElements"); unsigned NumElts = cast(V->getType())->getNumElements(); if (isa(V)) { Mask.assign(NumElts, -1); return true; } if (V == LHS) { for (unsigned i = 0; i != NumElts; ++i) Mask.push_back(i); return true; } if (V == RHS) { for (unsigned i = 0; i != NumElts; ++i) Mask.push_back(i + NumElts); return true; } if (InsertElementInst *IEI = dyn_cast(V)) { // If this is an insert of an extract from some other vector, include it. Value *VecOp = IEI->getOperand(0); Value *ScalarOp = IEI->getOperand(1); Value *IdxOp = IEI->getOperand(2); if (!isa(IdxOp)) return false; unsigned InsertedIdx = cast(IdxOp)->getZExtValue(); if (isa(ScalarOp)) { // inserting undef into vector. // We can handle this if the vector we are inserting into is // transitively ok. if (collectSingleShuffleElements(VecOp, LHS, RHS, Mask)) { // If so, update the mask to reflect the inserted undef. Mask[InsertedIdx] = -1; return true; } } else if (ExtractElementInst *EI = dyn_cast(ScalarOp)){ if (isa(EI->getOperand(1))) { unsigned ExtractedIdx = cast(EI->getOperand(1))->getZExtValue(); unsigned NumLHSElts = cast(LHS->getType())->getNumElements(); // This must be extracting from either LHS or RHS. if (EI->getOperand(0) == LHS || EI->getOperand(0) == RHS) { // We can handle this if the vector we are inserting into is // transitively ok. if (collectSingleShuffleElements(VecOp, LHS, RHS, Mask)) { // If so, update the mask to reflect the inserted value. if (EI->getOperand(0) == LHS) { Mask[InsertedIdx % NumElts] = ExtractedIdx; } else { assert(EI->getOperand(0) == RHS); Mask[InsertedIdx % NumElts] = ExtractedIdx + NumLHSElts; } return true; } } } } } return false; } /// If we have insertion into a vector that is wider than the vector that we /// are extracting from, try to widen the source vector to allow a single /// shufflevector to replace one or more insert/extract pairs. static void replaceExtractElements(InsertElementInst *InsElt, ExtractElementInst *ExtElt, InstCombiner &IC) { VectorType *InsVecType = InsElt->getType(); VectorType *ExtVecType = ExtElt->getVectorOperandType(); unsigned NumInsElts = InsVecType->getNumElements(); unsigned NumExtElts = ExtVecType->getNumElements(); // The inserted-to vector must be wider than the extracted-from vector. if (InsVecType->getElementType() != ExtVecType->getElementType() || NumExtElts >= NumInsElts) return; // Create a shuffle mask to widen the extended-from vector using undefined // values. The mask selects all of the values of the original vector followed // by as many undefined values as needed to create a vector of the same length // as the inserted-to vector. SmallVector ExtendMask; for (unsigned i = 0; i < NumExtElts; ++i) ExtendMask.push_back(i); for (unsigned i = NumExtElts; i < NumInsElts; ++i) ExtendMask.push_back(-1); Value *ExtVecOp = ExtElt->getVectorOperand(); auto *ExtVecOpInst = dyn_cast(ExtVecOp); BasicBlock *InsertionBlock = (ExtVecOpInst && !isa(ExtVecOpInst)) ? ExtVecOpInst->getParent() : ExtElt->getParent(); // TODO: This restriction matches the basic block check below when creating // new extractelement instructions. If that limitation is removed, this one // could also be removed. But for now, we just bail out to ensure that we // will replace the extractelement instruction that is feeding our // insertelement instruction. 
This allows the insertelement to then be // replaced by a shufflevector. If the insertelement is not replaced, we can // induce infinite looping because there's an optimization for extractelement // that will delete our widening shuffle. This would trigger another attempt // here to create that shuffle, and we spin forever. if (InsertionBlock != InsElt->getParent()) return; // TODO: This restriction matches the check in visitInsertElementInst() and // prevents an infinite loop caused by not turning the extract/insert pair // into a shuffle. We really should not need either check, but we're lacking // folds for shufflevectors because we're afraid to generate shuffle masks // that the backend can't handle. if (InsElt->hasOneUse() && isa(InsElt->user_back())) return; auto *WideVec = new ShuffleVectorInst(ExtVecOp, UndefValue::get(ExtVecType), ExtendMask); // Insert the new shuffle after the vector operand of the extract is defined // (as long as it's not a PHI) or at the start of the basic block of the // extract, so any subsequent extracts in the same basic block can use it. // TODO: Insert before the earliest ExtractElementInst that is replaced. if (ExtVecOpInst && !isa(ExtVecOpInst)) WideVec->insertAfter(ExtVecOpInst); else IC.InsertNewInstWith(WideVec, *ExtElt->getParent()->getFirstInsertionPt()); // Replace extracts from the original narrow vector with extracts from the new // wide vector. for (User *U : ExtVecOp->users()) { ExtractElementInst *OldExt = dyn_cast(U); if (!OldExt || OldExt->getParent() != WideVec->getParent()) continue; auto *NewExt = ExtractElementInst::Create(WideVec, OldExt->getOperand(1)); NewExt->insertAfter(OldExt); IC.replaceInstUsesWith(*OldExt, NewExt); } } /// We are building a shuffle to create V, which is a sequence of insertelement, /// extractelement pairs. If PermittedRHS is set, then we must either use it or /// not rely on the second vector source. Return a std::pair containing the /// left and right vectors of the proposed shuffle (or 0), and set the Mask /// parameter as required. /// /// Note: we intentionally don't try to fold earlier shuffles since they have /// often been chosen carefully to be efficiently implementable on the target. using ShuffleOps = std::pair; static ShuffleOps collectShuffleElements(Value *V, SmallVectorImpl &Mask, Value *PermittedRHS, InstCombiner &IC) { assert(V->getType()->isVectorTy() && "Invalid shuffle!"); unsigned NumElts = cast(V->getType())->getNumElements(); if (isa(V)) { Mask.assign(NumElts, -1); return std::make_pair( PermittedRHS ? UndefValue::get(PermittedRHS->getType()) : V, nullptr); } if (isa(V)) { Mask.assign(NumElts, 0); return std::make_pair(V, nullptr); } if (InsertElementInst *IEI = dyn_cast(V)) { // If this is an insert of an extract from some other vector, include it. Value *VecOp = IEI->getOperand(0); Value *ScalarOp = IEI->getOperand(1); Value *IdxOp = IEI->getOperand(2); if (ExtractElementInst *EI = dyn_cast(ScalarOp)) { if (isa(EI->getOperand(1)) && isa(IdxOp)) { unsigned ExtractedIdx = cast(EI->getOperand(1))->getZExtValue(); unsigned InsertedIdx = cast(IdxOp)->getZExtValue(); // Either the extracted from or inserted into vector must be RHSVec, // otherwise we'd end up with a shuffle of three inputs. 
if (EI->getOperand(0) == PermittedRHS || PermittedRHS == nullptr) { Value *RHS = EI->getOperand(0); ShuffleOps LR = collectShuffleElements(VecOp, Mask, RHS, IC); assert(LR.second == nullptr || LR.second == RHS); if (LR.first->getType() != RHS->getType()) { // Although we are giving up for now, see if we can create extracts // that match the inserts for another round of combining. replaceExtractElements(IEI, EI, IC); // We tried our best, but we can't find anything compatible with RHS // further up the chain. Return a trivial shuffle. for (unsigned i = 0; i < NumElts; ++i) Mask[i] = i; return std::make_pair(V, nullptr); } unsigned NumLHSElts = cast(RHS->getType())->getNumElements(); Mask[InsertedIdx % NumElts] = NumLHSElts + ExtractedIdx; return std::make_pair(LR.first, RHS); } if (VecOp == PermittedRHS) { // We've gone as far as we can: anything on the other side of the // extractelement will already have been converted into a shuffle. unsigned NumLHSElts = cast(EI->getOperand(0)->getType())->getNumElements(); for (unsigned i = 0; i != NumElts; ++i) Mask.push_back(i == InsertedIdx ? ExtractedIdx : NumLHSElts + i); return std::make_pair(EI->getOperand(0), PermittedRHS); } // If this insertelement is a chain that comes from exactly these two // vectors, return the vector and the effective shuffle. if (EI->getOperand(0)->getType() == PermittedRHS->getType() && collectSingleShuffleElements(IEI, EI->getOperand(0), PermittedRHS, Mask)) return std::make_pair(EI->getOperand(0), PermittedRHS); } } } // Otherwise, we can't do anything fancy. Return an identity vector. for (unsigned i = 0; i != NumElts; ++i) Mask.push_back(i); return std::make_pair(V, nullptr); } /// Try to find redundant insertvalue instructions, like the following ones: /// %0 = insertvalue { i8, i32 } undef, i8 %x, 0 /// %1 = insertvalue { i8, i32 } %0, i8 %y, 0 /// Here the second instruction inserts values at the same indices, as the /// first one, making the first one redundant. /// It should be transformed to: /// %0 = insertvalue { i8, i32 } undef, i8 %y, 0 Instruction *InstCombiner::visitInsertValueInst(InsertValueInst &I) { bool IsRedundant = false; ArrayRef FirstIndices = I.getIndices(); // If there is a chain of insertvalue instructions (each of them except the // last one has only one use and it's another insertvalue insn from this // chain), check if any of the 'children' uses the same indices as the first // instruction. In this case, the first one is redundant. Value *V = &I; unsigned Depth = 0; while (V->hasOneUse() && Depth < 10) { User *U = V->user_back(); auto UserInsInst = dyn_cast(U); if (!UserInsInst || U->getOperand(0) != V) break; if (UserInsInst->getIndices() == FirstIndices) { IsRedundant = true; break; } V = UserInsInst; Depth++; } if (IsRedundant) return replaceInstUsesWith(I, I.getOperand(0)); return nullptr; } static bool isShuffleEquivalentToSelect(ShuffleVectorInst &Shuf) { // Can not analyze scalable type, the number of elements is not a compile-time // constant. if (isa(Shuf.getOperand(0)->getType())) return false; int MaskSize = Shuf.getShuffleMask().size(); int VecSize = cast(Shuf.getOperand(0)->getType())->getNumElements(); // A vector select does not change the size of the operands. if (MaskSize != VecSize) return false; // Each mask element must be undefined or choose a vector element from one of // the source operands without crossing vector lanes. 
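  // Illustrative example (not from the original source): for 4-element
  // operands, the mask <0, 5, 6, 3> is select-equivalent (lane i takes
  // element i of one operand or the other), whereas <1, 5, 6, 3> is not,
  // because lane 0 crosses lanes.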
for (int i = 0; i != MaskSize; ++i) { int Elt = Shuf.getMaskValue(i); if (Elt != -1 && Elt != i && Elt != i + VecSize) return false; } return true; } /// Turn a chain of inserts that splats a value into an insert + shuffle: /// insertelt(insertelt(insertelt(insertelt X, %k, 0), %k, 1), %k, 2) ... -> /// shufflevector(insertelt(X, %k, 0), undef, zero) static Instruction *foldInsSequenceIntoSplat(InsertElementInst &InsElt) { // We are interested in the last insert in a chain. So if this insert has a // single user and that user is an insert, bail. if (InsElt.hasOneUse() && isa(InsElt.user_back())) return nullptr; VectorType *VecTy = InsElt.getType(); // Can not handle scalable type, the number of elements is not a compile-time // constant. if (isa(VecTy)) return nullptr; unsigned NumElements = cast(VecTy)->getNumElements(); // Do not try to do this for a one-element vector, since that's a nop, // and will cause an inf-loop. if (NumElements == 1) return nullptr; Value *SplatVal = InsElt.getOperand(1); InsertElementInst *CurrIE = &InsElt; SmallBitVector ElementPresent(NumElements, false); InsertElementInst *FirstIE = nullptr; // Walk the chain backwards, keeping track of which indices we inserted into, // until we hit something that isn't an insert of the splatted value. while (CurrIE) { auto *Idx = dyn_cast(CurrIE->getOperand(2)); if (!Idx || CurrIE->getOperand(1) != SplatVal) return nullptr; auto *NextIE = dyn_cast(CurrIE->getOperand(0)); // Check none of the intermediate steps have any additional uses, except // for the root insertelement instruction, which can be re-used, if it // inserts at position 0. if (CurrIE != &InsElt && (!CurrIE->hasOneUse() && (NextIE != nullptr || !Idx->isZero()))) return nullptr; ElementPresent[Idx->getZExtValue()] = true; FirstIE = CurrIE; CurrIE = NextIE; } // If this is just a single insertelement (not a sequence), we are done. if (FirstIE == &InsElt) return nullptr; // If we are not inserting into an undef vector, make sure we've seen an // insert into every element. // TODO: If the base vector is not undef, it might be better to create a splat // and then a select-shuffle (blend) with the base vector. if (!isa(FirstIE->getOperand(0))) if (!ElementPresent.all()) return nullptr; // Create the insert + shuffle. Type *Int32Ty = Type::getInt32Ty(InsElt.getContext()); UndefValue *UndefVec = UndefValue::get(VecTy); Constant *Zero = ConstantInt::get(Int32Ty, 0); if (!cast(FirstIE->getOperand(2))->isZero()) FirstIE = InsertElementInst::Create(UndefVec, SplatVal, Zero, "", &InsElt); // Splat from element 0, but replace absent elements with undef in the mask. SmallVector Mask(NumElements, 0); for (unsigned i = 0; i != NumElements; ++i) if (!ElementPresent[i]) Mask[i] = -1; return new ShuffleVectorInst(FirstIE, UndefVec, Mask); } /// Try to fold an insert element into an existing splat shuffle by changing /// the shuffle's mask to include the index of this insert element. static Instruction *foldInsEltIntoSplat(InsertElementInst &InsElt) { // Check if the vector operand of this insert is a canonical splat shuffle. auto *Shuf = dyn_cast(InsElt.getOperand(0)); if (!Shuf || !Shuf->isZeroEltSplat()) return nullptr; // Bail out early if shuffle is scalable type. The number of elements in // shuffle mask is unknown at compile-time. if (isa(Shuf->getType())) return nullptr; // Check for a constant insertion index. uint64_t IdxC; if (!match(InsElt.getOperand(2), m_ConstantInt(IdxC))) return nullptr; // Check if the splat shuffle's input is the same as this insert's scalar op. 
Value *X = InsElt.getOperand(1); Value *Op0 = Shuf->getOperand(0); if (!match(Op0, m_InsertElement(m_Undef(), m_Specific(X), m_ZeroInt()))) return nullptr; // Replace the shuffle mask element at the index of this insert with a zero. // For example: // inselt (shuf (inselt undef, X, 0), undef, <0,undef,0,undef>), X, 1 // --> shuf (inselt undef, X, 0), undef, <0,0,0,undef> unsigned NumMaskElts = Shuf->getType()->getNumElements(); SmallVector NewMask(NumMaskElts); for (unsigned i = 0; i != NumMaskElts; ++i) NewMask[i] = i == IdxC ? 0 : Shuf->getMaskValue(i); return new ShuffleVectorInst(Op0, UndefValue::get(Op0->getType()), NewMask); } /// Try to fold an extract+insert element into an existing identity shuffle by /// changing the shuffle's mask to include the index of this insert element. static Instruction *foldInsEltIntoIdentityShuffle(InsertElementInst &InsElt) { // Check if the vector operand of this insert is an identity shuffle. auto *Shuf = dyn_cast(InsElt.getOperand(0)); if (!Shuf || !isa(Shuf->getOperand(1)) || !(Shuf->isIdentityWithExtract() || Shuf->isIdentityWithPadding())) return nullptr; // Bail out early if shuffle is scalable type. The number of elements in // shuffle mask is unknown at compile-time. if (isa(Shuf->getType())) return nullptr; // Check for a constant insertion index. uint64_t IdxC; if (!match(InsElt.getOperand(2), m_ConstantInt(IdxC))) return nullptr; // Check if this insert's scalar op is extracted from the identity shuffle's // input vector. Value *Scalar = InsElt.getOperand(1); Value *X = Shuf->getOperand(0); if (!match(Scalar, m_ExtractElement(m_Specific(X), m_SpecificInt(IdxC)))) return nullptr; // Replace the shuffle mask element at the index of this extract+insert with // that same index value. // For example: // inselt (shuf X, IdMask), (extelt X, IdxC), IdxC --> shuf X, IdMask' unsigned NumMaskElts = Shuf->getType()->getNumElements(); SmallVector NewMask(NumMaskElts); ArrayRef OldMask = Shuf->getShuffleMask(); for (unsigned i = 0; i != NumMaskElts; ++i) { if (i != IdxC) { // All mask elements besides the inserted element remain the same. NewMask[i] = OldMask[i]; } else if (OldMask[i] == (int)IdxC) { // If the mask element was already set, there's nothing to do // (demanded elements analysis may unset it later). return nullptr; } else { assert(OldMask[i] == UndefMaskElem && "Unexpected shuffle mask element for identity shuffle"); NewMask[i] = IdxC; } } return new ShuffleVectorInst(X, Shuf->getOperand(1), NewMask); } /// If we have an insertelement instruction feeding into another insertelement /// and the 2nd is inserting a constant into the vector, canonicalize that /// constant insertion before the insertion of a variable: /// /// insertelement (insertelement X, Y, IdxC1), ScalarC, IdxC2 --> /// insertelement (insertelement X, ScalarC, IdxC2), Y, IdxC1 /// /// This has the potential of eliminating the 2nd insertelement instruction /// via constant folding of the scalar constant into a vector constant. 
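/// For example (illustrative, not from the original source):
///   inselt (inselt X, %y, 1), 42, 0 --> inselt (inselt X, 42, 0), %y, 1
/// If X is itself a constant vector, the inner insert of 42 can then be
/// constant-folded away.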
static Instruction *hoistInsEltConst(InsertElementInst &InsElt2, InstCombiner::BuilderTy &Builder) { auto *InsElt1 = dyn_cast(InsElt2.getOperand(0)); if (!InsElt1 || !InsElt1->hasOneUse()) return nullptr; Value *X, *Y; Constant *ScalarC; ConstantInt *IdxC1, *IdxC2; if (match(InsElt1->getOperand(0), m_Value(X)) && match(InsElt1->getOperand(1), m_Value(Y)) && !isa(Y) && match(InsElt1->getOperand(2), m_ConstantInt(IdxC1)) && match(InsElt2.getOperand(1), m_Constant(ScalarC)) && match(InsElt2.getOperand(2), m_ConstantInt(IdxC2)) && IdxC1 != IdxC2) { Value *NewInsElt1 = Builder.CreateInsertElement(X, ScalarC, IdxC2); return InsertElementInst::Create(NewInsElt1, Y, IdxC1); } return nullptr; } /// insertelt (shufflevector X, CVec, Mask|insertelt X, C1, CIndex1), C, CIndex /// --> shufflevector X, CVec', Mask' static Instruction *foldConstantInsEltIntoShuffle(InsertElementInst &InsElt) { auto *Inst = dyn_cast(InsElt.getOperand(0)); // Bail out if the parent has more than one use. In that case, we'd be // replacing the insertelt with a shuffle, and that's not a clear win. if (!Inst || !Inst->hasOneUse()) return nullptr; if (auto *Shuf = dyn_cast(InsElt.getOperand(0))) { // The shuffle must have a constant vector operand. The insertelt must have // a constant scalar being inserted at a constant position in the vector. Constant *ShufConstVec, *InsEltScalar; uint64_t InsEltIndex; if (!match(Shuf->getOperand(1), m_Constant(ShufConstVec)) || !match(InsElt.getOperand(1), m_Constant(InsEltScalar)) || !match(InsElt.getOperand(2), m_ConstantInt(InsEltIndex))) return nullptr; // Adding an element to an arbitrary shuffle could be expensive, but a // shuffle that selects elements from vectors without crossing lanes is // assumed cheap. // If we're just adding a constant into that shuffle, it will still be // cheap. if (!isShuffleEquivalentToSelect(*Shuf)) return nullptr; // From the above 'select' check, we know that the mask has the same number // of elements as the vector input operands. We also know that each constant // input element is used in its lane and can not be used more than once by // the shuffle. Therefore, replace the constant in the shuffle's constant // vector with the insertelt constant. Replace the constant in the shuffle's // mask vector with the insertelt index plus the length of the vector // (because the constant vector operand of a shuffle is always the 2nd // operand). ArrayRef Mask = Shuf->getShuffleMask(); unsigned NumElts = Mask.size(); SmallVector NewShufElts(NumElts); SmallVector NewMaskElts(NumElts); for (unsigned I = 0; I != NumElts; ++I) { if (I == InsEltIndex) { NewShufElts[I] = InsEltScalar; NewMaskElts[I] = InsEltIndex + NumElts; } else { // Copy over the existing values. NewShufElts[I] = ShufConstVec->getAggregateElement(I); NewMaskElts[I] = Mask[I]; } } // Create new operands for a shuffle that includes the constant of the // original insertelt. The old shuffle will be dead now. return new ShuffleVectorInst(Shuf->getOperand(0), ConstantVector::get(NewShufElts), NewMaskElts); } else if (auto *IEI = dyn_cast(Inst)) { // Transform sequences of insertelements ops with constant data/indexes into // a single shuffle op. // Can not handle scalable type, the number of elements needed to create // shuffle mask is not a compile-time constant. 
if (isa(InsElt.getType())) return nullptr; unsigned NumElts = cast(InsElt.getType())->getNumElements(); uint64_t InsertIdx[2]; Constant *Val[2]; if (!match(InsElt.getOperand(2), m_ConstantInt(InsertIdx[0])) || !match(InsElt.getOperand(1), m_Constant(Val[0])) || !match(IEI->getOperand(2), m_ConstantInt(InsertIdx[1])) || !match(IEI->getOperand(1), m_Constant(Val[1]))) return nullptr; SmallVector Values(NumElts); SmallVector Mask(NumElts); auto ValI = std::begin(Val); // Generate new constant vector and mask. // We have 2 values/masks from the insertelements instructions. Insert them // into new value/mask vectors. for (uint64_t I : InsertIdx) { if (!Values[I]) { Values[I] = *ValI; Mask[I] = NumElts + I; } ++ValI; } // Remaining values are filled with 'undef' values. for (unsigned I = 0; I < NumElts; ++I) { if (!Values[I]) { Values[I] = UndefValue::get(InsElt.getType()->getElementType()); Mask[I] = I; } } // Create new operands for a shuffle that includes the constant of the // original insertelt. return new ShuffleVectorInst(IEI->getOperand(0), ConstantVector::get(Values), Mask); } return nullptr; } Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) { Value *VecOp = IE.getOperand(0); Value *ScalarOp = IE.getOperand(1); Value *IdxOp = IE.getOperand(2); if (auto *V = SimplifyInsertElementInst( VecOp, ScalarOp, IdxOp, SQ.getWithInstruction(&IE))) return replaceInstUsesWith(IE, V); // If the vector and scalar are both bitcast from the same element type, do // the insert in that source type followed by bitcast. Value *VecSrc, *ScalarSrc; if (match(VecOp, m_BitCast(m_Value(VecSrc))) && match(ScalarOp, m_BitCast(m_Value(ScalarSrc))) && (VecOp->hasOneUse() || ScalarOp->hasOneUse()) && VecSrc->getType()->isVectorTy() && !ScalarSrc->getType()->isVectorTy() && cast(VecSrc->getType())->getElementType() == ScalarSrc->getType()) { // inselt (bitcast VecSrc), (bitcast ScalarSrc), IdxOp --> // bitcast (inselt VecSrc, ScalarSrc, IdxOp) Value *NewInsElt = Builder.CreateInsertElement(VecSrc, ScalarSrc, IdxOp); return new BitCastInst(NewInsElt, IE.getType()); } // If the inserted element was extracted from some other fixed-length vector // and both indexes are valid constants, try to turn this into a shuffle. // Can not handle scalable vector type, the number of elements needed to // create shuffle mask is not a compile-time constant. uint64_t InsertedIdx, ExtractedIdx; Value *ExtVecOp; if (isa(IE.getType()) && match(IdxOp, m_ConstantInt(InsertedIdx)) && match(ScalarOp, m_ExtractElement(m_Value(ExtVecOp), m_ConstantInt(ExtractedIdx))) && isa(ExtVecOp->getType()) && ExtractedIdx < cast(ExtVecOp->getType())->getNumElements()) { // TODO: Looking at the user(s) to determine if this insert is a // fold-to-shuffle opportunity does not match the usual instcombine // constraints. We should decide if the transform is worthy based only // on this instruction and its operands, but that may not work currently. // // Here, we are trying to avoid creating shuffles before reaching // the end of a chain of extract-insert pairs. This is complicated because // we do not generally form arbitrary shuffle masks in instcombine // (because those may codegen poorly), but collectShuffleElements() does // exactly that. // // The rules for determining what is an acceptable target-independent // shuffle mask are fuzzy because they evolve based on the backend's // capabilities and real-world impact. 
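  // Illustrative example (not from the original source): in a chain such as
  //   %i0 = insertelement <2 x i8> undef, i8 %e0, i32 0
  //   %i1 = insertelement <2 x i8> %i0,   i8 %e1, i32 1
  // where %e0/%e1 are extracts from another vector, only %i1 (whose single
  // user is not another insertelement) is treated as the shuffle root below.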
auto isShuffleRootCandidate = [](InsertElementInst &Insert) { if (!Insert.hasOneUse()) return true; auto *InsertUser = dyn_cast(Insert.user_back()); if (!InsertUser) return true; return false; }; // Try to form a shuffle from a chain of extract-insert ops. if (isShuffleRootCandidate(IE)) { SmallVector Mask; ShuffleOps LR = collectShuffleElements(&IE, Mask, nullptr, *this); // The proposed shuffle may be trivial, in which case we shouldn't // perform the combine. if (LR.first != &IE && LR.second != &IE) { // We now have a shuffle of LHS, RHS, Mask. if (LR.second == nullptr) LR.second = UndefValue::get(LR.first->getType()); return new ShuffleVectorInst(LR.first, LR.second, Mask); } } } if (auto VecTy = dyn_cast(VecOp->getType())) { unsigned VWidth = VecTy->getNumElements(); APInt UndefElts(VWidth, 0); APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth)); if (Value *V = SimplifyDemandedVectorElts(&IE, AllOnesEltMask, UndefElts)) { if (V != &IE) return replaceInstUsesWith(IE, V); return &IE; } } if (Instruction *Shuf = foldConstantInsEltIntoShuffle(IE)) return Shuf; if (Instruction *NewInsElt = hoistInsEltConst(IE, Builder)) return NewInsElt; if (Instruction *Broadcast = foldInsSequenceIntoSplat(IE)) return Broadcast; if (Instruction *Splat = foldInsEltIntoSplat(IE)) return Splat; if (Instruction *IdentityShuf = foldInsEltIntoIdentityShuffle(IE)) return IdentityShuf; return nullptr; } /// Return true if we can evaluate the specified expression tree if the vector /// elements were shuffled in a different order. static bool canEvaluateShuffled(Value *V, ArrayRef Mask, unsigned Depth = 5) { // We can always reorder the elements of a constant. if (isa(V)) return true; // We won't reorder vector arguments. No IPO here. Instruction *I = dyn_cast(V); if (!I) return false; // Two users may expect different orders of the elements. Don't try it. if (!I->hasOneUse()) return false; if (Depth == 0) return false; switch (I->getOpcode()) { case Instruction::UDiv: case Instruction::SDiv: case Instruction::URem: case Instruction::SRem: // Propagating an undefined shuffle mask element to integer div/rem is not // allowed because those opcodes can create immediate undefined behavior // from an undefined element in an operand. if (llvm::any_of(Mask, [](int M){ return M == -1; })) return false; LLVM_FALLTHROUGH; case Instruction::Add: case Instruction::FAdd: case Instruction::Sub: case Instruction::FSub: case Instruction::Mul: case Instruction::FMul: case Instruction::FDiv: case Instruction::FRem: case Instruction::Shl: case Instruction::LShr: case Instruction::AShr: case Instruction::And: case Instruction::Or: case Instruction::Xor: case Instruction::ICmp: case Instruction::FCmp: case Instruction::Trunc: case Instruction::ZExt: case Instruction::SExt: case Instruction::FPToUI: case Instruction::FPToSI: case Instruction::UIToFP: case Instruction::SIToFP: case Instruction::FPTrunc: case Instruction::FPExt: case Instruction::GetElementPtr: { // Bail out if we would create longer vector ops. We could allow creating // longer vector ops, but that may result in more expensive codegen. Type *ITy = I->getType(); if (ITy->isVectorTy() && Mask.size() > cast(ITy)->getNumElements()) return false; for (Value *Operand : I->operands()) { if (!canEvaluateShuffled(Operand, Mask, Depth - 1)) return false; } return true; } case Instruction::InsertElement: { ConstantInt *CI = dyn_cast(I->getOperand(2)); if (!CI) return false; int ElementNumber = CI->getLimitedValue(); // Verify that 'CI' does not occur twice in Mask. 
A single 'insertelement' // can't put an element into multiple indices. bool SeenOnce = false; for (int i = 0, e = Mask.size(); i != e; ++i) { if (Mask[i] == ElementNumber) { if (SeenOnce) return false; SeenOnce = true; } } return canEvaluateShuffled(I->getOperand(0), Mask, Depth - 1); } } return false; } /// Rebuild a new instruction just like 'I' but with the new operands given. /// In the event of type mismatch, the type of the operands is correct. static Value *buildNew(Instruction *I, ArrayRef NewOps) { // We don't want to use the IRBuilder here because we want the replacement // instructions to appear next to 'I', not the builder's insertion point. switch (I->getOpcode()) { case Instruction::Add: case Instruction::FAdd: case Instruction::Sub: case Instruction::FSub: case Instruction::Mul: case Instruction::FMul: case Instruction::UDiv: case Instruction::SDiv: case Instruction::FDiv: case Instruction::URem: case Instruction::SRem: case Instruction::FRem: case Instruction::Shl: case Instruction::LShr: case Instruction::AShr: case Instruction::And: case Instruction::Or: case Instruction::Xor: { BinaryOperator *BO = cast(I); assert(NewOps.size() == 2 && "binary operator with #ops != 2"); BinaryOperator *New = BinaryOperator::Create(cast(I)->getOpcode(), NewOps[0], NewOps[1], "", BO); if (isa(BO)) { New->setHasNoUnsignedWrap(BO->hasNoUnsignedWrap()); New->setHasNoSignedWrap(BO->hasNoSignedWrap()); } if (isa(BO)) { New->setIsExact(BO->isExact()); } if (isa(BO)) New->copyFastMathFlags(I); return New; } case Instruction::ICmp: assert(NewOps.size() == 2 && "icmp with #ops != 2"); return new ICmpInst(I, cast(I)->getPredicate(), NewOps[0], NewOps[1]); case Instruction::FCmp: assert(NewOps.size() == 2 && "fcmp with #ops != 2"); return new FCmpInst(I, cast(I)->getPredicate(), NewOps[0], NewOps[1]); case Instruction::Trunc: case Instruction::ZExt: case Instruction::SExt: case Instruction::FPToUI: case Instruction::FPToSI: case Instruction::UIToFP: case Instruction::SIToFP: case Instruction::FPTrunc: case Instruction::FPExt: { // It's possible that the mask has a different number of elements from // the original cast. We recompute the destination type to match the mask. Type *DestTy = VectorType::get( I->getType()->getScalarType(), cast(NewOps[0]->getType())->getElementCount()); assert(NewOps.size() == 1 && "cast with #ops != 1"); return CastInst::Create(cast(I)->getOpcode(), NewOps[0], DestTy, "", I); } case Instruction::GetElementPtr: { Value *Ptr = NewOps[0]; ArrayRef Idx = NewOps.slice(1); GetElementPtrInst *GEP = GetElementPtrInst::Create( cast(I)->getSourceElementType(), Ptr, Idx, "", I); GEP->setIsInBounds(cast(I)->isInBounds()); return GEP; } } llvm_unreachable("failed to rebuild vector instructions"); } static Value *evaluateInDifferentElementOrder(Value *V, ArrayRef Mask) { // Mask.size() does not need to be equal to the number of vector elements. 
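  // Illustrative example (not from the original source): a constant
  // <4 x i32> <i32 1, i32 2, i32 3, i32 4> evaluated under the mask <2, 0>
  // becomes the <2 x i32> constant <i32 3, i32 1>, since constants are simply
  // shuffled into the requested order.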
assert(V->getType()->isVectorTy() && "can't reorder non-vector elements"); Type *EltTy = V->getType()->getScalarType(); Type *I32Ty = IntegerType::getInt32Ty(V->getContext()); if (isa(V)) return UndefValue::get(VectorType::get(EltTy, Mask.size())); if (isa(V)) return ConstantAggregateZero::get(VectorType::get(EltTy, Mask.size())); if (Constant *C = dyn_cast(V)) return ConstantExpr::getShuffleVector(C, UndefValue::get(C->getType()), Mask); Instruction *I = cast(V); switch (I->getOpcode()) { case Instruction::Add: case Instruction::FAdd: case Instruction::Sub: case Instruction::FSub: case Instruction::Mul: case Instruction::FMul: case Instruction::UDiv: case Instruction::SDiv: case Instruction::FDiv: case Instruction::URem: case Instruction::SRem: case Instruction::FRem: case Instruction::Shl: case Instruction::LShr: case Instruction::AShr: case Instruction::And: case Instruction::Or: case Instruction::Xor: case Instruction::ICmp: case Instruction::FCmp: case Instruction::Trunc: case Instruction::ZExt: case Instruction::SExt: case Instruction::FPToUI: case Instruction::FPToSI: case Instruction::UIToFP: case Instruction::SIToFP: case Instruction::FPTrunc: case Instruction::FPExt: case Instruction::Select: case Instruction::GetElementPtr: { SmallVector NewOps; bool NeedsRebuild = (Mask.size() != cast(I->getType())->getNumElements()); for (int i = 0, e = I->getNumOperands(); i != e; ++i) { Value *V; // Recursively call evaluateInDifferentElementOrder on vector arguments // as well. E.g. GetElementPtr may have scalar operands even if the // return value is a vector, so we need to examine the operand type. if (I->getOperand(i)->getType()->isVectorTy()) V = evaluateInDifferentElementOrder(I->getOperand(i), Mask); else V = I->getOperand(i); NewOps.push_back(V); NeedsRebuild |= (V != I->getOperand(i)); } if (NeedsRebuild) { return buildNew(I, NewOps); } return I; } case Instruction::InsertElement: { int Element = cast(I->getOperand(2))->getLimitedValue(); // The insertelement was inserting at Element. Figure out which element // that becomes after shuffling. The answer is guaranteed to be unique // by CanEvaluateShuffled. bool Found = false; int Index = 0; for (int e = Mask.size(); Index != e; ++Index) { if (Mask[Index] == Element) { Found = true; break; } } // If element is not in Mask, no need to handle the operand 1 (element to // be inserted). Just evaluate values in operand 0 according to Mask. if (!Found) return evaluateInDifferentElementOrder(I->getOperand(0), Mask); Value *V = evaluateInDifferentElementOrder(I->getOperand(0), Mask); return InsertElementInst::Create(V, I->getOperand(1), ConstantInt::get(I32Ty, Index), "", I); } } llvm_unreachable("failed to reorder elements of vector instruction!"); } // Returns true if the shuffle is extracting a contiguous range of values from // LHS, for example: // +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ // Input: |AA|BB|CC|DD|EE|FF|GG|HH|II|JJ|KK|LL|MM|NN|OO|PP| // Shuffles to: |EE|FF|GG|HH| // +--+--+--+--+ static bool isShuffleExtractingFromLHS(ShuffleVectorInst &SVI, ArrayRef Mask) { unsigned LHSElems = cast(SVI.getOperand(0)->getType())->getNumElements(); unsigned MaskElems = Mask.size(); unsigned BegIdx = Mask.front(); unsigned EndIdx = Mask.back(); if (BegIdx > EndIdx || EndIdx >= LHSElems || EndIdx - BegIdx != MaskElems - 1) return false; for (unsigned I = 0; I != MaskElems; ++I) if (static_cast(Mask[I]) != BegIdx + I) return false; return true; } /// These are the ingredients in an alternate form binary operator as described /// below. 
struct BinopElts { BinaryOperator::BinaryOps Opcode; Value *Op0; Value *Op1; BinopElts(BinaryOperator::BinaryOps Opc = (BinaryOperator::BinaryOps)0, Value *V0 = nullptr, Value *V1 = nullptr) : Opcode(Opc), Op0(V0), Op1(V1) {} operator bool() const { return Opcode != 0; } }; /// Binops may be transformed into binops with different opcodes and operands. /// Reverse the usual canonicalization to enable folds with the non-canonical /// form of the binop. If a transform is possible, return the elements of the /// new binop. If not, return invalid elements. static BinopElts getAlternateBinop(BinaryOperator *BO, const DataLayout &DL) { Value *BO0 = BO->getOperand(0), *BO1 = BO->getOperand(1); Type *Ty = BO->getType(); switch (BO->getOpcode()) { case Instruction::Shl: { // shl X, C --> mul X, (1 << C) Constant *C; if (match(BO1, m_Constant(C))) { Constant *ShlOne = ConstantExpr::getShl(ConstantInt::get(Ty, 1), C); return { Instruction::Mul, BO0, ShlOne }; } break; } case Instruction::Or: { // or X, C --> add X, C (when X and C have no common bits set) const APInt *C; if (match(BO1, m_APInt(C)) && MaskedValueIsZero(BO0, *C, DL)) return { Instruction::Add, BO0, BO1 }; break; } default: break; } return {}; } static Instruction *foldSelectShuffleWith1Binop(ShuffleVectorInst &Shuf) { assert(Shuf.isSelect() && "Must have select-equivalent shuffle"); // Are we shuffling together some value and that same value after it has been // modified by a binop with a constant? Value *Op0 = Shuf.getOperand(0), *Op1 = Shuf.getOperand(1); Constant *C; bool Op0IsBinop; if (match(Op0, m_BinOp(m_Specific(Op1), m_Constant(C)))) Op0IsBinop = true; else if (match(Op1, m_BinOp(m_Specific(Op0), m_Constant(C)))) Op0IsBinop = false; else return nullptr; // The identity constant for a binop leaves a variable operand unchanged. For // a vector, this is a splat of something like 0, -1, or 1. // If there's no identity constant for this binop, we're done. auto *BO = cast(Op0IsBinop ? Op0 : Op1); BinaryOperator::BinaryOps BOpcode = BO->getOpcode(); Constant *IdC = ConstantExpr::getBinOpIdentity(BOpcode, Shuf.getType(), true); if (!IdC) return nullptr; // Shuffle identity constants into the lanes that return the original value. // Example: shuf (mul X, {-1,-2,-3,-4}), X, {0,5,6,3} --> mul X, {-1,1,1,-4} // Example: shuf X, (add X, {-1,-2,-3,-4}), {0,1,6,7} --> add X, {0,0,-3,-4} // The existing binop constant vector remains in the same operand position. ArrayRef Mask = Shuf.getShuffleMask(); Constant *NewC = Op0IsBinop ? ConstantExpr::getShuffleVector(C, IdC, Mask) : ConstantExpr::getShuffleVector(IdC, C, Mask); bool MightCreatePoisonOrUB = is_contained(Mask, UndefMaskElem) && (Instruction::isIntDivRem(BOpcode) || Instruction::isShift(BOpcode)); if (MightCreatePoisonOrUB) NewC = getSafeVectorConstantForBinop(BOpcode, NewC, true); // shuf (bop X, C), X, M --> bop X, C' // shuf X, (bop X, C), M --> bop X, C' Value *X = Op0IsBinop ? Op1 : Op0; Instruction *NewBO = BinaryOperator::Create(BOpcode, X, NewC); NewBO->copyIRFlags(BO); // An undef shuffle mask element may propagate as an undef constant element in // the new binop. That would produce poison where the original code might not. // If we already made a safe constant, then there's no danger. 
if (is_contained(Mask, UndefMaskElem) && !MightCreatePoisonOrUB) NewBO->dropPoisonGeneratingFlags(); return NewBO; } /// If we have an insert of a scalar to a non-zero element of an undefined /// vector and then shuffle that value, that's the same as inserting to the zero /// element and shuffling. Splatting from the zero element is recognized as the /// canonical form of splat. static Instruction *canonicalizeInsertSplat(ShuffleVectorInst &Shuf, InstCombiner::BuilderTy &Builder) { Value *Op0 = Shuf.getOperand(0), *Op1 = Shuf.getOperand(1); ArrayRef Mask = Shuf.getShuffleMask(); Value *X; uint64_t IndexC; // Match a shuffle that is a splat to a non-zero element. if (!match(Op0, m_OneUse(m_InsertElement(m_Undef(), m_Value(X), m_ConstantInt(IndexC)))) || !match(Op1, m_Undef()) || match(Mask, m_ZeroMask()) || IndexC == 0) return nullptr; // Insert into element 0 of an undef vector. UndefValue *UndefVec = UndefValue::get(Shuf.getType()); Constant *Zero = Builder.getInt32(0); Value *NewIns = Builder.CreateInsertElement(UndefVec, X, Zero); // Splat from element 0. Any mask element that is undefined remains undefined. // For example: // shuf (inselt undef, X, 2), undef, <2,2,undef> // --> shuf (inselt undef, X, 0), undef, <0,0,undef> unsigned NumMaskElts = Shuf.getType()->getNumElements(); SmallVector NewMask(NumMaskElts, 0); for (unsigned i = 0; i != NumMaskElts; ++i) if (Mask[i] == UndefMaskElem) NewMask[i] = Mask[i]; return new ShuffleVectorInst(NewIns, UndefVec, NewMask); } /// Try to fold shuffles that are the equivalent of a vector select. static Instruction *foldSelectShuffle(ShuffleVectorInst &Shuf, InstCombiner::BuilderTy &Builder, const DataLayout &DL) { if (!Shuf.isSelect()) return nullptr; // Canonicalize to choose from operand 0 first unless operand 1 is undefined. // Commuting undef to operand 0 conflicts with another canonicalization. unsigned NumElts = Shuf.getType()->getNumElements(); if (!isa(Shuf.getOperand(1)) && Shuf.getMaskValue(0) >= (int)NumElts) { // TODO: Can we assert that both operands of a shuffle-select are not undef // (otherwise, it would have been folded by instsimplify? Shuf.commute(); return &Shuf; } if (Instruction *I = foldSelectShuffleWith1Binop(Shuf)) return I; BinaryOperator *B0, *B1; if (!match(Shuf.getOperand(0), m_BinOp(B0)) || !match(Shuf.getOperand(1), m_BinOp(B1))) return nullptr; Value *X, *Y; Constant *C0, *C1; bool ConstantsAreOp1; if (match(B0, m_BinOp(m_Value(X), m_Constant(C0))) && match(B1, m_BinOp(m_Value(Y), m_Constant(C1)))) ConstantsAreOp1 = true; else if (match(B0, m_BinOp(m_Constant(C0), m_Value(X))) && match(B1, m_BinOp(m_Constant(C1), m_Value(Y)))) ConstantsAreOp1 = false; else return nullptr; // We need matching binops to fold the lanes together. BinaryOperator::BinaryOps Opc0 = B0->getOpcode(); BinaryOperator::BinaryOps Opc1 = B1->getOpcode(); bool DropNSW = false; if (ConstantsAreOp1 && Opc0 != Opc1) { // TODO: We drop "nsw" if shift is converted into multiply because it may // not be correct when the shift amount is BitWidth - 1. We could examine // each vector element to determine if it is safe to keep that flag. 
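    // Illustrative note (not from the original source): for i8, shl %x, 7 is
    // rewritten as mul %x, -128 (1 << 7 wraps to -128), and the multiply's
    // no-signed-wrap condition is not implied by the shift's, so nsw is
    // dropped conservatively.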
if (Opc0 == Instruction::Shl || Opc1 == Instruction::Shl) DropNSW = true; if (BinopElts AltB0 = getAlternateBinop(B0, DL)) { assert(isa(AltB0.Op1) && "Expecting constant with alt binop"); Opc0 = AltB0.Opcode; C0 = cast(AltB0.Op1); } else if (BinopElts AltB1 = getAlternateBinop(B1, DL)) { assert(isa(AltB1.Op1) && "Expecting constant with alt binop"); Opc1 = AltB1.Opcode; C1 = cast(AltB1.Op1); } } if (Opc0 != Opc1) return nullptr; // The opcodes must be the same. Use a new name to make that clear. BinaryOperator::BinaryOps BOpc = Opc0; // Select the constant elements needed for the single binop. ArrayRef Mask = Shuf.getShuffleMask(); Constant *NewC = ConstantExpr::getShuffleVector(C0, C1, Mask); // We are moving a binop after a shuffle. When a shuffle has an undefined // mask element, the result is undefined, but it is not poison or undefined // behavior. That is not necessarily true for div/rem/shift. bool MightCreatePoisonOrUB = is_contained(Mask, UndefMaskElem) && (Instruction::isIntDivRem(BOpc) || Instruction::isShift(BOpc)); if (MightCreatePoisonOrUB) NewC = getSafeVectorConstantForBinop(BOpc, NewC, ConstantsAreOp1); Value *V; if (X == Y) { // Remove a binop and the shuffle by rearranging the constant: // shuffle (op V, C0), (op V, C1), M --> op V, C' // shuffle (op C0, V), (op C1, V), M --> op C', V V = X; } else { // If there are 2 different variable operands, we must create a new shuffle // (select) first, so check uses to ensure that we don't end up with more // instructions than we started with. if (!B0->hasOneUse() && !B1->hasOneUse()) return nullptr; // If we use the original shuffle mask and op1 is *variable*, we would be // putting an undef into operand 1 of div/rem/shift. This is either UB or // poison. We do not have to guard against UB when *constants* are op1 // because safe constants guarantee that we do not overflow sdiv/srem (and // there's no danger for other opcodes). // TODO: To allow this case, create a new shuffle mask with no undefs. if (MightCreatePoisonOrUB && !ConstantsAreOp1) return nullptr; // Note: In general, we do not create new shuffles in InstCombine because we // do not know if a target can lower an arbitrary shuffle optimally. In this // case, the shuffle uses the existing mask, so there is no additional risk. // Select the variable vectors first, then perform the binop: // shuffle (op X, C0), (op Y, C1), M --> op (shuffle X, Y, M), C' // shuffle (op C0, X), (op C1, Y), M --> op C', (shuffle X, Y, M) V = Builder.CreateShuffleVector(X, Y, Mask); } Instruction *NewBO = ConstantsAreOp1 ? BinaryOperator::Create(BOpc, V, NewC) : BinaryOperator::Create(BOpc, NewC, V); // Flags are intersected from the 2 source binops. But there are 2 exceptions: // 1. If we changed an opcode, poison conditions might have changed. // 2. If the shuffle had undef mask elements, the new binop might have undefs // where the original code did not. But if we already made a safe constant, // then there's no danger. NewBO->copyIRFlags(B0); NewBO->andIRFlags(B1); if (DropNSW) NewBO->setHasNoSignedWrap(false); if (is_contained(Mask, UndefMaskElem) && !MightCreatePoisonOrUB) NewBO->dropPoisonGeneratingFlags(); return NewBO; } /// Convert a narrowing shuffle of a bitcasted vector into a vector truncate. /// Example (little endian): /// shuf (bitcast <4 x i16> X to <8 x i8>), <0, 2, 4, 6> --> trunc X to <4 x i8> static Instruction *foldTruncShuffle(ShuffleVectorInst &Shuf, bool IsBigEndian) { // This must be a bitcasted shuffle of 1 vector integer operand. 
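  // Illustrative note (not from the original source): on a big-endian target
  // the low bits of each wide element sit at the higher byte index, so the
  // analogous example would use the mask <1, 3, 5, 7> rather than
  // <0, 2, 4, 6>.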
Type *DestType = Shuf.getType(); Value *X; if (!match(Shuf.getOperand(0), m_BitCast(m_Value(X))) || !match(Shuf.getOperand(1), m_Undef()) || !DestType->isIntOrIntVectorTy()) return nullptr; // The source type must have the same number of elements as the shuffle, // and the source element type must be larger than the shuffle element type. Type *SrcType = X->getType(); if (!SrcType->isVectorTy() || !SrcType->isIntOrIntVectorTy() || cast(SrcType)->getNumElements() != cast(DestType)->getNumElements() || SrcType->getScalarSizeInBits() % DestType->getScalarSizeInBits() != 0) return nullptr; assert(Shuf.changesLength() && !Shuf.increasesLength() && "Expected a shuffle that decreases length"); // Last, check that the mask chooses the correct low bits for each narrow // element in the result. uint64_t TruncRatio = SrcType->getScalarSizeInBits() / DestType->getScalarSizeInBits(); ArrayRef Mask = Shuf.getShuffleMask(); for (unsigned i = 0, e = Mask.size(); i != e; ++i) { if (Mask[i] == UndefMaskElem) continue; uint64_t LSBIndex = IsBigEndian ? (i + 1) * TruncRatio - 1 : i * TruncRatio; assert(LSBIndex <= std::numeric_limits::max() && "Overflowed 32-bits"); if (Mask[i] != (int)LSBIndex) return nullptr; } return new TruncInst(X, DestType); } /// Match a shuffle-select-shuffle pattern where the shuffles are widening and /// narrowing (concatenating with undef and extracting back to the original /// length). This allows replacing the wide select with a narrow select. static Instruction *narrowVectorSelect(ShuffleVectorInst &Shuf, InstCombiner::BuilderTy &Builder) { // This must be a narrowing identity shuffle. It extracts the 1st N elements // of the 1st vector operand of a shuffle. if (!match(Shuf.getOperand(1), m_Undef()) || !Shuf.isIdentityWithExtract()) return nullptr; // The vector being shuffled must be a vector select that we can eliminate. // TODO: The one-use requirement could be eased if X and/or Y are constants. Value *Cond, *X, *Y; if (!match(Shuf.getOperand(0), m_OneUse(m_Select(m_Value(Cond), m_Value(X), m_Value(Y))))) return nullptr; // We need a narrow condition value. It must be extended with undef elements // and have the same number of elements as this shuffle. unsigned NarrowNumElts = Shuf.getType()->getNumElements(); Value *NarrowCond; if (!match(Cond, m_OneUse(m_ShuffleVector(m_Value(NarrowCond), m_Undef()))) || cast(NarrowCond->getType())->getNumElements() != NarrowNumElts || !cast(Cond)->isIdentityWithPadding()) return nullptr; // shuf (sel (shuf NarrowCond, undef, WideMask), X, Y), undef, NarrowMask) --> // sel NarrowCond, (shuf X, undef, NarrowMask), (shuf Y, undef, NarrowMask) Value *Undef = UndefValue::get(X->getType()); Value *NarrowX = Builder.CreateShuffleVector(X, Undef, Shuf.getShuffleMask()); Value *NarrowY = Builder.CreateShuffleVector(Y, Undef, Shuf.getShuffleMask()); return SelectInst::Create(NarrowCond, NarrowX, NarrowY); } /// Try to combine 2 shuffles into 1 shuffle by concatenating a shuffle mask. static Instruction *foldIdentityExtractShuffle(ShuffleVectorInst &Shuf) { Value *Op0 = Shuf.getOperand(0), *Op1 = Shuf.getOperand(1); if (!Shuf.isIdentityWithExtract() || !isa(Op1)) return nullptr; Value *X, *Y; ArrayRef Mask; if (!match(Op0, m_ShuffleVector(m_Value(X), m_Value(Y), m_Mask(Mask)))) return nullptr; // Be conservative with shuffle transforms. If we can't kill the 1st shuffle, // then combining may result in worse codegen. if (!Op0->hasOneUse()) return nullptr; // We are extracting a subvector from a shuffle. 
Remove excess elements from // the 1st shuffle mask to eliminate the extract. // // This transform is conservatively limited to identity extracts because we do // not allow arbitrary shuffle mask creation as a target-independent transform // (because we can't guarantee that will lower efficiently). // // If the extracting shuffle has an undef mask element, it transfers to the // new shuffle mask. Otherwise, copy the original mask element. Example: // shuf (shuf X, Y, ), undef, <0, undef, 2, 3> --> // shuf X, Y, unsigned NumElts = Shuf.getType()->getNumElements(); SmallVector NewMask(NumElts); assert(NumElts < Mask.size() && "Identity with extract must have less elements than its inputs"); for (unsigned i = 0; i != NumElts; ++i) { int ExtractMaskElt = Shuf.getMaskValue(i); int MaskElt = Mask[i]; NewMask[i] = ExtractMaskElt == UndefMaskElem ? ExtractMaskElt : MaskElt; } return new ShuffleVectorInst(X, Y, NewMask); } /// Try to replace a shuffle with an insertelement or try to replace a shuffle /// operand with the operand of an insertelement. static Instruction *foldShuffleWithInsert(ShuffleVectorInst &Shuf, InstCombiner &IC) { Value *V0 = Shuf.getOperand(0), *V1 = Shuf.getOperand(1); SmallVector Mask; Shuf.getShuffleMask(Mask); // The shuffle must not change vector sizes. // TODO: This restriction could be removed if the insert has only one use // (because the transform would require a new length-changing shuffle). int NumElts = Mask.size(); if (NumElts != (int)(cast(V0->getType())->getNumElements())) return nullptr; // This is a specialization of a fold in SimplifyDemandedVectorElts. We may // not be able to handle it there if the insertelement has >1 use. // If the shuffle has an insertelement operand but does not choose the // inserted scalar element from that value, then we can replace that shuffle // operand with the source vector of the insertelement. Value *X; uint64_t IdxC; if (match(V0, m_InsertElement(m_Value(X), m_Value(), m_ConstantInt(IdxC)))) { // shuf (inselt X, ?, IdxC), ?, Mask --> shuf X, ?, Mask if (none_of(Mask, [IdxC](int MaskElt) { return MaskElt == (int)IdxC; })) return IC.replaceOperand(Shuf, 0, X); } if (match(V1, m_InsertElement(m_Value(X), m_Value(), m_ConstantInt(IdxC)))) { // Offset the index constant by the vector width because we are checking for // accesses to the 2nd vector input of the shuffle. IdxC += NumElts; // shuf ?, (inselt X, ?, IdxC), Mask --> shuf ?, X, Mask if (none_of(Mask, [IdxC](int MaskElt) { return MaskElt == (int)IdxC; })) return IC.replaceOperand(Shuf, 1, X); } // shuffle (insert ?, Scalar, IndexC), V1, Mask --> insert V1, Scalar, IndexC' auto isShufflingScalarIntoOp1 = [&](Value *&Scalar, ConstantInt *&IndexC) { // We need an insertelement with a constant index. if (!match(V0, m_InsertElement(m_Value(), m_Value(Scalar), m_ConstantInt(IndexC)))) return false; // Test the shuffle mask to see if it splices the inserted scalar into the // operand 1 vector of the shuffle. int NewInsIndex = -1; for (int i = 0; i != NumElts; ++i) { // Ignore undef mask elements. if (Mask[i] == -1) continue; // The shuffle takes elements of operand 1 without lane changes. if (Mask[i] == NumElts + i) continue; // The shuffle must choose the inserted scalar exactly once. if (NewInsIndex != -1 || Mask[i] != IndexC->getSExtValue()) return false; // The shuffle is placing the inserted scalar into element i. NewInsIndex = i; } assert(NewInsIndex != -1 && "Did not fold shuffle with unused operand?"); // Index is updated to the potentially translated insertion lane. 
IndexC = ConstantInt::get(IndexC->getType(), NewInsIndex); return true; }; // If the shuffle is unnecessary, insert the scalar operand directly into // operand 1 of the shuffle. Example: // shuffle (insert ?, S, 1), V1, <1, 5, 6, 7> --> insert V1, S, 0 Value *Scalar; ConstantInt *IndexC; if (isShufflingScalarIntoOp1(Scalar, IndexC)) return InsertElementInst::Create(V1, Scalar, IndexC); // Try again after commuting shuffle. Example: // shuffle V0, (insert ?, S, 0), <0, 1, 2, 4> --> // shuffle (insert ?, S, 0), V0, <4, 5, 6, 0> --> insert V0, S, 3 std::swap(V0, V1); ShuffleVectorInst::commuteShuffleMask(Mask, NumElts); if (isShufflingScalarIntoOp1(Scalar, IndexC)) return InsertElementInst::Create(V1, Scalar, IndexC); return nullptr; } static Instruction *foldIdentityPaddedShuffles(ShuffleVectorInst &Shuf) { // Match the operands as identity with padding (also known as concatenation // with undef) shuffles of the same source type. The backend is expected to // recreate these concatenations from a shuffle of narrow operands. auto *Shuffle0 = dyn_cast(Shuf.getOperand(0)); auto *Shuffle1 = dyn_cast(Shuf.getOperand(1)); if (!Shuffle0 || !Shuffle0->isIdentityWithPadding() || !Shuffle1 || !Shuffle1->isIdentityWithPadding()) return nullptr; // We limit this transform to power-of-2 types because we expect that the // backend can convert the simplified IR patterns to identical nodes as the // original IR. // TODO: If we can verify the same behavior for arbitrary types, the // power-of-2 checks can be removed. Value *X = Shuffle0->getOperand(0); Value *Y = Shuffle1->getOperand(0); if (X->getType() != Y->getType() || !isPowerOf2_32(Shuf.getType()->getNumElements()) || !isPowerOf2_32(Shuffle0->getType()->getNumElements()) || !isPowerOf2_32(cast(X->getType())->getNumElements()) || isa(X) || isa(Y)) return nullptr; assert(isa(Shuffle0->getOperand(1)) && isa(Shuffle1->getOperand(1)) && "Unexpected operand for identity shuffle"); // This is a shuffle of 2 widening shuffles. We can shuffle the narrow source // operands directly by adjusting the shuffle mask to account for the narrower // types: // shuf (widen X), (widen Y), Mask --> shuf X, Y, Mask' int NarrowElts = cast(X->getType())->getNumElements(); int WideElts = Shuffle0->getType()->getNumElements(); assert(WideElts > NarrowElts && "Unexpected types for identity with padding"); ArrayRef Mask = Shuf.getShuffleMask(); SmallVector NewMask(Mask.size(), -1); for (int i = 0, e = Mask.size(); i != e; ++i) { if (Mask[i] == -1) continue; // If this shuffle is choosing an undef element from 1 of the sources, that // element is undef. if (Mask[i] < WideElts) { if (Shuffle0->getMaskValue(Mask[i]) == -1) continue; } else { if (Shuffle1->getMaskValue(Mask[i] - WideElts) == -1) continue; } // If this shuffle is choosing from the 1st narrow op, the mask element is // the same. If this shuffle is choosing from the 2nd narrow op, the mask // element is offset down to adjust for the narrow vector widths. 
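    // Illustrative example (not from the original source): with 4-element
    // sources widened to 8 lanes, a wide mask value of 9 (lane 1 of the 2nd
    // widened operand) becomes 9 - (8 - 4) = 5, i.e. lane 1 of Y in the
    // narrow two-operand shuffle.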
if (Mask[i] < WideElts) { assert(Mask[i] < NarrowElts && "Unexpected shuffle mask"); NewMask[i] = Mask[i]; } else { assert(Mask[i] < (WideElts + NarrowElts) && "Unexpected shuffle mask"); NewMask[i] = Mask[i] - (WideElts - NarrowElts); } } return new ShuffleVectorInst(X, Y, NewMask); } Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) { Value *LHS = SVI.getOperand(0); Value *RHS = SVI.getOperand(1); SimplifyQuery ShufQuery = SQ.getWithInstruction(&SVI); if (auto *V = SimplifyShuffleVectorInst(LHS, RHS, SVI.getShuffleMask(), SVI.getType(), ShufQuery)) return replaceInstUsesWith(SVI, V); // shuffle x, x, mask --> shuffle x, undef, mask' unsigned VWidth = SVI.getType()->getNumElements(); unsigned LHSWidth = cast(LHS->getType())->getNumElements(); ArrayRef Mask = SVI.getShuffleMask(); Type *Int32Ty = Type::getInt32Ty(SVI.getContext()); // Peek through a bitcasted shuffle operand by scaling the mask. If the // simulated shuffle can simplify, then this shuffle is unnecessary: // shuf (bitcast X), undef, Mask --> bitcast X' // TODO: This could be extended to allow length-changing shuffles. // The transform might also be obsoleted if we allowed canonicalization // of bitcasted shuffles. Value *X; if (match(LHS, m_BitCast(m_Value(X))) && match(RHS, m_Undef()) && X->getType()->isVectorTy() && VWidth == LHSWidth) { // Try to create a scaled mask constant. auto *XType = cast(X->getType()); unsigned XNumElts = XType->getNumElements(); SmallVector ScaledMask; if (XNumElts >= VWidth) { assert(XNumElts % VWidth == 0 && "Unexpected vector bitcast"); narrowShuffleMaskElts(XNumElts / VWidth, Mask, ScaledMask); } else { assert(VWidth % XNumElts == 0 && "Unexpected vector bitcast"); if (!widenShuffleMaskElts(VWidth / XNumElts, Mask, ScaledMask)) ScaledMask.clear(); } if (!ScaledMask.empty()) { // If the shuffled source vector simplifies, cast that value to this // shuffle's type. if (auto *V = SimplifyShuffleVectorInst(X, UndefValue::get(XType), ScaledMask, XType, ShufQuery)) return BitCastInst::Create(Instruction::BitCast, V, SVI.getType()); } } if (LHS == RHS) { assert(!isa(RHS) && "Shuffle with 2 undef ops not simplified?"); // Remap any references to RHS to use LHS. SmallVector Elts; for (unsigned i = 0; i != VWidth; ++i) { // Propagate undef elements or force mask to LHS. if (Mask[i] < 0) Elts.push_back(UndefMaskElem); else Elts.push_back(Mask[i] % LHSWidth); } return new ShuffleVectorInst(LHS, UndefValue::get(RHS->getType()), Elts); } // shuffle undef, x, mask --> shuffle x, undef, mask' if (isa(LHS)) { SVI.commute(); return &SVI; } if (Instruction *I = canonicalizeInsertSplat(SVI, Builder)) return I; if (Instruction *I = foldSelectShuffle(SVI, Builder, DL)) return I; if (Instruction *I = foldTruncShuffle(SVI, DL.isBigEndian())) return I; if (Instruction *I = narrowVectorSelect(SVI, Builder)) return I; APInt UndefElts(VWidth, 0); APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth)); if (Value *V = SimplifyDemandedVectorElts(&SVI, AllOnesEltMask, UndefElts)) { if (V != &SVI) return replaceInstUsesWith(SVI, V); return &SVI; } if (Instruction *I = foldIdentityExtractShuffle(SVI)) return I; // These transforms have the potential to lose undef knowledge, so they are // intentionally placed after SimplifyDemandedVectorElts(). 
if (Instruction *I = foldShuffleWithInsert(SVI, *this)) return I; if (Instruction *I = foldIdentityPaddedShuffles(SVI)) return I; if (isa(RHS) && canEvaluateShuffled(LHS, Mask)) { Value *V = evaluateInDifferentElementOrder(LHS, Mask); return replaceInstUsesWith(SVI, V); } // SROA generates shuffle+bitcast when the extracted sub-vector is bitcast to // a non-vector type. We can instead bitcast the original vector followed by // an extract of the desired element: // // %sroa = shufflevector <16 x i8> %in, <16 x i8> undef, // <4 x i32> // %1 = bitcast <4 x i8> %sroa to i32 // Becomes: // %bc = bitcast <16 x i8> %in to <4 x i32> // %ext = extractelement <4 x i32> %bc, i32 0 // // If the shuffle is extracting a contiguous range of values from the input // vector then each use which is a bitcast of the extracted size can be // replaced. This will work if the vector types are compatible, and the begin // index is aligned to a value in the casted vector type. If the begin index // isn't aligned then we can shuffle the original vector (keeping the same // vector type) before extracting. // // This code will bail out if the target type is fundamentally incompatible // with vectors of the source type. // // Example of <16 x i8>, target type i32: // Index range [4,8): v-----------v Will work. // +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ // <16 x i8>: | | | | | | | | | | | | | | | | | // <4 x i32>: | | | | | // +-----------+-----------+-----------+-----------+ // Index range [6,10): ^-----------^ Needs an extra shuffle. // Target type i40: ^--------------^ Won't work, bail. bool MadeChange = false; if (isShuffleExtractingFromLHS(SVI, Mask)) { Value *V = LHS; unsigned MaskElems = Mask.size(); VectorType *SrcTy = cast(V->getType()); unsigned VecBitWidth = SrcTy->getPrimitiveSizeInBits().getFixedSize(); unsigned SrcElemBitWidth = DL.getTypeSizeInBits(SrcTy->getElementType()); assert(SrcElemBitWidth && "vector elements must have a bitwidth"); unsigned SrcNumElems = SrcTy->getNumElements(); SmallVector BCs; DenseMap NewBCs; for (User *U : SVI.users()) if (BitCastInst *BC = dyn_cast(U)) if (!BC->use_empty()) // Only visit bitcasts that weren't previously handled. BCs.push_back(BC); for (BitCastInst *BC : BCs) { unsigned BegIdx = Mask.front(); Type *TgtTy = BC->getDestTy(); unsigned TgtElemBitWidth = DL.getTypeSizeInBits(TgtTy); if (!TgtElemBitWidth) continue; unsigned TgtNumElems = VecBitWidth / TgtElemBitWidth; bool VecBitWidthsEqual = VecBitWidth == TgtNumElems * TgtElemBitWidth; bool BegIsAligned = 0 == ((SrcElemBitWidth * BegIdx) % TgtElemBitWidth); if (!VecBitWidthsEqual) continue; if (!VectorType::isValidElementType(TgtTy)) continue; VectorType *CastSrcTy = VectorType::get(TgtTy, TgtNumElems); if (!BegIsAligned) { // Shuffle the input so [0,NumElements) contains the output, and // [NumElems,SrcNumElems) is undef. SmallVector ShuffleMask(SrcNumElems, -1); for (unsigned I = 0, E = MaskElems, Idx = BegIdx; I != E; ++Idx, ++I) ShuffleMask[I] = Idx; V = Builder.CreateShuffleVector(V, UndefValue::get(V->getType()), ShuffleMask, SVI.getName() + ".extract"); BegIdx = 0; } unsigned SrcElemsPerTgtElem = TgtElemBitWidth / SrcElemBitWidth; assert(SrcElemsPerTgtElem); BegIdx /= SrcElemsPerTgtElem; bool BCAlreadyExists = NewBCs.find(CastSrcTy) != NewBCs.end(); auto *NewBC = BCAlreadyExists ? 
                        NewBCs[CastSrcTy] :
                        Builder.CreateBitCast(V, CastSrcTy,
                                              SVI.getName() + ".bc");
      if (!BCAlreadyExists)
        NewBCs[CastSrcTy] = NewBC;
      auto *Ext = Builder.CreateExtractElement(
          NewBC, ConstantInt::get(Int32Ty, BegIdx), SVI.getName() + ".extract");
      // The shufflevector isn't being replaced: the bitcast that used it
      // is. InstCombine will visit the newly-created instructions.
      replaceInstUsesWith(*BC, Ext);
      MadeChange = true;
    }
  }

  // If the LHS is a shufflevector itself, see if we can combine it with this
  // one without producing an unusual shuffle.
  // Cases that might be simplified:
  // 1.
  // x1=shuffle(v1,v2,mask1)
  //  x=shuffle(x1,undef,mask)
  //        ==>
  //  x=shuffle(v1,undef,newMask)
  // newMask[i] = (mask[i] < x1.size()) ? mask1[mask[i]] : -1
  // 2.
  // x1=shuffle(v1,undef,mask1)
  //  x=shuffle(x1,x2,mask)
  // where v1.size() == mask1.size()
  //        ==>
  //  x=shuffle(v1,x2,newMask)
  // newMask[i] = (mask[i] < x1.size()) ? mask1[mask[i]] : mask[i]
  // 3.
  // x2=shuffle(v2,undef,mask2)
  //  x=shuffle(x1,x2,mask)
  // where v2.size() == mask2.size()
  //        ==>
  //  x=shuffle(x1,v2,newMask)
  // newMask[i] = (mask[i] < x1.size())
  //              ? mask[i] : mask2[mask[i]-x1.size()]+x1.size()
  // 4.
  // x1=shuffle(v1,undef,mask1)
  // x2=shuffle(v2,undef,mask2)
  //  x=shuffle(x1,x2,mask)
  // where v1.size() == v2.size()
  //        ==>
  //  x=shuffle(v1,v2,newMask)
  // newMask[i] = (mask[i] < x1.size())
  //              ? mask1[mask[i]] : mask2[mask[i]-x1.size()]+v1.size()
  //
  // Here we are really conservative:
  // we are absolutely afraid of producing a shuffle mask not in the input
  // program, because the code gen may not be smart enough to turn a merged
  // shuffle into two specific shuffles: it may produce worse code. As such,
  // we only merge two shuffles if the result is either a splat or one of the
  // input shuffle masks. In this case, merging the shuffles just removes
  // one instruction, which we know is safe. This is good for things like
  // turning: (splat(splat)) -> splat, or
  // merge(V[0..n], V[n+1..2n]) -> V[0..2n]
  ShuffleVectorInst* LHSShuffle = dyn_cast<ShuffleVectorInst>(LHS);
  ShuffleVectorInst* RHSShuffle = dyn_cast<ShuffleVectorInst>(RHS);
  if (LHSShuffle)
    if (!isa<UndefValue>(LHSShuffle->getOperand(1)) && !isa<UndefValue>(RHS))
      LHSShuffle = nullptr;
  if (RHSShuffle)
    if (!isa<UndefValue>(RHSShuffle->getOperand(1)))
      RHSShuffle = nullptr;
  if (!LHSShuffle && !RHSShuffle)
    return MadeChange ? &SVI : nullptr;

  Value* LHSOp0 = nullptr;
  Value* LHSOp1 = nullptr;
  Value* RHSOp0 = nullptr;
  unsigned LHSOp0Width = 0;
  unsigned RHSOp0Width = 0;
  if (LHSShuffle) {
    LHSOp0 = LHSShuffle->getOperand(0);
    LHSOp1 = LHSShuffle->getOperand(1);
    LHSOp0Width = cast<VectorType>(LHSOp0->getType())->getNumElements();
  }
  if (RHSShuffle) {
    RHSOp0 = RHSShuffle->getOperand(0);
    RHSOp0Width = cast<VectorType>(RHSOp0->getType())->getNumElements();
  }
  Value* newLHS = LHS;
  Value* newRHS = RHS;
  if (LHSShuffle) {
    // case 1
    if (isa<UndefValue>(RHS)) {
      newLHS = LHSOp0;
      newRHS = LHSOp1;
    }
    // case 2 or 4
    else if (LHSOp0Width == LHSWidth) {
      newLHS = LHSOp0;
    }
  }
  // case 3 or 4
  if (RHSShuffle && RHSOp0Width == LHSWidth) {
    newRHS = RHSOp0;
  }
  // case 4
  if (LHSOp0 == RHSOp0) {
    newLHS = LHSOp0;
    newRHS = nullptr;
  }

  if (newLHS == LHS && newRHS == RHS)
    return MadeChange ? &SVI : nullptr;

  ArrayRef<int> LHSMask;
  ArrayRef<int> RHSMask;
  if (newLHS != LHS)
    LHSMask = LHSShuffle->getShuffleMask();
  if (RHSShuffle && newRHS != RHS)
    RHSMask = RHSShuffle->getShuffleMask();

  unsigned newLHSWidth = (newLHS != LHS) ? LHSOp0Width : LHSWidth;
  SmallVector<int, 16> newMask;
  bool isSplat = true;
  int SplatElt = -1;
  // Create a new mask for the new ShuffleVectorInst so that the new
  // ShuffleVectorInst is equivalent to the original one.
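  // Illustrative sketch (editorial note, not taken from this patch): the
  // simplest instance of case 1 above is a splat of a splat. The value names
  // %v1, %x1, and %x below are hypothetical. Given
  //   %x1 = shufflevector <4 x i32> %v1, <4 x i32> undef, <4 x i32> zeroinitializer
  //   %x  = shufflevector <4 x i32> %x1, <4 x i32> undef, <4 x i32> zeroinitializer
  // every mask[i] is 0, so newMask[i] = mask1[mask[i]] = 0 in the loop below.
  // The merged mask is both a splat and equal to mask1, so the conservative
  // check at the end accepts it and the pair is replaced by
  //   %x = shufflevector <4 x i32> %v1, <4 x i32> undef, <4 x i32> zeroinitializer
  // reading directly from %v1.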
  for (unsigned i = 0; i < VWidth; ++i) {
    int eltMask;
    if (Mask[i] < 0) {
      // This element is an undef value.
      eltMask = -1;
    } else if (Mask[i] < (int)LHSWidth) {
      // This element is from left hand side vector operand.
      //
      // If LHS is going to be replaced (case 1, 2, or 4), calculate the
      // new mask value for the element.
      if (newLHS != LHS) {
        eltMask = LHSMask[Mask[i]];
        // If the value selected is an undef value, explicitly specify it
        // with a -1 mask value.
        if (eltMask >= (int)LHSOp0Width && isa<UndefValue>(LHSOp1))
          eltMask = -1;
      } else
        eltMask = Mask[i];
    } else {
      // This element is from right hand side vector operand
      //
      // If the value selected is an undef value, explicitly specify it
      // with a -1 mask value. (case 1)
      if (isa<UndefValue>(RHS))
        eltMask = -1;
      // If RHS is going to be replaced (case 3 or 4), calculate the
      // new mask value for the element.
      else if (newRHS != RHS) {
        eltMask = RHSMask[Mask[i]-LHSWidth];
        // If the value selected is an undef value, explicitly specify it
        // with a -1 mask value.
        if (eltMask >= (int)RHSOp0Width) {
          assert(isa<UndefValue>(RHSShuffle->getOperand(1)) &&
                 "should have been checked above");
          eltMask = -1;
        }
      } else
        eltMask = Mask[i]-LHSWidth;

      // If LHS's width is changed, shift the mask value accordingly.
      // If newRHS == nullptr, i.e. LHSOp0 == RHSOp0, we want to remap any
      // references from RHSOp0 to LHSOp0, so we don't need to shift the mask.
      // If newRHS == newLHS, we want to remap any references from newRHS to
      // newLHS so that we can properly identify splats that may occur due to
      // obfuscation across the two vectors.
      if (eltMask >= 0 && newRHS != nullptr && newLHS != newRHS)
        eltMask += newLHSWidth;
    }

    // Check if this could still be a splat.
    if (eltMask >= 0) {
      if (SplatElt >= 0 && SplatElt != eltMask)
        isSplat = false;
      SplatElt = eltMask;
    }

    newMask.push_back(eltMask);
  }

  // If the result mask is equal to one of the original shuffle masks,
  // or is a splat, do the replacement.
  if (isSplat || newMask == LHSMask || newMask == RHSMask || newMask == Mask) {
    SmallVector<Constant *, 16> Elts;
    for (unsigned i = 0, e = newMask.size(); i != e; ++i) {
      if (newMask[i] < 0) {
        Elts.push_back(UndefValue::get(Int32Ty));
      } else {
        Elts.push_back(ConstantInt::get(Int32Ty, newMask[i]));
      }
    }
    if (!newRHS)
      newRHS = UndefValue::get(newLHS->getType());
    return new ShuffleVectorInst(newLHS, newRHS, ConstantVector::get(Elts));
  }

  return MadeChange ?
                     &SVI : nullptr;
}
diff --git a/llvm/test/Transforms/InstCombine/vscale_extractelement.ll b/llvm/test/Transforms/InstCombine/vscale_extractelement.ll
new file mode 100644
index 000000000000..397767dcfd91
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/vscale_extractelement.ll
@@ -0,0 +1,148 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define i32 @extractelement_in_range(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: @extractelement_in_range(
+; CHECK-NEXT:    [[R:%.*]] = extractelement <vscale x 4 x i32> [[A:%.*]], i64 1
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %r = extractelement <vscale x 4 x i32> %a, i64 1
+  ret i32 %r
+}
+
+define i32 @extractelement_maybe_out_of_range(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: @extractelement_maybe_out_of_range(
+; CHECK-NEXT:    [[R:%.*]] = extractelement <vscale x 4 x i32> [[A:%.*]], i64 4
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %r = extractelement <vscale x 4 x i32> %a, i64 4
+  ret i32 %r
+}
+
+define i32 @extractelement_bitcast(float %f) {
+; CHECK-LABEL: @extractelement_bitcast(
+; CHECK-NEXT:    [[R:%.*]] = bitcast float [[F:%.*]] to i32
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %vec_float = insertelement <vscale x 4 x float> undef, float %f, i32 0
+  %vec_int = bitcast <vscale x 4 x float> %vec_float to <vscale x 4 x i32>
+  %r = extractelement <vscale x 4 x i32> %vec_int, i32 0
+  ret i32 %r
+}
+
+define i8 @extractelement_bitcast_to_trunc(<vscale x 2 x i32> %a, i32 %x) {
+; CHECK-LABEL: @extractelement_bitcast_to_trunc(
+; CHECK-NEXT:    [[R:%.*]] = trunc i32 [[X:%.*]] to i8
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %vec = insertelement <vscale x 2 x i32> %a, i32 %x, i32 1
+  %vec_cast = bitcast <vscale x 2 x i32> %vec to <vscale x 8 x i8>
+  %r = extractelement <vscale x 8 x i8> %vec_cast, i32 4
+  ret i8 %r
+}
+
+; TODO: Instcombine could remove the insert.
+define i8 @extractelement_bitcast_wrong_insert(<vscale x 2 x i32> %a, i32 %x) {
+; CHECK-LABEL: @extractelement_bitcast_wrong_insert(
+; CHECK-NEXT:    [[VEC:%.*]] = insertelement <vscale x 2 x i32> [[A:%.*]], i32 [[X:%.*]], i32 1
+; CHECK-NEXT:    [[VEC_CAST:%.*]] = bitcast <vscale x 2 x i32> [[VEC]] to <vscale x 8 x i8>
+; CHECK-NEXT:    [[R:%.*]] = extractelement <vscale x 8 x i8> [[VEC_CAST]], i32 2
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %vec = insertelement <vscale x 2 x i32> %a, i32 %x, i32 1 ; <- This insert could be removed.
+  %vec_cast = bitcast <vscale x 2 x i32> %vec to <vscale x 8 x i8>
+  %r = extractelement <vscale x 8 x i8> %vec_cast, i32 2
+  ret i8 %r
+}
+
+; TODO: Instcombine could optimize to return %v.
+define i32 @extractelement_shuffle_in_range(i32 %v) {
+; CHECK-LABEL: @extractelement_shuffle_in_range(
+; CHECK-NEXT:    [[IN:%.*]] = insertelement <vscale x 4 x i32> undef, i32 [[V:%.*]], i32 0
+; CHECK-NEXT:    [[SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[IN]], <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT:    [[R:%.*]] = extractelement <vscale x 4 x i32> [[SPLAT]], i32 1
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %in = insertelement <vscale x 4 x i32> undef, i32 %v, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %in, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %r = extractelement <vscale x 4 x i32> %splat, i32 1
+  ret i32 %r
+}
+
+define i32 @extractelement_shuffle_maybe_out_of_range(i32 %v) {
+; CHECK-LABEL: @extractelement_shuffle_maybe_out_of_range(
+; CHECK-NEXT:    [[IN:%.*]] = insertelement <vscale x 4 x i32> undef, i32 [[V:%.*]], i32 0
+; CHECK-NEXT:    [[SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[IN]], <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT:    [[R:%.*]] = extractelement <vscale x 4 x i32> [[SPLAT]], i32 4
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %in = insertelement <vscale x 4 x i32> undef, i32 %v, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %in, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %r = extractelement <vscale x 4 x i32> %splat, i32 4
+  ret i32 %r
+}
+
+define i32 @extractelement_shuffle_invalid_index(i32 %v) {
+; CHECK-LABEL: @extractelement_shuffle_invalid_index(
+; CHECK-NEXT:    [[IN:%.*]] = insertelement <vscale x 4 x i32> undef, i32 [[V:%.*]], i32 0
+; CHECK-NEXT:    [[SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[IN]], <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT:    [[R:%.*]] = extractelement <vscale x 4 x i32> [[SPLAT]], i32 -1
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %in = insertelement <vscale x 4 x i32> undef, i32 %v, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %in, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %r = extractelement <vscale x 4 x i32> %splat, i32 -1
+  ret i32 %r
+}
+
+
+define i32 @extractelement_shuffle_symbolic_index(i32 %v, i32 %idx) {
+; CHECK-LABEL: @extractelement_shuffle_symbolic_index(
+; CHECK-NEXT:    [[IN:%.*]] = insertelement <vscale x 4 x i32> undef, i32 [[V:%.*]], i32 0
+; CHECK-NEXT:    [[SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[IN]], <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT:    [[R:%.*]] = extractelement <vscale x 4 x i32> [[SPLAT]], i32 [[IDX:%.*]]
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %in = insertelement <vscale x 4 x i32> undef, i32 %v, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %in, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %r = extractelement <vscale x 4 x i32> %splat, i32 %idx
+  ret i32 %r
+}
+
+define <vscale x 4 x i32> @extractelement_insertelement_same_positions(<vscale x 4 x i32> %vec) {
+; CHECK-LABEL: @extractelement_insertelement_same_positions(
+; CHECK-NEXT:    ret <vscale x 4 x i32> [[VEC:%.*]]
+;
+  %vec.e0 = extractelement <vscale x 4 x i32> %vec, i32 0
+  %vec.e1 = extractelement <vscale x 4 x i32> %vec, i32 1
+  %vec.e2 = extractelement <vscale x 4 x i32> %vec, i32 2
+  %vec.e3 = extractelement <vscale x 4 x i32> %vec, i32 3
+  %1 = insertelement <vscale x 4 x i32> %vec, i32 %vec.e0, i32 0
+  %2 = insertelement <vscale x 4 x i32> %1, i32 %vec.e1, i32 1
+  %3 = insertelement <vscale x 4 x i32> %2, i32 %vec.e2, i32 2
+  %4 = insertelement <vscale x 4 x i32> %3, i32 %vec.e3, i32 3
+  ret <vscale x 4 x i32> %4
+}
+
+define <vscale x 4 x i32> @extractelement_insertelement_diff_positions(<vscale x 4 x i32> %vec) {
+; CHECK-LABEL: @extractelement_insertelement_diff_positions(
+; CHECK-NEXT:    [[VEC_E0:%.*]] = extractelement <vscale x 4 x i32> [[VEC:%.*]], i32 4
+; CHECK-NEXT:    [[VEC_E1:%.*]] = extractelement <vscale x 4 x i32> [[VEC]], i32 5
+; CHECK-NEXT:    [[VEC_E2:%.*]] = extractelement <vscale x 4 x i32> [[VEC]], i32 6
+; CHECK-NEXT:    [[VEC_E3:%.*]] = extractelement <vscale x 4 x i32> [[VEC]], i32 7
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <vscale x 4 x i32> undef, i32 [[VEC_E0]], i32 0
+; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <vscale x 4 x i32> [[TMP1]], i32 [[VEC_E1]], i32 1
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <vscale x 4 x i32> [[TMP2]], i32 [[VEC_E2]], i32 2
+; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <vscale x 4 x i32> [[TMP3]], i32 [[VEC_E3]], i32 3
+; CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP4]]
+;
+  %vec.e0 = extractelement <vscale x 4 x i32> %vec, i32 4
+  %vec.e1 = extractelement <vscale x 4 x i32> %vec, i32 5
+  %vec.e2 = extractelement <vscale x 4 x i32> %vec, i32 6
+  %vec.e3 = extractelement <vscale x 4 x i32> %vec, i32 7
+  %1 = insertelement <vscale x 4 x i32> %vec, i32 %vec.e0, i32 0
+  %2 = insertelement <vscale x 4 x i32> %1, i32 %vec.e1, i32 1
+  %3 = insertelement <vscale x 4 x i32> %2, i32 %vec.e2, i32 2
+  %4 = insertelement <vscale x 4 x i32> %3, i32 %vec.e3, i32 3
+  ret <vscale x 4 x i32> %4
+}
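; Illustrative note (editorial addition, not part of the patch above): the
; *_maybe_out_of_range and shuffle tests rely on the fact that an index such
; as 4 cannot be proven out of bounds for a scalable type, so no fold should
; fire. For a fixed-length type the same constant index is provably out of
; range and InstCombine may fold the extract away entirely, e.g. the
; hypothetical input (not from this test file)
;   %r = extractelement <4 x i32> %a, i64 4   ; index >= 4 is always out of range
; whereas for <vscale x 4 x i32> index 4 is valid whenever vscale >= 2, so the
; extractelement must be kept as the checks above expect.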