diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp --- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp +++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp @@ -37,13 +37,48 @@ // multiple scalar registers, similar to a GPU vectorized load. In theory ARM // could use this pass (with some modifications), but currently it implements // its own pass to do something similar to what we do here. +// +// Overview of the algorithm and terminology in this pass: +// +// - Break up each basic block into pseudo-BBs, composed of instructions which +// are guaranteed to transfer control to their successors. +// - Within a single pseudo-BB, find all loads, and group them into +// "equivalence classes" according to getUnderlyingObject() and loaded +// element size. Do the same for stores. +// - For each equivalence class, greedily build "chains". Each chain has a +// leader instruction, and every other member of the chain has a known +// constant offset from the first instr in the chain. +// - Break up chains so that they contain only contiguous accesses of legal +// size with no intervening may-alias instrs. +// - Convert each chain to vector instructions. +// +// The O(n^2) behavior of this pass comes from initially building the chains. +// In the worst case we have to compare each new instruction to all of those +// that came before. To limit this, we only calculate the offset to the leaders +// of the N most recently-used chains. #include "llvm/Transforms/Vectorize/LoadStoreVectorizer.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + #include "llvm/ADT/APInt.h" #include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/DenseMap.h" #include "llvm/ADT/MapVector.h" #include "llvm/ADT/PostOrderIterator.h" #include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/Sequence.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" @@ -57,6 +92,7 @@ #include "llvm/Analysis/VectorUtils.h" #include "llvm/IR/Attributes.h" #include "llvm/IR/BasicBlock.h" +#include "llvm/IR/ConstantRange.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DerivedTypes.h" @@ -67,22 +103,21 @@ #include "llvm/IR/InstrTypes.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/Instructions.h" +#include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" #include "llvm/IR/Type.h" #include "llvm/IR/Value.h" #include "llvm/InitializePasses.h" #include "llvm/Pass.h" +#include "llvm/Support/Alignment.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Debug.h" #include "llvm/Support/KnownBits.h" #include "llvm/Support/MathExtras.h" +#include "llvm/Support/ModRef.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/Local.h" -#include -#include -#include -#include -#include +#include "llvm/Transforms/Vectorize.h" using namespace llvm; @@ -91,21 +126,123 @@ STATISTIC(NumVectorInstructions, "Number of vector accesses generated"); STATISTIC(NumScalarsVectorized, "Number of scalar accesses vectorized"); -// FIXME: Assuming stack alignment of 4 is always good enough -static const unsigned StackAdjustedAlignment = 4; - namespace { -/// ChainID is an arbitrary token that is allowed to be different only for the -/// accesses that are guaranteed to be considered non-consecutive by -/// Vectorizer::isConsecutiveAccess. 
It's used for grouping instructions -/// together and reducing the number of instructions the main search operates on -/// at a time, i.e. this is to reduce compile time and nothing else as the main -/// search has O(n^2) time complexity. The underlying type of ChainID should not -/// be relied upon. -using ChainID = const Value *; -using InstrList = SmallVector; -using InstrListMap = MapVector; +// Equivalence class key, the initial tuple by which we group loads/stores. +// Loads/stores with different EqClassKeys are never merged. +// +// (We could in theory remove element-size from the this tuple. We'd just need +// to fix up the vector packing/unpacking code.) +using EqClassKey = + std::tuple; +llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const EqClassKey &K) { + const auto &[UnderlyingObject, AddrSpace, ElementSize, IsLoad] = K; + return OS << (IsLoad ? "load" : "store") << " of " << *UnderlyingObject + << " of element size " << ElementSize << " bits in addrspace " + << AddrSpace; +} + +// A Chain is a set of instructions such that: +// - All instructions have the same equivalence class, so in particular all are +// loads, or all are stores. +// - We know the address accessed by the i'th chain elem relative to the +// chain's leader instruction, which is the first instr of the chain in BB +// order. +// +// Chains have two canonical orderings: +// - BB order, sorted by Instr->comesBefore. +// - Offset order, sorted by OffsetFromLeader. +// This pass switches back and forth between these orders. +struct ChainElem { + Instruction *Inst; + APInt OffsetFromLeader; +}; +using Chain = SmallVector; + +void sortChainInBBOrder(Chain &C) { + sort(C, [](auto &A, auto &B) { return A.Inst->comesBefore(B.Inst); }); +} +void sortChainInOffsetOrder(Chain &C) { + sort(C, [](const auto &A, const auto &B) { + if (A.OffsetFromLeader != B.OffsetFromLeader) + return A.OffsetFromLeader.slt(B.OffsetFromLeader); + return A.Inst->comesBefore(B.Inst); // stable tiebreaker + }); +} + +void dumpChain(ArrayRef C) { + for (const auto &E : C) { + dbgs() << " " << *E.Inst << " (offset " << E.OffsetFromLeader << ")" + << "\n"; + } +} + +using EquivalenceClassMap = + MapVector>; + +// FIXME: Assuming stack alignment of 4 is always good enough +constexpr unsigned StackAdjustedAlignment = 4; + +Instruction *propagateMetadata(Instruction *I, const Chain &C) { + SmallVector Values; + for (const ChainElem &E : C) { + Values.push_back(E.Inst); + } + return propagateMetadata(I, Values); +} + +Type *getScalarTy(Type *Ty) { + VectorType *VecTy = dyn_cast(Ty); + return VecTy ? VecTy->getScalarType() : Ty; +} + +bool isInvariantLoad(const Instruction *I) { + const LoadInst *LI = dyn_cast(I); + return LI != nullptr && LI->hasMetadata(LLVMContext::MD_invariant_load); +} + +/// Reorders the instructions that I depends on (the instructions defining its +/// operands), to ensure they dominate I. +void reorder(Instruction *I) { + SmallPtrSet InstructionsToMove; + SmallVector Worklist; + + Worklist.push_back(I); + while (!Worklist.empty()) { + Instruction *IW = Worklist.pop_back_val(); + int NumOperands = IW->getNumOperands(); + for (int i = 0; i < NumOperands; i++) { + Instruction *IM = dyn_cast(IW->getOperand(i)); + if (!IM || IM->getOpcode() == Instruction::PHI) + continue; + + // If IM is in another BB, no need to move it, because this pass only + // vectorizes instructions within one BB. 
+ if (IM->getParent() != I->getParent()) + continue; + + if (!IM->comesBefore(I)) { + InstructionsToMove.insert(IM); + Worklist.push_back(IM); + } + } + } + + // All instructions to move should follow I. Start from I, not from begin(). + for (auto BBI = I->getIterator(), E = I->getParent()->end(); BBI != E; + ++BBI) { + if (!InstructionsToMove.count(&*BBI)) + continue; + Instruction *IM = &*BBI; + --BBI; + IM->removeFromParent(); + IM->insertBefore(I); + } +} class Vectorizer { Function &F; @@ -116,6 +253,7 @@ TargetTransformInfo &TTI; const DataLayout &DL; IRBuilder<> Builder; + SmallVector ToErase; public: Vectorizer(Function &F, AliasAnalysis &AA, AssumptionCache &AC, @@ -126,70 +264,82 @@ bool run(); private: - unsigned getPointerAddressSpace(Value *I); - static const unsigned MaxDepth = 3; - bool isConsecutiveAccess(Value *A, Value *B); - bool areConsecutivePointers(Value *PtrA, Value *PtrB, APInt PtrDelta, - unsigned Depth = 0) const; - bool lookThroughComplexAddresses(Value *PtrA, Value *PtrB, APInt PtrDelta, - unsigned Depth) const; - bool lookThroughSelects(Value *PtrA, Value *PtrB, const APInt &PtrDelta, - unsigned Depth) const; + /// Runs the vectorizer on a "pseudo basic block", which is a range of + /// instructions [Begin, End) within one BB all of which have + /// isGuaranteedToTransferExecutionToSuccessor(I) == true. + bool runOnPseudoBB(BasicBlock::iterator Begin, BasicBlock::iterator End); - /// After vectorization, reorder the instructions that I depends on - /// (the instructions defining its operands), to ensure they dominate I. - void reorder(Instruction *I); + /// Runs the vectorizer on one equivalence class, i.e. one set of loads/stores + /// in the same BB with the same value for getUnderlyingObject() etc. + bool runOnEquivalenceClass(const EqClassKey &EqClassKey, + ArrayRef EqClass); - /// Returns the first and the last instructions in Chain. - std::pair - getBoundaryInstrs(ArrayRef Chain); + /// Runs the vectorizer on one chain, i.e. a subset of an equivalence class + /// where all instructions access a known, constant offset from the first + /// instruction. + bool runOnChain(Chain &C); - /// Erases the original instructions after vectorizing. - void eraseInstructions(ArrayRef Chain); + /// Splits the chain into subchains of instructions which read/write a + /// contiguous block of memory. Doesn't return length-1 chains. + std::vector splitChainByContiguity(Chain &C); - /// "Legalize" the vector type that would be produced by combining \p - /// ElementSizeBits elements in \p Chain. Break into two pieces such that the - /// total size of each piece is 1, 2 or a multiple of 4 bytes. \p Chain is - /// expected to have more than 4 elements. - std::pair, ArrayRef> - splitOddVectorElts(ArrayRef Chain, unsigned ElementSizeBits); + /// Splits the chain into sub-chains where it's safe to hoist loads up to the + /// beginning of the sub-chain and it's safe to sink loads up to the end of + /// the sub-chain. Doesn't return length-1 chains. + std::vector splitChainByMayAliasInstrs(Chain &C); - /// Finds the largest prefix of Chain that's vectorizable, checking for - /// intervening instructions which may affect the memory accessed by the - /// instructions within Chain. + /// Splits the chain into subchains that make legal, aligned accesses. + /// Doesn't return length-1 chains. + std::vector splitChainByAlignment(Chain &C); + + /// Converts the instrs in the chain into a single vectorized load or store. + /// Adds the old scalar loads/stores to ToErase. 
+ bool vectorizeChain(Chain &C); + + /// Tries to compute the offset in bytes PtrB - PtrA. + std::optional getConstantOffset(Value *PtrA, Value *PtrB, + unsigned Depth = 0); + std::optional lookThroughComplexAddresses(Value *PtrA, Value *PtrB, + unsigned Depth); + std::optional lookThroughSelects(Value *PtrA, Value *PtrB, + unsigned Depth); + + /// Asserts that Ty is a whole number of bytes, and returns its size in bytes. + unsigned getTypeSizeInBytes(Type *Ty); + + /// Gets the element type of the vector that the chain will load or store. + /// This is nontrivial because the chain may contain elements of different + /// types; e.g. it's legal to have a chain that contains both i32 and float. + Type *getChainElemTy(const Chain &C); + + /// Determines whether ChainElem can be moved up (if IsLoad) or down (if + /// !IsLoad) to ChainBegin -- i.e. there are no intervening may-alias + /// instructions. /// - /// The elements of \p Chain must be all loads or all stores and must be in - /// address order. - ArrayRef getVectorizablePrefix(ArrayRef Chain); + /// The map ChainElemOffsets must contain all of the elements in + /// [ChainBegin, ChainElem] and their offsets from some arbitrary base + /// address. It's ok if it contains additional entries. + template + bool isSafeToMove( + Instruction *ChainElem, Instruction *ChainBegin, + const DenseMap &ChainOffsets); - /// Collects load and store instructions to vectorize. - std::pair collectInstructions(BasicBlock *BB); + /// Collects loads and stores grouped by "equivalence class", where: + /// - all elements in an eq class are a load or all are a store, + /// - they all load/store the same element size (it's OK to have e.g. i8 and + /// <4 x i8> in the same class, but not i32 and <4 x i8>), and + /// - they all have the same value for getUnderlyingObject(). + EquivalenceClassMap collectEquivalenceClasses(BasicBlock::iterator Begin, + BasicBlock::iterator End); - /// Processes the collected instructions, the \p Map. The values of \p Map - /// should be all loads or all stores. - bool vectorizeChains(InstrListMap &Map); - - /// Finds the load/stores to consecutive memory addresses and vectorizes them. - bool vectorizeInstructions(ArrayRef Instrs); - - /// Vectorizes the load instructions in Chain. - bool - vectorizeLoadChain(ArrayRef Chain, - SmallPtrSet *InstructionsProcessed); - - /// Vectorizes the store instructions in Chain. - bool - vectorizeStoreChain(ArrayRef Chain, - SmallPtrSet *InstructionsProcessed); - - /// Check if this load/store access is misaligned accesses. - /// Returns a \p RelativeSpeed of an operation if allowed suitable to - /// compare to another result for the same \p AddressSpace and potentially - /// different \p Alignment and \p SzInBytes. - bool accessIsMisaligned(unsigned SzInBytes, unsigned AddressSpace, - Align Alignment, unsigned &RelativeSpeed); + /// Partitions Instrs into "chains" where every instruction has a known + /// constant offset from the first instr in the chain. + /// + /// Postcondition: For all i, ret[i][0].second == 0, because the first instr + /// in the chain is the leader, and an instr touches distance 0 from itself. 
+ std::vector gatherChains(ArrayRef Instrs); }; class LoadStoreVectorizerLegacyPass : public FunctionPass { @@ -197,7 +347,8 @@ static char ID; LoadStoreVectorizerLegacyPass() : FunctionPass(ID) { - initializeLoadStoreVectorizerLegacyPassPass(*PassRegistry::getPassRegistry()); + initializeLoadStoreVectorizerLegacyPassPass( + *PassRegistry::getPassRegistry()); } bool runOnFunction(Function &F) override; @@ -249,11 +400,11 @@ AssumptionCache &AC = getAnalysis().getAssumptionCache(F); - Vectorizer V(F, AA, AC, DT, SE, TTI); - return V.run(); + return Vectorizer(F, AA, AC, DT, SE, TTI).run(); } -PreservedAnalyses LoadStoreVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) { +PreservedAnalyses LoadStoreVectorizerPass::run(Function &F, + FunctionAnalysisManager &AM) { // Don't vectorize when the attribute NoImplicitFloat is used. if (F.hasFnAttribute(Attribute::NoImplicitFloat)) return PreservedAnalyses::all(); @@ -264,125 +415,700 @@ TargetTransformInfo &TTI = AM.getResult(F); AssumptionCache &AC = AM.getResult(F); - Vectorizer V(F, AA, AC, DT, SE, TTI); - bool Changed = V.run(); + bool Changed = Vectorizer(F, AA, AC, DT, SE, TTI).run(); PreservedAnalyses PA; PA.preserveSet(); return Changed ? PA : PreservedAnalyses::all(); } -// The real propagateMetadata expects a SmallVector, but we deal in -// vectors of Instructions. -static void propagateMetadata(Instruction *I, ArrayRef IL) { - SmallVector VL(IL.begin(), IL.end()); - propagateMetadata(I, VL); -} - -// Vectorizer Implementation bool Vectorizer::run() { bool Changed = false; + LLVM_DEBUG(dbgs() << "LSV: Running on function " << F.getName() << "\n"); - // Scan the blocks in the function in post order. + // Break up the BB if there are any instrs which aren't guaranteed to transfer + // execution to their successor. + // + // Consider, for example: + // + // def assert_arr_len(int n) { if (n < 2) exit(); } + // + // load arr[0] + // call assert_array_len(arr.length) + // load arr[1] + // + // Even though assert_arr_len does not read or write any memory, we can't + // speculate the second load before the call. More info at + // https://github.com/llvm/llvm-project/issues/52950. for (BasicBlock *BB : post_order(&F)) { - InstrListMap LoadRefs, StoreRefs; - std::tie(LoadRefs, StoreRefs) = collectInstructions(BB); - Changed |= vectorizeChains(LoadRefs); - Changed |= vectorizeChains(StoreRefs); + if (BB->empty()) + continue; + + SmallVector Barriers; + Barriers.push_back(BB->begin()); + for (Instruction &I : *BB) { + if (!isGuaranteedToTransferExecutionToSuccessor(&I)) { + Barriers.push_back(I.getIterator()); + } + } + Barriers.push_back(BB->end()); + + for (auto It = Barriers.begin(), End = std::prev(Barriers.end()); It != End; + ++It) { + Changed |= runOnPseudoBB(*It, *std::next(It)); + } + + // Erasing right after vectorization can mess up our BB iterators, and also + // can make the equivalence class keys point to freed memory. This is + // fixable, but it's simpler just to wait until we're done with the BB and + // erase all at once. 
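+    // Each instruction in ToErase has been replaced by a vector access, so it
+    // should be dead by now; erase it, and also erase its GEP pointer operand
+    // if that became dead as a result.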
+ for (Instruction *I : ToErase) { + auto *GEP = dyn_cast(getLoadStorePointerOperand(I)); + if (I->use_empty()) + I->eraseFromParent(); + if (GEP && GEP->use_empty()) + GEP->eraseFromParent(); + } + ToErase.clear(); } return Changed; } -unsigned Vectorizer::getPointerAddressSpace(Value *I) { - if (LoadInst *L = dyn_cast(I)) - return L->getPointerAddressSpace(); - if (StoreInst *S = dyn_cast(I)) - return S->getPointerAddressSpace(); - return -1; +bool Vectorizer::runOnPseudoBB(BasicBlock::iterator Begin, + BasicBlock::iterator End) { + LLVM_DEBUG({ + dbgs() << "LSV: Running on pseudo-BB [" << *Begin << ", "; + if (End != Begin->getParent()->end()) + dbgs() << *End; + else + dbgs() << ""; + dbgs() << ")\n"; + }); + + bool Changed = false; + for (const auto &[EqClassKey, EqClass] : + collectEquivalenceClasses(Begin, End)) { + Changed |= runOnEquivalenceClass(EqClassKey, EqClass); + } + + return Changed; } -// FIXME: Merge with llvm::isConsecutiveAccess -bool Vectorizer::isConsecutiveAccess(Value *A, Value *B) { - Value *PtrA = getLoadStorePointerOperand(A); - Value *PtrB = getLoadStorePointerOperand(B); - unsigned ASA = getPointerAddressSpace(A); - unsigned ASB = getPointerAddressSpace(B); +bool Vectorizer::runOnEquivalenceClass(const EqClassKey &EqClassKey, + ArrayRef EqClass) { + bool Changed = false; - // Check that the address spaces match and that the pointers are valid. - if (!PtrA || !PtrB || (ASA != ASB)) - return false; + LLVM_DEBUG({ + dbgs() << "LSV: Running on equivalence class of size " << EqClass.size() + << " keyed on " << EqClassKey << ":\n"; + for (Instruction *I : EqClass) { + dbgs() << " " << *I << "\n"; + } + }); - // Make sure that A and B are different pointers of the same size type. - Type *PtrATy = getLoadStoreType(A); - Type *PtrBTy = getLoadStoreType(B); - if (PtrA == PtrB || - PtrATy->isVectorTy() != PtrBTy->isVectorTy() || - DL.getTypeStoreSize(PtrATy) != DL.getTypeStoreSize(PtrBTy) || - DL.getTypeStoreSize(PtrATy->getScalarType()) != - DL.getTypeStoreSize(PtrBTy->getScalarType())) - return false; - - unsigned PtrOffsetWidth = DL.getIndexSizeInBits(ASA); - APInt Size(PtrOffsetWidth, DL.getTypeStoreSize(PtrATy)); - - return areConsecutivePointers(PtrA, PtrB, Size); + std::vector Chains = gatherChains(EqClass); + LLVM_DEBUG(dbgs() << "LSV: Got " << Chains.size() + << " nontrivial chains.\n";); + for (Chain &C : Chains) { + Changed |= runOnChain(C); + } + return Changed; } -bool Vectorizer::areConsecutivePointers(Value *PtrA, Value *PtrB, - APInt PtrDelta, unsigned Depth) const { - unsigned OffsetBitWidth = DL.getIndexTypeSizeInBits(PtrA->getType()); - APInt OffsetA(OffsetBitWidth, 0); - APInt OffsetB(OffsetBitWidth, 0); - PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA); - PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB); +bool Vectorizer::runOnChain(Chain &C) { + LLVM_DEBUG({ + dbgs() << "LSV: Running on chain with " << C.size() << " instructions:\n"; + dumpChain(C); + }); - unsigned NewPtrBitWidth = DL.getTypeStoreSizeInBits(PtrA->getType()); + // Split up the chain into increasingly smaller chains, until we can finally + // vectorize the chains. + // + // (Don't be scared by the depth of the loop nest here. These operations are + // all at worst O(n lg n) in the number of instructions, and splitting chains + // doesn't change the number of instrs. So the whole loop nest is O(n lg n).) 
+ bool Changed = false; + for (auto &C : splitChainByMayAliasInstrs(C)) { + for (auto &C : splitChainByContiguity(C)) { + for (auto &C : splitChainByAlignment(C)) { + Changed |= vectorizeChain(C); + } + } + } + return Changed; +} - if (NewPtrBitWidth != DL.getTypeStoreSizeInBits(PtrB->getType())) +std::vector Vectorizer::splitChainByMayAliasInstrs(Chain &C) { + if (C.empty()) + return {}; + + sortChainInBBOrder(C); + + LLVM_DEBUG({ + dbgs() << "LSV: splitChainByMayAliasInstrs considering chain:\n"; + dumpChain(C); + }); + + // We know that elements in the chain with nonverlapping offsets can't + // alias, but AA may not be smart enough to figure this out. Use a + // hashtable. + DenseMap ChainOffsets; + for (const auto &E : C) { + ChainOffsets.insert({&*E.Inst, E.OffsetFromLeader}); + } + + // Loads get hoisted up to the first load in the chain. Stores get sunk + // down to the last store in the chain. Our algorithm for loads is: + // + // - Take the first element of the chain. This is the start of a new chain. + // - Take the next element of `Chain` and check for may-alias instructions + // up to the start of NewChain. If no may-alias instrs, add it to + // NewChain. Otherwise, start a new NewChain. + // + // For stores it's the same except in the reverse direction. + // + // We expect IsLoad to be an std::bool_constant. + auto Impl = [&](auto IsLoad) { + auto ChainBegin = [&] { + if constexpr (IsLoad()) + return C.begin(); + else + return C.rbegin(); + }(); + auto ChainEnd = [&] { + if constexpr (IsLoad()) + return C.end(); + else + return C.rend(); + }(); + assert(ChainBegin != ChainEnd); + + std::vector Chains; + SmallVector NewChain; + NewChain.push_back(*ChainBegin); + for (auto ChainIt = std::next(ChainBegin); ChainIt != ChainEnd; ++ChainIt) { + if (isSafeToMove(ChainIt->Inst, NewChain.front().Inst, + ChainOffsets)) { + LLVM_DEBUG(dbgs() << "LSV: No intervening may-alias instrs; can merge " + << *ChainIt->Inst << " into " << *ChainBegin->Inst + << "\n"); + NewChain.push_back(*ChainIt); + } else { + LLVM_DEBUG( + dbgs() << "LSV: Found intervening may-alias instrs; cannot merge " + << *ChainIt->Inst << " into " << *ChainBegin->Inst << "\n"); + if (NewChain.size() > 1) { + LLVM_DEBUG({ + dbgs() << "LSV: got nontrivial chain without aliasing instrs:\n"; + dumpChain(NewChain); + }); + Chains.push_back(std::move(NewChain)); + } + + // Start a new chain. + NewChain = SmallVector({*ChainIt}); + } + } + if (NewChain.size() > 1) { + LLVM_DEBUG({ + dbgs() << "LSV: got nontrivial chain without aliasing instrs:\n"; + dumpChain(NewChain); + }); + Chains.push_back(std::move(NewChain)); + } + return Chains; + }; + + if (isa(C[0].Inst)) { + return Impl(/*IsLoad=*/std::bool_constant()); + } else { + assert(isa(C[0].Inst)); + return Impl(/*IsLoad=*/std::bool_constant()); + } +} + +std::vector Vectorizer::splitChainByContiguity(Chain &C) { + if (C.empty()) + return {}; + + sortChainInOffsetOrder(C); + + LLVM_DEBUG({ + dbgs() << "LSV: splitChainByContiguity considering chain:\n"; + dumpChain(C); + }); + + std::vector Ret; + Ret.push_back({C.front()}); + + for (auto It = std::next(C.begin()), End = C.end(); It != End; ++It) { + // `prev` accesses offsets [PrevDistFromBase, PrevReadEnd). 
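+    // For example (illustrative offsets): if Prev is a 4-byte access at
+    // OffsetFromLeader 8, it covers [8, 12), so the next element extends the
+    // current chain only if its OffsetFromLeader is exactly 12; any gap or
+    // overlap starts a new chain.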
+ auto &CurChain = Ret.back(); + const ChainElem &Prev = CurChain.back(); + unsigned SzBits = DL.getTypeSizeInBits(getLoadStoreType(&*Prev.Inst)); + assert(SzBits % 8 == 0 && "Non-byte sizes should have been filtered out by " + "collectEquivalenceClass"); + APInt PrevReadEnd = Prev.OffsetFromLeader + SzBits / 8; + + // Add this instruction to the end of the current chain, or start a new one. + bool AreContiguous = It->OffsetFromLeader == PrevReadEnd; + LLVM_DEBUG(dbgs() << "LSV: Instructions are " + << (AreContiguous ? "" : "not ") << "contiguous: " + << *Prev.Inst << " (ends at offset " << PrevReadEnd + << ") -> " << *It->Inst << " (starts at offset " + << It->OffsetFromLeader << ")\n"); + if (AreContiguous) { + CurChain.push_back(*It); + } else { + Ret.push_back({*It}); + } + } + + // Filter out length-1 chains, these are uninteresting. + llvm::erase_if(Ret, [](const auto &Chain) { return Chain.size() <= 1; }); + return Ret; +} + +unsigned Vectorizer::getTypeSizeInBytes(Type *ty) { + unsigned SzBits = DL.getTypeSizeInBits(ty); + assert(SzBits % 8 == 0); + return SzBits / 8; +} + +Type *Vectorizer::getChainElemTy(const Chain &C) { + assert(!C.empty()); + // The rules are: + // - If there are any pointer types in the chain, use an integer type. + // - Prefer an integer type if it appears in the chain. + // - Otherwise, use the first type in the chain. + // + // The rule about pointer types is a simplification when we merge e.g. a load + // of a ptr and a double. There's no direct conversion from a ptr to a + // double; it requires a ptrtoint followed by a bitcast. + // + // It's unclear to me if the other rules have any practical effect, but we do + // it to match this pass's previous behavior. + if (any_of(C, [](const ChainElem &E) { + return getScalarTy(getLoadStoreType(E.Inst))->isPointerTy(); + })) { + return Type::getIntNTy(F.getContext(), + DL.getTypeSizeInBits(getLoadStoreType(C[0].Inst))); + } + + for (const ChainElem &E : C) { + Type *T = getScalarTy(getLoadStoreType(E.Inst)); + if (T->isIntegerTy()) { + return T; + } + } + return getScalarTy(getLoadStoreType(C[0].Inst)); +} + +std::vector Vectorizer::splitChainByAlignment(Chain &C) { + // We use a simple greedy algorithm. + // - Given a chain of length N, find all prefixes that + // (a) are not longer than the max register length, and + // (b) are a power of 2. + // - Starting from the longest prefix, try to create a vector of that length. + // - If one of them works, great. Recurse on any remaining elements in the + // chain. + // - If none of them work, discard the first element and recurse on a chain + // of length N-1. + if (C.empty()) + return {}; + + sortChainInOffsetOrder(C); + + LLVM_DEBUG({ + dbgs() << "LSV: splitChainByAlignment considering chain:\n"; + dumpChain(C); + }); + + bool IsLoadChain = isa(C[0].Inst); + auto getVectorFactor = [&](unsigned VF, unsigned LoadStoreSize, + unsigned ChainSizeBytes, VectorType *VecTy) { + return IsLoadChain ? 
TTI.getLoadVectorFactor(VF, LoadStoreSize, + ChainSizeBytes, VecTy) + : TTI.getStoreVectorFactor(VF, LoadStoreSize, + ChainSizeBytes, VecTy); + }; + +#ifndef NDEBUG + for (const auto &E : C) { + Type *Ty = getScalarTy(getLoadStoreType(E.Inst)); + assert(isPowerOf2_32(DL.getTypeSizeInBits(Ty)) && + "Should have filtered out non-power-of-two elements in " + "collectEquivalenceClasses."); + } +#endif + + unsigned AS = getLoadStoreAddressSpace(C[0].Inst); + unsigned VecRegBytes = TTI.getLoadStoreVecRegBitWidth(AS) / 8; + + std::vector Ret; + for (unsigned CBegin = 0; CBegin < C.size(); ++CBegin) { + // closed interval [Begin, End]. + SmallVector, 8> + CandidateChains; + for (unsigned CEnd = CBegin + 1, Size = C.size(); CEnd < Size; ++CEnd) { + APInt Sz = C[CEnd].OffsetFromLeader + + getTypeSizeInBytes(getLoadStoreType(C[CEnd].Inst)) - + C[CBegin].OffsetFromLeader; + if (Sz.sgt(VecRegBytes)) { + break; + } + if (Sz.isPowerOf2()) { + assert(Sz.sle(std::numeric_limits::max())); + CandidateChains.push_back( + {CEnd, static_cast(Sz.getLimitedValue())}); + } + } + + // Consider the longest chain first. + for (auto It = CandidateChains.rbegin(), End = CandidateChains.rend(); + It != End; ++It) { + auto [CEnd, SizeBytes] = *It; + LLVM_DEBUG( + dbgs() << "LSV: splitChainByAlignment considering candidate chain [" + << *C[CBegin].Inst << ", " << *C[CEnd].Inst << "]\n"); + + Type *VecElemTy = getChainElemTy(C); + // Note, VecElemTy is a power of 2, but might be less than one byte. For + // example, we can vectorize 2 x <2 x i4> to <4 x i4>, and in this case + // VecElemTy would be i4. + unsigned VecElemBits = DL.getTypeSizeInBits(VecElemTy); + + // SizeBytes and VecElemBits are powers of 2, so they divide evenly. + assert((8 * SizeBytes) % VecElemBits == 0); + unsigned NumVecElems = 8 * SizeBytes / VecElemBits; + FixedVectorType *VecTy = FixedVectorType::get(VecElemTy, NumVecElems); + unsigned VF = 8 * VecRegBytes / VecElemBits; + + // Check that TTI is happy with this vectorization factor. + unsigned TargetVF = getVectorFactor(VF, VecElemBits, + VecElemBits * NumVecElems / 8, VecTy); + if (TargetVF != VF && TargetVF < NumVecElems) { + LLVM_DEBUG( + dbgs() << "LSV: splitChainByAlignment discarding candidate chain " + "because TargetVF=" + << TargetVF << " != VF=" << VF + << " and TargetVF < NumVecElems=" << NumVecElems << "\n"); + continue; + } + + // Is a load/store with this alignment allowed by TTI and at least as fast + // as an unvectorized load/store? + auto IsAllowedAndFast = [&, SizeBytes = SizeBytes](Align Alignment) { + if (Alignment.value() % SizeBytes == 0) + return true; + unsigned VectorizedSpeed = 0; + bool AllowsMisaligned = TTI.allowsMisalignedMemoryAccesses( + F.getContext(), SizeBytes * 8, AS, Alignment, &VectorizedSpeed); + if (!AllowsMisaligned) { + LLVM_DEBUG(dbgs() + << "LSV: Access of " << SizeBytes << "B in addrspace " + << AS << " with alignment " << Alignment.value() + << " is misaligned, and therefore can't be vectorized.\n"); + return false; + } + + unsigned ElementwiseSpeed = 0; + TTI.allowsMisalignedMemoryAccesses(F.getContext(), VecElemBits, AS, + Alignment, &ElementwiseSpeed); + if (VectorizedSpeed < ElementwiseSpeed) { + LLVM_DEBUG(dbgs() + << "LSV: Access of " << SizeBytes << "B in addrspace " + << AS << " with alignment " << Alignment.value() + << " is has relative speed " << VectorizedSpeed + << ", which is lower than the elementwise speed of " + << ElementwiseSpeed + << ". 
Therefore this access can't be vectorized.\n"); + return false; + } + return true; + }; + + // If we're loading/storing from an alloca, align it if possible. + // + // FIXME: We eagerly upgrade the alignment, regardless of whether TTI + // tells us this is beneficial. This feels a bit odd, but it matches + // existing tests. This isn't *so* bad, because at most we align to 4 + // bytes (current value of StackAdjustedAlignment). + // + // FIXME: We will upgrade the alignment of the alloca even if it turns out + // we can't vectorize for some other reason. + Align Alignment = getLoadStoreAlignment(C[CBegin].Inst); + if (AS == DL.getAllocaAddrSpace() && Alignment.value() % SizeBytes != 0 && + IsAllowedAndFast(Align(StackAdjustedAlignment))) { + Align NewAlign = getOrEnforceKnownAlignment( + getLoadStorePointerOperand(C[CBegin].Inst), + Align(StackAdjustedAlignment), DL, C[CBegin].Inst, nullptr, &DT); + if (NewAlign >= Alignment) { + LLVM_DEBUG(dbgs() + << "LSV: splitByChain upgrading alloca alignment from " + << Alignment.value() << " to " << NewAlign.value() + << "\n"); + Alignment = NewAlign; + } + } + + if (!IsAllowedAndFast(Alignment)) { + LLVM_DEBUG( + dbgs() << "LSV: splitChainByAlignment discarding candidate chain " + "because its alignment is not AllowedAndFast: " + << Alignment.value() << "\n"); + continue; + } + + if ((IsLoadChain && + !TTI.isLegalToVectorizeLoadChain(SizeBytes, Alignment, AS)) || + (!IsLoadChain && + !TTI.isLegalToVectorizeStoreChain(SizeBytes, Alignment, AS))) { + LLVM_DEBUG( + dbgs() << "LSV: splitChainByAlignment discarding candidate chain " + "because !isLegalToVectorizeLoad/StoreChain."); + continue; + } + + // Hooray, we can vectorize this chain! + Chain &NewChain = Ret.emplace_back(); + for (unsigned I = CBegin; I <= CEnd; ++I) { + NewChain.push_back(C[I]); + } + CBegin = CEnd; // Skip over the instructions we've added to the chain. + break; + } + } + return Ret; +} + +bool Vectorizer::vectorizeChain(Chain &C) { + if (C.size() < 2) return false; - // In case if we have to shrink the pointer - // stripAndAccumulateInBoundsConstantOffsets should properly handle a - // possible overflow and the value should fit into a smallest data type - // used in the cast/gep chain. - assert(OffsetA.getSignificantBits() <= NewPtrBitWidth && - OffsetB.getSignificantBits() <= NewPtrBitWidth); + sortChainInOffsetOrder(C); - OffsetA = OffsetA.sextOrTrunc(NewPtrBitWidth); - OffsetB = OffsetB.sextOrTrunc(NewPtrBitWidth); - PtrDelta = PtrDelta.sextOrTrunc(NewPtrBitWidth); + LLVM_DEBUG({ + dbgs() << "LSV: Vectorizing chain of " << C.size() << " instructions:\n"; + dumpChain(C); + }); - APInt OffsetDelta = OffsetB - OffsetA; + Type *VecElemTy = getChainElemTy(C); + bool IsLoadChain = isa(C[0].Inst); + unsigned AS = getLoadStoreAddressSpace(C[0].Inst); + unsigned ChainBytes = std::accumulate( + C.begin(), C.end(), 0u, [&](unsigned Bytes, const ChainElem &E) { + return Bytes + getTypeSizeInBytes(getLoadStoreType(E.Inst)); + }); + assert(ChainBytes % DL.getTypeStoreSize(VecElemTy) == 0); + Type *VecTy = FixedVectorType::get( + VecElemTy, ChainBytes / DL.getTypeStoreSize(VecElemTy)); - // Check if they are based on the same pointer. That makes the offsets - // sufficient. - if (PtrA == PtrB) - return OffsetDelta == PtrDelta; + Align Alignment = getLoadStoreAlignment(C[0].Inst); + // If this is a load/store of an alloca, we might have upgraded the alloca's + // alignment earlier. Get the new alignment. 
+ if (AS == DL.getAllocaAddrSpace()) { + Alignment = std::max( + Alignment, + getOrEnforceKnownAlignment(getLoadStorePointerOperand(C[0].Inst), + MaybeAlign(), DL, C[0].Inst, nullptr, &DT)); + } - // Compute the necessary base pointer delta to have the necessary final delta - // equal to the pointer delta requested. - APInt BaseDelta = PtrDelta - OffsetDelta; + // All elements of the chain must have the same scalar-type size. +#ifndef NDEBUG + for (const ChainElem &E : C) + assert(getTypeSizeInBytes(getScalarTy(getLoadStoreType(E.Inst))) == + getTypeSizeInBytes(VecElemTy)); +#endif - // Compute the distance with SCEV between the base pointers. - const SCEV *PtrSCEVA = SE.getSCEV(PtrA); - const SCEV *PtrSCEVB = SE.getSCEV(PtrB); - const SCEV *C = SE.getConstant(BaseDelta); - const SCEV *X = SE.getAddExpr(PtrSCEVA, C); - if (X == PtrSCEVB) + Instruction *VecInst; + if (IsLoadChain) { + // Loads get hoisted to the location of the first load in the chain. We may + // also need to hoist the (transitive) operands of the loads. + Builder.SetInsertPoint( + std::min_element(C.begin(), C.end(), [](const auto &A, const auto &B) { + return A.Inst->comesBefore(B.Inst); + })->Inst); + + // Chain is in offset order, so C[0] is the instr with the lowest offset, + // i.e. the root of the vector. + Value *Bitcast = Builder.CreateBitOrPointerCast( + getLoadStorePointerOperand(C[0].Inst), VecTy->getPointerTo(AS)); + VecInst = Builder.CreateAlignedLoad(VecTy, Bitcast, Alignment); + + unsigned VecIdx = 0; + for (const ChainElem &E : C) { + Instruction *I = E.Inst; + Value *V; + Type *T = getLoadStoreType(I); + if (auto *VT = dyn_cast(T)) { + auto Mask = llvm::to_vector<8>( + llvm::seq(VecIdx, VecIdx + VT->getNumElements())); + V = Builder.CreateShuffleVector(VecInst, Mask, I->getName()); + VecIdx += VT->getNumElements(); + } else { + V = Builder.CreateExtractElement(VecInst, Builder.getInt32(VecIdx), + I->getName()); + ++VecIdx; + } + if (V->getType() != I->getType()) { + V = Builder.CreateBitOrPointerCast(V, I->getType()); + } + I->replaceAllUsesWith(V); + } + + // Finally, we need to reorder the instrs in the BB so that the (transitive) + // operands of VecInst appear before it. To see why, suppose we have + // vectorized the following code: + // + // ptr1 = gep a, 1 + // load1 = load i32 ptr1 + // ptr0 = gep a, 0 + // load0 = load i32 ptr0 + // + // We will put the vectorized load at the location of the earliest load in + // the BB, i.e. load1. We get: + // + // ptr1 = gep a, 1 + // loadv = load <2 x i32> ptr0 + // load0 = extractelement loadv, 0 + // load1 = extractelement loadv, 1 + // ptr0 = gep a, 0 + // + // Notice that loadv uses ptr0, which is defined *after* it! + reorder(VecInst); + } else { + // Stores get sunk to the location of the last store in the chain. + Builder.SetInsertPoint( + std::max_element(C.begin(), C.end(), [](auto &A, auto &B) { + return A.Inst->comesBefore(B.Inst); + })->Inst); + + // Build the vector to store. 
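+    // Scalar operands are bitcast/pointer-cast to VecElemTy as needed;
+    // operands that are themselves vectors are split into their elements
+    // before being inserted.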
+ Value *Vec = PoisonValue::get(VecTy); + unsigned VecIdx = 0; + auto InsertElem = [&](Value *V) { + if (V->getType() != VecElemTy) { + V = Builder.CreateBitOrPointerCast(V, VecElemTy); + } + Vec = Builder.CreateInsertElement(Vec, V, Builder.getInt32(VecIdx++)); + }; + for (const ChainElem &E : C) { + auto I = cast(E.Inst); + if (FixedVectorType *VT = + dyn_cast(getLoadStoreType(I))) { + for (int J = 0, JE = VT->getNumElements(); J < JE; ++J) { + InsertElem(Builder.CreateExtractElement(I->getValueOperand(), + Builder.getInt32(J))); + } + } else { + InsertElem(I->getValueOperand()); + } + } + + // Chain is in offset order, so C[0] is the instr with the lowest offset, + // i.e. the root of the vector. + VecInst = Builder.CreateAlignedStore( + Vec, + Builder.CreateBitOrPointerCast(getLoadStorePointerOperand(C[0].Inst), + VecTy->getPointerTo(AS)), + Alignment); + } + + propagateMetadata(VecInst, C); + + for (const ChainElem &E : C) + ToErase.push_back(E.Inst); + + ++NumVectorInstructions; + NumScalarsVectorized += C.size(); + return true; +} + +template +bool Vectorizer::isSafeToMove( + Instruction *ChainElem, Instruction *ChainBegin, + const DenseMap &ChainOffsets) { + LLVM_DEBUG(dbgs() << "LSV: isSafeToMove(" << *ChainElem << " -> " + << *ChainBegin << ")\n"); + + assert(isa(ChainElem) == IsLoadChain); + if (ChainElem == ChainBegin) return true; - // The above check will not catch the cases where one of the pointers is - // factorized but the other one is not, such as (C + (S * (A + B))) vs - // (AS + BS). Get the minus scev. That will allow re-combining the expresions - // and getting the simplified difference. - const SCEV *Dist = SE.getMinusSCEV(PtrSCEVB, PtrSCEVA); - if (C == Dist) + // Invariant loads can always be reordered; by definition they are not + // clobbered by stores. + if (isInvariantLoad(ChainElem)) return true; - // Sometimes even this doesn't work, because SCEV can't always see through - // patterns that look like (gep (ext (add (shl X, C1), C2))). Try checking - // things the hard way. - return lookThroughComplexAddresses(PtrA, PtrB, BaseDelta, Depth); + const APInt &ChainElemOffset = ChainOffsets.at(ChainElem); + + auto BBIt = std::next([&] { + if constexpr (IsLoadChain) + return BasicBlock::reverse_iterator(ChainElem); + else + return BasicBlock::iterator(ChainElem); + }()); + auto BBItEnd = std::next([&] { + if constexpr (IsLoadChain) + return BasicBlock::reverse_iterator(ChainBegin); + else + return BasicBlock::iterator(ChainBegin); + }()); + + for (; BBIt != BBItEnd; ++BBIt) { + Instruction *I = &*BBIt; + + if (!I->mayReadOrWriteMemory()) + continue; + + // Loads can be reordered with other loads. + if (IsLoadChain && isa(I)) + continue; + + // Stores can be sunk below invariant loads. + if (!IsLoadChain && isInvariantLoad(I)) + continue; + + // If I is in the chain, we can tell whether it aliases ChainIt by checking + // what offset ChainIt accesses. This may be better than AA is able to do. + // + // We should really only have duplicate offsets for stores (the duplicate + // loads should be CSE'ed), but in case we have a duplicate load, we'll + // split the chain so we don't have to handle this case specially. + if (auto OffsetIt = ChainOffsets.find(&*BBIt); + OffsetIt != ChainOffsets.end()) { + if ((OffsetIt->second - ChainElemOffset) + .abs() + .ult(getTypeSizeInBytes(getLoadStoreType(I)))) { + LLVM_DEBUG({ + // Double check that AA also sees this alias. If not, we probably + // have a bug. 
+ ModRefInfo MR = AA.getModRefInfo(I, MemoryLocation::get(ChainElem)); + assert(IsLoadChain ? isModSet(MR) : isModOrRefSet(MR)); + dbgs() << "LSV: Found alias in chain: " << *I << "\n"; + }); + return false; // We found an aliasing instruction; bail. + } else { + continue; // We're confident there's no alias. + } + } + + LLVM_DEBUG(dbgs() << "LSV: Querying AA for " << *I << "\n"); + ModRefInfo MR = AA.getModRefInfo(I, MemoryLocation::get(ChainElem)); + if (IsLoadChain ? isModSet(MR) : isModOrRefSet(MR)) { + LLVM_DEBUG(dbgs() << "LSV: Found alias in chain:\n" + << " Aliasing instruction:\n" + << " " << *I << '\n' + << " Aliased instruction and pointer:\n" + << " " << *ChainElem << '\n' + << " " << *getLoadStorePointerOperand(ChainElem) + << '\n'); + + return false; + } + } + return true; } static bool checkNoWrapFlags(Instruction *I, bool Signed) { @@ -394,10 +1120,14 @@ static bool checkIfSafeAddSequence(const APInt &IdxDiff, Instruction *AddOpA, unsigned MatchingOpIdxA, Instruction *AddOpB, unsigned MatchingOpIdxB, bool Signed) { - // If both OpA and OpB is an add with NSW/NUW and with - // one of the operands being the same, we can guarantee that the - // transformation is safe if we can prove that OpA won't overflow when - // IdxDiff added to the other operand of OpA. + LLVM_DEBUG(dbgs() << "LSV: checkIfSafeAddSequence IdxDiff=" << IdxDiff + << ", AddOpA=" << *AddOpA << ", MatchingOpIdxA=" + << MatchingOpIdxA << ", AddOpB=" << *AddOpB + << ", MatchingOpIdxB=" << MatchingOpIdxB + << ", Signed=" << Signed << "\n"); + // If both OpA and OpB are adds with NSW/NUW and with one of the operands + // being the same, we can guarantee that the transformation is safe if we can + // prove that OpA won't overflow when Ret added to the other operand of OpA. // For example: // %tmp7 = add nsw i32 %tmp2, %v0 // %tmp8 = sext i32 %tmp7 to i64 @@ -406,10 +1136,9 @@ // %tmp12 = add nsw i32 %tmp2, %tmp11 // %tmp13 = sext i32 %tmp12 to i64 // - // Both %tmp7 and %tmp2 has the nsw flag and the first operand - // is %tmp2. It's guaranteed that adding 1 to %tmp7 won't overflow - // because %tmp11 adds 1 to %v0 and both %tmp11 and %tmp12 has the - // nsw flag. + // Both %tmp7 and %tmp12 have the nsw flag and the first operand is %tmp2. + // It's guaranteed that adding 1 to %tmp7 won't overflow because %tmp11 adds + // 1 to %v0 and both %tmp11 and %tmp12 have the nsw flag. assert(AddOpA->getOpcode() == Instruction::Add && AddOpB->getOpcode() == Instruction::Add && checkNoWrapFlags(AddOpA, Signed) && checkNoWrapFlags(AddOpB, Signed)); @@ -460,24 +1189,26 @@ return false; } -bool Vectorizer::lookThroughComplexAddresses(Value *PtrA, Value *PtrB, - APInt PtrDelta, - unsigned Depth) const { +std::optional Vectorizer::lookThroughComplexAddresses(Value *PtrA, + Value *PtrB, + unsigned Depth) { + LLVM_DEBUG(dbgs() << "LSV: lookThroughComplexAddresses PtrA=" << *PtrA + << " PtrB=" << *PtrB << " Depth=" << Depth << "\n"); auto *GEPA = dyn_cast(PtrA); auto *GEPB = dyn_cast(PtrB); if (!GEPA || !GEPB) - return lookThroughSelects(PtrA, PtrB, PtrDelta, Depth); + return lookThroughSelects(PtrA, PtrB, Depth); // Look through GEPs after checking they're the same except for the last // index. 
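+  // For example (illustrative IR), these two GEPs match except for their final
+  // index, so the code below compares %i and %j:
+  //   %gepA = getelementptr inbounds i32, ptr %base, i64 %i
+  //   %gepB = getelementptr inbounds i32, ptr %base, i64 %j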
if (GEPA->getNumOperands() != GEPB->getNumOperands() || GEPA->getPointerOperand() != GEPB->getPointerOperand()) - return false; + return std::nullopt; gep_type_iterator GTIA = gep_type_begin(GEPA); gep_type_iterator GTIB = gep_type_begin(GEPB); for (unsigned I = 0, E = GEPA->getNumIndices() - 1; I < E; ++I) { if (GTIA.getOperand() != GTIB.getOperand()) - return false; + return std::nullopt; ++GTIA; ++GTIB; } @@ -486,23 +1217,13 @@ Instruction *OpB = dyn_cast(GTIB.getOperand()); if (!OpA || !OpB || OpA->getOpcode() != OpB->getOpcode() || OpA->getType() != OpB->getType()) - return false; + return std::nullopt; - if (PtrDelta.isNegative()) { - if (PtrDelta.isMinSignedValue()) - return false; - PtrDelta.negate(); - std::swap(OpA, OpB); - } uint64_t Stride = DL.getTypeAllocSize(GTIA.getIndexedType()); - if (PtrDelta.urem(Stride) != 0) - return false; - unsigned IdxBitWidth = OpA->getType()->getScalarSizeInBits(); - APInt IdxDiff = PtrDelta.udiv(Stride).zext(IdxBitWidth); // Only look through a ZExt/SExt. if (!isa(OpA) && !isa(OpA)) - return false; + return std::nullopt; bool Signed = isa(OpA); @@ -510,7 +1231,22 @@ Value *ValA = OpA->getOperand(0); OpB = dyn_cast(OpB->getOperand(0)); if (!OpB || ValA->getType() != OpB->getType()) - return false; + return std::nullopt; + + const SCEV *OffsetSCEVA = SE.getSCEV(ValA); + const SCEV *OffsetSCEVB = SE.getSCEV(OpB); + const SCEV *IdxDiffSCEV = SE.getMinusSCEV(OffsetSCEVB, OffsetSCEVA); + if (IdxDiffSCEV == SE.getCouldNotCompute()) + return std::nullopt; + + ConstantRange IdxDiffRange = SE.getSignedRange(IdxDiffSCEV); + if (!IdxDiffRange.isSingleElement()) { + return std::nullopt; + } + APInt IdxDiff = *IdxDiffRange.getSingleElement(); + + LLVM_DEBUG(dbgs() << "LSV: lookThroughComplexAddresses IdxDiff=" << IdxDiff + << "\n"); // Now we need to prove that adding IdxDiff to ValA won't overflow. bool Safe = false; @@ -529,10 +1265,9 @@ if (!Safe && OpA && OpA->getOpcode() == Instruction::Add && OpB->getOpcode() == Instruction::Add && checkNoWrapFlags(OpA, Signed) && checkNoWrapFlags(OpB, Signed)) { - // In the checks below a matching operand in OpA and OpB is - // an operand which is the same in those two instructions. - // Below we account for possible orders of the operands of - // these add instructions. + // In the checks below a matching operand in OpA and OpB is an operand which + // is the same in those two instructions. Below we account for possible + // orders of the operands of these add instructions. for (unsigned MatchingOpIdxA : {0, 1}) for (unsigned MatchingOpIdxB : {0, 1}) if (!Safe) @@ -543,804 +1278,266 @@ unsigned BitWidth = ValA->getType()->getScalarSizeInBits(); // Third attempt: - // If all set bits of IdxDiff or any higher order bit other than the sign bit - // are known to be zero in ValA, we can add Diff to it while guaranteeing no - // overflow of any sort. + // + // Assuming IdxDiff is positive: If all set bits of IdxDiff or any higher + // order bit other than the sign bit are known to be zero in ValA, we can add + // Diff to it while guaranteeing no overflow of any sort. + // + // If IdxDiff is negative, do the same, but swap ValA and ValB. if (!Safe) { + // When computing known bits, use the GEPs as context instructions, since + // they likely are in the same BB as the load/store. + Instruction *ContextInst = GEPA->comesBefore(GEPB) ? GEPB : GEPA; KnownBits Known(BitWidth); - computeKnownBits(ValA, Known, DL, 0, &AC, OpB, &DT); + computeKnownBits((IdxDiff.sge(0) ? 
ValA : OpB), Known, DL, 0, &AC, + ContextInst, &DT); APInt BitsAllowedToBeSet = Known.Zero.zext(IdxDiff.getBitWidth()); if (Signed) BitsAllowedToBeSet.clearBit(BitWidth - 1); - if (BitsAllowedToBeSet.ult(IdxDiff)) - return false; + if (BitsAllowedToBeSet.ult(IdxDiff.abs())) + return std::nullopt; + Safe = true; } - const SCEV *OffsetSCEVA = SE.getSCEV(ValA); - const SCEV *OffsetSCEVB = SE.getSCEV(OpB); - const SCEV *C = SE.getConstant(IdxDiff.trunc(BitWidth)); - const SCEV *X = SE.getAddExpr(OffsetSCEVA, C); - return X == OffsetSCEVB; + if (Safe) { + return IdxDiff * Stride; + } + return std::nullopt; } -bool Vectorizer::lookThroughSelects(Value *PtrA, Value *PtrB, - const APInt &PtrDelta, - unsigned Depth) const { +std::optional Vectorizer::lookThroughSelects(Value *PtrA, Value *PtrB, + unsigned Depth) { if (Depth++ == MaxDepth) - return false; + return std::nullopt; if (auto *SelectA = dyn_cast(PtrA)) { if (auto *SelectB = dyn_cast(PtrB)) { - return SelectA->getCondition() == SelectB->getCondition() && - areConsecutivePointers(SelectA->getTrueValue(), - SelectB->getTrueValue(), PtrDelta, Depth) && - areConsecutivePointers(SelectA->getFalseValue(), - SelectB->getFalseValue(), PtrDelta, Depth); - } - } - return false; -} - -void Vectorizer::reorder(Instruction *I) { - SmallPtrSet InstructionsToMove; - SmallVector Worklist; - - Worklist.push_back(I); - while (!Worklist.empty()) { - Instruction *IW = Worklist.pop_back_val(); - int NumOperands = IW->getNumOperands(); - for (int i = 0; i < NumOperands; i++) { - Instruction *IM = dyn_cast(IW->getOperand(i)); - if (!IM || IM->getOpcode() == Instruction::PHI) - continue; - - // If IM is in another BB, no need to move it, because this pass only - // vectorizes instructions within one BB. - if (IM->getParent() != I->getParent()) - continue; - - if (!IM->comesBefore(I)) { - InstructionsToMove.insert(IM); - Worklist.push_back(IM); + if (SelectA->getCondition() != SelectB->getCondition()) { + return std::nullopt; } + LLVM_DEBUG(dbgs() << "LSV: lookThroughSelects, PtrA=" << *PtrA + << ", PtrB=" << *PtrB << ", Depth=" << Depth << "\n"); + std::optional TrueDiff = getConstantOffset( + SelectA->getTrueValue(), SelectB->getTrueValue(), Depth); + if (!TrueDiff.has_value()) + return std::nullopt; + std::optional FalseDiff = getConstantOffset( + SelectA->getFalseValue(), SelectB->getFalseValue(), Depth); + if (TrueDiff == FalseDiff) + return TrueDiff; } } - - // All instructions to move should follow I. Start from I, not from begin(). 
- for (auto BBI = I->getIterator(), E = I->getParent()->end(); BBI != E; - ++BBI) { - if (!InstructionsToMove.count(&*BBI)) - continue; - Instruction *IM = &*BBI; - --BBI; - IM->removeFromParent(); - IM->insertBefore(I); - } + return std::nullopt; } -std::pair -Vectorizer::getBoundaryInstrs(ArrayRef Chain) { - Instruction *C0 = Chain[0]; - BasicBlock::iterator FirstInstr = C0->getIterator(); - BasicBlock::iterator LastInstr = C0->getIterator(); +EquivalenceClassMap +Vectorizer::collectEquivalenceClasses(BasicBlock::iterator begin, + BasicBlock::iterator end) { + EquivalenceClassMap Ret; - BasicBlock *BB = C0->getParent(); - unsigned NumFound = 0; - for (Instruction &I : *BB) { - if (!is_contained(Chain, &I)) + auto getUnderlyingObject = [](const Value *Ptr) -> const Value * { + const Value *ObjPtr = llvm::getUnderlyingObject(Ptr); + if (const auto *Sel = dyn_cast(ObjPtr)) { + // The select's themselves are distinct instructions even if they share + // the same condition and evaluate to consecutive pointers for true and + // false values of the condition. Therefore using the select's themselves + // for grouping instructions would put consecutive accesses into different + // lists and they won't be even checked for being consecutive, and won't + // be vectorized. + return Sel->getCondition(); + } + return ObjPtr; + }; + + for (Instruction &I : make_range(begin, end)) { + auto *LI = dyn_cast(&I); + auto *SI = dyn_cast(&I); + if (!LI && !SI) continue; - ++NumFound; - if (NumFound == 1) { - FirstInstr = I.getIterator(); - } - if (NumFound == Chain.size()) { - LastInstr = I.getIterator(); - break; - } - } + assert((LI != nullptr) != (SI != nullptr)); - // Range is [first, last). - return std::make_pair(FirstInstr, ++LastInstr); -} - -void Vectorizer::eraseInstructions(ArrayRef Chain) { - SmallVector Instrs; - for (Instruction *I : Chain) { - Value *PtrOperand = getLoadStorePointerOperand(I); - assert(PtrOperand && "Instruction must have a pointer operand."); - Instrs.push_back(I); - if (GetElementPtrInst *GEP = dyn_cast(PtrOperand)) - Instrs.push_back(GEP); - } - - // Erase instructions. - for (Instruction *I : Instrs) - if (I->use_empty()) - I->eraseFromParent(); -} - -std::pair, ArrayRef> -Vectorizer::splitOddVectorElts(ArrayRef Chain, - unsigned ElementSizeBits) { - unsigned ElementSizeBytes = ElementSizeBits / 8; - unsigned SizeBytes = ElementSizeBytes * Chain.size(); - unsigned LeftBytes = (SizeBytes - (SizeBytes % 4)); - // If we're already a multiple of 4 bytes or the whole chain is shorter than 4 - // bytes, then try splitting down on power-of-2 boundary. - if (LeftBytes == SizeBytes || LeftBytes == 0) - LeftBytes = PowerOf2Ceil(SizeBytes) / 2; - unsigned NumLeft = LeftBytes / ElementSizeBytes; - if (NumLeft == 0) - NumLeft = 1; - LLVM_DEBUG(dbgs() << "LSV: Splitting the chain into " << NumLeft << "+" - << Chain.size() - NumLeft << " elements\n"); - return std::make_pair(Chain.slice(0, NumLeft), Chain.slice(NumLeft)); -} - -ArrayRef -Vectorizer::getVectorizablePrefix(ArrayRef Chain) { - // These are in BB order, unlike Chain, which is in address order. 
- SmallVector MemoryInstrs; - SmallVector ChainInstrs; - - bool IsLoadChain = isa(Chain[0]); - LLVM_DEBUG({ - for (Instruction *I : Chain) { - if (IsLoadChain) - assert(isa(I) && - "All elements of Chain must be loads, or all must be stores."); - else - assert(isa(I) && - "All elements of Chain must be loads, or all must be stores."); - } - }); - - for (Instruction &I : make_range(getBoundaryInstrs(Chain))) { - if ((isa(I) || isa(I)) && is_contained(Chain, &I)) { - ChainInstrs.push_back(&I); + if ((LI && !LI->isSimple()) || (SI && !SI->isSimple())) continue; - } - if (!isGuaranteedToTransferExecutionToSuccessor(&I)) { - LLVM_DEBUG(dbgs() << "LSV: Found instruction may not transfer execution: " - << I << '\n'); - break; - } - if (I.mayReadOrWriteMemory()) - MemoryInstrs.push_back(&I); + + if ((LI && !TTI.isLegalToVectorizeLoad(LI)) || + (SI && !TTI.isLegalToVectorizeStore(SI))) + continue; + + Type *Ty = getLoadStoreType(&I); + if (!VectorType::isValidElementType(Ty->getScalarType())) + continue; + + // Skip weird non-byte sizes. They probably aren't worth the effort of + // handling correctly. + unsigned TySize = DL.getTypeSizeInBits(Ty); + if ((TySize % 8) != 0) + continue; + + // Skip vectors of pointers. The vectorizeLoadChain/vectorizeStoreChain + // functions are currently using an integer type for the vectorized + // load/store, and does not support casting between the integer type and a + // vector of pointers (e.g. i64 to <2 x i16*>) + if (Ty->isVectorTy() && Ty->isPtrOrPtrVectorTy()) + continue; + + Value *Ptr = getLoadStorePointerOperand(&I); + unsigned AS = Ptr->getType()->getPointerAddressSpace(); + unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS); + + unsigned VF = VecRegSize / TySize; + VectorType *VecTy = dyn_cast(Ty); + + // Only handle power-of-two sized elements. + if ((!VecTy && !isPowerOf2_32(DL.getTypeSizeInBits(Ty))) || + (VecTy && !isPowerOf2_32(DL.getTypeSizeInBits(VecTy->getScalarType())))) + continue; + + // No point in looking at these if they're too big to vectorize. + if (TySize > VecRegSize / 2 || + (VecTy && TTI.getLoadVectorFactor(VF, TySize, TySize / 8, VecTy) == 0)) + continue; + + Ret[{getUnderlyingObject(Ptr), AS, + DL.getTypeSizeInBits(getScalarTy(getLoadStoreType(&I))), + /*IsLoad=*/LI != nullptr}] + .push_back(&I); } - // Loop until we find an instruction in ChainInstrs that we can't vectorize. - unsigned ChainInstrIdx = 0; - Instruction *BarrierMemoryInstr = nullptr; + return Ret; +} - for (unsigned E = ChainInstrs.size(); ChainInstrIdx < E; ++ChainInstrIdx) { - Instruction *ChainInstr = ChainInstrs[ChainInstrIdx]; +std::vector Vectorizer::gatherChains(ArrayRef Instrs) { + if (Instrs.empty()) + return {}; - // If a barrier memory instruction was found, chain instructions that follow - // will not be added to the valid prefix. - if (BarrierMemoryInstr && BarrierMemoryInstr->comesBefore(ChainInstr)) - break; + unsigned AS = getLoadStoreAddressSpace(Instrs[0]); + unsigned ASPtrBits = DL.getIndexSizeInBits(AS); - // Check (in BB order) if any instruction prevents ChainInstr from being - // vectorized. Find and store the first such "conflicting" instruction. - for (Instruction *MemInstr : MemoryInstrs) { - // If a barrier memory instruction was found, do not check past it. - if (BarrierMemoryInstr && BarrierMemoryInstr->comesBefore(MemInstr)) - break; +#ifndef NDEBUG + // Check that Instrs is in BB order and all have the same addr space. 
+ for (size_t I = 1; I < Instrs.size(); ++I) { + assert(Instrs[I - 1]->comesBefore(Instrs[I])); + assert(getLoadStoreAddressSpace(Instrs[I]) == AS); + } +#endif - auto *MemLoad = dyn_cast(MemInstr); - auto *ChainLoad = dyn_cast(ChainInstr); - if (MemLoad && ChainLoad) - continue; + // Machinery to build an LRU-hashtable of Chains. + // + // (Ideally this could be done with MapVector, but as currently implemented, + // moving an element to the front of a MapVector is O(n).) + struct InstrListElem : ilist_node, + std::pair { + explicit InstrListElem(Instruction *I) + : std::pair(I, {}) {} + }; + struct InstrListElemDenseMapInfo { + using PtrInfo = DenseMapInfo; + using IInfo = DenseMapInfo; + static inline InstrListElem *getEmptyKey() { + return PtrInfo::getEmptyKey(); + } + static inline InstrListElem *getTombstoneKey() { + return PtrInfo::getTombstoneKey(); + } + static unsigned getHashValue(const InstrListElem *E) { + return IInfo::getHashValue(E->first); + } + static bool isEqual(const InstrListElem *A, const InstrListElem *B) { + if (A == getEmptyKey() || B == getEmptyKey()) + return A == getEmptyKey() && B == getEmptyKey(); + if (A == getTombstoneKey() || B == getTombstoneKey()) + return A == getTombstoneKey() && B == getTombstoneKey(); + return IInfo::isEqual(A->first, B->first); + } + }; + SpecificBumpPtrAllocator Allocator; + simple_ilist LRU; + DenseSet Chains; - // We can ignore the alias if the we have a load store pair and the load - // is known to be invariant. The load cannot be clobbered by the store. - auto IsInvariantLoad = [](const LoadInst *LI) -> bool { - return LI->hasMetadata(LLVMContext::MD_invariant_load); - }; + // Compare each instruction in `instrs` to leader of the N most recently-used + // chains. This limits the O(n^2) behavior of this pass while also allowing + // us to build arbitrarily long chains. + for (Instruction *I : Instrs) { + constexpr size_t MaxChainsToTry = 64; - if (IsLoadChain) { - // We can ignore the alias as long as the load comes before the store, - // because that means we won't be moving the load past the store to - // vectorize it (the vectorized load is inserted at the location of the - // first load in the chain). - if (ChainInstr->comesBefore(MemInstr) || - (ChainLoad && IsInvariantLoad(ChainLoad))) - continue; - } else { - // Same case, but in reverse. - if (MemInstr->comesBefore(ChainInstr) || - (MemLoad && IsInvariantLoad(MemLoad))) - continue; - } - - ModRefInfo MR = - AA.getModRefInfo(MemInstr, MemoryLocation::get(ChainInstr)); - if (IsLoadChain ? isModSet(MR) : isModOrRefSet(MR)) { - LLVM_DEBUG({ - dbgs() << "LSV: Found alias:\n" - " Aliasing instruction:\n" - << " " << *MemInstr << '\n' - << " Aliased instruction and pointer:\n" - << " " << *ChainInstr << '\n' - << " " << *getLoadStorePointerOperand(ChainInstr) << '\n'; - }); - // Save this aliasing memory instruction as a barrier, but allow other - // instructions that precede the barrier to be vectorized with this one. - BarrierMemoryInstr = MemInstr; + bool MatchFound = false; + auto ChainIter = LRU.begin(); + for (int J = 0; J < MaxChainsToTry && ChainIter != LRU.end(); + ++J, ++ChainIter) { + std::optional Offset = + getConstantOffset(getLoadStorePointerOperand(ChainIter->first), + getLoadStorePointerOperand(I)); + if (Offset.has_value()) { + // `Offset` might not have the expected number of bits, if e.g. AS has a + // different number of bits than opaque pointers. 
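+        // Normalize the offset to the index width of AS before recording it.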
+        ChainIter->second.push_back(
+            ChainElem{I, Offset.value().sextOrTrunc(ASPtrBits)});
+        // Move ChainIter to the front of the LRU list.
+        LRU.remove(*ChainIter);
+        LRU.push_front(*ChainIter);
+        MatchFound = true;
         break;
       }
     }
-    // Continue the search only for store chains, since vectorizing stores that
-    // precede an aliasing load is valid. Conversely, vectorizing loads is valid
-    // up to an aliasing store, but should not pull loads from further down in
-    // the basic block.
-    if (IsLoadChain && BarrierMemoryInstr) {
-      // The BarrierMemoryInstr is a store that precedes ChainInstr.
-      assert(BarrierMemoryInstr->comesBefore(ChainInstr));
-      break;
+
+    if (!MatchFound) {
+      APInt ZeroOffset(ASPtrBits, 0);
+      InstrListElem *E = new (Allocator.Allocate()) InstrListElem(I);
+      E->second.push_back(ChainElem{I, ZeroOffset});
+      LRU.push_front(*E);
+      Chains.insert(E);
     }
   }
-  // Find the largest prefix of Chain whose elements are all in
-  // ChainInstrs[0, ChainInstrIdx). This is the largest vectorizable prefix of
-  // Chain. (Recall that Chain is in address order, but ChainInstrs is in BB
-  // order.)
-  SmallPtrSet<Instruction *, 8> VectorizableChainInstrs(
-      ChainInstrs.begin(), ChainInstrs.begin() + ChainInstrIdx);
-  unsigned ChainIdx = 0;
-  for (unsigned ChainLen = Chain.size(); ChainIdx < ChainLen; ++ChainIdx) {
-    if (!VectorizableChainInstrs.count(Chain[ChainIdx]))
-      break;
+  std::vector<Chain> Ret;
+  Ret.reserve(Chains.size());
+  // Iterate over LRU rather than Chains so the order is deterministic.
+  for (auto &E : LRU) {
+    if (E.second.size() > 1) {
+      Ret.push_back(std::move(E.second));
+    }
   }
-  return Chain.slice(0, ChainIdx);
+  return Ret;
 }
-static ChainID getChainID(const Value *Ptr) {
-  const Value *ObjPtr = getUnderlyingObject(Ptr);
-  if (const auto *Sel = dyn_cast<SelectInst>(ObjPtr)) {
-    // The select's themselves are distinct instructions even if they share the
-    // same condition and evaluate to consecutive pointers for true and false
-    // values of the condition. Therefore using the select's themselves for
-    // grouping instructions would put consecutive accesses into different lists
-    // and they won't be even checked for being consecutive, and won't be
-    // vectorized.
-    return Sel->getCondition();
+std::optional<APInt> Vectorizer::getConstantOffset(Value *PtrA, Value *PtrB,
+                                                   unsigned Depth) {
+  LLVM_DEBUG(dbgs() << "LSV: getConstantOffset, PtrA=" << *PtrA
+                    << ", PtrB=" << *PtrB << ", Depth=" << Depth << "\n");
+  unsigned OffsetBitWidth = DL.getIndexTypeSizeInBits(PtrA->getType());
+  APInt OffsetA(OffsetBitWidth, 0);
+  APInt OffsetB(OffsetBitWidth, 0);
+  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
+  PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);
+  unsigned NewPtrBitWidth = DL.getTypeStoreSizeInBits(PtrA->getType());
+  if (NewPtrBitWidth != DL.getTypeStoreSizeInBits(PtrB->getType()))
+    return std::nullopt;
+
+  // If we have to shrink the pointer, stripAndAccumulateInBoundsConstantOffsets
+  // should properly handle a possible overflow and the value should fit into
+  // the smallest data type used in the cast/gep chain.
+ assert(OffsetA.getSignificantBits() <= NewPtrBitWidth && + OffsetB.getSignificantBits() <= NewPtrBitWidth); + + OffsetA = OffsetA.sextOrTrunc(NewPtrBitWidth); + OffsetB = OffsetB.sextOrTrunc(NewPtrBitWidth); + if (PtrA == PtrB) { + return OffsetB - OffsetA; } - return ObjPtr; -} - -std::pair -Vectorizer::collectInstructions(BasicBlock *BB) { - InstrListMap LoadRefs; - InstrListMap StoreRefs; - - for (Instruction &I : *BB) { - if (!I.mayReadOrWriteMemory()) - continue; - - if (LoadInst *LI = dyn_cast(&I)) { - if (!LI->isSimple()) - continue; - - // Skip if it's not legal. - if (!TTI.isLegalToVectorizeLoad(LI)) - continue; - - Type *Ty = LI->getType(); - if (!VectorType::isValidElementType(Ty->getScalarType())) - continue; - - // Skip weird non-byte sizes. They probably aren't worth the effort of - // handling correctly. - unsigned TySize = DL.getTypeSizeInBits(Ty); - if ((TySize % 8) != 0) - continue; - - // Skip vectors of pointers. The vectorizeLoadChain/vectorizeStoreChain - // functions are currently using an integer type for the vectorized - // load/store, and does not support casting between the integer type and a - // vector of pointers (e.g. i64 to <2 x i16*>) - if (Ty->isVectorTy() && Ty->isPtrOrPtrVectorTy()) - continue; - - Value *Ptr = LI->getPointerOperand(); - unsigned AS = Ptr->getType()->getPointerAddressSpace(); - unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS); - - unsigned VF = VecRegSize / TySize; - VectorType *VecTy = dyn_cast(Ty); - - // No point in looking at these if they're too big to vectorize. - if (TySize > VecRegSize / 2 || - (VecTy && TTI.getLoadVectorFactor(VF, TySize, TySize / 8, VecTy) == 0)) - continue; - - // Save the load locations. - const ChainID ID = getChainID(Ptr); - LoadRefs[ID].push_back(LI); - } else if (StoreInst *SI = dyn_cast(&I)) { - if (!SI->isSimple()) - continue; - - // Skip if it's not legal. - if (!TTI.isLegalToVectorizeStore(SI)) - continue; - - Type *Ty = SI->getValueOperand()->getType(); - if (!VectorType::isValidElementType(Ty->getScalarType())) - continue; - - // Skip vectors of pointers. The vectorizeLoadChain/vectorizeStoreChain - // functions are currently using an integer type for the vectorized - // load/store, and does not support casting between the integer type and a - // vector of pointers (e.g. i64 to <2 x i16*>) - if (Ty->isVectorTy() && Ty->isPtrOrPtrVectorTy()) - continue; - - // Skip weird non-byte sizes. They probably aren't worth the effort of - // handling correctly. - unsigned TySize = DL.getTypeSizeInBits(Ty); - if ((TySize % 8) != 0) - continue; - - Value *Ptr = SI->getPointerOperand(); - unsigned AS = Ptr->getType()->getPointerAddressSpace(); - unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS); - - unsigned VF = VecRegSize / TySize; - VectorType *VecTy = dyn_cast(Ty); - - // No point in looking at these if they're too big to vectorize. - if (TySize > VecRegSize / 2 || - (VecTy && TTI.getStoreVectorFactor(VF, TySize, TySize / 8, VecTy) == 0)) - continue; - - // Save store location. - const ChainID ID = getChainID(Ptr); - StoreRefs[ID].push_back(SI); - } - } - - return {LoadRefs, StoreRefs}; -} - -bool Vectorizer::vectorizeChains(InstrListMap &Map) { - bool Changed = false; - - for (const std::pair &Chain : Map) { - unsigned Size = Chain.second.size(); - if (Size < 2) - continue; - - LLVM_DEBUG(dbgs() << "LSV: Analyzing a chain of length " << Size << ".\n"); - - // Process the stores in chunks of 64. 
- for (unsigned CI = 0, CE = Size; CI < CE; CI += 64) { - unsigned Len = std::min(CE - CI, 64); - ArrayRef Chunk(&Chain.second[CI], Len); - Changed |= vectorizeInstructions(Chunk); - } - } - - return Changed; -} - -bool Vectorizer::vectorizeInstructions(ArrayRef Instrs) { - LLVM_DEBUG(dbgs() << "LSV: Vectorizing " << Instrs.size() - << " instructions.\n"); - SmallVector Heads, Tails; - int ConsecutiveChain[64]; - - // Do a quadratic search on all of the given loads/stores and find all of the - // pairs of loads/stores that follow each other. - for (int i = 0, e = Instrs.size(); i < e; ++i) { - ConsecutiveChain[i] = -1; - for (int j = e - 1; j >= 0; --j) { - if (i == j) - continue; - - if (isConsecutiveAccess(Instrs[i], Instrs[j])) { - if (ConsecutiveChain[i] != -1) { - int CurDistance = std::abs(ConsecutiveChain[i] - i); - int NewDistance = std::abs(ConsecutiveChain[i] - j); - if (j < i || NewDistance > CurDistance) - continue; // Should not insert. - } - - Tails.push_back(j); - Heads.push_back(i); - ConsecutiveChain[i] = j; - } - } - } - - bool Changed = false; - SmallPtrSet InstructionsProcessed; - - for (int Head : Heads) { - if (InstructionsProcessed.count(Instrs[Head])) - continue; - bool LongerChainExists = false; - for (unsigned TIt = 0; TIt < Tails.size(); TIt++) - if (Head == Tails[TIt] && - !InstructionsProcessed.count(Instrs[Heads[TIt]])) { - LongerChainExists = true; - break; - } - if (LongerChainExists) - continue; - - // We found an instr that starts a chain. Now follow the chain and try to - // vectorize it. - SmallVector Operands; - int I = Head; - while (I != -1 && (is_contained(Tails, I) || is_contained(Heads, I))) { - if (InstructionsProcessed.count(Instrs[I])) - break; - - Operands.push_back(Instrs[I]); - I = ConsecutiveChain[I]; - } - - bool Vectorized = false; - if (isa(*Operands.begin())) - Vectorized = vectorizeLoadChain(Operands, &InstructionsProcessed); - else - Vectorized = vectorizeStoreChain(Operands, &InstructionsProcessed); - - Changed |= Vectorized; - } - - return Changed; -} - -bool Vectorizer::vectorizeStoreChain( - ArrayRef Chain, - SmallPtrSet *InstructionsProcessed) { - StoreInst *S0 = cast(Chain[0]); - - // If the vector has an int element, default to int for the whole store. - Type *StoreTy = nullptr; - for (Instruction *I : Chain) { - StoreTy = cast(I)->getValueOperand()->getType(); - if (StoreTy->isIntOrIntVectorTy()) - break; - - if (StoreTy->isPtrOrPtrVectorTy()) { - StoreTy = Type::getIntNTy(F.getParent()->getContext(), - DL.getTypeSizeInBits(StoreTy)); - break; - } - } - assert(StoreTy && "Failed to find store type"); - - unsigned Sz = DL.getTypeSizeInBits(StoreTy); - unsigned AS = S0->getPointerAddressSpace(); - unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS); - unsigned VF = VecRegSize / Sz; - unsigned ChainSize = Chain.size(); - Align Alignment = S0->getAlign(); - - if (!isPowerOf2_32(Sz) || VF < 2 || ChainSize < 2) { - InstructionsProcessed->insert(Chain.begin(), Chain.end()); - return false; - } - - ArrayRef NewChain = getVectorizablePrefix(Chain); - if (NewChain.empty()) { - // No vectorization possible. - InstructionsProcessed->insert(Chain.begin(), Chain.end()); - return false; - } - if (NewChain.size() == 1) { - // Failed after the first instruction. Discard it and try the smaller chain. - InstructionsProcessed->insert(NewChain.front()); - return false; - } - - // Update Chain to the valid vectorizable subchain. - Chain = NewChain; - ChainSize = Chain.size(); - - // Check if it's legal to vectorize this chain. 
If not, split the chain and - // try again. - unsigned EltSzInBytes = Sz / 8; - unsigned SzInBytes = EltSzInBytes * ChainSize; - - FixedVectorType *VecTy; - auto *VecStoreTy = dyn_cast(StoreTy); - if (VecStoreTy) - VecTy = FixedVectorType::get(StoreTy->getScalarType(), - Chain.size() * VecStoreTy->getNumElements()); - else - VecTy = FixedVectorType::get(StoreTy, Chain.size()); - - // If it's more than the max vector size or the target has a better - // vector factor, break it into two pieces. - unsigned TargetVF = TTI.getStoreVectorFactor(VF, Sz, SzInBytes, VecTy); - if (ChainSize > VF || (VF != TargetVF && TargetVF < ChainSize)) { - LLVM_DEBUG(dbgs() << "LSV: Chain doesn't match with the vector factor." - " Creating two separate arrays.\n"); - bool Vectorized = false; - Vectorized |= - vectorizeStoreChain(Chain.slice(0, TargetVF), InstructionsProcessed); - Vectorized |= - vectorizeStoreChain(Chain.slice(TargetVF), InstructionsProcessed); - return Vectorized; - } - - LLVM_DEBUG({ - dbgs() << "LSV: Stores to vectorize:\n"; - for (Instruction *I : Chain) - dbgs() << " " << *I << "\n"; - }); - - // We won't try again to vectorize the elements of the chain, regardless of - // whether we succeed below. - InstructionsProcessed->insert(Chain.begin(), Chain.end()); - - // If the store is going to be misaligned, don't vectorize it. - unsigned RelativeSpeed; - if (accessIsMisaligned(SzInBytes, AS, Alignment, RelativeSpeed)) { - if (S0->getPointerAddressSpace() != DL.getAllocaAddrSpace()) { - unsigned SpeedBefore; - accessIsMisaligned(EltSzInBytes, AS, Alignment, SpeedBefore); - if (SpeedBefore > RelativeSpeed) - return false; - - auto Chains = splitOddVectorElts(Chain, Sz); - bool Vectorized = false; - Vectorized |= vectorizeStoreChain(Chains.first, InstructionsProcessed); - Vectorized |= vectorizeStoreChain(Chains.second, InstructionsProcessed); - return Vectorized; - } - - Align NewAlign = getOrEnforceKnownAlignment(S0->getPointerOperand(), - Align(StackAdjustedAlignment), - DL, S0, nullptr, &DT); - if (NewAlign >= Alignment) - Alignment = NewAlign; - else - return false; - } - - if (!TTI.isLegalToVectorizeStoreChain(SzInBytes, Alignment, AS)) { - auto Chains = splitOddVectorElts(Chain, Sz); - bool Vectorized = false; - Vectorized |= vectorizeStoreChain(Chains.first, InstructionsProcessed); - Vectorized |= vectorizeStoreChain(Chains.second, InstructionsProcessed); - return Vectorized; - } - - BasicBlock::iterator First, Last; - std::tie(First, Last) = getBoundaryInstrs(Chain); - Builder.SetInsertPoint(&*Last); - - Value *Vec = PoisonValue::get(VecTy); - - if (VecStoreTy) { - unsigned VecWidth = VecStoreTy->getNumElements(); - for (unsigned I = 0, E = Chain.size(); I != E; ++I) { - StoreInst *Store = cast(Chain[I]); - for (unsigned J = 0, NE = VecStoreTy->getNumElements(); J != NE; ++J) { - unsigned NewIdx = J + I * VecWidth; - Value *Extract = Builder.CreateExtractElement(Store->getValueOperand(), - Builder.getInt32(J)); - if (Extract->getType() != StoreTy->getScalarType()) - Extract = Builder.CreateBitCast(Extract, StoreTy->getScalarType()); - - Value *Insert = - Builder.CreateInsertElement(Vec, Extract, Builder.getInt32(NewIdx)); - Vec = Insert; - } - } - } else { - for (unsigned I = 0, E = Chain.size(); I != E; ++I) { - StoreInst *Store = cast(Chain[I]); - Value *Extract = Store->getValueOperand(); - if (Extract->getType() != StoreTy->getScalarType()) - Extract = - Builder.CreateBitOrPointerCast(Extract, StoreTy->getScalarType()); - - Value *Insert = - Builder.CreateInsertElement(Vec, Extract, 
Builder.getInt32(I)); - Vec = Insert; - } - } - - StoreInst *SI = Builder.CreateAlignedStore( - Vec, - Builder.CreateBitCast(S0->getPointerOperand(), VecTy->getPointerTo(AS)), - Alignment); - propagateMetadata(SI, Chain); - - eraseInstructions(Chain); - ++NumVectorInstructions; - NumScalarsVectorized += Chain.size(); - return true; -} - -bool Vectorizer::vectorizeLoadChain( - ArrayRef Chain, - SmallPtrSet *InstructionsProcessed) { - LoadInst *L0 = cast(Chain[0]); - - // If the vector has an int element, default to int for the whole load. - Type *LoadTy = nullptr; - for (const auto &V : Chain) { - LoadTy = cast(V)->getType(); - if (LoadTy->isIntOrIntVectorTy()) - break; - - if (LoadTy->isPtrOrPtrVectorTy()) { - LoadTy = Type::getIntNTy(F.getParent()->getContext(), - DL.getTypeSizeInBits(LoadTy)); - break; - } - } - assert(LoadTy && "Can't determine LoadInst type from chain"); - - unsigned Sz = DL.getTypeSizeInBits(LoadTy); - unsigned AS = L0->getPointerAddressSpace(); - unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS); - unsigned VF = VecRegSize / Sz; - unsigned ChainSize = Chain.size(); - Align Alignment = L0->getAlign(); - - if (!isPowerOf2_32(Sz) || VF < 2 || ChainSize < 2) { - InstructionsProcessed->insert(Chain.begin(), Chain.end()); - return false; - } - - ArrayRef NewChain = getVectorizablePrefix(Chain); - if (NewChain.empty()) { - // No vectorization possible. - InstructionsProcessed->insert(Chain.begin(), Chain.end()); - return false; - } - if (NewChain.size() == 1) { - // Failed after the first instruction. Discard it and try the smaller chain. - InstructionsProcessed->insert(NewChain.front()); - return false; - } - - // Update Chain to the valid vectorizable subchain. - Chain = NewChain; - ChainSize = Chain.size(); - - // Check if it's legal to vectorize this chain. If not, split the chain and - // try again. - unsigned EltSzInBytes = Sz / 8; - unsigned SzInBytes = EltSzInBytes * ChainSize; - VectorType *VecTy; - auto *VecLoadTy = dyn_cast(LoadTy); - if (VecLoadTy) - VecTy = FixedVectorType::get(LoadTy->getScalarType(), - Chain.size() * VecLoadTy->getNumElements()); - else - VecTy = FixedVectorType::get(LoadTy, Chain.size()); - - // If it's more than the max vector size or the target has a better - // vector factor, break it into two pieces. - unsigned TargetVF = TTI.getLoadVectorFactor(VF, Sz, SzInBytes, VecTy); - if (ChainSize > VF || (VF != TargetVF && TargetVF < ChainSize)) { - LLVM_DEBUG(dbgs() << "LSV: Chain doesn't match with the vector factor." - " Creating two separate arrays.\n"); - bool Vectorized = false; - Vectorized |= - vectorizeLoadChain(Chain.slice(0, TargetVF), InstructionsProcessed); - Vectorized |= - vectorizeLoadChain(Chain.slice(TargetVF), InstructionsProcessed); - return Vectorized; - } - - // We won't try again to vectorize the elements of the chain, regardless of - // whether we succeed below. - InstructionsProcessed->insert(Chain.begin(), Chain.end()); - - // If the load is going to be misaligned, don't vectorize it. 
- unsigned RelativeSpeed; - if (accessIsMisaligned(SzInBytes, AS, Alignment, RelativeSpeed)) { - if (L0->getPointerAddressSpace() != DL.getAllocaAddrSpace()) { - unsigned SpeedBefore; - accessIsMisaligned(EltSzInBytes, AS, Alignment, SpeedBefore); - if (SpeedBefore > RelativeSpeed) - return false; - - auto Chains = splitOddVectorElts(Chain, Sz); - bool Vectorized = false; - Vectorized |= vectorizeLoadChain(Chains.first, InstructionsProcessed); - Vectorized |= vectorizeLoadChain(Chains.second, InstructionsProcessed); - return Vectorized; - } - - Align NewAlign = getOrEnforceKnownAlignment(L0->getPointerOperand(), - Align(StackAdjustedAlignment), - DL, L0, nullptr, &DT); - if (NewAlign >= Alignment) - Alignment = NewAlign; - else - return false; - } - - if (!TTI.isLegalToVectorizeLoadChain(SzInBytes, Alignment, AS)) { - auto Chains = splitOddVectorElts(Chain, Sz); - bool Vectorized = false; - Vectorized |= vectorizeLoadChain(Chains.first, InstructionsProcessed); - Vectorized |= vectorizeLoadChain(Chains.second, InstructionsProcessed); - return Vectorized; - } - - LLVM_DEBUG({ - dbgs() << "LSV: Loads to vectorize:\n"; - for (Instruction *I : Chain) - I->dump(); - }); - - // getVectorizablePrefix already computed getBoundaryInstrs. The value of - // Last may have changed since then, but the value of First won't have. If it - // matters, we could compute getBoundaryInstrs only once and reuse it here. - BasicBlock::iterator First, Last; - std::tie(First, Last) = getBoundaryInstrs(Chain); - Builder.SetInsertPoint(&*First); - - Value *Bitcast = - Builder.CreateBitCast(L0->getPointerOperand(), VecTy->getPointerTo(AS)); - LoadInst *LI = - Builder.CreateAlignedLoad(VecTy, Bitcast, MaybeAlign(Alignment)); - propagateMetadata(LI, Chain); - - for (unsigned I = 0, E = Chain.size(); I != E; ++I) { - Value *CV = Chain[I]; - Value *V; - if (VecLoadTy) { - // Extract a subvector using shufflevector. - unsigned VecWidth = VecLoadTy->getNumElements(); - auto Mask = - llvm::to_vector<8>(llvm::seq(I * VecWidth, (I + 1) * VecWidth)); - V = Builder.CreateShuffleVector(LI, Mask, CV->getName()); - } else { - V = Builder.CreateExtractElement(LI, Builder.getInt32(I), CV->getName()); - } - - if (V->getType() != CV->getType()) { - V = Builder.CreateBitOrPointerCast(V, CV->getType()); - } - - // Replace the old instruction. - CV->replaceAllUsesWith(V); - } - - // Since we might have opaque pointers we might end up using the pointer - // operand of the first load (wrt. memory loaded) for the vector load. Since - // this first load might not be the first in the block we potentially need to - // reorder the pointer operand (and its operands). If we have a bitcast though - // it might be before the load and should be the reorder start instruction. - // "Might" because for opaque pointers the "bitcast" is just the first loads - // pointer operand, as oppposed to something we inserted at the right position - // ourselves. - Instruction *BCInst = dyn_cast(Bitcast); - reorder((BCInst && BCInst != L0->getPointerOperand()) ? 
BCInst : LI); - - eraseInstructions(Chain); - - ++NumVectorInstructions; - NumScalarsVectorized += Chain.size(); - return true; -} - -bool Vectorizer::accessIsMisaligned(unsigned SzInBytes, unsigned AddressSpace, - Align Alignment, unsigned &RelativeSpeed) { - RelativeSpeed = 0; - if (Alignment.value() % SzInBytes == 0) - return false; - - bool Allows = TTI.allowsMisalignedMemoryAccesses(F.getParent()->getContext(), - SzInBytes * 8, AddressSpace, - Alignment, &RelativeSpeed); - LLVM_DEBUG(dbgs() << "LSV: Target said misaligned is allowed? " << Allows - << " with relative speed = " << RelativeSpeed << '\n';); - return !Allows || !RelativeSpeed; + + // Try to compute B - A. + const SCEV *DistScev = SE.getMinusSCEV(SE.getSCEV(PtrB), SE.getSCEV(PtrA)); + if (DistScev != SE.getCouldNotCompute()) { + LLVM_DEBUG(dbgs() << "LSV: SCEV PtrB - PtrA =" << *DistScev << "\n"); + ConstantRange DistRange = SE.getSignedRange(DistScev); + if (DistRange.isSingleElement()) { + return OffsetB - OffsetA + *DistRange.getSingleElement(); + } + } + std::optional Diff = lookThroughComplexAddresses(PtrA, PtrB, Depth); + if (Diff.has_value()) { + return OffsetB - OffsetA + Diff->sext(OffsetB.getBitWidth()); + } + return std::nullopt; } diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/flat_atomic.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/flat_atomic.ll --- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/flat_atomic.ll +++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/flat_atomic.ll @@ -112,8 +112,7 @@ ; CHECK-NEXT: s_waitcnt lgkmcnt(0) ; CHECK-NEXT: s_ashr_i32 s3, s2, 31 ; CHECK-NEXT: s_lshl_b64 s[0:1], s[2:3], 3 -; CHECK-NEXT: v_mov_b32_e32 v0, s8 -; CHECK-NEXT: v_mov_b32_e32 v1, s9 +; CHECK-NEXT: v_pk_mov_b32 v[0:1], s[8:9], s[8:9] op_sel:[0,1] ; CHECK-NEXT: s_add_u32 s0, s4, s0 ; CHECK-NEXT: v_pk_mov_b32 v[2:3], s[6:7], s[6:7] op_sel:[0,1] ; CHECK-NEXT: s_addc_u32 s1, s5, s1 diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll --- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll @@ -279,9 +279,10 @@ ; Make sure we don't think the alignment will increase if the base address isn't an alloca define void @private_store_2xi16_align2_not_alloca(ptr addrspace(5) %p, ptr addrspace(5) %r) #0 { ; CHECK-LABEL: @private_store_2xi16_align2_not_alloca( -; CHECK-NEXT: [[GEP_R:%.*]] = getelementptr i16, ptr addrspace(5) [[R:%.*]], i32 1 -; CHECK-NEXT: store i16 1, ptr addrspace(5) [[R]], align 2 -; CHECK-NEXT: store i16 2, ptr addrspace(5) [[GEP_R]], align 2 +; ALIGNED-NEXT: [[GEP_R:%.*]] = getelementptr i16, ptr addrspace(5) [[R:%.*]], i32 1 +; ALIGNED-NEXT: store i16 1, ptr addrspace(5) [[R]], align 2 +; ALIGNED-NEXT: store i16 2, ptr addrspace(5) [[GEP_R]], align 2 +; UNALIGNED-NEXT:store <2 x i16> ; CHECK-NEXT: ret void ; %gep.r = getelementptr i16, ptr addrspace(5) %r, i32 1 @@ -309,11 +310,12 @@ define i32 @private_load_2xi16_align2_not_alloca(ptr addrspace(5) %p) #0 { ; CHECK-LABEL: @private_load_2xi16_align2_not_alloca( -; CHECK-NEXT: [[GEP_P:%.*]] = getelementptr i16, ptr addrspace(5) [[P:%.*]], i64 1 -; CHECK-NEXT: [[P_0:%.*]] = load i16, ptr addrspace(5) [[P]], align 2 -; CHECK-NEXT: [[P_1:%.*]] = load i16, ptr addrspace(5) [[GEP_P]], align 2 -; CHECK-NEXT: [[ZEXT_0:%.*]] = zext i16 [[P_0]] to i32 -; CHECK-NEXT: [[ZEXT_1:%.*]] = zext i16 [[P_1]] to i32 +; ALIGNED-NEXT: 
[[GEP_P:%.*]] = getelementptr i16, ptr addrspace(5) [[P:%.*]], i64 1 +; ALIGNED-NEXT: [[P_0:%.*]] = load i16, ptr addrspace(5) [[P]], align 2 +; ALIGNED-NEXT: [[P_1:%.*]] = load i16, ptr addrspace(5) [[GEP_P]], align 2 +; UNALIGNED-NEXT:load <2 x i16> +; CHECK: [[ZEXT_0:%.*]] = zext i16 +; CHECK-NEXT: [[ZEXT_1:%.*]] = zext i16 ; CHECK-NEXT: [[SHL_1:%.*]] = shl i32 [[ZEXT_1]], 16 ; CHECK-NEXT: [[OR:%.*]] = or i32 [[ZEXT_0]], [[SHL_1]] ; CHECK-NEXT: ret i32 [[OR]] diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll --- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll @@ -62,11 +62,13 @@ } ; Here we have four stores, with an aliasing load before the last one. We can -; vectorize the first three stores as <3 x float>, but this vectorized store must -; be inserted at the location of the third scalar store, not the fourth one. +; vectorize the first three stores as <2 x float> + scalar float, but these +; stores must be inserted at the location of the third scalar store, not the +; fourth one. ; ; CHECK-LABEL: @insert_store_point_alias -; CHECK: store <3 x float> +; CHECK-DAG: store float +; CHECK-DAG: store <2 x float> ; CHECK: load float, ptr addrspace(1) %a.idx.2 ; CHECK: store float ; CHECK-SAME: %a.idx.3 @@ -85,21 +87,15 @@ ret float %x } -; Here we have four stores, with an aliasing load before the last one. We -; could vectorize two of the stores before the load (although we currently -; don't), but the important thing is that we *don't* sink the store to -; a[idx + 1] below the load. +; Here we have four stores, with an aliasing load before the last one. We can +; vectorize two of the stores before the load, but the important thing is that +; we *don't* sink the store to a[idx + 1] below the load. 
; ; CHECK-LABEL: @insert_store_point_alias_ooo -; CHECK: store float -; CHECK-SAME: %a.idx.3 -; CHECK: store float -; CHECK-SAME: %a.idx.1 -; CHECK: store float -; CHECK-SAME: %a.idx.2 +; CHECK: store float{{.*}} %a.idx.3 +; CHECK: store <2 x float>{{.*}} %a.idx.1 ; CHECK: load float, ptr addrspace(1) %a.idx.2 -; CHECK: store float -; CHECK-SAME: %a.idx +; CHECK: store float{{.*}} %a.idx define float @insert_store_point_alias_ooo(ptr addrspace(1) nocapture %a, i64 %idx) { %a.idx = getelementptr inbounds float, ptr addrspace(1) %a, i64 %idx %a.idx.1 = getelementptr inbounds float, ptr addrspace(1) %a.idx, i64 1 diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores-private.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores-private.ll --- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores-private.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores-private.ll @@ -57,10 +57,21 @@ } ; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v4i32_align2( -; ALL: store i32 -; ALL: store i32 -; ALL: store i32 -; ALL: store i32 +; ELT4: store i32 +; ELT4: store i32 +; ELT4: store i32 +; ELT4: store i32 +; ELT8-ALIGNED: store i32 +; ELT8-ALIGNED: store i32 +; ELT8-ALIGNED: store i32 +; ELT8-ALIGNED: store i32 +; ELT8-UNALIGNED: store <2 x i32> +; ELT8-UNALIGNED: store <2 x i32> +; ELT16-ALIGNED: store i32 +; ELT16-ALIGNED: store i32 +; ELT16-ALIGNED: store i32 +; ELT16-ALIGNED: store i32 +; ELT16-UNALIGNED: store <4 x i32> define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v4i32_align2(ptr addrspace(5) %out) #0 { %out.gep.1 = getelementptr i32, ptr addrspace(5) %out, i32 1 %out.gep.2 = getelementptr i32, ptr addrspace(5) %out, i32 2 @@ -117,8 +128,9 @@ } ; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v2i16_align2( -; ALL: store i16 -; ALL: store i16 +; ALIGNED: store i16 +; ALIGNED: store i16 +; UNALIGNED: store <2 x i16> define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v2i16_align2(ptr addrspace(5) %out) #0 { %out.gep.1 = getelementptr i16, ptr addrspace(5) %out, i32 1 @@ -158,7 +170,8 @@ ; ELT8: store <2 x i32> ; ELT8: store i32 -; ELT16: store <3 x i32> +; ELT16: store <2 x i32> +; ELT16: store i32 define amdgpu_kernel void @merge_private_store_3_vector_elts_loads_v4i32(ptr addrspace(5) %out) #0 { %out.gep.1 = getelementptr i32, ptr addrspace(5) %out, i32 1 %out.gep.2 = getelementptr i32, ptr addrspace(5) %out, i32 2 @@ -181,7 +194,8 @@ ; ELT8-UNALIGNED: store <2 x i32> ; ELT8-UNALIGNED: store i32 -; ELT16-UNALIGNED: store <3 x i32> +; ELT16-UNALIGNED: store <2 x i32> +; ELT16-UNALIGNED: store i32 define amdgpu_kernel void @merge_private_store_3_vector_elts_loads_v4i32_align1(ptr addrspace(5) %out) #0 { %out.gep.1 = getelementptr i32, ptr addrspace(5) %out, i32 1 %out.gep.2 = getelementptr i32, ptr addrspace(5) %out, i32 2 @@ -197,7 +211,8 @@ ; ALIGNED: store i8 ; ALIGNED: store i8 -; UNALIGNED: store <3 x i8> +; UNALIGNED: store <2 x i8> +; UNALIGNED: store i8 define amdgpu_kernel void @merge_private_store_3_vector_elts_loads_v4i8_align1(ptr addrspace(5) %out) #0 { %out.gep.1 = getelementptr i8, ptr addrspace(5) %out, i8 1 %out.gep.2 = getelementptr i8, ptr addrspace(5) %out, i8 2 diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll --- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll @@ -206,7 +206,8 @@ define 
amdgpu_kernel void @merge_global_store_3_constants_i32(ptr addrspace(1) %out) #0 { ; CHECK-LABEL: @merge_global_store_3_constants_i32( -; CHECK-NEXT: store <3 x i32> , ptr addrspace(1) [[OUT:%.*]], align 4 +; CHECK-DAG: store i32 +; CHECK-DAG: store <2 x i32> ; CHECK-NEXT: ret void ; %out.gep.1 = getelementptr i32, ptr addrspace(1) %out, i32 1 @@ -350,14 +351,10 @@ define amdgpu_kernel void @merge_global_store_3_adjacent_loads_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { ; CHECK-LABEL: @merge_global_store_3_adjacent_loads_i32( -; CHECK-NEXT: [[TMP1:%.*]] = load <3 x i32>, ptr addrspace(1) [[IN:%.*]], align 4 -; CHECK-NEXT: [[X1:%.*]] = extractelement <3 x i32> [[TMP1]], i32 0 -; CHECK-NEXT: [[Y2:%.*]] = extractelement <3 x i32> [[TMP1]], i32 1 -; CHECK-NEXT: [[Z3:%.*]] = extractelement <3 x i32> [[TMP1]], i32 2 -; CHECK-NEXT: [[TMP2:%.*]] = insertelement <3 x i32> poison, i32 [[X1]], i32 0 -; CHECK-NEXT: [[TMP3:%.*]] = insertelement <3 x i32> [[TMP2]], i32 [[Y2]], i32 1 -; CHECK-NEXT: [[TMP4:%.*]] = insertelement <3 x i32> [[TMP3]], i32 [[Z3]], i32 2 -; CHECK-NEXT: store <3 x i32> [[TMP4]], ptr addrspace(1) [[OUT:%.*]], align 4 +; CHECK: load <2 x i32> +; CHECK: load i32 +; CHECK: store <2 x i32> +; CHECK: store i32 ; CHECK-NEXT: ret void ; %out.gep.1 = getelementptr i32, ptr addrspace(1) %out, i32 1 @@ -718,7 +715,8 @@ ; CHECK-LABEL: @merge_global_store_7_constants_i32( ; CHECK-NEXT: store <4 x i32> , ptr addrspace(1) [[OUT:%.*]], align 4 ; CHECK-NEXT: [[IDX4:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[OUT]], i64 4 -; CHECK-NEXT: store <3 x i32> , ptr addrspace(1) [[IDX4]], align 4 +; CHECK-DAG: store <2 x i32> +; CHECK-DAG: store i32 ; CHECK-NEXT: ret void ; store i32 34, ptr addrspace(1) %out, align 4 diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll --- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll @@ -26,25 +26,17 @@ ret void } -; Check adjiacent memory locations are properly matched and the +; Check adjacent memory locations are properly matched and the ; longest chain vectorized ; GCN-LABEL: @interleave_get_longest -; GFX7: load <2 x i32> -; GFX7: load i32 -; GFX7: store <2 x i32> zeroinitializer -; GFX7: load i32 -; GFX7: load <2 x i32> -; GFX7: load i32 -; GFX7: load i32 - -; GFX9: load <4 x i32> -; GFX9: load i32 -; GFX9: store <2 x i32> zeroinitializer -; GFX9: load i32 -; GFX9: load i32 -; GFX9: load i32 +; GCN: load <2 x i32>{{.*}} %tmp1 +; GCN: store <2 x i32> zeroinitializer{{.*}} %tmp1 +; GCN: load <2 x i32>{{.*}} %tmp2 +; GCN: load <2 x i32>{{.*}} %tmp4 +; GCN: load i32{{.*}} %tmp5 +; GCN: load i32{{.*}} %tmp5 define amdgpu_kernel void @interleave_get_longest(i32 %arg) { %a1 = add i32 %arg, 1 @@ -70,4 +62,3 @@ ret void } - diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/selects-inseltpoison.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/selects-inseltpoison.ll --- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/selects-inseltpoison.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/selects-inseltpoison.ll @@ -4,7 +4,7 @@ define void @base_case(i1 %cnd, ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %out) { ; CHECK-LABEL: @base_case -; CHECK: load <3 x i32> +; CHECK: load <2 x i32> entry: %gep1 = getelementptr inbounds i32, ptr addrspace(1) %a, i64 1 %gep2 = getelementptr inbounds i32, ptr addrspace(1) %a, i64 2 diff --git 
a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/selects.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/selects.ll --- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/selects.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/selects.ll @@ -4,7 +4,8 @@ define void @base_case(i1 %cnd, ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %out) { ; CHECK-LABEL: @base_case -; CHECK: load <3 x i32> +; CHECK: load <2 x i32> +; CHECK: load i32 entry: %gep1 = getelementptr inbounds i32, ptr addrspace(1) %a, i64 1 %gep2 = getelementptr inbounds i32, ptr addrspace(1) %a, i64 2 diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/vect-ptr-ptr-size-mismatch.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/vect-ptr-ptr-size-mismatch.ll --- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/vect-ptr-ptr-size-mismatch.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/vect-ptr-ptr-size-mismatch.ll @@ -82,7 +82,7 @@ %a.ascast = addrspacecast ptr addrspace(5) %p to ptr %b.ascast = addrspacecast ptr addrspace(5) %gep2 to ptr %tmp1 = load i8, ptr %a.ascast, align 1 - %tmp2 = load i8, ptr %b.ascast, align 1 + %tmp2 = load i8, ptr %b.ascast, align 2 unreachable } diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/4x2xhalf.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/4x2xhalf.ll --- a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/4x2xhalf.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/4x2xhalf.ll @@ -1,10 +1,10 @@ ; RUN: opt -mtriple=nvptx64-nvidia-cuda -passes=load-store-vectorizer -S -o - %s | FileCheck %s define void @ldg_f16(ptr nocapture align 16 %rd0) { - %load1 = load <2 x half>, ptr %rd0, align 4 + %load1 = load <2 x half>, ptr %rd0, align 16 %p1 = fcmp ogt <2 x half> %load1, zeroinitializer %s1 = select <2 x i1> %p1, <2 x half> %load1, <2 x half> zeroinitializer - store <2 x half> %s1, ptr %rd0, align 4 + store <2 x half> %s1, ptr %rd0, align 16 %in2 = getelementptr half, ptr %rd0, i64 2 %load2 = load <2 x half>, ptr %in2, align 4 %p2 = fcmp ogt <2 x half> %load2, zeroinitializer diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/many_loads_stores.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/many_loads_stores.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/many_loads_stores.ll @@ -0,0 +1,1136 @@ +; This is an end-to-end test that checks that LSV succeeds at vectorizing a +; large program with many loads. 
+; RUN: opt -mtriple=nvptx64-nvidia-cuda -passes=load-store-vectorizer -S -o - %s > %t +; RUN: grep 'load i8' < %t | count 18 +; RUN: grep 'load <2 x i8>' < %t | count 9 +; RUN: grep 'load <4 x i8>' < %t | count 27 + +target datalayout = "e-i64:64-i128:128-v16:16-v32:32-n16:32:64" +target triple = "nvptx64-nvidia-cuda" + +declare noundef i32 @llvm.nvvm.read.ptx.sreg.ctaid.x() #0 +declare noundef i32 @llvm.nvvm.read.ptx.sreg.tid.x() #0 +declare float @llvm.ceil.f32(float) #0 +declare i32 @llvm.smax.i32(i32, i32) #0 +declare i32 @llvm.umin.i32(i32, i32) #0 + +define void @many_loads(ptr noalias readonly align 128 dereferenceable(5111808) %arg0, ptr noalias nocapture readonly align 128 dereferenceable(29952) %arg1, ptr noalias nocapture readonly align 128 dereferenceable(2664) %arg2, ptr noalias nocapture readonly align 128 dereferenceable(888) %arg3, ptr noalias nocapture writeonly align 128 dereferenceable(17731584) %arg4) local_unnamed_addr #1 { +entry: + %arg41104 = addrspacecast ptr %arg4 to ptr addrspace(1) + %arg31102 = addrspacecast ptr %arg3 to ptr addrspace(1) + %arg21100 = addrspacecast ptr %arg2 to ptr addrspace(1) + %arg11098 = addrspacecast ptr %arg1 to ptr addrspace(1) + %arg01096 = addrspacecast ptr %arg0 to ptr addrspace(1) + %0 = tail call i32 @llvm.nvvm.read.ptx.sreg.ctaid.x(), !range !140 + %1 = tail call i32 @llvm.nvvm.read.ptx.sreg.tid.x(), !range !141 + %2 = shl nuw nsw i32 %0, 6 + %linear_index = or i32 %2, %1 + %linear_index_base = shl nuw nsw i32 %linear_index, 4 + %.urem = add nsw i32 %linear_index, -554112 + %.cmp = icmp ult i32 %linear_index, 554112 + %3 = select i1 %.cmp, i32 %linear_index, i32 %.urem + %4 = urem i32 %linear_index, 2496 + %.lhs.trunc = trunc i32 %0 to i16 + %5 = udiv i16 %.lhs.trunc, 39 + %.zext = zext i16 %5 to i32 + %6 = mul nuw nsw i32 %.zext, 2496 + %7 = add nuw nsw i32 %6, %4 + %8 = udiv i32 %7, 222 + %9 = mul i32 %8, 222 + %.decomposed = sub i32 %7, %9 + %10 = mul nuw nsw i32 %8, 3 + %11 = mul nuw nsw i32 %.decomposed, 3 + %12 = uitofp i32 %8 to float + %add.26 = fadd float %12, -1.000000e+00 + %13 = tail call float @llvm.ceil.f32(float %add.26) + %14 = fcmp ole float %13, 0.000000e+00 + %15 = select i1 %14, float 0.000000e+00, float %13 + %16 = fcmp oge float %15, 2.493000e+03 + %17 = select i1 %16, float 2.493000e+03, float %15 + %.inv = fcmp ole float %17, 0xC1E0000000000000 + %18 = select i1 %.inv, float 0xC1E0000000000000, float %17 + %19 = fptosi float %18 to i32 + %20 = fcmp oge float %17, 0x41E0000000000000 + %21 = tail call i32 @llvm.smax.i32(i32 %19, i32 0) + %22 = tail call i32 @llvm.umin.i32(i32 %21, i32 2493) + %23 = select i1 %20, i32 2493, i32 %22 + %24 = uitofp i32 %.decomposed to float + %add.3613 = fadd float %24, 5.000000e-01 + %multiply.3915 = fmul float %add.3613, 0x3FE27350C0000000 + %add.4217 = fadd float %multiply.3915, -1.500000e+00 + %25 = tail call float @llvm.ceil.f32(float %add.4217) + %26 = fcmp ole float %25, 0.000000e+00 + %27 = select i1 %26, float 0.000000e+00, float %25 + %28 = fcmp oge float %27, 1.250000e+02 + %29 = select i1 %28, float 1.250000e+02, float %27 + %.inv821 = fcmp ole float %29, 0xC1E0000000000000 + %30 = select i1 %.inv821, float 0xC1E0000000000000, float %29 + %31 = fptosi float %30 to i32 + %32 = fcmp oge float %29, 0x41E0000000000000 + %33 = tail call i32 @llvm.smax.i32(i32 %31, i32 0) + %34 = fcmp uno float %29, 0.000000e+00 + %35 = tail call i32 @llvm.umin.i32(i32 %33, i32 125) + %36 = select i1 %32, i32 125, i32 %35 + %37 = select i1 %34, i32 0, i32 %36 + %.lhs.trunc1053 = trunc i32 
%11 to i16 + %38 = udiv i16 %.lhs.trunc1053, 3 + %39 = mul i16 %38, 3 + %.decomposed1089 = sub i16 %.lhs.trunc1053, %39 + %40 = zext i16 %38 to i64 + %41 = zext i16 %.decomposed1089 to i64 + %42 = getelementptr inbounds [222 x [3 x float]], ptr addrspace(1) %arg21100, i64 0, i64 %40, i64 %41 + %43 = load float, ptr addrspace(1) %42, align 4, !invariant.load !142 + %44 = getelementptr inbounds [222 x float], ptr addrspace(1) %arg31102, i64 0, i64 %40 + %45 = load float, ptr addrspace(1) %44, align 4, !invariant.load !142 + %divide.6 = fdiv float %43, %45 + %46 = zext i32 %10 to i64 + %47 = getelementptr inbounds [7488 x float], ptr addrspace(1) %arg11098, i64 0, i64 %46 + %48 = load float, ptr addrspace(1) %47, align 4, !invariant.load !142 + %multiply.10 = fmul float %divide.6, %48 + %49 = zext i32 %23 to i64 + %50 = zext i32 %37 to i64 + %51 = getelementptr inbounds [1 x [4 x [2496 x [128 x [4 x i8]]]]], ptr addrspace(1) %arg01096, i64 0, i64 0, i64 0, i64 %49, i64 %50, i64 0 + %52 = load i8, ptr addrspace(1) %51, align 4, !invariant.load !142 + %53 = sitofp i8 %52 to float + %multiply.18 = fmul float %53, 0x3FC3BF2820000000 + %multiply.53 = fmul float %multiply.10, %multiply.18 + %add.57.i = fadd float %multiply.53, 0.000000e+00 + %.lhs.trunc1053.1 = add nuw nsw i16 %.lhs.trunc1053, 1 + %54 = udiv i16 %.lhs.trunc1053.1, 3 + %55 = mul i16 %54, 3 + %.decomposed1090 = sub i16 %.lhs.trunc1053.1, %55 + %56 = zext i16 %54 to i64 + %57 = zext i16 %.decomposed1090 to i64 + %58 = getelementptr inbounds [222 x [3 x float]], ptr addrspace(1) %arg21100, i64 0, i64 %56, i64 %57 + %59 = load float, ptr addrspace(1) %58, align 4, !invariant.load !142 + %60 = getelementptr inbounds [222 x float], ptr addrspace(1) %arg31102, i64 0, i64 %56 + %61 = load float, ptr addrspace(1) %60, align 4, !invariant.load !142 + %divide.6.1 = fdiv float %59, %61 + %multiply.10.1 = fmul float %divide.6.1, %48 + %62 = getelementptr inbounds i8, ptr addrspace(1) %51, i64 4 + %63 = load i8, ptr addrspace(1) %62, align 4, !invariant.load !142 + %64 = sitofp i8 %63 to float + %multiply.18.1 = fmul float %64, 0x3FC3BF2820000000 + %multiply.53.1 = fmul float %multiply.10.1, %multiply.18.1 + %add.57.i.1 = fadd float %add.57.i, %multiply.53.1 + %.lhs.trunc1053.2 = add nuw nsw i16 %.lhs.trunc1053, 2 + %65 = udiv i16 %.lhs.trunc1053.2, 3 + %66 = mul i16 %65, 3 + %.decomposed1091 = sub i16 %.lhs.trunc1053.2, %66 + %67 = zext i16 %65 to i64 + %68 = zext i16 %.decomposed1091 to i64 + %69 = getelementptr inbounds [222 x [3 x float]], ptr addrspace(1) %arg21100, i64 0, i64 %67, i64 %68 + %70 = load float, ptr addrspace(1) %69, align 4, !invariant.load !142 + %71 = getelementptr inbounds [222 x float], ptr addrspace(1) %arg31102, i64 0, i64 %67 + %72 = load float, ptr addrspace(1) %71, align 4, !invariant.load !142 + %divide.6.2 = fdiv float %70, %72 + %multiply.10.2 = fmul float %divide.6.2, %48 + %73 = getelementptr inbounds i8, ptr addrspace(1) %51, i64 8 + %74 = load i8, ptr addrspace(1) %73, align 4, !invariant.load !142 + %75 = sitofp i8 %74 to float + %multiply.18.2 = fmul float %75, 0x3FC3BF2820000000 + %multiply.53.2 = fmul float %multiply.10.2, %multiply.18.2 + %add.57.i.2 = fadd float %add.57.i.1, %multiply.53.2 + %76 = getelementptr inbounds float, ptr addrspace(1) %47, i64 1 + %77 = load float, ptr addrspace(1) %76, align 4, !invariant.load !142 + %multiply.10.3 = fmul float %divide.6, %77 + %78 = getelementptr inbounds i8, ptr addrspace(1) %51, i64 512 + %79 = load i8, ptr addrspace(1) %78, align 4, !invariant.load !142 + 
%80 = sitofp i8 %79 to float + %multiply.18.3 = fmul float %80, 0x3FC3BF2820000000 + %multiply.53.3 = fmul float %multiply.10.3, %multiply.18.3 + %add.57.i.3 = fadd float %add.57.i.2, %multiply.53.3 + %multiply.10.4 = fmul float %divide.6.1, %77 + %81 = getelementptr inbounds i8, ptr addrspace(1) %51, i64 516 + %82 = load i8, ptr addrspace(1) %81, align 4, !invariant.load !142 + %83 = sitofp i8 %82 to float + %multiply.18.4 = fmul float %83, 0x3FC3BF2820000000 + %multiply.53.4 = fmul float %multiply.10.4, %multiply.18.4 + %add.57.i.4 = fadd float %add.57.i.3, %multiply.53.4 + %multiply.10.5 = fmul float %divide.6.2, %77 + %84 = getelementptr inbounds i8, ptr addrspace(1) %51, i64 520 + %85 = load i8, ptr addrspace(1) %84, align 4, !invariant.load !142 + %86 = sitofp i8 %85 to float + %multiply.18.5 = fmul float %86, 0x3FC3BF2820000000 + %multiply.53.5 = fmul float %multiply.10.5, %multiply.18.5 + %add.57.i.5 = fadd float %add.57.i.4, %multiply.53.5 + %87 = getelementptr inbounds float, ptr addrspace(1) %47, i64 2 + %88 = load float, ptr addrspace(1) %87, align 4, !invariant.load !142 + %multiply.10.6 = fmul float %divide.6, %88 + %89 = getelementptr inbounds i8, ptr addrspace(1) %51, i64 1024 + %90 = load i8, ptr addrspace(1) %89, align 4, !invariant.load !142 + %91 = sitofp i8 %90 to float + %multiply.18.6 = fmul float %91, 0x3FC3BF2820000000 + %multiply.53.6 = fmul float %multiply.10.6, %multiply.18.6 + %add.57.i.6 = fadd float %add.57.i.5, %multiply.53.6 + %multiply.10.7 = fmul float %divide.6.1, %88 + %92 = getelementptr inbounds i8, ptr addrspace(1) %51, i64 1028 + %93 = load i8, ptr addrspace(1) %92, align 4, !invariant.load !142 + %94 = sitofp i8 %93 to float + %multiply.18.7 = fmul float %94, 0x3FC3BF2820000000 + %multiply.53.7 = fmul float %multiply.10.7, %multiply.18.7 + %add.57.i.7 = fadd float %add.57.i.6, %multiply.53.7 + %multiply.10.8 = fmul float %divide.6.2, %88 + %95 = getelementptr inbounds i8, ptr addrspace(1) %51, i64 1032 + %96 = load i8, ptr addrspace(1) %95, align 4, !invariant.load !142 + %97 = sitofp i8 %96 to float + %multiply.18.8 = fmul float %97, 0x3FC3BF2820000000 + %multiply.53.8 = fmul float %multiply.10.8, %multiply.18.8 + %add.57.i.8 = fadd float %add.57.i.7, %multiply.53.8 + %98 = fptrunc float %add.57.i.8 to half + %99 = zext i32 %linear_index_base to i64 + %100 = getelementptr half, ptr addrspace(1) %arg41104, i64 %99 + store half %98, ptr addrspace(1) %100, align 32 + %101 = udiv i32 %3, 222 + %102 = mul i32 %101, 222 + %.decomposed1092 = sub i32 %3, %102 + %103 = mul nuw nsw i32 %101, 3 + %104 = mul nuw nsw i32 %.decomposed1092, 3 + %105 = uitofp i32 %101 to float + %add.2637 = fadd float %105, -1.000000e+00 + %106 = tail call float @llvm.ceil.f32(float %add.2637) + %107 = fcmp ole float %106, 0.000000e+00 + %108 = select i1 %107, float 0.000000e+00, float %106 + %109 = fcmp oge float %108, 2.493000e+03 + %110 = select i1 %109, float 2.493000e+03, float %108 + %.inv824 = fcmp ole float %110, 0xC1E0000000000000 + %111 = select i1 %.inv824, float 0xC1E0000000000000, float %110 + %112 = fptosi float %111 to i32 + %113 = fcmp oge float %110, 0x41E0000000000000 + %114 = tail call i32 @llvm.smax.i32(i32 %112, i32 0) + %115 = tail call i32 @llvm.umin.i32(i32 %114, i32 2493) + %116 = select i1 %113, i32 2493, i32 %115 + %117 = uitofp i32 %.decomposed1092 to float + %add.3660 = fadd float %117, 5.000000e-01 + %multiply.3962 = fmul float %add.3660, 0x3FE27350C0000000 + %add.4264 = fadd float %multiply.3962, -1.500000e+00 + %118 = tail call float 
@llvm.ceil.f32(float %add.4264) + %119 = fcmp ole float %118, 0.000000e+00 + %120 = select i1 %119, float 0.000000e+00, float %118 + %121 = fcmp oge float %120, 1.250000e+02 + %122 = select i1 %121, float 1.250000e+02, float %120 + %.inv827 = fcmp ole float %122, 0xC1E0000000000000 + %123 = select i1 %.inv827, float 0xC1E0000000000000, float %122 + %124 = fptosi float %123 to i32 + %125 = fcmp oge float %122, 0x41E0000000000000 + %126 = tail call i32 @llvm.smax.i32(i32 %124, i32 0) + %127 = fcmp uno float %122, 0.000000e+00 + %128 = tail call i32 @llvm.umin.i32(i32 %126, i32 125) + %129 = select i1 %125, i32 125, i32 %128 + %130 = select i1 %127, i32 0, i32 %129 + %.lhs.trunc1045 = trunc i32 %104 to i16 + %131 = udiv i16 %.lhs.trunc1045, 3 + %132 = mul i16 %131, 3 + %.decomposed1093 = sub i16 %.lhs.trunc1045, %132 + %133 = zext i16 %131 to i64 + %134 = zext i16 %.decomposed1093 to i64 + %135 = getelementptr inbounds [222 x [3 x float]], ptr addrspace(1) %arg21100, i64 0, i64 %133, i64 %134 + %136 = load float, ptr addrspace(1) %135, align 4, !invariant.load !142 + %137 = getelementptr inbounds [222 x float], ptr addrspace(1) %arg31102, i64 0, i64 %133 + %138 = load float, ptr addrspace(1) %137, align 4, !invariant.load !142 + %divide.631 = fdiv float %136, %138 + %139 = zext i32 %103 to i64 + %140 = getelementptr inbounds [7488 x float], ptr addrspace(1) %arg11098, i64 0, i64 %139 + %141 = load float, ptr addrspace(1) %140, align 4, !invariant.load !142 + %multiply.1032 = fmul float %divide.631, %141 + %142 = zext i32 %116 to i64 + %143 = zext i32 %130 to i64 + %144 = getelementptr [1 x [4 x [2496 x [128 x [4 x i8]]]]], ptr addrspace(1) %arg01096, i64 0, i64 0, i64 0, i64 %142, i64 %143, i64 0 + %145 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1 + %146 = load i8, ptr addrspace(1) %145, align 1, !invariant.load !142 + %147 = sitofp i8 %146 to float + %multiply.1870 = fmul float %147, 0x3FC3BF2820000000 + %multiply.5371 = fmul float %multiply.1032, %multiply.1870 + %add.57.i914 = fadd float %multiply.5371, 0.000000e+00 + %.lhs.trunc1045.1 = add nuw nsw i16 %.lhs.trunc1045, 1 + %148 = udiv i16 %.lhs.trunc1045.1, 3 + %149 = mul i16 %148, 3 + %.decomposed1094 = sub i16 %.lhs.trunc1045.1, %149 + %150 = zext i16 %148 to i64 + %151 = zext i16 %.decomposed1094 to i64 + %152 = getelementptr inbounds [222 x [3 x float]], ptr addrspace(1) %arg21100, i64 0, i64 %150, i64 %151 + %153 = load float, ptr addrspace(1) %152, align 4, !invariant.load !142 + %154 = getelementptr inbounds [222 x float], ptr addrspace(1) %arg31102, i64 0, i64 %150 + %155 = load float, ptr addrspace(1) %154, align 4, !invariant.load !142 + %divide.631.1 = fdiv float %153, %155 + %multiply.1032.1 = fmul float %divide.631.1, %141 + %156 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 5 + %157 = load i8, ptr addrspace(1) %156, align 1, !invariant.load !142 + %158 = sitofp i8 %157 to float + %multiply.1870.1 = fmul float %158, 0x3FC3BF2820000000 + %multiply.5371.1 = fmul float %multiply.1032.1, %multiply.1870.1 + %add.57.i914.1 = fadd float %add.57.i914, %multiply.5371.1 + %.lhs.trunc1045.2 = add nuw nsw i16 %.lhs.trunc1045, 2 + %159 = udiv i16 %.lhs.trunc1045.2, 3 + %160 = mul i16 %159, 3 + %.decomposed1095 = sub i16 %.lhs.trunc1045.2, %160 + %161 = zext i16 %159 to i64 + %162 = zext i16 %.decomposed1095 to i64 + %163 = getelementptr inbounds [222 x [3 x float]], ptr addrspace(1) %arg21100, i64 0, i64 %161, i64 %162 + %164 = load float, ptr addrspace(1) %163, align 4, !invariant.load !142 + %165 = getelementptr 
inbounds [222 x float], ptr addrspace(1) %arg31102, i64 0, i64 %161 + %166 = load float, ptr addrspace(1) %165, align 4, !invariant.load !142 + %divide.631.2 = fdiv float %164, %166 + %multiply.1032.2 = fmul float %divide.631.2, %141 + %167 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 9 + %168 = load i8, ptr addrspace(1) %167, align 1, !invariant.load !142 + %169 = sitofp i8 %168 to float + %multiply.1870.2 = fmul float %169, 0x3FC3BF2820000000 + %multiply.5371.2 = fmul float %multiply.1032.2, %multiply.1870.2 + %add.57.i914.2 = fadd float %add.57.i914.1, %multiply.5371.2 + %170 = getelementptr inbounds float, ptr addrspace(1) %140, i64 1 + %171 = load float, ptr addrspace(1) %170, align 4, !invariant.load !142 + %multiply.1032.3 = fmul float %divide.631, %171 + %172 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 513 + %173 = load i8, ptr addrspace(1) %172, align 1, !invariant.load !142 + %174 = sitofp i8 %173 to float + %multiply.1870.3 = fmul float %174, 0x3FC3BF2820000000 + %multiply.5371.3 = fmul float %multiply.1032.3, %multiply.1870.3 + %add.57.i914.3 = fadd float %add.57.i914.2, %multiply.5371.3 + %multiply.1032.4 = fmul float %divide.631.1, %171 + %175 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 517 + %176 = load i8, ptr addrspace(1) %175, align 1, !invariant.load !142 + %177 = sitofp i8 %176 to float + %multiply.1870.4 = fmul float %177, 0x3FC3BF2820000000 + %multiply.5371.4 = fmul float %multiply.1032.4, %multiply.1870.4 + %add.57.i914.4 = fadd float %add.57.i914.3, %multiply.5371.4 + %multiply.1032.5 = fmul float %divide.631.2, %171 + %178 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 521 + %179 = load i8, ptr addrspace(1) %178, align 1, !invariant.load !142 + %180 = sitofp i8 %179 to float + %multiply.1870.5 = fmul float %180, 0x3FC3BF2820000000 + %multiply.5371.5 = fmul float %multiply.1032.5, %multiply.1870.5 + %add.57.i914.5 = fadd float %add.57.i914.4, %multiply.5371.5 + %181 = getelementptr inbounds float, ptr addrspace(1) %140, i64 2 + %182 = load float, ptr addrspace(1) %181, align 4, !invariant.load !142 + %multiply.1032.6 = fmul float %divide.631, %182 + %183 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1025 + %184 = load i8, ptr addrspace(1) %183, align 1, !invariant.load !142 + %185 = sitofp i8 %184 to float + %multiply.1870.6 = fmul float %185, 0x3FC3BF2820000000 + %multiply.5371.6 = fmul float %multiply.1032.6, %multiply.1870.6 + %add.57.i914.6 = fadd float %add.57.i914.5, %multiply.5371.6 + %multiply.1032.7 = fmul float %divide.631.1, %182 + %186 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1029 + %187 = load i8, ptr addrspace(1) %186, align 1, !invariant.load !142 + %188 = sitofp i8 %187 to float + %multiply.1870.7 = fmul float %188, 0x3FC3BF2820000000 + %multiply.5371.7 = fmul float %multiply.1032.7, %multiply.1870.7 + %add.57.i914.7 = fadd float %add.57.i914.6, %multiply.5371.7 + %multiply.1032.8 = fmul float %divide.631.2, %182 + %189 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1033 + %190 = load i8, ptr addrspace(1) %189, align 1, !invariant.load !142 + %191 = sitofp i8 %190 to float + %multiply.1870.8 = fmul float %191, 0x3FC3BF2820000000 + %multiply.5371.8 = fmul float %multiply.1032.8, %multiply.1870.8 + %add.57.i914.8 = fadd float %add.57.i914.7, %multiply.5371.8 + %192 = fptrunc float %add.57.i914.8 to half + %193 = getelementptr inbounds half, ptr addrspace(1) %100, i64 1 + store half %192, ptr addrspace(1) %193, align 2 + %194 = getelementptr inbounds i8, ptr addrspace(1) %144, 
i64 2 + %195 = load i8, ptr addrspace(1) %194, align 2, !invariant.load !142 + %196 = sitofp i8 %195 to float + %multiply.18122 = fmul float %196, 0x3FC3BF2820000000 + %multiply.53123 = fmul float %multiply.1032, %multiply.18122 + %add.57.i915 = fadd float %multiply.53123, 0.000000e+00 + %197 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 6 + %198 = load i8, ptr addrspace(1) %197, align 2, !invariant.load !142 + %199 = sitofp i8 %198 to float + %multiply.18122.1 = fmul float %199, 0x3FC3BF2820000000 + %multiply.53123.1 = fmul float %multiply.1032.1, %multiply.18122.1 + %add.57.i915.1 = fadd float %add.57.i915, %multiply.53123.1 + %200 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 10 + %201 = load i8, ptr addrspace(1) %200, align 2, !invariant.load !142 + %202 = sitofp i8 %201 to float + %multiply.18122.2 = fmul float %202, 0x3FC3BF2820000000 + %multiply.53123.2 = fmul float %multiply.1032.2, %multiply.18122.2 + %add.57.i915.2 = fadd float %add.57.i915.1, %multiply.53123.2 + %203 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 514 + %204 = load i8, ptr addrspace(1) %203, align 2, !invariant.load !142 + %205 = sitofp i8 %204 to float + %multiply.18122.3 = fmul float %205, 0x3FC3BF2820000000 + %multiply.53123.3 = fmul float %multiply.1032.3, %multiply.18122.3 + %add.57.i915.3 = fadd float %add.57.i915.2, %multiply.53123.3 + %206 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 518 + %207 = load i8, ptr addrspace(1) %206, align 2, !invariant.load !142 + %208 = sitofp i8 %207 to float + %multiply.18122.4 = fmul float %208, 0x3FC3BF2820000000 + %multiply.53123.4 = fmul float %multiply.1032.4, %multiply.18122.4 + %add.57.i915.4 = fadd float %add.57.i915.3, %multiply.53123.4 + %209 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 522 + %210 = load i8, ptr addrspace(1) %209, align 2, !invariant.load !142 + %211 = sitofp i8 %210 to float + %multiply.18122.5 = fmul float %211, 0x3FC3BF2820000000 + %multiply.53123.5 = fmul float %multiply.1032.5, %multiply.18122.5 + %add.57.i915.5 = fadd float %add.57.i915.4, %multiply.53123.5 + %212 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1026 + %213 = load i8, ptr addrspace(1) %212, align 2, !invariant.load !142 + %214 = sitofp i8 %213 to float + %multiply.18122.6 = fmul float %214, 0x3FC3BF2820000000 + %multiply.53123.6 = fmul float %multiply.1032.6, %multiply.18122.6 + %add.57.i915.6 = fadd float %add.57.i915.5, %multiply.53123.6 + %215 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1030 + %216 = load i8, ptr addrspace(1) %215, align 2, !invariant.load !142 + %217 = sitofp i8 %216 to float + %multiply.18122.7 = fmul float %217, 0x3FC3BF2820000000 + %multiply.53123.7 = fmul float %multiply.1032.7, %multiply.18122.7 + %add.57.i915.7 = fadd float %add.57.i915.6, %multiply.53123.7 + %218 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1034 + %219 = load i8, ptr addrspace(1) %218, align 2, !invariant.load !142 + %220 = sitofp i8 %219 to float + %multiply.18122.8 = fmul float %220, 0x3FC3BF2820000000 + %multiply.53123.8 = fmul float %multiply.1032.8, %multiply.18122.8 + %add.57.i915.8 = fadd float %add.57.i915.7, %multiply.53123.8 + %221 = fptrunc float %add.57.i915.8 to half + %222 = getelementptr inbounds half, ptr addrspace(1) %100, i64 2 + store half %221, ptr addrspace(1) %222, align 4 + %223 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3 + %224 = load i8, ptr addrspace(1) %223, align 1, !invariant.load !142 + %225 = sitofp i8 %224 to float + %multiply.18174 = fmul float %225, 
0x3FC3BF2820000000 + %multiply.53175 = fmul float %multiply.1032, %multiply.18174 + %add.57.i916 = fadd float %multiply.53175, 0.000000e+00 + %226 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 7 + %227 = load i8, ptr addrspace(1) %226, align 1, !invariant.load !142 + %228 = sitofp i8 %227 to float + %multiply.18174.1 = fmul float %228, 0x3FC3BF2820000000 + %multiply.53175.1 = fmul float %multiply.1032.1, %multiply.18174.1 + %add.57.i916.1 = fadd float %add.57.i916, %multiply.53175.1 + %229 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 11 + %230 = load i8, ptr addrspace(1) %229, align 1, !invariant.load !142 + %231 = sitofp i8 %230 to float + %multiply.18174.2 = fmul float %231, 0x3FC3BF2820000000 + %multiply.53175.2 = fmul float %multiply.1032.2, %multiply.18174.2 + %add.57.i916.2 = fadd float %add.57.i916.1, %multiply.53175.2 + %232 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 515 + %233 = load i8, ptr addrspace(1) %232, align 1, !invariant.load !142 + %234 = sitofp i8 %233 to float + %multiply.18174.3 = fmul float %234, 0x3FC3BF2820000000 + %multiply.53175.3 = fmul float %multiply.1032.3, %multiply.18174.3 + %add.57.i916.3 = fadd float %add.57.i916.2, %multiply.53175.3 + %235 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 519 + %236 = load i8, ptr addrspace(1) %235, align 1, !invariant.load !142 + %237 = sitofp i8 %236 to float + %multiply.18174.4 = fmul float %237, 0x3FC3BF2820000000 + %multiply.53175.4 = fmul float %multiply.1032.4, %multiply.18174.4 + %add.57.i916.4 = fadd float %add.57.i916.3, %multiply.53175.4 + %238 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 523 + %239 = load i8, ptr addrspace(1) %238, align 1, !invariant.load !142 + %240 = sitofp i8 %239 to float + %multiply.18174.5 = fmul float %240, 0x3FC3BF2820000000 + %multiply.53175.5 = fmul float %multiply.1032.5, %multiply.18174.5 + %add.57.i916.5 = fadd float %add.57.i916.4, %multiply.53175.5 + %241 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1027 + %242 = load i8, ptr addrspace(1) %241, align 1, !invariant.load !142 + %243 = sitofp i8 %242 to float + %multiply.18174.6 = fmul float %243, 0x3FC3BF2820000000 + %multiply.53175.6 = fmul float %multiply.1032.6, %multiply.18174.6 + %add.57.i916.6 = fadd float %add.57.i916.5, %multiply.53175.6 + %244 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1031 + %245 = load i8, ptr addrspace(1) %244, align 1, !invariant.load !142 + %246 = sitofp i8 %245 to float + %multiply.18174.7 = fmul float %246, 0x3FC3BF2820000000 + %multiply.53175.7 = fmul float %multiply.1032.7, %multiply.18174.7 + %add.57.i916.7 = fadd float %add.57.i916.6, %multiply.53175.7 + %247 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1035 + %248 = load i8, ptr addrspace(1) %247, align 1, !invariant.load !142 + %249 = sitofp i8 %248 to float + %multiply.18174.8 = fmul float %249, 0x3FC3BF2820000000 + %multiply.53175.8 = fmul float %multiply.1032.8, %multiply.18174.8 + %add.57.i916.8 = fadd float %add.57.i916.7, %multiply.53175.8 + %250 = fptrunc float %add.57.i916.8 to half + %251 = getelementptr inbounds half, ptr addrspace(1) %100, i64 3 + store half %250, ptr addrspace(1) %251, align 2 + %252 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1277952 + %253 = load i8, ptr addrspace(1) %252, align 4, !invariant.load !142 + %254 = sitofp i8 %253 to float + %multiply.18226 = fmul float %254, 0x3FC3BF2820000000 + %multiply.53227 = fmul float %multiply.1032, %multiply.18226 + %add.57.i917 = fadd float %multiply.53227, 0.000000e+00 + 
%255 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1277956 + %256 = load i8, ptr addrspace(1) %255, align 4, !invariant.load !142 + %257 = sitofp i8 %256 to float + %multiply.18226.1 = fmul float %257, 0x3FC3BF2820000000 + %multiply.53227.1 = fmul float %multiply.1032.1, %multiply.18226.1 + %add.57.i917.1 = fadd float %add.57.i917, %multiply.53227.1 + %258 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1277960 + %259 = load i8, ptr addrspace(1) %258, align 4, !invariant.load !142 + %260 = sitofp i8 %259 to float + %multiply.18226.2 = fmul float %260, 0x3FC3BF2820000000 + %multiply.53227.2 = fmul float %multiply.1032.2, %multiply.18226.2 + %add.57.i917.2 = fadd float %add.57.i917.1, %multiply.53227.2 + %261 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1278464 + %262 = load i8, ptr addrspace(1) %261, align 4, !invariant.load !142 + %263 = sitofp i8 %262 to float + %multiply.18226.3 = fmul float %263, 0x3FC3BF2820000000 + %multiply.53227.3 = fmul float %multiply.1032.3, %multiply.18226.3 + %add.57.i917.3 = fadd float %add.57.i917.2, %multiply.53227.3 + %264 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1278468 + %265 = load i8, ptr addrspace(1) %264, align 4, !invariant.load !142 + %266 = sitofp i8 %265 to float + %multiply.18226.4 = fmul float %266, 0x3FC3BF2820000000 + %multiply.53227.4 = fmul float %multiply.1032.4, %multiply.18226.4 + %add.57.i917.4 = fadd float %add.57.i917.3, %multiply.53227.4 + %267 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1278472 + %268 = load i8, ptr addrspace(1) %267, align 4, !invariant.load !142 + %269 = sitofp i8 %268 to float + %multiply.18226.5 = fmul float %269, 0x3FC3BF2820000000 + %multiply.53227.5 = fmul float %multiply.1032.5, %multiply.18226.5 + %add.57.i917.5 = fadd float %add.57.i917.4, %multiply.53227.5 + %270 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1278976 + %271 = load i8, ptr addrspace(1) %270, align 4, !invariant.load !142 + %272 = sitofp i8 %271 to float + %multiply.18226.6 = fmul float %272, 0x3FC3BF2820000000 + %multiply.53227.6 = fmul float %multiply.1032.6, %multiply.18226.6 + %add.57.i917.6 = fadd float %add.57.i917.5, %multiply.53227.6 + %273 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1278980 + %274 = load i8, ptr addrspace(1) %273, align 4, !invariant.load !142 + %275 = sitofp i8 %274 to float + %multiply.18226.7 = fmul float %275, 0x3FC3BF2820000000 + %multiply.53227.7 = fmul float %multiply.1032.7, %multiply.18226.7 + %add.57.i917.7 = fadd float %add.57.i917.6, %multiply.53227.7 + %276 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1278984 + %277 = load i8, ptr addrspace(1) %276, align 4, !invariant.load !142 + %278 = sitofp i8 %277 to float + %multiply.18226.8 = fmul float %278, 0x3FC3BF2820000000 + %multiply.53227.8 = fmul float %multiply.1032.8, %multiply.18226.8 + %add.57.i917.8 = fadd float %add.57.i917.7, %multiply.53227.8 + %279 = fptrunc float %add.57.i917.8 to half + %280 = getelementptr inbounds half, ptr addrspace(1) %100, i64 4 + store half %279, ptr addrspace(1) %280, align 8 + %281 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1277953 + %282 = load i8, ptr addrspace(1) %281, align 1, !invariant.load !142 + %283 = sitofp i8 %282 to float + %multiply.18278 = fmul float %283, 0x3FC3BF2820000000 + %multiply.53279 = fmul float %multiply.1032, %multiply.18278 + %add.57.i918 = fadd float %multiply.53279, 0.000000e+00 + %284 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1277957 + %285 = load i8, ptr addrspace(1) %284, 
align 1, !invariant.load !142 + %286 = sitofp i8 %285 to float + %multiply.18278.1 = fmul float %286, 0x3FC3BF2820000000 + %multiply.53279.1 = fmul float %multiply.1032.1, %multiply.18278.1 + %add.57.i918.1 = fadd float %add.57.i918, %multiply.53279.1 + %287 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1277961 + %288 = load i8, ptr addrspace(1) %287, align 1, !invariant.load !142 + %289 = sitofp i8 %288 to float + %multiply.18278.2 = fmul float %289, 0x3FC3BF2820000000 + %multiply.53279.2 = fmul float %multiply.1032.2, %multiply.18278.2 + %add.57.i918.2 = fadd float %add.57.i918.1, %multiply.53279.2 + %290 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1278465 + %291 = load i8, ptr addrspace(1) %290, align 1, !invariant.load !142 + %292 = sitofp i8 %291 to float + %multiply.18278.3 = fmul float %292, 0x3FC3BF2820000000 + %multiply.53279.3 = fmul float %multiply.1032.3, %multiply.18278.3 + %add.57.i918.3 = fadd float %add.57.i918.2, %multiply.53279.3 + %293 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1278469 + %294 = load i8, ptr addrspace(1) %293, align 1, !invariant.load !142 + %295 = sitofp i8 %294 to float + %multiply.18278.4 = fmul float %295, 0x3FC3BF2820000000 + %multiply.53279.4 = fmul float %multiply.1032.4, %multiply.18278.4 + %add.57.i918.4 = fadd float %add.57.i918.3, %multiply.53279.4 + %296 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1278473 + %297 = load i8, ptr addrspace(1) %296, align 1, !invariant.load !142 + %298 = sitofp i8 %297 to float + %multiply.18278.5 = fmul float %298, 0x3FC3BF2820000000 + %multiply.53279.5 = fmul float %multiply.1032.5, %multiply.18278.5 + %add.57.i918.5 = fadd float %add.57.i918.4, %multiply.53279.5 + %299 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1278977 + %300 = load i8, ptr addrspace(1) %299, align 1, !invariant.load !142 + %301 = sitofp i8 %300 to float + %multiply.18278.6 = fmul float %301, 0x3FC3BF2820000000 + %multiply.53279.6 = fmul float %multiply.1032.6, %multiply.18278.6 + %add.57.i918.6 = fadd float %add.57.i918.5, %multiply.53279.6 + %302 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1278981 + %303 = load i8, ptr addrspace(1) %302, align 1, !invariant.load !142 + %304 = sitofp i8 %303 to float + %multiply.18278.7 = fmul float %304, 0x3FC3BF2820000000 + %multiply.53279.7 = fmul float %multiply.1032.7, %multiply.18278.7 + %add.57.i918.7 = fadd float %add.57.i918.6, %multiply.53279.7 + %305 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1278985 + %306 = load i8, ptr addrspace(1) %305, align 1, !invariant.load !142 + %307 = sitofp i8 %306 to float + %multiply.18278.8 = fmul float %307, 0x3FC3BF2820000000 + %multiply.53279.8 = fmul float %multiply.1032.8, %multiply.18278.8 + %add.57.i918.8 = fadd float %add.57.i918.7, %multiply.53279.8 + %308 = fptrunc float %add.57.i918.8 to half + %309 = getelementptr inbounds half, ptr addrspace(1) %100, i64 5 + store half %308, ptr addrspace(1) %309, align 2 + %310 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1277954 + %311 = load i8, ptr addrspace(1) %310, align 2, !invariant.load !142 + %312 = sitofp i8 %311 to float + %multiply.18330 = fmul float %312, 0x3FC3BF2820000000 + %multiply.53331 = fmul float %multiply.1032, %multiply.18330 + %add.57.i919 = fadd float %multiply.53331, 0.000000e+00 + %313 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1277958 + %314 = load i8, ptr addrspace(1) %313, align 2, !invariant.load !142 + %315 = sitofp i8 %314 to float + %multiply.18330.1 = fmul float %315, 
0x3FC3BF2820000000 + %multiply.53331.1 = fmul float %multiply.1032.1, %multiply.18330.1 + %add.57.i919.1 = fadd float %add.57.i919, %multiply.53331.1 + %316 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1277962 + %317 = load i8, ptr addrspace(1) %316, align 2, !invariant.load !142 + %318 = sitofp i8 %317 to float + %multiply.18330.2 = fmul float %318, 0x3FC3BF2820000000 + %multiply.53331.2 = fmul float %multiply.1032.2, %multiply.18330.2 + %add.57.i919.2 = fadd float %add.57.i919.1, %multiply.53331.2 + %319 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1278466 + %320 = load i8, ptr addrspace(1) %319, align 2, !invariant.load !142 + %321 = sitofp i8 %320 to float + %multiply.18330.3 = fmul float %321, 0x3FC3BF2820000000 + %multiply.53331.3 = fmul float %multiply.1032.3, %multiply.18330.3 + %add.57.i919.3 = fadd float %add.57.i919.2, %multiply.53331.3 + %322 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1278470 + %323 = load i8, ptr addrspace(1) %322, align 2, !invariant.load !142 + %324 = sitofp i8 %323 to float + %multiply.18330.4 = fmul float %324, 0x3FC3BF2820000000 + %multiply.53331.4 = fmul float %multiply.1032.4, %multiply.18330.4 + %add.57.i919.4 = fadd float %add.57.i919.3, %multiply.53331.4 + %325 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1278474 + %326 = load i8, ptr addrspace(1) %325, align 2, !invariant.load !142 + %327 = sitofp i8 %326 to float + %multiply.18330.5 = fmul float %327, 0x3FC3BF2820000000 + %multiply.53331.5 = fmul float %multiply.1032.5, %multiply.18330.5 + %add.57.i919.5 = fadd float %add.57.i919.4, %multiply.53331.5 + %328 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1278978 + %329 = load i8, ptr addrspace(1) %328, align 2, !invariant.load !142 + %330 = sitofp i8 %329 to float + %multiply.18330.6 = fmul float %330, 0x3FC3BF2820000000 + %multiply.53331.6 = fmul float %multiply.1032.6, %multiply.18330.6 + %add.57.i919.6 = fadd float %add.57.i919.5, %multiply.53331.6 + %331 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1278982 + %332 = load i8, ptr addrspace(1) %331, align 2, !invariant.load !142 + %333 = sitofp i8 %332 to float + %multiply.18330.7 = fmul float %333, 0x3FC3BF2820000000 + %multiply.53331.7 = fmul float %multiply.1032.7, %multiply.18330.7 + %add.57.i919.7 = fadd float %add.57.i919.6, %multiply.53331.7 + %334 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1278986 + %335 = load i8, ptr addrspace(1) %334, align 2, !invariant.load !142 + %336 = sitofp i8 %335 to float + %multiply.18330.8 = fmul float %336, 0x3FC3BF2820000000 + %multiply.53331.8 = fmul float %multiply.1032.8, %multiply.18330.8 + %add.57.i919.8 = fadd float %add.57.i919.7, %multiply.53331.8 + %337 = fptrunc float %add.57.i919.8 to half + %338 = getelementptr inbounds half, ptr addrspace(1) %100, i64 6 + store half %337, ptr addrspace(1) %338, align 4 + %339 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1277955 + %340 = load i8, ptr addrspace(1) %339, align 1, !invariant.load !142 + %341 = sitofp i8 %340 to float + %multiply.18382 = fmul float %341, 0x3FC3BF2820000000 + %multiply.53383 = fmul float %multiply.1032, %multiply.18382 + %add.57.i920 = fadd float %multiply.53383, 0.000000e+00 + %342 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1277959 + %343 = load i8, ptr addrspace(1) %342, align 1, !invariant.load !142 + %344 = sitofp i8 %343 to float + %multiply.18382.1 = fmul float %344, 0x3FC3BF2820000000 + %multiply.53383.1 = fmul float %multiply.1032.1, %multiply.18382.1 + %add.57.i920.1 = fadd 
float %add.57.i920, %multiply.53383.1 + %345 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1277963 + %346 = load i8, ptr addrspace(1) %345, align 1, !invariant.load !142 + %347 = sitofp i8 %346 to float + %multiply.18382.2 = fmul float %347, 0x3FC3BF2820000000 + %multiply.53383.2 = fmul float %multiply.1032.2, %multiply.18382.2 + %add.57.i920.2 = fadd float %add.57.i920.1, %multiply.53383.2 + %348 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1278467 + %349 = load i8, ptr addrspace(1) %348, align 1, !invariant.load !142 + %350 = sitofp i8 %349 to float + %multiply.18382.3 = fmul float %350, 0x3FC3BF2820000000 + %multiply.53383.3 = fmul float %multiply.1032.3, %multiply.18382.3 + %add.57.i920.3 = fadd float %add.57.i920.2, %multiply.53383.3 + %351 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1278471 + %352 = load i8, ptr addrspace(1) %351, align 1, !invariant.load !142 + %353 = sitofp i8 %352 to float + %multiply.18382.4 = fmul float %353, 0x3FC3BF2820000000 + %multiply.53383.4 = fmul float %multiply.1032.4, %multiply.18382.4 + %add.57.i920.4 = fadd float %add.57.i920.3, %multiply.53383.4 + %354 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1278475 + %355 = load i8, ptr addrspace(1) %354, align 1, !invariant.load !142 + %356 = sitofp i8 %355 to float + %multiply.18382.5 = fmul float %356, 0x3FC3BF2820000000 + %multiply.53383.5 = fmul float %multiply.1032.5, %multiply.18382.5 + %add.57.i920.5 = fadd float %add.57.i920.4, %multiply.53383.5 + %357 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1278979 + %358 = load i8, ptr addrspace(1) %357, align 1, !invariant.load !142 + %359 = sitofp i8 %358 to float + %multiply.18382.6 = fmul float %359, 0x3FC3BF2820000000 + %multiply.53383.6 = fmul float %multiply.1032.6, %multiply.18382.6 + %add.57.i920.6 = fadd float %add.57.i920.5, %multiply.53383.6 + %360 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1278983 + %361 = load i8, ptr addrspace(1) %360, align 1, !invariant.load !142 + %362 = sitofp i8 %361 to float + %multiply.18382.7 = fmul float %362, 0x3FC3BF2820000000 + %multiply.53383.7 = fmul float %multiply.1032.7, %multiply.18382.7 + %add.57.i920.7 = fadd float %add.57.i920.6, %multiply.53383.7 + %363 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 1278987 + %364 = load i8, ptr addrspace(1) %363, align 1, !invariant.load !142 + %365 = sitofp i8 %364 to float + %multiply.18382.8 = fmul float %365, 0x3FC3BF2820000000 + %multiply.53383.8 = fmul float %multiply.1032.8, %multiply.18382.8 + %add.57.i920.8 = fadd float %add.57.i920.7, %multiply.53383.8 + %366 = fptrunc float %add.57.i920.8 to half + %367 = getelementptr inbounds half, ptr addrspace(1) %100, i64 7 + store half %366, ptr addrspace(1) %367, align 2 + %368 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2555904 + %369 = load i8, ptr addrspace(1) %368, align 4, !invariant.load !142 + %370 = sitofp i8 %369 to float + %multiply.18434 = fmul float %370, 0x3FC3BF2820000000 + %multiply.53435 = fmul float %multiply.1032, %multiply.18434 + %add.57.i921 = fadd float %multiply.53435, 0.000000e+00 + %371 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2555908 + %372 = load i8, ptr addrspace(1) %371, align 4, !invariant.load !142 + %373 = sitofp i8 %372 to float + %multiply.18434.1 = fmul float %373, 0x3FC3BF2820000000 + %multiply.53435.1 = fmul float %multiply.1032.1, %multiply.18434.1 + %add.57.i921.1 = fadd float %add.57.i921, %multiply.53435.1 + %374 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2555912 + 
%375 = load i8, ptr addrspace(1) %374, align 4, !invariant.load !142 + %376 = sitofp i8 %375 to float + %multiply.18434.2 = fmul float %376, 0x3FC3BF2820000000 + %multiply.53435.2 = fmul float %multiply.1032.2, %multiply.18434.2 + %add.57.i921.2 = fadd float %add.57.i921.1, %multiply.53435.2 + %377 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2556416 + %378 = load i8, ptr addrspace(1) %377, align 4, !invariant.load !142 + %379 = sitofp i8 %378 to float + %multiply.18434.3 = fmul float %379, 0x3FC3BF2820000000 + %multiply.53435.3 = fmul float %multiply.1032.3, %multiply.18434.3 + %add.57.i921.3 = fadd float %add.57.i921.2, %multiply.53435.3 + %380 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2556420 + %381 = load i8, ptr addrspace(1) %380, align 4, !invariant.load !142 + %382 = sitofp i8 %381 to float + %multiply.18434.4 = fmul float %382, 0x3FC3BF2820000000 + %multiply.53435.4 = fmul float %multiply.1032.4, %multiply.18434.4 + %add.57.i921.4 = fadd float %add.57.i921.3, %multiply.53435.4 + %383 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2556424 + %384 = load i8, ptr addrspace(1) %383, align 4, !invariant.load !142 + %385 = sitofp i8 %384 to float + %multiply.18434.5 = fmul float %385, 0x3FC3BF2820000000 + %multiply.53435.5 = fmul float %multiply.1032.5, %multiply.18434.5 + %add.57.i921.5 = fadd float %add.57.i921.4, %multiply.53435.5 + %386 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2556928 + %387 = load i8, ptr addrspace(1) %386, align 4, !invariant.load !142 + %388 = sitofp i8 %387 to float + %multiply.18434.6 = fmul float %388, 0x3FC3BF2820000000 + %multiply.53435.6 = fmul float %multiply.1032.6, %multiply.18434.6 + %add.57.i921.6 = fadd float %add.57.i921.5, %multiply.53435.6 + %389 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2556932 + %390 = load i8, ptr addrspace(1) %389, align 4, !invariant.load !142 + %391 = sitofp i8 %390 to float + %multiply.18434.7 = fmul float %391, 0x3FC3BF2820000000 + %multiply.53435.7 = fmul float %multiply.1032.7, %multiply.18434.7 + %add.57.i921.7 = fadd float %add.57.i921.6, %multiply.53435.7 + %392 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2556936 + %393 = load i8, ptr addrspace(1) %392, align 4, !invariant.load !142 + %394 = sitofp i8 %393 to float + %multiply.18434.8 = fmul float %394, 0x3FC3BF2820000000 + %multiply.53435.8 = fmul float %multiply.1032.8, %multiply.18434.8 + %add.57.i921.8 = fadd float %add.57.i921.7, %multiply.53435.8 + %395 = fptrunc float %add.57.i921.8 to half + %396 = getelementptr inbounds half, ptr addrspace(1) %100, i64 8 + store half %395, ptr addrspace(1) %396, align 16 + %397 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2555905 + %398 = load i8, ptr addrspace(1) %397, align 1, !invariant.load !142 + %399 = sitofp i8 %398 to float + %multiply.18486 = fmul float %399, 0x3FC3BF2820000000 + %multiply.53487 = fmul float %multiply.1032, %multiply.18486 + %add.57.i922 = fadd float %multiply.53487, 0.000000e+00 + %400 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2555909 + %401 = load i8, ptr addrspace(1) %400, align 1, !invariant.load !142 + %402 = sitofp i8 %401 to float + %multiply.18486.1 = fmul float %402, 0x3FC3BF2820000000 + %multiply.53487.1 = fmul float %multiply.1032.1, %multiply.18486.1 + %add.57.i922.1 = fadd float %add.57.i922, %multiply.53487.1 + %403 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2555913 + %404 = load i8, ptr addrspace(1) %403, align 1, !invariant.load !142 + %405 = sitofp i8 %404 to float + 
%multiply.18486.2 = fmul float %405, 0x3FC3BF2820000000 + %multiply.53487.2 = fmul float %multiply.1032.2, %multiply.18486.2 + %add.57.i922.2 = fadd float %add.57.i922.1, %multiply.53487.2 + %406 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2556417 + %407 = load i8, ptr addrspace(1) %406, align 1, !invariant.load !142 + %408 = sitofp i8 %407 to float + %multiply.18486.3 = fmul float %408, 0x3FC3BF2820000000 + %multiply.53487.3 = fmul float %multiply.1032.3, %multiply.18486.3 + %add.57.i922.3 = fadd float %add.57.i922.2, %multiply.53487.3 + %409 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2556421 + %410 = load i8, ptr addrspace(1) %409, align 1, !invariant.load !142 + %411 = sitofp i8 %410 to float + %multiply.18486.4 = fmul float %411, 0x3FC3BF2820000000 + %multiply.53487.4 = fmul float %multiply.1032.4, %multiply.18486.4 + %add.57.i922.4 = fadd float %add.57.i922.3, %multiply.53487.4 + %412 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2556425 + %413 = load i8, ptr addrspace(1) %412, align 1, !invariant.load !142 + %414 = sitofp i8 %413 to float + %multiply.18486.5 = fmul float %414, 0x3FC3BF2820000000 + %multiply.53487.5 = fmul float %multiply.1032.5, %multiply.18486.5 + %add.57.i922.5 = fadd float %add.57.i922.4, %multiply.53487.5 + %415 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2556929 + %416 = load i8, ptr addrspace(1) %415, align 1, !invariant.load !142 + %417 = sitofp i8 %416 to float + %multiply.18486.6 = fmul float %417, 0x3FC3BF2820000000 + %multiply.53487.6 = fmul float %multiply.1032.6, %multiply.18486.6 + %add.57.i922.6 = fadd float %add.57.i922.5, %multiply.53487.6 + %418 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2556933 + %419 = load i8, ptr addrspace(1) %418, align 1, !invariant.load !142 + %420 = sitofp i8 %419 to float + %multiply.18486.7 = fmul float %420, 0x3FC3BF2820000000 + %multiply.53487.7 = fmul float %multiply.1032.7, %multiply.18486.7 + %add.57.i922.7 = fadd float %add.57.i922.6, %multiply.53487.7 + %421 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2556937 + %422 = load i8, ptr addrspace(1) %421, align 1, !invariant.load !142 + %423 = sitofp i8 %422 to float + %multiply.18486.8 = fmul float %423, 0x3FC3BF2820000000 + %multiply.53487.8 = fmul float %multiply.1032.8, %multiply.18486.8 + %add.57.i922.8 = fadd float %add.57.i922.7, %multiply.53487.8 + %424 = fptrunc float %add.57.i922.8 to half + %425 = getelementptr inbounds half, ptr addrspace(1) %100, i64 9 + store half %424, ptr addrspace(1) %425, align 2 + %426 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2555906 + %427 = load i8, ptr addrspace(1) %426, align 2, !invariant.load !142 + %428 = sitofp i8 %427 to float + %multiply.18538 = fmul float %428, 0x3FC3BF2820000000 + %multiply.53539 = fmul float %multiply.1032, %multiply.18538 + %add.57.i923 = fadd float %multiply.53539, 0.000000e+00 + %429 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2555910 + %430 = load i8, ptr addrspace(1) %429, align 2, !invariant.load !142 + %431 = sitofp i8 %430 to float + %multiply.18538.1 = fmul float %431, 0x3FC3BF2820000000 + %multiply.53539.1 = fmul float %multiply.1032.1, %multiply.18538.1 + %add.57.i923.1 = fadd float %add.57.i923, %multiply.53539.1 + %432 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2555914 + %433 = load i8, ptr addrspace(1) %432, align 2, !invariant.load !142 + %434 = sitofp i8 %433 to float + %multiply.18538.2 = fmul float %434, 0x3FC3BF2820000000 + %multiply.53539.2 = fmul float %multiply.1032.2, 
%multiply.18538.2 + %add.57.i923.2 = fadd float %add.57.i923.1, %multiply.53539.2 + %435 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2556418 + %436 = load i8, ptr addrspace(1) %435, align 2, !invariant.load !142 + %437 = sitofp i8 %436 to float + %multiply.18538.3 = fmul float %437, 0x3FC3BF2820000000 + %multiply.53539.3 = fmul float %multiply.1032.3, %multiply.18538.3 + %add.57.i923.3 = fadd float %add.57.i923.2, %multiply.53539.3 + %438 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2556422 + %439 = load i8, ptr addrspace(1) %438, align 2, !invariant.load !142 + %440 = sitofp i8 %439 to float + %multiply.18538.4 = fmul float %440, 0x3FC3BF2820000000 + %multiply.53539.4 = fmul float %multiply.1032.4, %multiply.18538.4 + %add.57.i923.4 = fadd float %add.57.i923.3, %multiply.53539.4 + %441 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2556426 + %442 = load i8, ptr addrspace(1) %441, align 2, !invariant.load !142 + %443 = sitofp i8 %442 to float + %multiply.18538.5 = fmul float %443, 0x3FC3BF2820000000 + %multiply.53539.5 = fmul float %multiply.1032.5, %multiply.18538.5 + %add.57.i923.5 = fadd float %add.57.i923.4, %multiply.53539.5 + %444 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2556930 + %445 = load i8, ptr addrspace(1) %444, align 2, !invariant.load !142 + %446 = sitofp i8 %445 to float + %multiply.18538.6 = fmul float %446, 0x3FC3BF2820000000 + %multiply.53539.6 = fmul float %multiply.1032.6, %multiply.18538.6 + %add.57.i923.6 = fadd float %add.57.i923.5, %multiply.53539.6 + %447 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2556934 + %448 = load i8, ptr addrspace(1) %447, align 2, !invariant.load !142 + %449 = sitofp i8 %448 to float + %multiply.18538.7 = fmul float %449, 0x3FC3BF2820000000 + %multiply.53539.7 = fmul float %multiply.1032.7, %multiply.18538.7 + %add.57.i923.7 = fadd float %add.57.i923.6, %multiply.53539.7 + %450 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2556938 + %451 = load i8, ptr addrspace(1) %450, align 2, !invariant.load !142 + %452 = sitofp i8 %451 to float + %multiply.18538.8 = fmul float %452, 0x3FC3BF2820000000 + %multiply.53539.8 = fmul float %multiply.1032.8, %multiply.18538.8 + %add.57.i923.8 = fadd float %add.57.i923.7, %multiply.53539.8 + %453 = fptrunc float %add.57.i923.8 to half + %454 = getelementptr inbounds half, ptr addrspace(1) %100, i64 10 + store half %453, ptr addrspace(1) %454, align 4 + %455 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2555907 + %456 = load i8, ptr addrspace(1) %455, align 1, !invariant.load !142 + %457 = sitofp i8 %456 to float + %multiply.18590 = fmul float %457, 0x3FC3BF2820000000 + %multiply.53591 = fmul float %multiply.1032, %multiply.18590 + %add.57.i924 = fadd float %multiply.53591, 0.000000e+00 + %458 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2555911 + %459 = load i8, ptr addrspace(1) %458, align 1, !invariant.load !142 + %460 = sitofp i8 %459 to float + %multiply.18590.1 = fmul float %460, 0x3FC3BF2820000000 + %multiply.53591.1 = fmul float %multiply.1032.1, %multiply.18590.1 + %add.57.i924.1 = fadd float %add.57.i924, %multiply.53591.1 + %461 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2555915 + %462 = load i8, ptr addrspace(1) %461, align 1, !invariant.load !142 + %463 = sitofp i8 %462 to float + %multiply.18590.2 = fmul float %463, 0x3FC3BF2820000000 + %multiply.53591.2 = fmul float %multiply.1032.2, %multiply.18590.2 + %add.57.i924.2 = fadd float %add.57.i924.1, %multiply.53591.2 + %464 = getelementptr 
inbounds i8, ptr addrspace(1) %144, i64 2556419 + %465 = load i8, ptr addrspace(1) %464, align 1, !invariant.load !142 + %466 = sitofp i8 %465 to float + %multiply.18590.3 = fmul float %466, 0x3FC3BF2820000000 + %multiply.53591.3 = fmul float %multiply.1032.3, %multiply.18590.3 + %add.57.i924.3 = fadd float %add.57.i924.2, %multiply.53591.3 + %467 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2556423 + %468 = load i8, ptr addrspace(1) %467, align 1, !invariant.load !142 + %469 = sitofp i8 %468 to float + %multiply.18590.4 = fmul float %469, 0x3FC3BF2820000000 + %multiply.53591.4 = fmul float %multiply.1032.4, %multiply.18590.4 + %add.57.i924.4 = fadd float %add.57.i924.3, %multiply.53591.4 + %470 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2556427 + %471 = load i8, ptr addrspace(1) %470, align 1, !invariant.load !142 + %472 = sitofp i8 %471 to float + %multiply.18590.5 = fmul float %472, 0x3FC3BF2820000000 + %multiply.53591.5 = fmul float %multiply.1032.5, %multiply.18590.5 + %add.57.i924.5 = fadd float %add.57.i924.4, %multiply.53591.5 + %473 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2556931 + %474 = load i8, ptr addrspace(1) %473, align 1, !invariant.load !142 + %475 = sitofp i8 %474 to float + %multiply.18590.6 = fmul float %475, 0x3FC3BF2820000000 + %multiply.53591.6 = fmul float %multiply.1032.6, %multiply.18590.6 + %add.57.i924.6 = fadd float %add.57.i924.5, %multiply.53591.6 + %476 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2556935 + %477 = load i8, ptr addrspace(1) %476, align 1, !invariant.load !142 + %478 = sitofp i8 %477 to float + %multiply.18590.7 = fmul float %478, 0x3FC3BF2820000000 + %multiply.53591.7 = fmul float %multiply.1032.7, %multiply.18590.7 + %add.57.i924.7 = fadd float %add.57.i924.6, %multiply.53591.7 + %479 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 2556939 + %480 = load i8, ptr addrspace(1) %479, align 1, !invariant.load !142 + %481 = sitofp i8 %480 to float + %multiply.18590.8 = fmul float %481, 0x3FC3BF2820000000 + %multiply.53591.8 = fmul float %multiply.1032.8, %multiply.18590.8 + %add.57.i924.8 = fadd float %add.57.i924.7, %multiply.53591.8 + %482 = fptrunc float %add.57.i924.8 to half + %483 = getelementptr inbounds half, ptr addrspace(1) %100, i64 11 + store half %482, ptr addrspace(1) %483, align 2 + %484 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3833856 + %485 = load i8, ptr addrspace(1) %484, align 4, !invariant.load !142 + %486 = sitofp i8 %485 to float + %multiply.18642 = fmul float %486, 0x3FC3BF2820000000 + %multiply.53643 = fmul float %multiply.1032, %multiply.18642 + %add.57.i925 = fadd float %multiply.53643, 0.000000e+00 + %487 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3833860 + %488 = load i8, ptr addrspace(1) %487, align 4, !invariant.load !142 + %489 = sitofp i8 %488 to float + %multiply.18642.1 = fmul float %489, 0x3FC3BF2820000000 + %multiply.53643.1 = fmul float %multiply.1032.1, %multiply.18642.1 + %add.57.i925.1 = fadd float %add.57.i925, %multiply.53643.1 + %490 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3833864 + %491 = load i8, ptr addrspace(1) %490, align 4, !invariant.load !142 + %492 = sitofp i8 %491 to float + %multiply.18642.2 = fmul float %492, 0x3FC3BF2820000000 + %multiply.53643.2 = fmul float %multiply.1032.2, %multiply.18642.2 + %add.57.i925.2 = fadd float %add.57.i925.1, %multiply.53643.2 + %493 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3834368 + %494 = load i8, ptr addrspace(1) %493, align 4, 
!invariant.load !142 + %495 = sitofp i8 %494 to float + %multiply.18642.3 = fmul float %495, 0x3FC3BF2820000000 + %multiply.53643.3 = fmul float %multiply.1032.3, %multiply.18642.3 + %add.57.i925.3 = fadd float %add.57.i925.2, %multiply.53643.3 + %496 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3834372 + %497 = load i8, ptr addrspace(1) %496, align 4, !invariant.load !142 + %498 = sitofp i8 %497 to float + %multiply.18642.4 = fmul float %498, 0x3FC3BF2820000000 + %multiply.53643.4 = fmul float %multiply.1032.4, %multiply.18642.4 + %add.57.i925.4 = fadd float %add.57.i925.3, %multiply.53643.4 + %499 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3834376 + %500 = load i8, ptr addrspace(1) %499, align 4, !invariant.load !142 + %501 = sitofp i8 %500 to float + %multiply.18642.5 = fmul float %501, 0x3FC3BF2820000000 + %multiply.53643.5 = fmul float %multiply.1032.5, %multiply.18642.5 + %add.57.i925.5 = fadd float %add.57.i925.4, %multiply.53643.5 + %502 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3834880 + %503 = load i8, ptr addrspace(1) %502, align 4, !invariant.load !142 + %504 = sitofp i8 %503 to float + %multiply.18642.6 = fmul float %504, 0x3FC3BF2820000000 + %multiply.53643.6 = fmul float %multiply.1032.6, %multiply.18642.6 + %add.57.i925.6 = fadd float %add.57.i925.5, %multiply.53643.6 + %505 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3834884 + %506 = load i8, ptr addrspace(1) %505, align 4, !invariant.load !142 + %507 = sitofp i8 %506 to float + %multiply.18642.7 = fmul float %507, 0x3FC3BF2820000000 + %multiply.53643.7 = fmul float %multiply.1032.7, %multiply.18642.7 + %add.57.i925.7 = fadd float %add.57.i925.6, %multiply.53643.7 + %508 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3834888 + %509 = load i8, ptr addrspace(1) %508, align 4, !invariant.load !142 + %510 = sitofp i8 %509 to float + %multiply.18642.8 = fmul float %510, 0x3FC3BF2820000000 + %multiply.53643.8 = fmul float %multiply.1032.8, %multiply.18642.8 + %add.57.i925.8 = fadd float %add.57.i925.7, %multiply.53643.8 + %511 = fptrunc float %add.57.i925.8 to half + %512 = getelementptr inbounds half, ptr addrspace(1) %100, i64 12 + store half %511, ptr addrspace(1) %512, align 8 + %513 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3833857 + %514 = load i8, ptr addrspace(1) %513, align 1, !invariant.load !142 + %515 = sitofp i8 %514 to float + %multiply.18694 = fmul float %515, 0x3FC3BF2820000000 + %multiply.53695 = fmul float %multiply.1032, %multiply.18694 + %add.57.i926 = fadd float %multiply.53695, 0.000000e+00 + %516 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3833861 + %517 = load i8, ptr addrspace(1) %516, align 1, !invariant.load !142 + %518 = sitofp i8 %517 to float + %multiply.18694.1 = fmul float %518, 0x3FC3BF2820000000 + %multiply.53695.1 = fmul float %multiply.1032.1, %multiply.18694.1 + %add.57.i926.1 = fadd float %add.57.i926, %multiply.53695.1 + %519 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3833865 + %520 = load i8, ptr addrspace(1) %519, align 1, !invariant.load !142 + %521 = sitofp i8 %520 to float + %multiply.18694.2 = fmul float %521, 0x3FC3BF2820000000 + %multiply.53695.2 = fmul float %multiply.1032.2, %multiply.18694.2 + %add.57.i926.2 = fadd float %add.57.i926.1, %multiply.53695.2 + %522 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3834369 + %523 = load i8, ptr addrspace(1) %522, align 1, !invariant.load !142 + %524 = sitofp i8 %523 to float + %multiply.18694.3 = fmul float %524, 0x3FC3BF2820000000 
+ %multiply.53695.3 = fmul float %multiply.1032.3, %multiply.18694.3 + %add.57.i926.3 = fadd float %add.57.i926.2, %multiply.53695.3 + %525 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3834373 + %526 = load i8, ptr addrspace(1) %525, align 1, !invariant.load !142 + %527 = sitofp i8 %526 to float + %multiply.18694.4 = fmul float %527, 0x3FC3BF2820000000 + %multiply.53695.4 = fmul float %multiply.1032.4, %multiply.18694.4 + %add.57.i926.4 = fadd float %add.57.i926.3, %multiply.53695.4 + %528 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3834377 + %529 = load i8, ptr addrspace(1) %528, align 1, !invariant.load !142 + %530 = sitofp i8 %529 to float + %multiply.18694.5 = fmul float %530, 0x3FC3BF2820000000 + %multiply.53695.5 = fmul float %multiply.1032.5, %multiply.18694.5 + %add.57.i926.5 = fadd float %add.57.i926.4, %multiply.53695.5 + %531 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3834881 + %532 = load i8, ptr addrspace(1) %531, align 1, !invariant.load !142 + %533 = sitofp i8 %532 to float + %multiply.18694.6 = fmul float %533, 0x3FC3BF2820000000 + %multiply.53695.6 = fmul float %multiply.1032.6, %multiply.18694.6 + %add.57.i926.6 = fadd float %add.57.i926.5, %multiply.53695.6 + %534 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3834885 + %535 = load i8, ptr addrspace(1) %534, align 1, !invariant.load !142 + %536 = sitofp i8 %535 to float + %multiply.18694.7 = fmul float %536, 0x3FC3BF2820000000 + %multiply.53695.7 = fmul float %multiply.1032.7, %multiply.18694.7 + %add.57.i926.7 = fadd float %add.57.i926.6, %multiply.53695.7 + %537 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3834889 + %538 = load i8, ptr addrspace(1) %537, align 1, !invariant.load !142 + %539 = sitofp i8 %538 to float + %multiply.18694.8 = fmul float %539, 0x3FC3BF2820000000 + %multiply.53695.8 = fmul float %multiply.1032.8, %multiply.18694.8 + %add.57.i926.8 = fadd float %add.57.i926.7, %multiply.53695.8 + %540 = fptrunc float %add.57.i926.8 to half + %541 = getelementptr inbounds half, ptr addrspace(1) %100, i64 13 + store half %540, ptr addrspace(1) %541, align 2 + %542 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3833858 + %543 = load i8, ptr addrspace(1) %542, align 2, !invariant.load !142 + %544 = sitofp i8 %543 to float + %multiply.18746 = fmul float %544, 0x3FC3BF2820000000 + %multiply.53747 = fmul float %multiply.1032, %multiply.18746 + %add.57.i927 = fadd float %multiply.53747, 0.000000e+00 + %545 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3833862 + %546 = load i8, ptr addrspace(1) %545, align 2, !invariant.load !142 + %547 = sitofp i8 %546 to float + %multiply.18746.1 = fmul float %547, 0x3FC3BF2820000000 + %multiply.53747.1 = fmul float %multiply.1032.1, %multiply.18746.1 + %add.57.i927.1 = fadd float %add.57.i927, %multiply.53747.1 + %548 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3833866 + %549 = load i8, ptr addrspace(1) %548, align 2, !invariant.load !142 + %550 = sitofp i8 %549 to float + %multiply.18746.2 = fmul float %550, 0x3FC3BF2820000000 + %multiply.53747.2 = fmul float %multiply.1032.2, %multiply.18746.2 + %add.57.i927.2 = fadd float %add.57.i927.1, %multiply.53747.2 + %551 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3834370 + %552 = load i8, ptr addrspace(1) %551, align 2, !invariant.load !142 + %553 = sitofp i8 %552 to float + %multiply.18746.3 = fmul float %553, 0x3FC3BF2820000000 + %multiply.53747.3 = fmul float %multiply.1032.3, %multiply.18746.3 + %add.57.i927.3 = fadd float 
%add.57.i927.2, %multiply.53747.3 + %554 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3834374 + %555 = load i8, ptr addrspace(1) %554, align 2, !invariant.load !142 + %556 = sitofp i8 %555 to float + %multiply.18746.4 = fmul float %556, 0x3FC3BF2820000000 + %multiply.53747.4 = fmul float %multiply.1032.4, %multiply.18746.4 + %add.57.i927.4 = fadd float %add.57.i927.3, %multiply.53747.4 + %557 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3834378 + %558 = load i8, ptr addrspace(1) %557, align 2, !invariant.load !142 + %559 = sitofp i8 %558 to float + %multiply.18746.5 = fmul float %559, 0x3FC3BF2820000000 + %multiply.53747.5 = fmul float %multiply.1032.5, %multiply.18746.5 + %add.57.i927.5 = fadd float %add.57.i927.4, %multiply.53747.5 + %560 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3834882 + %561 = load i8, ptr addrspace(1) %560, align 2, !invariant.load !142 + %562 = sitofp i8 %561 to float + %multiply.18746.6 = fmul float %562, 0x3FC3BF2820000000 + %multiply.53747.6 = fmul float %multiply.1032.6, %multiply.18746.6 + %add.57.i927.6 = fadd float %add.57.i927.5, %multiply.53747.6 + %563 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3834886 + %564 = load i8, ptr addrspace(1) %563, align 2, !invariant.load !142 + %565 = sitofp i8 %564 to float + %multiply.18746.7 = fmul float %565, 0x3FC3BF2820000000 + %multiply.53747.7 = fmul float %multiply.1032.7, %multiply.18746.7 + %add.57.i927.7 = fadd float %add.57.i927.6, %multiply.53747.7 + %566 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3834890 + %567 = load i8, ptr addrspace(1) %566, align 2, !invariant.load !142 + %568 = sitofp i8 %567 to float + %multiply.18746.8 = fmul float %568, 0x3FC3BF2820000000 + %multiply.53747.8 = fmul float %multiply.1032.8, %multiply.18746.8 + %add.57.i927.8 = fadd float %add.57.i927.7, %multiply.53747.8 + %569 = fptrunc float %add.57.i927.8 to half + %570 = getelementptr inbounds half, ptr addrspace(1) %100, i64 14 + store half %569, ptr addrspace(1) %570, align 4 + %571 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3833859 + %572 = load i8, ptr addrspace(1) %571, align 1, !invariant.load !142 + %573 = sitofp i8 %572 to float + %multiply.18798 = fmul float %573, 0x3FC3BF2820000000 + %multiply.53799 = fmul float %multiply.1032, %multiply.18798 + %add.57.i928 = fadd float %multiply.53799, 0.000000e+00 + %574 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3833863 + %575 = load i8, ptr addrspace(1) %574, align 1, !invariant.load !142 + %576 = sitofp i8 %575 to float + %multiply.18798.1 = fmul float %576, 0x3FC3BF2820000000 + %multiply.53799.1 = fmul float %multiply.1032.1, %multiply.18798.1 + %add.57.i928.1 = fadd float %add.57.i928, %multiply.53799.1 + %577 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3833867 + %578 = load i8, ptr addrspace(1) %577, align 1, !invariant.load !142 + %579 = sitofp i8 %578 to float + %multiply.18798.2 = fmul float %579, 0x3FC3BF2820000000 + %multiply.53799.2 = fmul float %multiply.1032.2, %multiply.18798.2 + %add.57.i928.2 = fadd float %add.57.i928.1, %multiply.53799.2 + %580 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3834371 + %581 = load i8, ptr addrspace(1) %580, align 1, !invariant.load !142 + %582 = sitofp i8 %581 to float + %multiply.18798.3 = fmul float %582, 0x3FC3BF2820000000 + %multiply.53799.3 = fmul float %multiply.1032.3, %multiply.18798.3 + %add.57.i928.3 = fadd float %add.57.i928.2, %multiply.53799.3 + %583 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3834375 + %584 
= load i8, ptr addrspace(1) %583, align 1, !invariant.load !142 + %585 = sitofp i8 %584 to float + %multiply.18798.4 = fmul float %585, 0x3FC3BF2820000000 + %multiply.53799.4 = fmul float %multiply.1032.4, %multiply.18798.4 + %add.57.i928.4 = fadd float %add.57.i928.3, %multiply.53799.4 + %586 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3834379 + %587 = load i8, ptr addrspace(1) %586, align 1, !invariant.load !142 + %588 = sitofp i8 %587 to float + %multiply.18798.5 = fmul float %588, 0x3FC3BF2820000000 + %multiply.53799.5 = fmul float %multiply.1032.5, %multiply.18798.5 + %add.57.i928.5 = fadd float %add.57.i928.4, %multiply.53799.5 + %589 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3834883 + %590 = load i8, ptr addrspace(1) %589, align 1, !invariant.load !142 + %591 = sitofp i8 %590 to float + %multiply.18798.6 = fmul float %591, 0x3FC3BF2820000000 + %multiply.53799.6 = fmul float %multiply.1032.6, %multiply.18798.6 + %add.57.i928.6 = fadd float %add.57.i928.5, %multiply.53799.6 + %592 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3834887 + %593 = load i8, ptr addrspace(1) %592, align 1, !invariant.load !142 + %594 = sitofp i8 %593 to float + %multiply.18798.7 = fmul float %594, 0x3FC3BF2820000000 + %multiply.53799.7 = fmul float %multiply.1032.7, %multiply.18798.7 + %add.57.i928.7 = fadd float %add.57.i928.6, %multiply.53799.7 + %595 = getelementptr inbounds i8, ptr addrspace(1) %144, i64 3834891 + %596 = load i8, ptr addrspace(1) %595, align 1, !invariant.load !142 + %597 = sitofp i8 %596 to float + %multiply.18798.8 = fmul float %597, 0x3FC3BF2820000000 + %multiply.53799.8 = fmul float %multiply.1032.8, %multiply.18798.8 + %add.57.i928.8 = fadd float %add.57.i928.7, %multiply.53799.8 + %598 = fptrunc float %add.57.i928.8 to half + %599 = getelementptr inbounds half, ptr addrspace(1) %100, i64 15 + store half %598, ptr addrspace(1) %599, align 2 + ret void +} + +attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) } +attributes #1 = { mustprogress nofree nosync nounwind willreturn memory(argmem: readwrite) } + +!140 = !{i32 0, i32 8658} +!141 = !{i32 0, i32 64} +!142 = !{} diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/overlapping_chains.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/overlapping_chains.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/overlapping_chains.ll @@ -0,0 +1,17 @@ +; RUN: opt -mtriple=nvptx64-nvidia-cuda -passes=load-store-vectorizer -S -o - %s | FileCheck %s + +; CHECK-LABEL: @overlapping_stores +; CHECK: store i16 +; CHECK: store i16 +; CHECK: store i16 +define void @overlapping_stores(ptr nocapture align 2 %ptr) { + %ptr0 = getelementptr i16, ptr %ptr, i64 0 + %ptr1 = getelementptr i8, ptr %ptr, i64 1 + %ptr2 = getelementptr i16, ptr %ptr, i64 1 + + store i16 0, ptr %ptr0, align 2 + store i16 0, ptr %ptr1, align 1 + store i16 0, ptr %ptr2, align 2 + + ret void +} diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/vectorize_i16.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/vectorize_i16.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/vectorize_i16.ll @@ -0,0 +1,17 @@ +; RUN: opt -mtriple=nvptx64-nvidia-cuda -passes=load-store-vectorizer -S -o - %s | FileCheck %s + +; CHECK-LABEL: @int16x2 +; CHECK: load <2 x i16> +; CHECK: store <2 x i16> +define void @int16x2(ptr nocapture align 4 %ptr) { + %ptr0 = getelementptr i16, ptr %ptr, i64 0 + %ptr1 = getelementptr i16, ptr %ptr, 
i64 1 + + %l0 = load i16, ptr %ptr0, align 4 + %l1 = load i16, ptr %ptr1, align 2 + + store i16 %l1, ptr %ptr0, align 4 + store i16 %l0, ptr %ptr1, align 2 + + ret void +} diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/vectorize_i24.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/vectorize_i24.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/vectorize_i24.ll @@ -0,0 +1,21 @@ +; RUN: opt -mtriple=nvptx64-nvidia-cuda -passes=load-store-vectorizer -S -o - %s | FileCheck %s + +; We don't need to vectorize this. Just make sure it doesn't crash. + +; CHECK-LABEL: @int24x2 +; CHECK: load i24 +; CHECK: load i24 +; CHECK: store i24 +; CHECK: store i24 +define void @int24x2(ptr nocapture align 4 %ptr) { + %ptr0 = getelementptr i24, ptr %ptr, i64 0 + %ptr1 = getelementptr i24, ptr %ptr, i64 1 + + %l0 = load i24, ptr %ptr0, align 4 + %l1 = load i24, ptr %ptr1, align 1 + + store i24 %l1, ptr %ptr0, align 4 + store i24 %l0, ptr %ptr1, align 1 + + ret void +} diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/vectorize_i8.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/vectorize_i8.ll --- a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/vectorize_i8.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/vectorize_i8.ll @@ -41,8 +41,10 @@ ret void ; CHECK-LABEL: @int8x3a4 -; CHECK: load <3 x i8> -; CHECK: store <3 x i8> +; CHECK: load <2 x i8> +; CHECK: load i8 +; CHECK: store <2 x i8> +; CHECK: store i8 } define void @int8x12a4(ptr nocapture align 4 %ptr) { diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/vectorize_vectors.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/vectorize_vectors.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/vectorize_vectors.ll @@ -0,0 +1,17 @@ +; RUN: opt -mtriple=nvptx64-nvidia-cuda -passes=load-store-vectorizer -S -o - %s | FileCheck %s + +; CHECK-LABEL: @int8x3Plus1 +; CHECK: load <4 x i8> +; CHECK: store <4 x i8> +define void @int8x3Plus1(ptr nocapture align 4 %ptr) { + %ptr0 = getelementptr i8, ptr %ptr, i64 0 + %ptr3 = getelementptr i8, ptr %ptr, i64 3 + + %l0 = load <3 x i8>, ptr %ptr0, align 4 + %l1 = load i8, ptr %ptr3, align 1 + + store <3 x i8> <i8 0, i8 0, i8 0>, ptr %ptr0, align 4 + store i8 0, ptr %ptr3, align 1 + + ret void +} diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/correct-order.ll b/llvm/test/Transforms/LoadStoreVectorizer/X86/correct-order.ll --- a/llvm/test/Transforms/LoadStoreVectorizer/X86/correct-order.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/correct-order.ll @@ -7,12 +7,13 @@ define void @correct_order(ptr noalias %ptr) { ; CHECK-LABEL: @correct_order( ; CHECK-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 1 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr [[NEXT_GEP1]], align 4 -; CHECK-NEXT: [[L11:%.*]] = extractelement <2 x i32> [[TMP2]], i32 0 -; CHECK-NEXT: [[L42:%.*]] = extractelement <2 x i32> [[TMP2]], i32 1 -; CHECK-NEXT: [[L2:%.*]] = load i32, ptr [[PTR]], align 4 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr [[PTR]], align 4 +; CHECK-NEXT: [[L21:%.*]] = extractelement <2 x i32> [[TMP1]], i32 0 +; CHECK-NEXT: [[L12:%.*]] = extractelement <2 x i32> [[TMP1]], i32 1 ; CHECK-NEXT: store <2 x i32> zeroinitializer, ptr [[PTR]], align 4 -; CHECK-NEXT: [[L3:%.*]] = load i32, ptr [[NEXT_GEP1]], align 4 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr [[NEXT_GEP1]], align 4 +; CHECK-NEXT: [[L33:%.*]] = extractelement <2 x i32> [[TMP2]], i32 0 +; CHECK-NEXT: [[L44:%.*]] = extractelement
<2 x i32> [[TMP2]], i32 1 ; CHECK-NEXT: ret void ; %next.gep1 = getelementptr i32, ptr %ptr, i64 1 diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/subchain-interleaved.ll b/llvm/test/Transforms/LoadStoreVectorizer/X86/subchain-interleaved.ll --- a/llvm/test/Transforms/LoadStoreVectorizer/X86/subchain-interleaved.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/subchain-interleaved.ll @@ -8,9 +8,8 @@ ; CHECK-LABEL: @interleave_2L_2S( ; CHECK: load <2 x i32> -; CHECK: load i32 ; CHECK: store <2 x i32> -; CHECK: load i32 +; CHECK: load <2 x i32> define void @interleave_2L_2S(ptr noalias %ptr) { %next.gep1 = getelementptr i32, ptr %ptr, i64 1 %next.gep2 = getelementptr i32, ptr %ptr, i64 2 @@ -26,9 +25,9 @@ } ; CHECK-LABEL: @interleave_3L_2S_1L( -; CHECK: load <3 x i32> +; CHECK: load <2 x i32> ; CHECK: store <2 x i32> -; CHECK: load i32 +; CHECK: load <2 x i32> define void @interleave_3L_2S_1L(ptr noalias %ptr) { %next.gep1 = getelementptr i32, ptr %ptr, i64 1 @@ -65,7 +64,8 @@ ; CHECK-LABEL: @chain_prefix_suffix( ; CHECK: load <2 x i32> ; CHECK: store <2 x i32> -; CHECK: load <3 x i32> +; CHECK: load <2 x i32> +; CHECK: load i32 define void @chain_prefix_suffix(ptr noalias %ptr) { %next.gep1 = getelementptr i32, ptr %ptr, i64 1 %next.gep2 = getelementptr i32, ptr %ptr, i64 2 @@ -82,22 +82,17 @@ ret void } -; FIXME: If the chain is too long and TLI says misaligned is not fast, -; then LSV fails to vectorize anything in that chain. -; To reproduce below, add a tmp5 (ptr+4) and load tmp5 into l6 and l7. - ; CHECK-LABEL: @interleave_get_longest -; CHECK: load <3 x i32> -; CHECK: load i32 +; CHECK: load <2 x i32> ; CHECK: store <2 x i32> zeroinitializer -; CHECK: load i32 -; CHECK: load i32 -; CHECK: load i32 +; CHECK: load <2 x i32> +; CHECK: load <2 x i32> define void @interleave_get_longest(ptr noalias %ptr) { %tmp2 = getelementptr i32, ptr %ptr, i64 1 %tmp3 = getelementptr i32, ptr %ptr, i64 2 %tmp4 = getelementptr i32, ptr %ptr, i64 3 + %tmp5 = getelementptr i32, ptr %ptr, i64 4 %l1 = load i32, ptr %tmp2, align 4 %l2 = load i32, ptr %ptr, align 4 @@ -106,8 +101,32 @@ %l3 = load i32, ptr %tmp2, align 4 %l4 = load i32, ptr %tmp3, align 4 %l5 = load i32, ptr %tmp4, align 4 - %l6 = load i32, ptr %tmp4, align 4 - %l7 = load i32, ptr %tmp4, align 4 + %l6 = load i32, ptr %tmp5, align 4 + %l7 = load i32, ptr %tmp5, align 4 ret void } + +; CHECK-LABEL: @interleave_get_longest_aligned +; CHECK: load <2 x i32> +; CHECK: store <2 x i32> zeroinitializer +; CHECK: load <4 x i32> + +define void @interleave_get_longest_aligned(ptr noalias %ptr) { + %tmp2 = getelementptr i32, ptr %ptr, i64 1 + %tmp3 = getelementptr i32, ptr %ptr, i64 2 + %tmp4 = getelementptr i32, ptr %ptr, i64 3 + %tmp5 = getelementptr i32, ptr %ptr, i64 4 + + %l1 = load i32, ptr %tmp2, align 4 + %l2 = load i32, ptr %ptr, align 4 + store i32 0, ptr %tmp2, align 4 + store i32 0, ptr %ptr, align 4 + %l3 = load i32, ptr %tmp2, align 16 + %l4 = load i32, ptr %tmp3, align 4 + %l5 = load i32, ptr %tmp4, align 8 + %l6 = load i32, ptr %tmp5, align 4 + %l7 = load i32, ptr %tmp5, align 4 + + ret void +} \ No newline at end of file diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/vector-scalar.ll b/llvm/test/Transforms/LoadStoreVectorizer/X86/vector-scalar.ll --- a/llvm/test/Transforms/LoadStoreVectorizer/X86/vector-scalar.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/vector-scalar.ll @@ -4,8 +4,7 @@ ; Check that the LoadStoreVectorizer does not crash due to not differentiating <1 x T> and T. 
; CHECK-LABEL: @vector_scalar( -; CHECK: store double -; CHECK: store <1 x double> +; CHECK: store <2 x double> define void @vector_scalar(ptr %ptr, double %a, <1 x double> %b) { %1 = getelementptr <1 x double>, ptr %ptr, i32 1 store double %a, ptr %ptr, align 8 diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/vectorize-i8-nested-add-inseltpoison.ll b/llvm/test/Transforms/LoadStoreVectorizer/X86/vectorize-i8-nested-add-inseltpoison.ll --- a/llvm/test/Transforms/LoadStoreVectorizer/X86/vectorize-i8-nested-add-inseltpoison.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/vectorize-i8-nested-add-inseltpoison.ll @@ -55,53 +55,6 @@ ret void } -define void @ld_v4i8_add_nuw(i32 %v0, i32 %v1, ptr %src, ptr %dst) { -; CHECK-LABEL: @ld_v4i8_add_nuw( -; CHECK-NEXT: bb: -; CHECK-NEXT: [[TMP:%.*]] = add nuw i32 [[V0:%.*]], -1 -; CHECK-NEXT: [[TMP1:%.*]] = add nuw i32 [[V1:%.*]], [[TMP]] -; CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[TMP2]] -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, ptr [[TMP3]], align 1 -; CHECK-NEXT: [[TMP41:%.*]] = extractelement <4 x i8> [[TMP1]], i32 0 -; CHECK-NEXT: [[TMP82:%.*]] = extractelement <4 x i8> [[TMP1]], i32 1 -; CHECK-NEXT: [[TMP133:%.*]] = extractelement <4 x i8> [[TMP1]], i32 2 -; CHECK-NEXT: [[TMP184:%.*]] = extractelement <4 x i8> [[TMP1]], i32 3 -; CHECK-NEXT: [[TMP19:%.*]] = insertelement <4 x i8> poison, i8 [[TMP41]], i32 0 -; CHECK-NEXT: [[TMP20:%.*]] = insertelement <4 x i8> [[TMP19]], i8 [[TMP82]], i32 1 -; CHECK-NEXT: [[TMP21:%.*]] = insertelement <4 x i8> [[TMP20]], i8 [[TMP133]], i32 2 -; CHECK-NEXT: [[TMP22:%.*]] = insertelement <4 x i8> [[TMP21]], i8 [[TMP184]], i32 3 -; CHECK-NEXT: store <4 x i8> [[TMP22]], ptr [[DST:%.*]], align 4 -; CHECK-NEXT: ret void -; -bb: - %tmp = add nuw i32 %v0, -1 - %tmp1 = add nuw i32 %v1, %tmp - %tmp2 = zext i32 %tmp1 to i64 - %tmp3 = getelementptr inbounds i8, ptr %src, i64 %tmp2 - %tmp4 = load i8, ptr %tmp3, align 1 - %tmp5 = add nuw i32 %v1, %v0 - %tmp6 = zext i32 %tmp5 to i64 - %tmp7 = getelementptr inbounds i8, ptr %src, i64 %tmp6 - %tmp8 = load i8, ptr %tmp7, align 1 - %tmp9 = add nuw i32 %v0, 1 - %tmp10 = add nuw i32 %v1, %tmp9 - %tmp11 = zext i32 %tmp10 to i64 - %tmp12 = getelementptr inbounds i8, ptr %src, i64 %tmp11 - %tmp13 = load i8, ptr %tmp12, align 1 - %tmp14 = add nuw i32 %v0, 2 - %tmp15 = add nuw i32 %v1, %tmp14 - %tmp16 = zext i32 %tmp15 to i64 - %tmp17 = getelementptr inbounds i8, ptr %src, i64 %tmp16 - %tmp18 = load i8, ptr %tmp17, align 1 - %tmp19 = insertelement <4 x i8> poison, i8 %tmp4, i32 0 - %tmp20 = insertelement <4 x i8> %tmp19, i8 %tmp8, i32 1 - %tmp21 = insertelement <4 x i8> %tmp20, i8 %tmp13, i32 2 - %tmp22 = insertelement <4 x i8> %tmp21, i8 %tmp18, i32 3 - store <4 x i8> %tmp22, ptr %dst - ret void -} - ; Make sure we don't vectorize the loads below because the source of ; sext instructions doesn't have the nsw flag. 
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/vectorize-i8-nested-add.ll b/llvm/test/Transforms/LoadStoreVectorizer/X86/vectorize-i8-nested-add.ll --- a/llvm/test/Transforms/LoadStoreVectorizer/X86/vectorize-i8-nested-add.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/vectorize-i8-nested-add.ll @@ -55,53 +55,6 @@ ret void } -define void @ld_v4i8_add_nuw(i32 %v0, i32 %v1, ptr %src, ptr %dst) { -; CHECK-LABEL: @ld_v4i8_add_nuw( -; CHECK-NEXT: bb: -; CHECK-NEXT: [[TMP:%.*]] = add nuw i32 [[V0:%.*]], -1 -; CHECK-NEXT: [[TMP1:%.*]] = add nuw i32 [[V1:%.*]], [[TMP]] -; CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[TMP2]] -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, ptr [[TMP3]], align 1 -; CHECK-NEXT: [[TMP41:%.*]] = extractelement <4 x i8> [[TMP1]], i32 0 -; CHECK-NEXT: [[TMP82:%.*]] = extractelement <4 x i8> [[TMP1]], i32 1 -; CHECK-NEXT: [[TMP133:%.*]] = extractelement <4 x i8> [[TMP1]], i32 2 -; CHECK-NEXT: [[TMP184:%.*]] = extractelement <4 x i8> [[TMP1]], i32 3 -; CHECK-NEXT: [[TMP19:%.*]] = insertelement <4 x i8> undef, i8 [[TMP41]], i32 0 -; CHECK-NEXT: [[TMP20:%.*]] = insertelement <4 x i8> [[TMP19]], i8 [[TMP82]], i32 1 -; CHECK-NEXT: [[TMP21:%.*]] = insertelement <4 x i8> [[TMP20]], i8 [[TMP133]], i32 2 -; CHECK-NEXT: [[TMP22:%.*]] = insertelement <4 x i8> [[TMP21]], i8 [[TMP184]], i32 3 -; CHECK-NEXT: store <4 x i8> [[TMP22]], ptr [[DST:%.*]] -; CHECK-NEXT: ret void -; -bb: - %tmp = add nuw i32 %v0, -1 - %tmp1 = add nuw i32 %v1, %tmp - %tmp2 = zext i32 %tmp1 to i64 - %tmp3 = getelementptr inbounds i8, ptr %src, i64 %tmp2 - %tmp4 = load i8, ptr %tmp3, align 1 - %tmp5 = add nuw i32 %v1, %v0 - %tmp6 = zext i32 %tmp5 to i64 - %tmp7 = getelementptr inbounds i8, ptr %src, i64 %tmp6 - %tmp8 = load i8, ptr %tmp7, align 1 - %tmp9 = add nuw i32 %v0, 1 - %tmp10 = add nuw i32 %v1, %tmp9 - %tmp11 = zext i32 %tmp10 to i64 - %tmp12 = getelementptr inbounds i8, ptr %src, i64 %tmp11 - %tmp13 = load i8, ptr %tmp12, align 1 - %tmp14 = add nuw i32 %v0, 2 - %tmp15 = add nuw i32 %v1, %tmp14 - %tmp16 = zext i32 %tmp15 to i64 - %tmp17 = getelementptr inbounds i8, ptr %src, i64 %tmp16 - %tmp18 = load i8, ptr %tmp17, align 1 - %tmp19 = insertelement <4 x i8> undef, i8 %tmp4, i32 0 - %tmp20 = insertelement <4 x i8> %tmp19, i8 %tmp8, i32 1 - %tmp21 = insertelement <4 x i8> %tmp20, i8 %tmp13, i32 2 - %tmp22 = insertelement <4 x i8> %tmp21, i8 %tmp18, i32 3 - store <4 x i8> %tmp22, ptr %dst - ret void -} - ; Apply different operand orders for the nested add sequences define void @ld_v4i8_add_nsw_operand_orders(i32 %v0, i32 %v1, ptr %src, ptr %dst) { ; CHECK-LABEL: @ld_v4i8_add_nsw_operand_orders( @@ -150,54 +103,6 @@ ret void } -; Apply different operand orders for the nested add sequences -define void @ld_v4i8_add_nuw_operand_orders(i32 %v0, i32 %v1, ptr %src, ptr %dst) { -; CHECK-LABEL: @ld_v4i8_add_nuw_operand_orders( -; CHECK-NEXT: bb: -; CHECK-NEXT: [[TMP:%.*]] = add nuw i32 [[V0:%.*]], -1 -; CHECK-NEXT: [[TMP1:%.*]] = add nuw i32 [[V1:%.*]], [[TMP]] -; CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[TMP2]] -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, ptr [[TMP3]], align 1 -; CHECK-NEXT: [[TMP41:%.*]] = extractelement <4 x i8> [[TMP1]], i32 0 -; CHECK-NEXT: [[TMP82:%.*]] = extractelement <4 x i8> [[TMP1]], i32 1 -; CHECK-NEXT: [[TMP133:%.*]] = extractelement <4 x i8> [[TMP1]], i32 2 -; CHECK-NEXT: [[TMP184:%.*]] = 
extractelement <4 x i8> [[TMP1]], i32 3 -; CHECK-NEXT: [[TMP19:%.*]] = insertelement <4 x i8> undef, i8 [[TMP41]], i32 0 -; CHECK-NEXT: [[TMP20:%.*]] = insertelement <4 x i8> [[TMP19]], i8 [[TMP82]], i32 1 -; CHECK-NEXT: [[TMP21:%.*]] = insertelement <4 x i8> [[TMP20]], i8 [[TMP133]], i32 2 -; CHECK-NEXT: [[TMP22:%.*]] = insertelement <4 x i8> [[TMP21]], i8 [[TMP184]], i32 3 -; CHECK-NEXT: store <4 x i8> [[TMP22]], ptr [[DST:%.*]] -; CHECK-NEXT: ret void -; -bb: - %tmp = add nuw i32 %v0, -1 - %tmp1 = add nuw i32 %v1, %tmp - %tmp2 = zext i32 %tmp1 to i64 - %tmp3 = getelementptr inbounds i8, ptr %src, i64 %tmp2 - %tmp4 = load i8, ptr %tmp3, align 1 - %tmp5 = add nuw i32 %v0, %v1 - %tmp6 = zext i32 %tmp5 to i64 - %tmp7 = getelementptr inbounds i8, ptr %src, i64 %tmp6 - %tmp8 = load i8, ptr %tmp7, align 1 - %tmp9 = add nuw i32 %v0, 1 - %tmp10 = add nuw i32 %tmp9, %v1 - %tmp11 = zext i32 %tmp10 to i64 - %tmp12 = getelementptr inbounds i8, ptr %src, i64 %tmp11 - %tmp13 = load i8, ptr %tmp12, align 1 - %tmp14 = add nuw i32 %v0, 2 - %tmp15 = add nuw i32 %v1, %tmp14 - %tmp16 = zext i32 %tmp15 to i64 - %tmp17 = getelementptr inbounds i8, ptr %src, i64 %tmp16 - %tmp18 = load i8, ptr %tmp17, align 1 - %tmp19 = insertelement <4 x i8> undef, i8 %tmp4, i32 0 - %tmp20 = insertelement <4 x i8> %tmp19, i8 %tmp8, i32 1 - %tmp21 = insertelement <4 x i8> %tmp20, i8 %tmp13, i32 2 - %tmp22 = insertelement <4 x i8> %tmp21, i8 %tmp18, i32 3 - store <4 x i8> %tmp22, ptr %dst - ret void -} - define void @ld_v4i8_add_known_bits(i32 %ind0, i32 %ind1, ptr %src, ptr %dst) { ; CHECK-LABEL: @ld_v4i8_add_known_bits( ; CHECK-NEXT: bb: @@ -211,15 +116,19 @@ ; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[V1]], [[V0]] ; CHECK-NEXT: [[TMP6:%.*]] = sext i32 [[TMP5]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[TMP6]] -; CHECK-NEXT: [[TMP1:%.*]] = load <3 x i8>, ptr [[TMP7]], align 1 -; CHECK-NEXT: [[TMP81:%.*]] = extractelement <3 x i8> [[TMP1]], i32 0 -; CHECK-NEXT: [[TMP132:%.*]] = extractelement <3 x i8> [[TMP1]], i32 1 -; CHECK-NEXT: [[TMP183:%.*]] = extractelement <3 x i8> [[TMP1]], i32 2 +; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i8>, ptr [[TMP7]], align 1 +; CHECK-NEXT: [[TMP81:%.*]] = extractelement <2 x i8> [[TMP0]], i32 0 +; CHECK-NEXT: [[TMP132:%.*]] = extractelement <2 x i8> [[TMP0]], i32 1 +; CHECK-NEXT: [[TMP14:%.*]] = add i32 [[V0]], 2 +; CHECK-NEXT: [[TMP15:%.*]] = add i32 [[V1]], [[TMP14]] +; CHECK-NEXT: [[TMP16:%.*]] = sext i32 [[TMP15]] to i64 +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[TMP16]] +; CHECK-NEXT: [[TMP18:%.*]] = load i8, ptr [[TMP17]], align 1 ; CHECK-NEXT: [[TMP19:%.*]] = insertelement <4 x i8> undef, i8 [[TMP4]], i32 0 ; CHECK-NEXT: [[TMP20:%.*]] = insertelement <4 x i8> [[TMP19]], i8 [[TMP81]], i32 1 ; CHECK-NEXT: [[TMP21:%.*]] = insertelement <4 x i8> [[TMP20]], i8 [[TMP132]], i32 2 -; CHECK-NEXT: [[TMP22:%.*]] = insertelement <4 x i8> [[TMP21]], i8 [[TMP183]], i32 3 -; CHECK-NEXT: store <4 x i8> [[TMP22]], ptr [[DST:%.*]] +; CHECK-NEXT: [[TMP22:%.*]] = insertelement <4 x i8> [[TMP21]], i8 [[TMP18]], i32 3 +; CHECK-NEXT: store <4 x i8> [[TMP22]], ptr [[DST:%.*]], align 4 ; CHECK-NEXT: ret void ; bb: @@ -383,15 +292,19 @@ ; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[V1]], [[V0]] ; CHECK-NEXT: [[TMP6:%.*]] = sext i32 [[TMP5]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[TMP6]] -; CHECK-NEXT: [[TMP1:%.*]] = load <3 x i8>, ptr [[TMP7]], align 1 -; CHECK-NEXT: [[TMP81:%.*]] = extractelement <3 x i8> [[TMP1]], 
i32 0 -; CHECK-NEXT: [[TMP132:%.*]] = extractelement <3 x i8> [[TMP1]], i32 1 -; CHECK-NEXT: [[TMP183:%.*]] = extractelement <3 x i8> [[TMP1]], i32 2 +; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i8>, ptr [[TMP7]], align 1 +; CHECK-NEXT: [[TMP81:%.*]] = extractelement <2 x i8> [[TMP0]], i32 0 +; CHECK-NEXT: [[TMP132:%.*]] = extractelement <2 x i8> [[TMP0]], i32 1 +; CHECK-NEXT: [[TMP14:%.*]] = add nsw i32 [[V0]], 2 +; CHECK-NEXT: [[TMP15:%.*]] = add i32 [[V1]], [[TMP14]] +; CHECK-NEXT: [[TMP16:%.*]] = sext i32 [[TMP15]] to i64 +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[TMP16]] +; CHECK-NEXT: [[TMP18:%.*]] = load i8, ptr [[TMP17]], align 1 ; CHECK-NEXT: [[TMP19:%.*]] = insertelement <4 x i8> undef, i8 [[TMP4]], i32 0 ; CHECK-NEXT: [[TMP20:%.*]] = insertelement <4 x i8> [[TMP19]], i8 [[TMP81]], i32 1 ; CHECK-NEXT: [[TMP21:%.*]] = insertelement <4 x i8> [[TMP20]], i8 [[TMP132]], i32 2 -; CHECK-NEXT: [[TMP22:%.*]] = insertelement <4 x i8> [[TMP21]], i8 [[TMP183]], i32 3 -; CHECK-NEXT: store <4 x i8> [[TMP22]], ptr [[DST:%.*]] +; CHECK-NEXT: [[TMP22:%.*]] = insertelement <4 x i8> [[TMP21]], i8 [[TMP18]], i32 3 +; CHECK-NEXT: store <4 x i8> [[TMP22]], ptr [[DST:%.*]], align 4 ; CHECK-NEXT: ret void ; bb: diff --git a/llvm/test/Transforms/LoadStoreVectorizer/int_sideeffect.ll b/llvm/test/Transforms/LoadStoreVectorizer/int_sideeffect.ll --- a/llvm/test/Transforms/LoadStoreVectorizer/int_sideeffect.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/int_sideeffect.ll @@ -78,9 +78,9 @@ ; CHECK-NEXT: [[P2:%.*]] = getelementptr float, ptr [[P]], i64 2 ; CHECK-NEXT: [[P3:%.*]] = getelementptr float, ptr [[P]], i64 3 ; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[P]], align 16 +; CHECK-NEXT: call void @foo() #[[ATTR2:[0-9]+]] ; CHECK-NEXT: [[L1:%.*]] = load float, ptr [[P1]], align 4 ; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[P2]], align 4 -; CHECK-NEXT: call void @foo() #[[ATTR2:[0-9]+]] ; CHECK-NEXT: [[L3:%.*]] = load float, ptr [[P3]], align 4 ; CHECK-NEXT: store float [[L0]], ptr [[P]], align 16 ; CHECK-NEXT: call void @foo() #[[ATTR2]] @@ -93,9 +93,9 @@ %p2 = getelementptr float, ptr %p, i64 2 %p3 = getelementptr float, ptr %p, i64 3 %l0 = load float, ptr %p, align 16 + call void @foo() inaccessiblememonly nounwind %l1 = load float, ptr %p1 %l2 = load float, ptr %p2 - call void @foo() inaccessiblememonly nounwind %l3 = load float, ptr %p3 store float %l0, ptr %p, align 16 call void @foo() inaccessiblememonly nounwind @@ -111,9 +111,9 @@ ; CHECK-NEXT: [[P2:%.*]] = getelementptr float, ptr [[P]], i64 2 ; CHECK-NEXT: [[P3:%.*]] = getelementptr float, ptr [[P]], i64 3 ; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[P]], align 16 +; CHECK-NEXT: call void @foo() #[[ATTR3:[0-9]+]] ; CHECK-NEXT: [[L1:%.*]] = load float, ptr [[P1]], align 4 ; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[P2]], align 4 -; CHECK-NEXT: call void @foo() #[[ATTR3:[0-9]+]] ; CHECK-NEXT: [[L3:%.*]] = load float, ptr [[P3]], align 4 ; CHECK-NEXT: store float [[L0]], ptr [[P]], align 16 ; CHECK-NEXT: call void @foo() #[[ATTR3]] @@ -126,9 +126,9 @@ %p2 = getelementptr float, ptr %p, i64 2 %p3 = getelementptr float, ptr %p, i64 3 %l0 = load float, ptr %p, align 16 + call void @foo() inaccessiblememonly willreturn %l1 = load float, ptr %p1 %l2 = load float, ptr %p2 - call void @foo() inaccessiblememonly willreturn %l3 = load float, ptr %p3 store float %l0, ptr %p, align 16 call void @foo() inaccessiblememonly willreturn