Index: lib/Target/X86/X86InterleavedAccess.h
===================================================================
--- lib/Target/X86/X86InterleavedAccess.h
+++ lib/Target/X86/X86InterleavedAccess.h
@@ -0,0 +1,85 @@
+//===---- X86InterleavedAccess.h - Define X86InterleavedAccessGroup class-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===---------------------------------------------------------------------===//
+//
+// This file declares the X86InterleavedAccessGroup class that represents and
+// manipulates a set of interleaved accesses.
+//
+//===---------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_X86_X86INTERLEAVEDACCESS_H
+#define LLVM_LIB_TARGET_X86_X86INTERLEAVEDACCESS_H
+
+#include "X86Subtarget.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Instructions.h"
+
+using namespace llvm;
+
+/// This class holds the necessary information to represent an interleaved
+/// access group and provides utilities to lower the group into
+/// X86-specific instructions/intrinsics.
+/// E.g. a group of interleaving access loads (Factor = 2; accessing every
+/// other element):
+///   %wide.vec = load <8 x i32>, <8 x i32>* %ptr
+///   %v0 = shuffle <8 x i32> %wide.vec, <8 x i32> undef, <0, 2, 4, 6>
+///   %v1 = shuffle <8 x i32> %wide.vec, <8 x i32> undef, <1, 3, 5, 7>
+class X86InterleavedAccessGroup {
+  /// Reference to the wide-load instruction of an interleaved access
+  /// group.
+  Instruction *const Inst;
+
+  /// Reference to the shuffle(s), consumer(s) of the (load) 'Inst'.
+  const ArrayRef<ShuffleVectorInst *> Shuffles;
+
+  /// Reference to the starting index of each user-shuffle.
+  const ArrayRef<unsigned> Indices;
+
+  /// Reference to the interleaving stride in terms of elements.
+  const unsigned Factor;
+
+  const X86Subtarget &Subtarget;
+
+  const DataLayout &DL;
+
+  IRBuilder<> &Builder;
+
+public:
+  X86InterleavedAccessGroup(Instruction *I,
+                            ArrayRef<ShuffleVectorInst *> Shuffs,
+                            ArrayRef<unsigned> Ind, const unsigned F,
+                            const X86Subtarget &STarget, IRBuilder<> &B)
+      : Inst(I), Shuffles(Shuffs), Indices(Ind), Factor(F), Subtarget(STarget),
+        DL(Inst->getModule()->getDataLayout()), Builder(B) {}
+
+  /// Returns true if this interleaved access group can be lowered into
+  /// x86-specific instructions/intrinsics, false otherwise.
+  bool isSupported() const;
+
+  /// Breaks down a vector 'Inst' of N elements into NumSubVectors
+  /// sub-vectors, each of type T.
+  bool decompose(Instruction *Inst, uint32_t NumSubVectors, VectorType *T,
+                 SmallVectorImpl<Instruction *> &DecomposedVectors);
+
+  /// Performs matrix transposition and returns the transposed-vectors.
+  /// E.g.
+  /// Input-Vectors:
+  ///   In-V0 = p1, p2
+  ///   In-V1 = q1, q2
+  /// Output-Vectors:
+  ///   Out-V0 = p1, q1
+  ///   Out-V1 = p2, q2
+  void transpose(ArrayRef<Instruction *> InputVectors,
+                 SmallVectorImpl<Value *> &TransposedVectors);
+
+  /// Lowers this interleaved access group into X86-specific
+  /// instructions/intrinsics.
+  bool lowerIntoOptimizedSequence();
+};
+
+#endif
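
To make the Factor = 2 example in the class comment above concrete, here is a minimal scalar sketch of what the wide load plus strided shuffles compute. It is illustrative only and not part of the patch; the values are hypothetical.

// Scalar model of the Factor = 2 interleaved access group from the class
// comment: a wide load of 8 elements followed by two strided shuffles is
// equivalent to de-interleaving every other element.
#include <array>
#include <cstdio>

int main() {
  // %wide.vec = load <8 x i32>, <8 x i32>* %ptr
  std::array<int, 8> WideVec = {10, 11, 20, 21, 30, 31, 40, 41};

  std::array<int, 4> V0, V1;
  for (int i = 0; i < 4; i++) {
    V0[i] = WideVec[2 * i];     // shuffle mask <0, 2, 4, 6>
    V1[i] = WideVec[2 * i + 1]; // shuffle mask <1, 3, 5, 7>
  }

  for (int v : V0) printf("%d ", v); // prints: 10 20 30 40
  printf("\n");
  for (int v : V1) printf("%d ", v); // prints: 11 21 31 41
  printf("\n");
}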
Index: lib/Target/X86/X86InterleavedAccess.cpp
===================================================================
--- lib/Target/X86/X86InterleavedAccess.cpp
+++ lib/Target/X86/X86InterleavedAccess.cpp
@@ -13,105 +13,150 @@
 //
 //===----------------------------------------------------------------------===//
 
+#include "X86InterleavedAccess.h"
 #include "X86ISelLowering.h"
 #include "X86TargetMachine.h"
+#include "llvm/IR/IRBuilder.h"
 
 using namespace llvm;
 
-/// Returns true if the interleaved access group represented by the shuffles
-/// is supported for the subtarget. Returns false otherwise.
-static bool isSupported(const X86Subtarget &SubTarget,
-                        const LoadInst *LI,
-                        const ArrayRef<ShuffleVectorInst *> &Shuffles,
-                        unsigned Factor) {
-
-  const DataLayout &DL = Shuffles[0]->getModule()->getDataLayout();
+/// Returns true if this interleaved access group is supported for the
+/// subtarget. Returns false otherwise.
+bool X86InterleavedAccessGroup::isSupported() const {
   VectorType *ShuffleVecTy = Shuffles[0]->getType();
   unsigned ShuffleVecSize = DL.getTypeSizeInBits(ShuffleVecTy);
   Type *ShuffleEltTy = ShuffleVecTy->getVectorElementType();
 
-  if (DL.getTypeSizeInBits(LI->getType()) < Factor * ShuffleVecSize)
+  if (DL.getTypeSizeInBits(Inst->getType()) < Factor * ShuffleVecSize)
     return false;
 
-  // Currently, lowering is supported for 64 bits on AVX.
-  if (!SubTarget.hasAVX() || ShuffleVecSize != 256 ||
-      DL.getTypeSizeInBits(ShuffleEltTy) != 64 ||
-      Factor != 4)
+  // Currently, lowering is supported only on AVX, for 256-bit shuffle
+  // vectors of 64-bit elements with an interleave factor of 4.
+  if (!Subtarget.hasAVX() || ShuffleVecSize != 256 ||
+      DL.getTypeSizeInBits(ShuffleEltTy) != 64 || Factor != 4)
     return false;
 
   return true;
 }
 
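isSupported() above only accepts an interleave factor of 4 with 256-bit shuffle vectors of 64-bit elements on AVX. Below is a sketch of a source-level shape that can give rise to this pattern; whether the loop vectorizer actually emits it depends on its cost model, so this is only an assumed example (all names are hypothetical, not from the patch).

// A stride-4 AoS walk over 64-bit fields: the qualifying shape for the
// supported case (Factor = 4, four <4 x double> shuffles of a wide load).
struct Pixel64 { double r, g, b, a; }; // four 64-bit fields

void sums(const Pixel64 *P, int N, double S[4]) {
  for (int i = 0; i < N; i++) {
    // Loop-vectorized at VF = 4, this may become, schematically:
    //   %wide.vec = load <16 x double>, <16 x double>* %ptr
    //   %r = shufflevector %wide.vec, undef, <0, 4, 8, 12>  ; Indices[0] = 0
    //   %g = shufflevector %wide.vec, undef, <1, 5, 9, 13>  ; Indices[1] = 1
    //   ... and so on for %b and %a.
    S[0] += P[i].r;
    S[1] += P[i].g;
    S[2] += P[i].b;
    S[3] += P[i].a;
  }
}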
-/// \brief Lower interleaved load(s) into target specific instructions/
-/// intrinsics. Lowering sequence varies depending on the vector-types, factor,
-/// number of shuffles and ISA.
-/// Currently, lowering is supported for 4x64 bits with Factor = 4 on AVX.
-bool X86TargetLowering::lowerInterleavedLoad(
-    LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
-    ArrayRef<unsigned> Indices, unsigned Factor) const {
-  assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
-         "Invalid interleave factor");
-  assert(!Shuffles.empty() && "Empty shufflevector input");
-  assert(Shuffles.size() == Indices.size() &&
-         "Unmatched number of shufflevectors and indices");
+/// Breaks down a vector 'VecInst' of N elements into NumSubVectors
+/// sub-vectors, each of type SubVecTy.
+bool X86InterleavedAccessGroup::decompose(
+    Instruction *VecInst, uint32_t NumSubVectors, VectorType *SubVecTy,
+    SmallVectorImpl<Instruction *> &DecomposedVectors) {
+  Type *VecTy = VecInst->getType();
+  uint32_t VecSize = DL.getTypeSizeInBits(SubVecTy);
+
+  assert(VecTy->isVectorTy() &&
+         DL.getTypeSizeInBits(VecTy) >= VecSize * NumSubVectors &&
+         "Invalid Inst-size!");
+  assert(VecTy->getVectorElementType() == SubVecTy->getVectorElementType() &&
+         "Element type mismatched!");
 
-  if (!isSupported(Subtarget, LI, Shuffles, Factor))
+  if (!isa<LoadInst>(VecInst))
     return false;
 
-  VectorType *ShuffleVecTy = Shuffles[0]->getType();
-
-  Type *VecBasePtrTy = ShuffleVecTy->getPointerTo(LI->getPointerAddressSpace());
-
-  IRBuilder<> Builder(LI);
-  SmallVector<Instruction *, 4> NewLoads;
-  SmallVector<Value *, 4> NewShuffles;
-  NewShuffles.resize(Factor);
+  LoadInst *LI = cast<LoadInst>(VecInst);
+  Type *VecBasePtrTy = SubVecTy->getPointerTo(LI->getPointerAddressSpace());
 
   Value *VecBasePtr =
       Builder.CreateBitCast(LI->getPointerOperand(), VecBasePtrTy);
 
-  // Generate 4 loads of type v4xT64
-  for (unsigned Part = 0; Part < Factor; Part++) {
+  // Generate NumSubVectors loads of type SubVecTy.
+  for (uint32_t i = 0; i < NumSubVectors; i++) {
     // TODO: Support inbounds GEP
-    Value *NewBasePtr =
-        Builder.CreateGEP(VecBasePtr, Builder.getInt32(Part));
+    Value *NewBasePtr = Builder.CreateGEP(VecBasePtr, Builder.getInt32(i));
     Instruction *NewLoad =
         Builder.CreateAlignedLoad(NewBasePtr, LI->getAlignment());
-    NewLoads.push_back(NewLoad);
+    DecomposedVectors.push_back(NewLoad);
   }
 
+  return true;
+}
+
+/// Performs matrix transposition and returns the transposed-vectors.
+/// E.g.
+/// Input-Vectors:
+///   In-V0 = p1, p2
+///   In-V1 = q1, q2
+/// Output-Vectors:
+///   Out-V0 = p1, q1
+///   Out-V1 = p2, q2
+void X86InterleavedAccessGroup::transpose(
+    ArrayRef<Instruction *> Matrix,
+    SmallVectorImpl<Value *> &TransposedMatrix) {
+  TransposedMatrix.resize(4);
+
   // dst = src1[0,1],src2[0,1]
   uint32_t IntMask1[] = {0, 1, 4, 5};
-  ArrayRef<uint32_t> ShuffleMask = makeArrayRef(IntMask1, 4);
-  Value *IntrVec1 =
-      Builder.CreateShuffleVector(NewLoads[0], NewLoads[2], ShuffleMask);
-  Value *IntrVec2 =
-      Builder.CreateShuffleVector(NewLoads[1], NewLoads[3], ShuffleMask);
+  ArrayRef<uint32_t> Mask = makeArrayRef(IntMask1, 4);
+  Value *IntrVec1 = Builder.CreateShuffleVector(Matrix[0], Matrix[2], Mask);
+  Value *IntrVec2 = Builder.CreateShuffleVector(Matrix[1], Matrix[3], Mask);
 
   // dst = src1[2,3],src2[2,3]
   uint32_t IntMask2[] = {2, 3, 6, 7};
-  ShuffleMask = makeArrayRef(IntMask2, 4);
-  Value *IntrVec3 =
-      Builder.CreateShuffleVector(NewLoads[0], NewLoads[2], ShuffleMask);
-  Value *IntrVec4 =
-      Builder.CreateShuffleVector(NewLoads[1], NewLoads[3], ShuffleMask);
+  Mask = makeArrayRef(IntMask2, 4);
+  Value *IntrVec3 = Builder.CreateShuffleVector(Matrix[0], Matrix[2], Mask);
+  Value *IntrVec4 = Builder.CreateShuffleVector(Matrix[1], Matrix[3], Mask);
 
   // dst = src1[0],src2[0],src1[2],src2[2]
   uint32_t IntMask3[] = {0, 4, 2, 6};
-  ShuffleMask = makeArrayRef(IntMask3, 4);
-  NewShuffles[0] = Builder.CreateShuffleVector(IntrVec1, IntrVec2, ShuffleMask);
-  NewShuffles[2] = Builder.CreateShuffleVector(IntrVec3, IntrVec4, ShuffleMask);
+  Mask = makeArrayRef(IntMask3, 4);
+  TransposedMatrix[0] = Builder.CreateShuffleVector(IntrVec1, IntrVec2, Mask);
+  TransposedMatrix[2] = Builder.CreateShuffleVector(IntrVec3, IntrVec4, Mask);
 
   // dst = src1[1],src2[1],src1[3],src2[3]
   uint32_t IntMask4[] = {1, 5, 3, 7};
-  ShuffleMask = makeArrayRef(IntMask4, 4);
-  NewShuffles[1] = Builder.CreateShuffleVector(IntrVec1, IntrVec2, ShuffleMask);
-  NewShuffles[3] =
-      Builder.CreateShuffleVector(IntrVec3, IntrVec4, ShuffleMask);
-
-  for (unsigned i = 0; i < Shuffles.size(); i++) {
-    unsigned Index = Indices[i];
-    Shuffles[i]->replaceAllUsesWith(NewShuffles[Index]);
-  }
+  Mask = makeArrayRef(IntMask4, 4);
+  TransposedMatrix[1] = Builder.CreateShuffleVector(IntrVec1, IntrVec2, Mask);
+  TransposedMatrix[3] = Builder.CreateShuffleVector(IntrVec3, IntrVec4, Mask);
+}
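The net effect of the two shuffle stages in transpose() is a 4x4 matrix transpose, which is easy to check by simulating the four masks on plain arrays. The sketch below is illustrative only; it assumes shufflevector's documented semantics, where the two operands are concatenated and mask index i selects lane i of that concatenation (0-3 from the first operand, 4-7 from the second).

// Standalone simulation of the shuffle network in
// X86InterleavedAccessGroup::transpose().
#include <array>
#include <cassert>
#include <cstdint>

using Vec4 = std::array<uint32_t, 4>;

static Vec4 shuffle(const Vec4 &A, const Vec4 &B, const std::array<int, 4> &M) {
  Vec4 R;
  for (int i = 0; i < 4; i++)
    R[i] = M[i] < 4 ? A[M[i]] : B[M[i] - 4]; // 0-3 pick from A, 4-7 from B
  return R;
}

int main() {
  // Encode element (row, col) of the input matrix as 10*row + col.
  Vec4 M0 = {0, 1, 2, 3}, M1 = {10, 11, 12, 13},
       M2 = {20, 21, 22, 23}, M3 = {30, 31, 32, 33};

  // Stage 1: combine 128-bit halves (masks IntMask1/IntMask2 in the patch).
  Vec4 V1 = shuffle(M0, M2, {0, 1, 4, 5}); // src1[0,1],src2[0,1]
  Vec4 V2 = shuffle(M1, M3, {0, 1, 4, 5});
  Vec4 V3 = shuffle(M0, M2, {2, 3, 6, 7}); // src1[2,3],src2[2,3]
  Vec4 V4 = shuffle(M1, M3, {2, 3, 6, 7});

  // Stage 2: unpack-like interleaves (masks IntMask3/IntMask4).
  Vec4 T0 = shuffle(V1, V2, {0, 4, 2, 6});
  Vec4 T1 = shuffle(V1, V2, {1, 5, 3, 7});
  Vec4 T2 = shuffle(V3, V4, {0, 4, 2, 6});
  Vec4 T3 = shuffle(V3, V4, {1, 5, 3, 7});

  // Row i of the result holds column i of the input: a 4x4 transpose.
  assert((T0 == Vec4{0, 10, 20, 30}));
  assert((T1 == Vec4{1, 11, 21, 31}));
  assert((T2 == Vec4{2, 12, 22, 32}));
  assert((T3 == Vec4{3, 13, 23, 33}));
}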
+
+/// Lowers this interleaved access group into X86-specific
+/// instructions/intrinsics.
+bool X86InterleavedAccessGroup::lowerIntoOptimizedSequence() {
+  SmallVector<Instruction *, 4> DecomposedVectors;
+  VectorType *VecTy = Shuffles[0]->getType();
+  // Try to break the wide load down into target-register-sized loads.
+  if (!decompose(Inst, Factor, VecTy, DecomposedVectors))
+    return false;
+
+  SmallVector<Value *, 4> TransposedVectors;
+  // Perform a matrix transposition to compute the interleaved results
+  // with target-specific shuffle sequences.
+  transpose(DecomposedVectors, TransposedVectors);
+
+  // Now replace the unoptimized interleaved vectors with the
+  // transposed vectors.
+  for (unsigned i = 0; i < Shuffles.size(); i++)
+    Shuffles[i]->replaceAllUsesWith(TransposedVectors[Indices[i]]);
 
   return true;
 }
+
+/// \brief Lower interleaved load(s) into target-specific instructions/
+/// intrinsics. The lowering sequence varies depending on the vector types,
+/// factor, number of shuffles and ISA.
+/// Currently, lowering is supported for 4x64-bit vectors with Factor = 4
+/// on AVX.
+bool X86TargetLowering::lowerInterleavedLoad(
+    LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
+    ArrayRef<unsigned> Indices, unsigned Factor) const {
+  assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
+         "Invalid interleave factor");
+  assert(!Shuffles.empty() && "Empty shufflevector input");
+  assert(Shuffles.size() == Indices.size() &&
+         "Unmatched number of shufflevectors and indices");
+
+  // Create an interleaved access group. Note: a stack object is used here;
+  // heap-allocating the group would leak it, since nothing deletes it.
+  IRBuilder<> Builder(LI);
+  X86InterleavedAccessGroup Grp(LI, Shuffles, Indices, Factor, Subtarget,
+                                Builder);
+
+  return Grp.isSupported() && Grp.lowerIntoOptimizedSequence();
+}
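
Putting decompose() and transpose() together, lowerIntoOptimizedSequence() is behavior-preserving for the supported case: four consecutive sub-vector loads followed by a 4x4 transpose yield exactly the four stride-4 gathers that the original shufflevectors computed. A minimal end-to-end model follows; it is illustrative only, with all names hypothetical.

// End-to-end model of lowerIntoOptimizedSequence() for the supported case:
// a 16-element interleaved buffer (Factor = 4) is split into four
// consecutive 4-element loads (decompose), transposed, and compared against
// the strided gathers the original shufflevectors would compute.
#include <array>
#include <cassert>
#include <cstdint>

int main() {
  // Interleaved buffer: element j of "field" i lives at Buf[4 * j + i].
  std::array<uint64_t, 16> Buf;
  for (int j = 0; j < 4; j++)
    for (int i = 0; i < 4; i++)
      Buf[4 * j + i] = 100 * i + j;

  // decompose(): four consecutive <4 x i64> loads from the same base pointer.
  uint64_t Sub[4][4];
  for (int k = 0; k < 4; k++)
    for (int e = 0; e < 4; e++)
      Sub[k][e] = Buf[4 * k + e];

  // transpose(): row i of the transposed matrix is Sub[0..3][i]. Check that
  // it equals the stride-4 gather <i, i+4, i+8, i+12> that the original
  // shufflevector with starting index Indices[i] = i produced.
  for (int i = 0; i < 4; i++)
    for (int j = 0; j < 4; j++)
      assert(Sub[j][i] == Buf[i + 4 * j]);
}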