Index: llvm/trunk/lib/Transforms/Vectorize/SLPVectorizer.cpp
===================================================================
--- llvm/trunk/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ llvm/trunk/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -69,8 +69,13 @@
     cl::desc(
         "Attempt to vectorize horizontal reductions feeding into a store"));
 
+static cl::opt<unsigned>
+MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
+                       cl::desc("Attempt to vectorize for this register size in bits"));
+
 namespace {
 
+// FIXME: Set this via cl::opt to allow overriding.
 static const unsigned MinVecRegSize = 128;
 
 static const unsigned RecursionMaxDepth = 12;
@@ -3088,6 +3093,17 @@
   if (!TTI->getNumberOfRegisters(true))
     return false;
 
+  // Use the vector register size specified by the target unless overridden
+  // by a command-line option.
+  // TODO: It would be better to limit the vectorization factor based on
+  // data type rather than just register size. For example, x86 AVX has
+  // 256-bit registers, but it does not support integer operations
+  // at that width (that requires AVX2).
+  if (MaxVectorRegSizeOption.getNumOccurrences())
+    MaxVecRegSize = MaxVectorRegSizeOption;
+  else
+    MaxVecRegSize = TTI->getRegisterBitWidth(true);
+
   // Don't vectorize when the attribute NoImplicitFloat is used.
   if (F.hasFnAttribute(Attribute::NoImplicitFloat))
     return false;
@@ -3165,12 +3181,13 @@
   bool vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R);
 
   bool vectorizeStoreChain(ArrayRef<Value *> Chain, int CostThreshold,
-                           BoUpSLP &R);
+                           BoUpSLP &R, unsigned VecRegSize);
 
   bool vectorizeStores(ArrayRef<StoreInst *> Stores, int costThreshold,
                        BoUpSLP &R);
 
 private:
   StoreListMap StoreRefs;
+  unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt.
 };
 
 /// \brief Check that the Values in the slice in VL array are still existent in
@@ -3185,14 +3202,15 @@
 }
 
 bool SLPVectorizer::vectorizeStoreChain(ArrayRef<Value *> Chain,
-                                        int CostThreshold, BoUpSLP &R) {
+                                        int CostThreshold, BoUpSLP &R,
+                                        unsigned VecRegSize) {
   unsigned ChainLen = Chain.size();
   DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen
                << "\n");
   Type *StoreTy = cast<StoreInst>(Chain[0])->getValueOperand()->getType();
   auto &DL = cast<StoreInst>(Chain[0])->getModule()->getDataLayout();
   unsigned Sz = DL.getTypeSizeInBits(StoreTy);
-  unsigned VF = MinVecRegSize / Sz;
+  unsigned VF = VecRegSize / Sz;
 
   if (!isPowerOf2_32(Sz) || VF < 2)
     return false;
@@ -3276,10 +3294,15 @@
       I = ConsecutiveChain[I];
     }
 
-    if (vectorizeStoreChain(Operands, costThreshold, R)) {
-      // Mark the vectorized stores so that we don't vectorize them again.
-      VectorizedStores.insert(Operands.begin(), Operands.end());
-      Changed = true;
+    // FIXME: Is division-by-2 the correct step? Should we assert that the
+    // register size is a power-of-2?
+    for (unsigned Size = MaxVecRegSize; Size >= MinVecRegSize; Size /= 2) {
+      if (vectorizeStoreChain(Operands, costThreshold, R, Size)) {
+        // Mark the vectorized stores so that we don't vectorize them again.
+        VectorizedStores.insert(Operands.begin(), Operands.end());
+        Changed = true;
+        break;
+      }
     }
   }
 
@@ -3340,6 +3363,8 @@
   Type *Ty0 = I0->getType();
   unsigned Sz = DL.getTypeSizeInBits(Ty0);
+  // FIXME: Register size should be a parameter to this function, so we can
+  // try different vectorization factors.
   unsigned VF = MinVecRegSize / Sz;
 
   for (Value *V : VL) {
@@ -3569,6 +3594,8 @@
     const DataLayout &DL = B->getModule()->getDataLayout();
     ReductionOpcode = B->getOpcode();
     ReducedValueOpcode = 0;
+    // FIXME: Register size should be a parameter to this function, so we can
+    // try different vectorization factors.
     ReduxWidth = MinVecRegSize / DL.getTypeSizeInBits(Ty);
     ReductionRoot = B;
     ReductionPHI = Phi;
@@ -3995,6 +4022,9 @@
                  << it->second.size() << ".\n");
 
     // Process the stores in chunks of 16.
+    // TODO: The limit of 16 inhibits greater vectorization factors.
+    // For example, AVX2 supports v32i8. Increasing this limit, however,
+    // may cause a significant compile-time increase.
     for (unsigned CI = 0, CE = it->second.size(); CI < CE; CI+=16) {
       unsigned Len = std::min<unsigned>(CE - CI, 16);
       Changed |= vectorizeStores(makeArrayRef(&it->second[CI], Len),
Index: llvm/trunk/test/Transforms/SLPVectorizer/AMDGPU/simplebb.ll
===================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/AMDGPU/simplebb.ll
+++ llvm/trunk/test/Transforms/SLPVectorizer/AMDGPU/simplebb.ll
@@ -1,4 +1,9 @@
 ; RUN: opt -S -march=r600 -mcpu=cayman -basicaa -slp-vectorizer -dce < %s | FileCheck %s
+; XFAIL: *
+;
+; FIXME: If this test expects to be vectorized, the TTI must indicate that the target
+; has vector registers of the expected width.
+; Currently, it says there are 8 vector registers that are 32-bits wide.
 
 target datalayout = "e-p:32:32:32-p3:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024-v2048:2048:2048-n32:64"
Index: llvm/trunk/test/Transforms/SLPVectorizer/X86/cse.ll
===================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/cse.ll
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/cse.ll
@@ -12,11 +12,8 @@
 
 ;CHECK-LABEL: @test(
 ;CHECK: load <2 x double>
-;CHECK: fadd <2 x double>
-;CHECK: store <2 x double>
-;CHECK: insertelement <2 x double>
-;CHECK: fadd <2 x double>
-;CHECK: store <2 x double>
+;CHECK: fadd <4 x double>
+;CHECK: store <4 x double>
 ;CHECK: ret i32
 
 define i32 @test(double* nocapture %G) {
@@ -48,11 +45,12 @@
 ; A[2] = A[2] * 7.6 * n + 3.0;
 ; A[3] = A[3] * 7.4 * n + 4.0;
 ;}
-;CHECK-LABEL: @foo(
-;CHECK: insertelement <2 x double>
-;CHECK: insertelement <2 x double>
-;CHECK-NOT: insertelement <2 x double>
-;CHECK: ret
+; CHECK-LABEL: @foo(
+; CHECK: load <4 x double>
+; CHECK: fmul <4 x double>
+; CHECK: fmul <4 x double>
+; CHECK: fadd <4 x double>
+; CHECK: store <4 x double>
 
 define i32 @foo(double* nocapture %A, i32 %n) {
 entry:
   %0 = load double, double* %A, align 8
@@ -140,11 +138,12 @@
 ; A[2] = A[2] * 7.9 * n + 6.0;
 ; A[3] = A[3] * 7.9 * n + 6.0;
 ;}
-;CHECK-LABEL: @foo4(
-;CHECK: insertelement <2 x double>
-;CHECK: insertelement <2 x double>
-;CHECK-NOT: insertelement <2 x double>
-;CHECK: ret
+; CHECK-LABEL: @foo4(
+; CHECK: load <4 x double>
+; CHECK: fmul <4 x double>
+; CHECK: fmul <4 x double>
+; CHECK: fadd <4 x double>
+; CHECK: store <4 x double>
 
 define i32 @foo4(double* nocapture %A, i32 %n) {
 entry:
   %0 = load double, double* %A, align 8
Index: llvm/trunk/test/Transforms/SLPVectorizer/X86/gep.ll
===================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/gep.ll
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/gep.ll
@@ -1,5 +1,6 @@
 ; RUN: opt < %s -basicaa -slp-vectorizer -S |FileCheck %s
 target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
 
 ; Test if SLP can handle GEP expressions.
 ; The test perform the following action:
Index: llvm/trunk/test/Transforms/SLPVectorizer/X86/loopinvariant.ll
===================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/loopinvariant.ll
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/loopinvariant.ll
@@ -4,12 +4,9 @@
 target triple = "x86_64-apple-macosx10.8.0"
 
 ;CHECK-LABEL: @foo(
-;CHECK: load <4 x i32>
-;CHECK: add nsw <4 x i32>
-;CHECK: store <4 x i32>
-;CHECK: load <4 x i32>
-;CHECK: add nsw <4 x i32>
-;CHECK: store <4 x i32>
+;CHECK: load <8 x i32>
+;CHECK: add nsw <8 x i32>
+;CHECK: store <8 x i32>
 ;CHECK: ret
 define i32 @foo(i32* nocapture %A, i32 %n) {
 entry:
Index: llvm/trunk/test/Transforms/SLPVectorizer/X86/pr19657.ll
===================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/pr19657.ll
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/pr19657.ll
@@ -1,11 +1,24 @@
 ; RUN: opt < %s -basicaa -slp-vectorizer -S -mcpu=corei7-avx | FileCheck %s
+; RUN: opt < %s -basicaa -slp-vectorizer -slp-max-reg-size=128 -S -mcpu=corei7-avx | FileCheck %s --check-prefix=V128
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-; CHECK: load <2 x double>, <2 x double>*
-; CHECK: fadd <2 x double>
-; CHECK: store <2 x double>
+; CHECK-LABEL: @foo(
+; CHECK: load <4 x double>
+; CHECK: fadd <4 x double>
+; CHECK: fadd <4 x double>
+; CHECK: store <4 x double>
+
+; V128-LABEL: @foo(
+; V128: load <2 x double>
+; V128: fadd <2 x double>
+; V128: fadd <2 x double>
+; V128: store <2 x double>
+; V128: load <2 x double>
+; V128: fadd <2 x double>
+; V128: fadd <2 x double>
+; V128: store <2 x double>
 
 define void @foo(double* %x) {
   %1 = load double, double* %x, align 8