Index: include/llvm/CodeGen/BasicTTIImpl.h
===================================================================
--- include/llvm/CodeGen/BasicTTIImpl.h
+++ include/llvm/CodeGen/BasicTTIImpl.h
@@ -528,18 +528,29 @@
       // Assume that we need to scalarize this intrinsic.
       unsigned ScalarizationCost = 0;
       unsigned ScalarCalls = 1;
+      Type *ScalarRetTy = RetTy;
       if (RetTy->isVectorTy()) {
         ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
         ScalarCalls = std::max(ScalarCalls, RetTy->getVectorNumElements());
+        ScalarRetTy = RetTy->getScalarType();
       }
+      SmallVector<Type *, 4> ScalarTys;
       for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
-        if (Tys[i]->isVectorTy()) {
-          ScalarizationCost += getScalarizationOverhead(Tys[i], false, true);
-          ScalarCalls = std::max(ScalarCalls, Tys[i]->getVectorNumElements());
+        Type *Ty = Tys[i];
+        if (Ty->isVectorTy()) {
+          ScalarizationCost += getScalarizationOverhead(Ty, false, true);
+          ScalarCalls = std::max(ScalarCalls, Ty->getVectorNumElements());
+          Ty = Ty->getScalarType();
         }
+        ScalarTys.push_back(Ty);
       }
+      if (ScalarCalls == 1)
+        return 1; // Return cost of a scalar intrinsic. Assume it to be cheap.
+
+      unsigned ScalarCost = static_cast<T *>(this)->getIntrinsicInstrCost(
+          IID, ScalarRetTy, ScalarTys);
 
-      return ScalarCalls + ScalarizationCost;
+      return ScalarCalls * ScalarCost + ScalarizationCost;
     }
     // Look for intrinsics that can be lowered directly or turned into a scalar
     // intrinsic call.
@@ -649,10 +660,25 @@
     // this will emit a costly libcall, adding call overhead and spills. Make it
     // very expensive.
     if (RetTy->isVectorTy()) {
-      unsigned Num = RetTy->getVectorNumElements();
-      unsigned Cost = static_cast<T *>(this)->getIntrinsicInstrCost(
-          IID, RetTy->getScalarType(), Tys);
-      return 10 * Cost * Num;
+      unsigned ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
+      unsigned ScalarCalls = RetTy->getVectorNumElements();
+      SmallVector<Type *, 4> ScalarTys;
+      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
+        Type *Ty = Tys[i];
+        if (Ty->isVectorTy())
+          Ty = Ty->getScalarType();
+        ScalarTys.push_back(Ty);
+      }
+      unsigned ScalarCost = static_cast<T *>(this)->getIntrinsicInstrCost(
+          IID, RetTy->getScalarType(), ScalarTys);
+      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
+        if (Tys[i]->isVectorTy()) {
+          ScalarizationCost += getScalarizationOverhead(Tys[i], false, true);
+          ScalarCalls = std::max(ScalarCalls, Tys[i]->getVectorNumElements());
+        }
+      }
+
+      return ScalarCalls * ScalarCost + ScalarizationCost;
     }
 
     // This is going to be turned into a library call, make it expensive.
Index: test/Analysis/CostModel/X86/intrinsic-cost.ll
===================================================================
--- test/Analysis/CostModel/X86/intrinsic-cost.ll
+++ test/Analysis/CostModel/X86/intrinsic-cost.ll
@@ -22,7 +22,7 @@
   ret void
 
 ; CORE2: Printing analysis 'Cost Model Analysis' for function 'test1':
-; CORE2: Cost Model: Found an estimated cost of 400 for instruction:   %2 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %wide.load)
+; CORE2: Cost Model: Found an estimated cost of 46 for instruction:   %2 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %wide.load)
 
 ; COREI7: Printing analysis 'Cost Model Analysis' for function 'test1':
 ; COREI7: Cost Model: Found an estimated cost of 1 for instruction:   %2 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %wide.load)
@@ -50,7 +50,7 @@
   ret void
 
 ; CORE2: Printing analysis 'Cost Model Analysis' for function 'test2':
-; CORE2: Cost Model: Found an estimated cost of 400 for instruction:   %2 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %wide.load)
+; CORE2: Cost Model: Found an estimated cost of 46 for instruction:   %2 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %wide.load)
 
 ; COREI7: Printing analysis 'Cost Model Analysis' for function 'test2':
 ; COREI7: Cost Model: Found an estimated cost of 1 for instruction:   %2 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %wide.load)
Index: test/Transforms/LoopVectorize/X86/vect.omp.force.ll
===================================================================
--- test/Transforms/LoopVectorize/X86/vect.omp.force.ll
+++ test/Transforms/LoopVectorize/X86/vect.omp.force.ll
@@ -15,9 +15,9 @@
 ; The source code for the test:
 ;
 ; #include <math.h>
-; void foo(float* restrict A, float * restrict B, int size)
+; void foo(float* restrict A, float * restrict B)
 ; {
-;   for (int i = 0; i < size; ++i) A[i] = sinf(B[i]);
+;   for (int i = 0; i < 1000; i+=2) A[i] = sinf(B[i]);
 ; }
 ;
 
@@ -25,24 +25,20 @@
 
 ; This loop will be vectorized, although the scalar cost is lower than any of vector costs, but vectorization is explicitly forced in metadata.
 ;
-define void @vectorized(float* noalias nocapture %A, float* noalias nocapture %B, i32 %size) {
+define void @vectorized(float* noalias nocapture %A, float* noalias nocapture %B) {
 entry:
-  %cmp6 = icmp sgt i32 %size, 0
-  br i1 %cmp6, label %for.body.preheader, label %for.end
-
-for.body.preheader:
   br label %for.body
 
 for.body:
-  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
   %arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
   %0 = load float, float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !1
   %call = tail call float @llvm.sin.f32(float %0)
   %arrayidx2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
   store float %call, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !1
-  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
   %lftr.wideiv = trunc i64 %indvars.iv.next to i32
-  %exitcond = icmp eq i32 %lftr.wideiv, %size
+  %exitcond = icmp eq i32 %lftr.wideiv, 1000
   br i1 %exitcond, label %for.end.loopexit, label %for.body, !llvm.loop !1
 
 for.end.loopexit:
@@ -59,24 +55,20 @@
 
 ; This method will not be vectorized, as scalar cost is lower than any of vector costs.
 ;
-define void @not_vectorized(float* noalias nocapture %A, float* noalias nocapture %B, i32 %size) {
+define void @not_vectorized(float* noalias nocapture %A, float* noalias nocapture %B) {
 entry:
-  %cmp6 = icmp sgt i32 %size, 0
-  br i1 %cmp6, label %for.body.preheader, label %for.end
-
-for.body.preheader:
   br label %for.body
 
 for.body:
-  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
   %arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
   %0 = load float, float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !3
   %call = tail call float @llvm.sin.f32(float %0)
   %arrayidx2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
   store float %call, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
-  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
   %lftr.wideiv = trunc i64 %indvars.iv.next to i32
-  %exitcond = icmp eq i32 %lftr.wideiv, %size
+  %exitcond = icmp eq i32 %lftr.wideiv, 1000
   br i1 %exitcond, label %for.end.loopexit, label %for.body, !llvm.loop !3
 
 for.end.loopexit:
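
Note on the changed cost formula (not part of the patch itself): the scalarization estimate moves from the old flat "10 * Cost * Num" to "ScalarCalls * ScalarCost + ScalarizationCost", which is what drops the CORE2 expectations above from 400 to 46. A minimal standalone sketch of the formula's shape, using hypothetical per-element insert/extract and scalar-call costs rather than the real X86 TTI values (so it does not reproduce the exact number 46):

    // cost_sketch.cpp -- illustrative only; toy constants stand in for real TTI queries.
    #include <algorithm>
    #include <cstdio>

    constexpr unsigned InsertCost = 1;      // assumed cost to insert one result element
    constexpr unsigned ExtractCost = 1;     // assumed cost to extract one operand element
    constexpr unsigned ScalarCallCost = 10; // assumed cost of one scalar libcall

    // Mirrors the shape of the new BasicTTIImpl logic: extract each vector operand
    // element, make one scalar call per lane, then insert each result element.
    unsigned scalarizedIntrinsicCost(unsigned RetLanes, unsigned OperandLanes) {
      unsigned ScalarizationCost = RetLanes * InsertCost + OperandLanes * ExtractCost;
      unsigned ScalarCalls = std::max(RetLanes, OperandLanes);
      return ScalarCalls * ScalarCallCost + ScalarizationCost;
    }

    int main() {
      // e.g. a <4 x float> llvm.ceil call with one <4 x float> operand.
      std::printf("%u\n", scalarizedIntrinsicCost(4, 4)); // 4*10 + 4 + 4 = 48 with these toy costs
    }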