Index: lib/Target/X86/X86TargetTransformInfo.cpp
===================================================================
--- lib/Target/X86/X86TargetTransformInfo.cpp
+++ lib/Target/X86/X86TargetTransformInfo.cpp
@@ -324,6 +324,12 @@
     { ISD::SRA,  MVT::v16i16, 10 }, // extend/vpsravd/pack sequence.
     { ISD::SRA,  MVT::v2i64,   4 }, // srl/xor/sub sequence.
     { ISD::SRA,  MVT::v4i64,   4 }, // srl/xor/sub sequence.
+    { ISD::FDIV, MVT::f32,     7 }, // Haswell from http://www.agner.org/
+    { ISD::FDIV, MVT::v4f32,   7 }, // Haswell from http://www.agner.org/
+    { ISD::FDIV, MVT::v8f32,  14 }, // Haswell from http://www.agner.org/
+    { ISD::FDIV, MVT::f64,    14 }, // Haswell from http://www.agner.org/
+    { ISD::FDIV, MVT::v2f64,  14 }, // Haswell from http://www.agner.org/
+    { ISD::FDIV, MVT::v4f64,  28 }, // Haswell from http://www.agner.org/
   };

   // Look for AVX2 lowering tricks for custom cases.
@@ -334,6 +340,12 @@
   }

   static const CostTblEntry AVXCustomCostTable[] = {
+    { ISD::FDIV, MVT::f32,    14 }, // SNB from http://www.agner.org/
+    { ISD::FDIV, MVT::v4f32,  14 }, // SNB from http://www.agner.org/
+    { ISD::FDIV, MVT::v8f32,  28 }, // SNB from http://www.agner.org/
+    { ISD::FDIV, MVT::f64,    22 }, // SNB from http://www.agner.org/
+    { ISD::FDIV, MVT::v2f64,  22 }, // SNB from http://www.agner.org/
+    { ISD::FDIV, MVT::v4f64,  44 }, // SNB from http://www.agner.org/
     // Vectorizing division is a bad idea. See the SSE2 table for more comments.
     { ISD::SDIV, MVT::v32i8,  32*20 },
     { ISD::SDIV, MVT::v16i16, 16*20 },
@@ -352,6 +364,19 @@
       return LT.first * Entry->Cost;
   }

+  static const CostTblEntry SSE42FloatCostTable[] = {
+    { ISD::FDIV, MVT::f32,   14 }, // Nehalem from http://www.agner.org/
+    { ISD::FDIV, MVT::v4f32, 14 }, // Nehalem from http://www.agner.org/
+    { ISD::FDIV, MVT::f64,   22 }, // Nehalem from http://www.agner.org/
+    { ISD::FDIV, MVT::v2f64, 22 }, // Nehalem from http://www.agner.org/
+  };
+
+  if (ST->hasSSE42()) {
+    if (const auto *Entry = CostTableLookup(SSE42FloatCostTable, ISD,
+                                            LT.second))
+      return LT.first * Entry->Cost;
+  }
+
   static const CostTblEntry SSE2UniformCostTable[] = {
     // Uniform splats are cheaper for the following instructions.
@@ -469,6 +494,11 @@
     { ISD::SRA,  MVT::v2i64,  12 }, // srl/xor/sub sequence.
     { ISD::SRA,  MVT::v4i64,  2*12 }, // srl/xor/sub sequence.

+    { ISD::FDIV, MVT::f32,    23 }, // Pentium IV from http://www.agner.org/
+    { ISD::FDIV, MVT::v4f32,  39 }, // Pentium IV from http://www.agner.org/
+    { ISD::FDIV, MVT::f64,    38 }, // Pentium IV from http://www.agner.org/
+    { ISD::FDIV, MVT::v2f64,  69 }, // Pentium IV from http://www.agner.org/
+
     // It is not a good idea to vectorize division. We have to scalarize it and
     // in the process we will often end up having to spill regular
     // registers. The overhead of division is going to dominate most kernels
@@ -533,6 +563,15 @@
       !ST->hasSSE41())
     return LT.first * 6;

+  static const CostTblEntry SSE1FloatCostTable[] = {
+    { ISD::FDIV, MVT::f32,   17 }, // Pentium III from http://www.agner.org/
+    { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/
+  };
+
+  if (ST->hasSSE1())
+    if (const auto *Entry = CostTableLookup(SSE1FloatCostTable, ISD,
+                                            LT.second))
+      return LT.first * Entry->Cost;
   // Fallback to the default implementation.
   return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
 }
@@ -1111,7 +1150,13 @@
     { ISD::CTTZ,       MVT::v4i64,  10 },
     { ISD::CTTZ,       MVT::v8i32,  14 },
     { ISD::CTTZ,       MVT::v16i16, 12 },
-    { ISD::CTTZ,       MVT::v32i8,   9 }
+    { ISD::CTTZ,       MVT::v32i8,   9 },
+    { ISD::FSQRT,      MVT::f32,     7 }, // Haswell from http://www.agner.org/
+    { ISD::FSQRT,      MVT::v4f32,   7 }, // Haswell from http://www.agner.org/
+    { ISD::FSQRT,      MVT::v8f32,  14 }, // Haswell from http://www.agner.org/
+    { ISD::FSQRT,      MVT::f64,    14 }, // Haswell from http://www.agner.org/
+    { ISD::FSQRT,      MVT::v2f64,  14 }, // Haswell from http://www.agner.org/
+    { ISD::FSQRT,      MVT::v4f64,  28 }, // Haswell from http://www.agner.org/
   };
   static const CostTblEntry AVX1CostTbl[] = {
     { ISD::BITREVERSE, MVT::v4i64,  10 },
@@ -1133,6 +1178,16 @@
     { ISD::CTTZ,       MVT::v8i32,  28 },
     { ISD::CTTZ,       MVT::v16i16, 24 },
     { ISD::CTTZ,       MVT::v32i8,  18 },
+    { ISD::FSQRT,      MVT::f32,    14 }, // SNB from http://www.agner.org/
+    { ISD::FSQRT,      MVT::v4f32,  14 }, // SNB from http://www.agner.org/
+    { ISD::FSQRT,      MVT::v8f32,  28 }, // SNB from http://www.agner.org/
+    { ISD::FSQRT,      MVT::f64,    21 }, // SNB from http://www.agner.org/
+    { ISD::FSQRT,      MVT::v2f64,  21 }, // SNB from http://www.agner.org/
+    { ISD::FSQRT,      MVT::v4f64,  43 }, // SNB from http://www.agner.org/
+  };
+  static const CostTblEntry SSE42CostTbl[] = {
+    { ISD::FSQRT,      MVT::f32,    18 }, // Nehalem from http://www.agner.org/
+    { ISD::FSQRT,      MVT::v4f32,  18 }, // Nehalem from http://www.agner.org/
   };
   static const CostTblEntry SSSE3CostTbl[] = {
     { ISD::BITREVERSE, MVT::v2i64,   5 },
@@ -1167,7 +1222,13 @@
     { ISD::CTTZ,       MVT::v2i64,  14 },
     { ISD::CTTZ,       MVT::v4i32,  18 },
     { ISD::CTTZ,       MVT::v8i16,  16 },
-    { ISD::CTTZ,       MVT::v16i8,  13 }
+    { ISD::CTTZ,       MVT::v16i8,  13 },
+    { ISD::FSQRT,      MVT::f64,    32 }, // Nehalem from http://www.agner.org/
+    { ISD::FSQRT,      MVT::v2f64,  32 }, // Nehalem from http://www.agner.org/
+  };
+  static const CostTblEntry SSE1CostTbl[] = {
+    { ISD::FSQRT,      MVT::f32,    28 }, // Pentium III from http://www.agner.org/
+    { ISD::FSQRT,      MVT::v4f32,  56 }, // Pentium III from http://www.agner.org/
   };

   unsigned ISD = ISD::DELETED_NODE;
@@ -1189,6 +1250,9 @@
   case Intrinsic::cttz:
     ISD = ISD::CTTZ;
     break;
+  case Intrinsic::sqrt:
+    ISD = ISD::FSQRT;
+    break;
   }

   // Legalize the type.
@@ -1208,6 +1272,10 @@
     if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
       return LT.first * Entry->Cost;

+  if (ST->hasSSE42())
+    if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
+      return LT.first * Entry->Cost;
+
   if (ST->hasSSSE3())
     if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
       return LT.first * Entry->Cost;
@@ -1216,6 +1284,10 @@
     if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
       return LT.first * Entry->Cost;

+  if (ST->hasSSE1())
+    if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
+      return LT.first * Entry->Cost;
+
   return BaseT::getIntrinsicInstrCost(IID, RetTy, Tys, FMF);
 }
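The lookups added above follow the file's existing dispatch pattern: each feature level owns a static cost table, the tables are consulted from newest ISA to oldest, and a hit is scaled by LT.first, the type-legalization factor (how many legal-width operations one IR operation splits into). Below is a minimal standalone sketch of that pattern; the struct, table, and function names are illustrative stand-ins, not LLVM's actual CostTblEntry/CostTableLookup API, and the split factor is hard-coded rather than computed by type legalization:

    // Sketch only: mirrors the SSE4.2 FDIV entries added above
    // (Nehalem numbers from http://www.agner.org/), not LLVM's real API.
    #include <cstdio>
    #include <cstring>

    struct CostEntry { const char *Op; const char *Ty; int Cost; };

    static const CostEntry SSE42FloatTable[] = {
      {"FDIV", "f32", 14}, {"FDIV", "v4f32", 14},
      {"FDIV", "f64", 22}, {"FDIV", "v2f64", 22},
    };

    static const CostEntry *lookup(const CostEntry *Tbl, unsigned N,
                                   const char *Op, const char *Ty) {
      for (unsigned I = 0; I != N; ++I)
        if (!strcmp(Tbl[I].Op, Op) && !strcmp(Tbl[I].Ty, Ty))
          return &Tbl[I];
      return nullptr; // miss: the caller falls through to an older table
    }

    int main() {
      // <8 x float> is not a legal type before AVX: it is split into two
      // <4 x float> halves, so the legalization factor is 2 and the
      // reported cost is 2 * 14 == 28, which is exactly the SSE42 fdiv
      // expectation in the test update below.
      unsigned LTFirst = 2;
      if (const CostEntry *E = lookup(SSE42FloatTable, 4, "FDIV", "v4f32"))
        printf("v8f32 fdiv cost on SSE4.2: %u\n", LTFirst * E->Cost);
      return 0;
    }

Note that the doubled 256-bit entries inside the AVX/AVX2 tables come straight from Agner Fog's throughput numbers for the YMM forms, not from this scaling; LT.first only kicks in for types wider than the target's legal vector width.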
Index: test/Analysis/CostModel/X86/arith-fp.ll
===================================================================
--- test/Analysis/CostModel/X86/arith-fp.ll
+++ test/Analysis/CostModel/X86/arith-fp.ll
@@ -1,9 +1,9 @@
-; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE2
-; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=+sse4.2 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE42
-; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=+avx,+fma | FileCheck %s --check-prefix=CHECK --check-prefix=AVX
-; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=+avx2,+fma | FileCheck %s --check-prefix=CHECK --check-prefix=AVX2
-; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=+avx512f | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512F
-; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512BW
+; RUN: opt < %s -enable-no-nans-fp-math -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE2
+; RUN: opt < %s -enable-no-nans-fp-math -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=+sse4.2 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE42
+; RUN: opt < %s -enable-no-nans-fp-math -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=+avx,+fma | FileCheck %s --check-prefix=CHECK --check-prefix=AVX
+; RUN: opt < %s -enable-no-nans-fp-math -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=+avx2,+fma | FileCheck %s --check-prefix=CHECK --check-prefix=AVX2
+; RUN: opt < %s -enable-no-nans-fp-math -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=+avx512f | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: opt < %s -enable-no-nans-fp-math -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512BW

 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.8.0"
@@ -175,53 +175,53 @@
 ; CHECK-LABEL: 'fdiv'
 define i32 @fdiv(i32 %arg) {
-  ; SSE2: cost of 2 {{.*}} %F32 = fdiv
-  ; SSE42: cost of 2 {{.*}} %F32 = fdiv
-  ; AVX: cost of 2 {{.*}} %F32 = fdiv
-  ; AVX2: cost of 2 {{.*}} %F32 = fdiv
-  ; AVX512: cost of 2 {{.*}} %F32 = fdiv
+  ; SSE2: cost of 23 {{.*}} %F32 = fdiv
+  ; SSE42: cost of 14 {{.*}} %F32 = fdiv
+  ; AVX: cost of 14 {{.*}} %F32 = fdiv
+  ; AVX2: cost of 7 {{.*}} %F32 = fdiv
+  ; AVX512: cost of 7 {{.*}} %F32 = fdiv
   %F32 = fdiv float undef, undef
-  ; SSE2: cost of 2 {{.*}} %V4F32 = fdiv
-  ; SSE42: cost of 2 {{.*}} %V4F32 = fdiv
-  ; AVX: cost of 2 {{.*}} %V4F32 = fdiv
-  ; AVX2: cost of 2 {{.*}} %V4F32 = fdiv
-  ; AVX512: cost of 2 {{.*}} %V4F32 = fdiv
+  ; SSE2: cost of 39 {{.*}} %V4F32 = fdiv
+  ; SSE42: cost of 14 {{.*}} %V4F32 = fdiv
+  ; AVX: cost of 14 {{.*}} %V4F32 = fdiv
+  ; AVX2: cost of 7 {{.*}} %V4F32 = fdiv
+  ; AVX512: cost of 7 {{.*}} %V4F32 = fdiv
   %V4F32 = fdiv <4 x float> undef, undef
-  ; SSE2: cost of 4 {{.*}} %V8F32 = fdiv
-  ; SSE42: cost of 4 {{.*}} %V8F32 = fdiv
-  ; AVX: cost of 2 {{.*}} %V8F32 = fdiv
-  ; AVX2: cost of 2 {{.*}} %V8F32 = fdiv
-  ; AVX512: cost of 2 {{.*}} %V8F32 = fdiv
+  ; SSE2: cost of 78 {{.*}} %V8F32 = fdiv
+  ; SSE42: cost of 28 {{.*}} %V8F32 = fdiv
+  ; AVX: cost of 28 {{.*}} %V8F32 = fdiv
+  ; AVX2: cost of 14 {{.*}} %V8F32 = fdiv
+  ; AVX512: cost of 14 {{.*}} %V8F32 = fdiv
   %V8F32 = fdiv <8 x float> undef, undef
-  ; SSE2: cost of 8 {{.*}} %V16F32 = fdiv
-  ; SSE42: cost of 8 {{.*}} %V16F32 = fdiv
-  ; AVX: cost of 4 {{.*}} %V16F32 = fdiv
-  ; AVX2: cost of 4 {{.*}} %V16F32 = fdiv
+  ; SSE2: cost of 156 {{.*}} %V16F32 = fdiv
+  ; SSE42: cost of 56 {{.*}} %V16F32 = fdiv
+  ; AVX: cost of 56 {{.*}} %V16F32 = fdiv
+  ; AVX2: cost of 28 {{.*}} %V16F32 = fdiv
   ; AVX512: cost of 2 {{.*}} %V16F32 = fdiv
   %V16F32 = fdiv <16 x float> undef, undef
-  ; SSE2: cost of 2 {{.*}} %F64 = fdiv
-  ; SSE42: cost of 2 {{.*}} %F64 = fdiv
-  ; AVX: cost of 2 {{.*}} %F64 = fdiv
-  ; AVX2: cost of 2 {{.*}} %F64 = fdiv
-  ; AVX512: cost of 2 {{.*}} %F64 = fdiv
+  ; SSE2: cost of 38 {{.*}} %F64 = fdiv
+  ; SSE42: cost of 22 {{.*}} %F64 = fdiv
+  ; AVX: cost of 22 {{.*}} %F64 = fdiv
+  ; AVX2: cost of 14 {{.*}} %F64 = fdiv
+  ; AVX512: cost of 14 {{.*}} %F64 = fdiv
   %F64 = fdiv double undef, undef
-  ; SSE2: cost of 2 {{.*}} %V2F64 = fdiv
-  ; SSE42: cost of 2 {{.*}} %V2F64 = fdiv
-  ; AVX: cost of 2 {{.*}} %V2F64 = fdiv
-  ; AVX2: cost of 2 {{.*}} %V2F64 = fdiv
-  ; AVX512: cost of 2 {{.*}} %V2F64 = fdiv
+  ; SSE2: cost of 69 {{.*}} %V2F64 = fdiv
+  ; SSE42: cost of 22 {{.*}} %V2F64 = fdiv
+  ; AVX: cost of 22 {{.*}} %V2F64 = fdiv
+  ; AVX2: cost of 14 {{.*}} %V2F64 = fdiv
+  ; AVX512: cost of 14 {{.*}} %V2F64 = fdiv
   %V2F64 = fdiv <2 x double> undef, undef
-  ; SSE2: cost of 4 {{.*}} %V4F64 = fdiv
-  ; SSE42: cost of 4 {{.*}} %V4F64 = fdiv
-  ; AVX: cost of 2 {{.*}} %V4F64 = fdiv
-  ; AVX2: cost of 2 {{.*}} %V4F64 = fdiv
-  ; AVX512: cost of 2 {{.*}} %V4F64 = fdiv
+  ; SSE2: cost of 138 {{.*}} %V4F64 = fdiv
+  ; SSE42: cost of 44 {{.*}} %V4F64 = fdiv
+  ; AVX: cost of 44 {{.*}} %V4F64 = fdiv
+  ; AVX2: cost of 28 {{.*}} %V4F64 = fdiv
+  ; AVX512: cost of 28 {{.*}} %V4F64 = fdiv
   %V4F64 = fdiv <4 x double> undef, undef
-  ; SSE2: cost of 8 {{.*}} %V8F64 = fdiv
-  ; SSE42: cost of 8 {{.*}} %V8F64 = fdiv
-  ; AVX: cost of 4 {{.*}} %V8F64 = fdiv
-  ; AVX2: cost of 4 {{.*}} %V8F64 = fdiv
+  ; SSE2: cost of 276 {{.*}} %V8F64 = fdiv
+  ; SSE42: cost of 88 {{.*}} %V8F64 = fdiv
+  ; AVX: cost of 88 {{.*}} %V8F64 = fdiv
+  ; AVX2: cost of 56 {{.*}} %V8F64 = fdiv
   ; AVX512: cost of 2 {{.*}} %V8F64 = fdiv
   %V8F64 = fdiv <8 x double> undef, undef
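A quick sanity check on the fdiv expectations above: none of these numbers is independent. Each is the relevant table cost multiplied by the legalization split factor. For example, on SSE2 a <16 x float> fdiv is split into four <4 x float> ops, giving 4 * 39 = 156; on SSE4.2 the same split yields 4 * 14 = 56; and the AVX v4f64 expectation is the table's 22 doubled, 2 * 22 = 44. The 512-bit AVX512 lines keep their old value of 2 (they are context lines here), since the patch adds no FDIV entries for 512-bit types.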
@@ -285,53 +285,53 @@
 ; CHECK-LABEL: 'fsqrt'
 define i32 @fsqrt(i32 %arg) {
-  ; SSE2: cost of 1 {{.*}} %F32 = call float @llvm.sqrt.f32
-  ; SSE42: cost of 1 {{.*}} %F32 = call float @llvm.sqrt.f32
-  ; AVX: cost of 1 {{.*}} %F32 = call float @llvm.sqrt.f32
-  ; AVX2: cost of 1 {{.*}} %F32 = call float @llvm.sqrt.f32
-  ; AVX512: cost of 1 {{.*}} %F32 = call float @llvm.sqrt.f32
+  ; SSE2: cost of 28 {{.*}} %F32 = call float @llvm.sqrt.f32
+  ; SSE42: cost of 18 {{.*}} %F32 = call float @llvm.sqrt.f32
+  ; AVX: cost of 14 {{.*}} %F32 = call float @llvm.sqrt.f32
+  ; AVX2: cost of 7 {{.*}} %F32 = call float @llvm.sqrt.f32
+  ; AVX512: cost of 7 {{.*}} %F32 = call float @llvm.sqrt.f32
   %F32 = call float @llvm.sqrt.f32(float undef)
-  ; SSE2: cost of 1 {{.*}} %V4F32 = call <4 x float> @llvm.sqrt.v4f32
-  ; SSE42: cost of 1 {{.*}} %V4F32 = call <4 x float> @llvm.sqrt.v4f32
-  ; AVX: cost of 1 {{.*}} %V4F32 = call <4 x float> @llvm.sqrt.v4f32
-  ; AVX2: cost of 1 {{.*}} %V4F32 = call <4 x float> @llvm.sqrt.v4f32
-  ; AVX512: cost of 1 {{.*}} %V4F32 = call <4 x float> @llvm.sqrt.v4f32
+  ; SSE2: cost of 56 {{.*}} %V4F32 = call <4 x float> @llvm.sqrt.v4f32
+  ; SSE42: cost of 18 {{.*}} %V4F32 = call <4 x float> @llvm.sqrt.v4f32
+  ; AVX: cost of 14 {{.*}} %V4F32 = call <4 x float> @llvm.sqrt.v4f32
+  ; AVX2: cost of 7 {{.*}} %V4F32 = call <4 x float> @llvm.sqrt.v4f32
+  ; AVX512: cost of 7 {{.*}} %V4F32 = call <4 x float> @llvm.sqrt.v4f32
   %V4F32 = call <4 x float> @llvm.sqrt.v4f32(<4 x float> undef)
-  ; SSE2: cost of 4 {{.*}} %V8F32 = call <8 x float> @llvm.sqrt.v8f32
-  ; SSE42: cost of 4 {{.*}} %V8F32 = call <8 x float> @llvm.sqrt.v8f32
-  ; AVX: cost of 1 {{.*}} %V8F32 = call <8 x float> @llvm.sqrt.v8f32
-  ; AVX2: cost of 1 {{.*}} %V8F32 = call <8 x float> @llvm.sqrt.v8f32
-  ; AVX512: cost of 1 {{.*}} %V8F32 = call <8 x float> @llvm.sqrt.v8f32
+  ; SSE2: cost of 112 {{.*}} %V8F32 = call <8 x float> @llvm.sqrt.v8f32
+  ; SSE42: cost of 36 {{.*}} %V8F32 = call <8 x float> @llvm.sqrt.v8f32
+  ; AVX: cost of 28 {{.*}} %V8F32 = call <8 x float> @llvm.sqrt.v8f32
+  ; AVX2: cost of 14 {{.*}} %V8F32 = call <8 x float> @llvm.sqrt.v8f32
+  ; AVX512: cost of 14 {{.*}} %V8F32 = call <8 x float> @llvm.sqrt.v8f32
   %V8F32 = call <8 x float> @llvm.sqrt.v8f32(<8 x float> undef)
-  ; SSE2: cost of 8 {{.*}} %V16F32 = call <16 x float> @llvm.sqrt.v16f32
-  ; SSE42: cost of 8 {{.*}} %V16F32 = call <16 x float> @llvm.sqrt.v16f32
-  ; AVX: cost of 4 {{.*}} %V16F32 = call <16 x float> @llvm.sqrt.v16f32
-  ; AVX2: cost of 4 {{.*}} %V16F32 = call <16 x float> @llvm.sqrt.v16f32
+  ; SSE2: cost of 224 {{.*}} %V16F32 = call <16 x float> @llvm.sqrt.v16f32
+  ; SSE42: cost of 72 {{.*}} %V16F32 = call <16 x float> @llvm.sqrt.v16f32
+  ; AVX: cost of 56 {{.*}} %V16F32 = call <16 x float> @llvm.sqrt.v16f32
+  ; AVX2: cost of 28 {{.*}} %V16F32 = call <16 x float> @llvm.sqrt.v16f32
   ; AVX512: cost of 1 {{.*}} %V16F32 = call <16 x float> @llvm.sqrt.v16f32
   %V16F32 = call <16 x float> @llvm.sqrt.v16f32(<16 x float> undef)
-  ; SSE2: cost of 1 {{.*}} %F64 = call double @llvm.sqrt.f64
-  ; SSE42: cost of 1 {{.*}} %F64 = call double @llvm.sqrt.f64
-  ; AVX: cost of 1 {{.*}} %F64 = call double @llvm.sqrt.f64
-  ; AVX2: cost of 1 {{.*}} %F64 = call double @llvm.sqrt.f64
-  ; AVX512: cost of 1 {{.*}} %F64 = call double @llvm.sqrt.f64
+  ; SSE2: cost of 32 {{.*}} %F64 = call double @llvm.sqrt.f64
+  ; SSE42: cost of 32 {{.*}} %F64 = call double @llvm.sqrt.f64
+  ; AVX: cost of 21 {{.*}} %F64 = call double @llvm.sqrt.f64
+  ; AVX2: cost of 14 {{.*}} %F64 = call double @llvm.sqrt.f64
+  ; AVX512: cost of 14 {{.*}} %F64 = call double @llvm.sqrt.f64
   %F64 = call double @llvm.sqrt.f64(double undef)
-  ; SSE2: cost of 1 {{.*}} %V2F64 = call <2 x double> @llvm.sqrt.v2f64
-  ; SSE42: cost of 1 {{.*}} %V2F64 = call <2 x double> @llvm.sqrt.v2f64
-  ; AVX: cost of 1 {{.*}} %V2F64 = call <2 x double> @llvm.sqrt.v2f64
-  ; AVX2: cost of 1 {{.*}} %V2F64 = call <2 x double> @llvm.sqrt.v2f64
-  ; AVX512: cost of 1 {{.*}} %V2F64 = call <2 x double> @llvm.sqrt.v2f64
+  ; SSE2: cost of 32 {{.*}} %V2F64 = call <2 x double> @llvm.sqrt.v2f64
+  ; SSE42: cost of 32 {{.*}} %V2F64 = call <2 x double> @llvm.sqrt.v2f64
+  ; AVX: cost of 21 {{.*}} %V2F64 = call <2 x double> @llvm.sqrt.v2f64
+  ; AVX2: cost of 14 {{.*}} %V2F64 = call <2 x double> @llvm.sqrt.v2f64
+  ; AVX512: cost of 14 {{.*}} %V2F64 = call <2 x double> @llvm.sqrt.v2f64
   %V2F64 = call <2 x double> @llvm.sqrt.v2f64(<2 x double> undef)
-  ; SSE2: cost of 4 {{.*}} %V4F64 = call <4 x double> @llvm.sqrt.v4f64
-  ; SSE42: cost of 4 {{.*}} %V4F64 = call <4 x double> @llvm.sqrt.v4f64
-  ; AVX: cost of 1 {{.*}} %V4F64 = call <4 x double> @llvm.sqrt.v4f64
-  ; AVX2: cost of 1 {{.*}} %V4F64 = call <4 x double> @llvm.sqrt.v4f64
-  ; AVX512: cost of 1 {{.*}} %V4F64 = call <4 x double> @llvm.sqrt.v4f64
+  ; SSE2: cost of 64 {{.*}} %V4F64 = call <4 x double> @llvm.sqrt.v4f64
+  ; SSE42: cost of 64 {{.*}} %V4F64 = call <4 x double> @llvm.sqrt.v4f64
+  ; AVX: cost of 43 {{.*}} %V4F64 = call <4 x double> @llvm.sqrt.v4f64
+  ; AVX2: cost of 28 {{.*}} %V4F64 = call <4 x double> @llvm.sqrt.v4f64
+  ; AVX512: cost of 28 {{.*}} %V4F64 = call <4 x double> @llvm.sqrt.v4f64
   %V4F64 = call <4 x double> @llvm.sqrt.v4f64(<4 x double> undef)
-  ; SSE2: cost of 8 {{.*}} %V8F64 = call <8 x double> @llvm.sqrt.v8f64
-  ; SSE42: cost of 8 {{.*}} %V8F64 = call <8 x double> @llvm.sqrt.v8f64
-  ; AVX: cost of 4 {{.*}} %V8F64 = call <8 x double> @llvm.sqrt.v8f64
-  ; AVX2: cost of 4 {{.*}} %V8F64 = call <8 x double> @llvm.sqrt.v8f64
+  ; SSE2: cost of 128 {{.*}} %V8F64 = call <8 x double> @llvm.sqrt.v8f64
+  ; SSE42: cost of 128 {{.*}} %V8F64 = call <8 x double> @llvm.sqrt.v8f64
+  ; AVX: cost of 86 {{.*}} %V8F64 = call <8 x double> @llvm.sqrt.v8f64
+  ; AVX2: cost of 56 {{.*}} %V8F64 = call <8 x double> @llvm.sqrt.v8f64
   ; AVX512: cost of 1 {{.*}} %V8F64 = call <8 x double> @llvm.sqrt.v8f64
   %V8F64 = call <8 x double> @llvm.sqrt.v8f64(<8 x double> undef)
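One detail worth calling out: the newest-to-oldest fall-through explains the fsqrt expectations that look odd at first glance. SSE42CostTbl carries only the f32/v4f32 entries, so on an SSE4.2 target the f64 forms miss it and resolve from SSE2CostTbl (cost 32), while on a plain SSE2 target the f32 forms miss every newer table and resolve from SSE1CostTbl (28 scalar, 56 for v4f32). A minimal sketch of that dispatch, under the same caveat as before (illustrative names, not the real API; the SSSE3 tier is omitted for brevity):

    // Sketch of the newest-to-oldest fall-through in getIntrinsicInstrCost;
    // table contents mirror the FSQRT entries added in the patch above.
    #include <cstdio>
    #include <cstring>

    struct Entry { const char *Ty; int Cost; };

    static const Entry SSE42Sqrt[] = { {"f32", 18}, {"v4f32", 18} };
    static const Entry SSE2Sqrt[]  = { {"f64", 32}, {"v2f64", 32} };
    static const Entry SSE1Sqrt[]  = { {"f32", 28}, {"v4f32", 56} };

    static const Entry *find(const Entry *T, unsigned N, const char *Ty) {
      for (unsigned I = 0; I != N; ++I)
        if (!strcmp(T[I].Ty, Ty))
          return &T[I];
      return nullptr;
    }

    static int sqrtCost(bool HasSSE42, bool HasSSE2, bool HasSSE1,
                        const char *Ty) {
      const Entry *E = nullptr;
      if (HasSSE42)      E = find(SSE42Sqrt, 2, Ty); // newest tier; no f64 here
      if (!E && HasSSE2) E = find(SSE2Sqrt, 2, Ty);  // f64 on SSE4.2 lands here
      if (!E && HasSSE1) E = find(SSE1Sqrt, 2, Ty);  // f32 on SSE2 lands here
      return E ? E->Cost : -1; // -1: defer to the base-class cost
    }

    int main() {
      printf("f64 sqrt on an SSE4.2 target: %d\n",
             sqrtCost(true, true, true, "f64"));   // prints 32
      printf("f32 sqrt on an SSE2 target:   %d\n",
             sqrtCost(false, true, true, "f32"));  // prints 28
      return 0;
    }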