diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
@@ -214,6 +214,16 @@
   int getArithmeticReductionCost(unsigned Opcode,
                                  Type *Ty,
                                  bool IsPairwise);
+  template <typename T>
+  int getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
+                            ArrayRef<T *> Args, FastMathFlags FMF,
+                            unsigned VF);
+  int getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
+                            ArrayRef<Type *> Tys, FastMathFlags FMF,
+                            unsigned ScalarizationCostPassed = UINT_MAX);
+  int getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
+                            ArrayRef<Value *> Args, FastMathFlags FMF,
+                            unsigned VF = 1);
   int getMinMaxReductionCost(Type *Ty, Type *CondTy,
                              bool IsPairwiseForm,
                              bool IsUnsigned);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -463,6 +463,49 @@
                                        Opd1PropInfo, Opd2PropInfo);
 }
 
+template <typename T>
+int GCNTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
+                                      ArrayRef<T *> Args,
+                                      FastMathFlags FMF, unsigned VF) {
+  if (ID != Intrinsic::fma)
+    return BaseT::getIntrinsicInstrCost(ID, RetTy, Args, FMF, VF);
+
+  EVT OrigTy = TLI->getValueType(DL, RetTy);
+  if (!OrigTy.isSimple()) {
+    return BaseT::getIntrinsicInstrCost(ID, RetTy, Args, FMF, VF);
+  }
+
+  // Legalize the type.
+  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);
+
+  unsigned NElts = LT.second.isVector() ?
+    LT.second.getVectorNumElements() : 1;
+
+  MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;
+
+  if (SLT == MVT::f64)
+    return LT.first * NElts * get64BitInstrCost();
+
+  if (ST->has16BitInsts() && SLT == MVT::f16)
+    NElts = (NElts + 1) / 2;
+
+  return LT.first * NElts * (ST->hasFastFMAF32() ? getHalfRateInstrCost()
+                                                 : getQuarterRateInstrCost());
+}
+
+int GCNTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
+                                      ArrayRef<Value *> Args, FastMathFlags FMF,
+                                      unsigned VF) {
+  return getIntrinsicInstrCost<Value>(ID, RetTy, Args, FMF, VF);
+}
+
+int GCNTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
+                                      ArrayRef<Type *> Tys, FastMathFlags FMF,
+                                      unsigned ScalarizationCostPassed) {
+  return getIntrinsicInstrCost<Type>(ID, RetTy, Tys, FMF,
+                                     ScalarizationCostPassed);
+}
+
 unsigned GCNTTIImpl::getCFInstrCost(unsigned Opcode) {
   // XXX - For some reason this isn't called for switch.
   switch (Opcode) {
diff --git a/llvm/test/Analysis/CostModel/AMDGPU/fma.ll b/llvm/test/Analysis/CostModel/AMDGPU/fma.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/AMDGPU/fma.ll
@@ -0,0 +1,120 @@
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx900 -mattr=+half-rate-64-ops < %s | FileCheck -check-prefixes=FASTF64,FAST32,FASTF16,ALL %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=-half-rate-64-ops < %s | FileCheck -check-prefixes=SLOWF64,SLOW32,SLOWF16,ALL %s
+; RUN: opt -cost-model -cost-kind=code-size -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx900 -mattr=+half-rate-64-ops < %s | FileCheck -check-prefixes=FASTF64,FAST32,FASTF16,ALL %s
+; RUN: opt -cost-model -cost-kind=code-size -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=-half-rate-64-ops < %s | FileCheck -check-prefixes=SLOWF64,SLOW32,SLOWF16,ALL %s
+
+; ALL-LABEL: 'fma_f32'
+; SLOW32: estimated cost of 3 for {{.*}} call float @llvm.fma.f32
+; FAST32: estimated cost of 2 for {{.*}} call float @llvm.fma.f32
+define amdgpu_kernel void @fma_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr) #0 {
+  %vec = load float, float addrspace(1)* %vaddr
+  %fma = call float @llvm.fma.f32(float %vec, float %vec, float %vec) #1
+  store float %fma, float addrspace(1)* %out
+  ret void
+}
+
+; ALL-LABEL: 'fma_v2f32'
+; SLOW32: estimated cost of 6 for {{.*}} call <2 x float> @llvm.fma.v2f32
+; FAST32: estimated cost of 4 for {{.*}} call <2 x float> @llvm.fma.v2f32
+define amdgpu_kernel void @fma_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr) #0 {
+  %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
+  %fma = call <2 x float> @llvm.fma.v2f32(<2 x float> %vec, <2 x float> %vec, <2 x float> %vec) #1
+  store <2 x float> %fma, <2 x float> addrspace(1)* %out
+  ret void
+}
+
+; ALL-LABEL: 'fma_v3f32'
+; SLOW32: estimated cost of 9 for {{.*}} call <3 x float> @llvm.fma.v3f32
+; FAST32: estimated cost of 6 for {{.*}} call <3 x float> @llvm.fma.v3f32
+define amdgpu_kernel void @fma_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr) #0 {
+  %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
+  %fma = call <3 x float> @llvm.fma.v3f32(<3 x float> %vec, <3 x float> %vec, <3 x float> %vec) #1
+  store <3 x float> %fma, <3 x float> addrspace(1)* %out
+  ret void
+}
+
+; ALL-LABEL: 'fma_v5f32'
+; SLOW32: estimated cost of 15 for {{.*}} call <5 x float> @llvm.fma.v5f32
+; FAST32: estimated cost of 10 for {{.*}} call <5 x float> @llvm.fma.v5f32
+define amdgpu_kernel void @fma_v5f32(<5 x float> addrspace(1)* %out, <5 x float> addrspace(1)* %vaddr) #0 {
+  %vec = load <5 x float>, <5 x float> addrspace(1)* %vaddr
+  %fma = call <5 x float> @llvm.fma.v5f32(<5 x float> %vec, <5 x float> %vec, <5 x float> %vec) #1
+  store <5 x float> %fma, <5 x float> addrspace(1)* %out
+  ret void
+}
+
+; ALL-LABEL: 'fma_f64'
+; SLOWF64: estimated cost of 3 for {{.*}} call double @llvm.fma.f64
+; FASTF64: estimated cost of 2 for {{.*}} call double @llvm.fma.f64
+define amdgpu_kernel void @fma_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr) #0 {
+  %vec = load double, double addrspace(1)* %vaddr
+  %fma = call double @llvm.fma.f64(double %vec, double %vec, double %vec) #1
+  store double %fma, double addrspace(1)* %out
+  ret void
+}
+
+; ALL-LABEL: 'fma_v2f64'
+; SLOWF64: estimated cost of 6 for {{.*}} call <2 x double> @llvm.fma.v2f64
+; FASTF64: estimated cost of 4 for {{.*}} call <2 x double> @llvm.fma.v2f64
+define amdgpu_kernel void @fma_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr) #0 {
+  %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
+  %fma = call <2 x double> @llvm.fma.v2f64(<2 x double> %vec, <2 x double> %vec, <2 x double> %vec) #1
+  store <2 x double> %fma, <2 x double> addrspace(1)* %out
+  ret void
+}
+
+; ALL-LABEL: 'fma_v3f64'
+; SLOWF64: estimated cost of 9 for {{.*}} call <3 x double> @llvm.fma.v3f64
+; FASTF64: estimated cost of 6 for {{.*}} call <3 x double> @llvm.fma.v3f64
+define amdgpu_kernel void @fma_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr) #0 {
+  %vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr
+  %fma = call <3 x double> @llvm.fma.v3f64(<3 x double> %vec, <3 x double> %vec, <3 x double> %vec) #1
+  store <3 x double> %fma, <3 x double> addrspace(1)* %out
+  ret void
+}
+
+; ALL-LABEL: 'fma_f16'
+; SLOWF16: estimated cost of 3 for {{.*}} call half @llvm.fma.f16
+; FASTF16: estimated cost of 2 for {{.*}} call half @llvm.fma.f16
+define amdgpu_kernel void @fma_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr) #0 {
+  %vec = load half, half addrspace(1)* %vaddr
+  %fma = call half @llvm.fma.f16(half %vec, half %vec, half %vec) #1
+  store half %fma, half addrspace(1)* %out
+  ret void
+}
+
+; ALL-LABEL: 'fma_v2f16'
+; SLOWF16: estimated cost of 6 for {{.*}} call <2 x half> @llvm.fma.v2f16
+; FASTF16: estimated cost of 2 for {{.*}} call <2 x half> @llvm.fma.v2f16
+define amdgpu_kernel void @fma_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr) #0 {
+  %vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr
+  %fma = call <2 x half> @llvm.fma.v2f16(<2 x half> %vec, <2 x half> %vec, <2 x half> %vec) #1
+  store <2 x half> %fma, <2 x half> addrspace(1)* %out
+  ret void
+}
+
+; ALL-LABEL: 'fma_v3f16'
+; SLOWF16: estimated cost of 12 for {{.*}} call <3 x half> @llvm.fma.v3f16
+; FASTF16: estimated cost of 4 for {{.*}} call <3 x half> @llvm.fma.v3f16
+define amdgpu_kernel void @fma_v3f16(<3 x half> addrspace(1)* %out, <3 x half> addrspace(1)* %vaddr) #0 {
+  %vec = load <3 x half>, <3 x half> addrspace(1)* %vaddr
+  %fma = call <3 x half> @llvm.fma.v3f16(<3 x half> %vec, <3 x half> %vec, <3 x half> %vec) #1
+  store <3 x half> %fma, <3 x half> addrspace(1)* %out
+  ret void
+}
+
+declare float @llvm.fma.f32(float, float, float) #1
+declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>) #1
+declare <3 x float> @llvm.fma.v3f32(<3 x float>, <3 x float>, <3 x float>) #1
+declare <5 x float> @llvm.fma.v5f32(<5 x float>, <5 x float>, <5 x float>) #1
+
+declare double @llvm.fma.f64(double, double, double) #1
+declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>) #1
+declare <3 x double> @llvm.fma.v3f64(<3 x double>, <3 x double>, <3 x double>) #1
+
+declare half @llvm.fma.f16(half, half, half) #1
+declare <2 x half> @llvm.fma.v2f16(<2 x half>, <2 x half>, <2 x half>) #1
+declare <3 x half> @llvm.fma.v3f16(<3 x half>, <3 x half>, <3 x half>) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
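
Note for reviewers: the numbers checked in fma.ll fall directly out of the cost formula added to GCNTTIImpl::getIntrinsicInstrCost. An f64 fma is charged at the 64-bit instruction rate, an f32 fma at half or quarter rate depending on hasFastFMAF32(), and packed f16 halves the element count on subtargets with 16-bit instructions. The standalone C++ sketch below reproduces the checked values; it is not the LLVM code, and the rate constants (2 for half rate, 3 for quarter rate) plus the single flattened "legalized element count" parameter are simplifying assumptions that happen to hold for the two subtargets used in the RUN lines.

// Simplified mirror of the new fma cost arithmetic. All constants are
// assumptions chosen to match the targets exercised by the test; none of
// this calls real LLVM APIs.
#include <cstdio>

static int fmaCost(int LegalElts, bool IsF64, bool IsF16,
                   bool Has16BitInsts, bool FastFMA) {
  if (IsF64)
    return LegalElts * (FastFMA ? 2 : 3); // 64-bit rate: half vs. quarter
  if (Has16BitInsts && IsF16)
    LegalElts = (LegalElts + 1) / 2;      // packed f16 covers two lanes per op
  return LegalElts * (FastFMA ? 2 : 3);   // f32 rate: half vs. quarter
}

int main() {
  // gfx900 with +half-rate-64-ops (FAST* prefixes):
  printf("f32   -> %d\n", fmaCost(1, false, false, true, true));   // 2
  printf("v5f32 -> %d\n", fmaCost(5, false, false, true, true));   // 10
  printf("v2f16 -> %d\n", fmaCost(2, false, true,  true, true));   // 2
  printf("v3f16 -> %d\n", fmaCost(3, false, true,  true, true));   // 4
  // default subtarget with -half-rate-64-ops (SLOW* prefixes):
  printf("v2f64 -> %d\n", fmaCost(2, true,  false, false, false)); // 6
  printf("v3f32 -> %d\n", fmaCost(3, false, false, false, false)); // 9
  return 0;
}

Running the sketch prints the same values the FAST*/SLOW* check lines expect, which is a quick sanity check when adjusting the rate helpers or adding new vector widths to the test.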