Index: include/llvm/IR/IntrinsicsAMDGPU.td
===================================================================
--- include/llvm/IR/IntrinsicsAMDGPU.td
+++ include/llvm/IR/IntrinsicsAMDGPU.td
@@ -360,6 +360,12 @@
   [IntrNoMem, IntrSpeculatable]
 >;
 
+// v_mad_f32/v_mac_f32, selected regardless of denorm support.
+def int_amdgcn_fmad_ftz :
+  Intrinsic<[llvm_float_ty],
+    [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+    [IntrNoMem, IntrSpeculatable]
+>;
 
 // Fields should mirror atomicrmw
 class AMDGPUAtomicIncIntrin : Intrinsic<[llvm_anyint_ty],
Index: lib/Target/AMDGPU/SIISelLowering.cpp
===================================================================
--- lib/Target/AMDGPU/SIISelLowering.cpp
+++ lib/Target/AMDGPU/SIISelLowering.cpp
@@ -4922,6 +4922,9 @@
     return SDValue(DAG.getMachineNode(AMDGPU::WWM, DL, Src.getValueType(),
                                       Src), 0);
   }
+  case Intrinsic::amdgcn_fmad_ftz:
+    return DAG.getNode(AMDGPUISD::FMAD_FTZ, DL, VT, Op.getOperand(1),
+                       Op.getOperand(2), Op.getOperand(3));
   default:
     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
             AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
Index: test/CodeGen/AMDGPU/llvm.amdgcn.fmad.ftz.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/llvm.amdgcn.fmad.ftz.ll
@@ -0,0 +1,63 @@
+; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=+fp32-denormals -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=+fp32-denormals -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN %s
+
+declare float @llvm.amdgcn.fmad.ftz(float %a, float %b, float %c)
+
+; GCN-LABEL: {{^}}mad_f32:
+; GCN: v_ma{{[dc]}}_f32
+define amdgpu_kernel void @mad_f32(
+    float addrspace(1)* %r,
+    float addrspace(1)* %a,
+    float addrspace(1)* %b,
+    float addrspace(1)* %c) {
+  %a.val = load float, float addrspace(1)* %a
+  %b.val = load float, float addrspace(1)* %b
+  %c.val = load float, float addrspace(1)* %c
+  %r.val = call float @llvm.amdgcn.fmad.ftz(float %a.val, float %b.val, float %c.val)
+  store float %r.val, float addrspace(1)* %r
+  ret void
+}
+
+; GCN-LABEL: {{^}}mad_f32_imm_a:
+; GCN: v_mov_b32_e32 [[KA:v[0-9]+]], 0x41000000
+; GCN: v_ma{{[dc]}}_f32 {{v[0-9]+}}, [[KA]],
+define amdgpu_kernel void @mad_f32_imm_a(
+    float addrspace(1)* %r,
+    float addrspace(1)* %b,
+    float addrspace(1)* %c) {
+  %b.val = load float, float addrspace(1)* %b
+  %c.val = load float, float addrspace(1)* %c
+  %r.val = call float @llvm.amdgcn.fmad.ftz(float 8.0, float %b.val, float %c.val)
+  store float %r.val, float addrspace(1)* %r
+  ret void
+}
+
+; GCN-LABEL: {{^}}mad_f32_imm_b:
+; GCN: v_mov_b32_e32 [[KB:v[0-9]+]], 0x41000000
+; GCN: v_ma{{[dc]}}_f32 {{v[0-9]+}}, {{[vs][0-9]+}}, [[KB]],
+define amdgpu_kernel void @mad_f32_imm_b(
+    float addrspace(1)* %r,
+    float addrspace(1)* %a,
+    float addrspace(1)* %c) {
+  %a.val = load float, float addrspace(1)* %a
+  %c.val = load float, float addrspace(1)* %c
+  %r.val = call float @llvm.amdgcn.fmad.ftz(float %a.val, float 8.0, float %c.val)
+  store float %r.val, float addrspace(1)* %r
+  ret void
+}
+
+; GCN-LABEL: {{^}}mad_f32_imm_c:
+; GCN: v_mov_b32_e32 [[KC:v[0-9]+]], 0x41000000
+; GCN: v_ma{{[dc]}}_f32 {{v[0-9]+}}, {{[vs][0-9]+}}, {{v[0-9]+}}, [[KC]]{{$}}
+define amdgpu_kernel void @mad_f32_imm_c(
+    float addrspace(1)* %r,
+    float addrspace(1)* %a,
+    float addrspace(1)* %b) {
+  %a.val = load float, float addrspace(1)* %a
+  %b.val = load float, float addrspace(1)* %b
+  %r.val = call float @llvm.amdgcn.fmad.ftz(float %a.val, float %b.val, float 8.0)
+  store float %r.val, float addrspace(1)* %r
+  ret void
+}
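
Note (not part of the patch): a minimal sketch of how a frontend could emit a call to the new intrinsic through the C++ IRBuilder API, assuming the TableGen change above has regenerated Intrinsic::amdgcn_fmad_ftz. The helper name emitFMadFtz and all variable names are hypothetical.

// Sketch only: build
//   %r = call float @llvm.amdgcn.fmad.ftz(float %a, float %b, float %c)
// at the builder's current insertion point.
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"

using namespace llvm;

static Value *emitFMadFtz(IRBuilder<> &Builder, Module &M,
                          Value *A, Value *B, Value *C) {
  // The intrinsic is not overloaded (all operands are f32), so no type list
  // is needed when looking up its declaration.
  Function *Fmad = Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_fmad_ftz);
  return Builder.CreateCall(Fmad, {A, B, C});
}

As the RUN lines in the new test check, the same v_mad_f32/v_mac_f32 selection is expected with and without +fp32-denormals, which is the point of lowering the intrinsic to AMDGPUISD::FMAD_FTZ rather than to a plain fmul/fadd or fma.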