Index: lib/Target/AMDGPU/SIDefines.h =================================================================== --- lib/Target/AMDGPU/SIDefines.h +++ lib/Target/AMDGPU/SIDefines.h @@ -67,11 +67,22 @@ SCALAR_STORE = UINT64_C(1) << 39, FIXED_SIZE = UINT64_C(1) << 40, VOPAsmPrefer32Bit = UINT64_C(1) << 41, - HasFPClamp = UINT64_C(1) << 42, - VOP3_OPSEL = UINT64_C(1) << 43, - maybeAtomic = UINT64_C(1) << 44, - F16_ZFILL = UINT64_C(1) << 45, - IntClamp = UINT64_C(1) << 46 + VOP3_OPSEL = UINT64_C(1) << 42, + maybeAtomic = UINT64_C(1) << 43, + F16_ZFILL = UINT64_C(1) << 44, + + // Is a clamp on FP type. + FPClamp = UINT64_C(1) << 45, + + // Is an integer clamp + IntClamp = UINT64_C(1) << 46, + + // Clamps lo component of register. + ClampLo = UINT64_C(1) << 47, + + // Clamps hi component of register. + // ClampLo and ClampHi set for packed clamp. + ClampHi = UINT64_C(1) << 48 }; // v_cmp_class_* etc. use a 10-bit mask for what operation is checked. Index: lib/Target/AMDGPU/SIFoldOperands.cpp =================================================================== --- lib/Target/AMDGPU/SIFoldOperands.cpp +++ lib/Target/AMDGPU/SIFoldOperands.cpp @@ -728,7 +728,8 @@ switch (Op) { case AMDGPU::V_MAX_F32_e64: case AMDGPU::V_MAX_F16_e64: - case AMDGPU::V_MAX_F64: { + case AMDGPU::V_MAX_F64: + case AMDGPU::V_PK_MAX_F16: { if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm()) return nullptr; @@ -741,9 +742,18 @@ return nullptr; // Can't fold up if we have modifiers. 
- if (TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) || - TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) || - TII->hasModifiersSet(MI, AMDGPU::OpName::omod)) + if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod)) + return nullptr; + + unsigned Src0Mods + = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm(); + unsigned Src1Mods + = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm(); + + // Having a 0 op_sel_hi would require swizzling the output in the source + // instruction, which we can't do. + unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1 : 0; + if (Src0Mods != UnsetMods && Src1Mods != UnsetMods) return nullptr; return Src0; } @@ -771,8 +781,11 @@ return false; MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg()); - if (!TII->hasFPClamp(*Def)) + + // The type of clamp must be compatible. + if (TII->getClampMask(*Def) != TII->getClampMask(MI)) return false; + MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp); if (!DefClamp) return false; Index: lib/Target/AMDGPU/SIInstrFormats.td =================================================================== --- lib/Target/AMDGPU/SIInstrFormats.td +++ lib/Target/AMDGPU/SIInstrFormats.td @@ -79,10 +79,6 @@ // is unable to infer the encoding from the operands. field bit VOPAsmPrefer32Bit = 0; - // This bit indicates that this has a floating point result type, so - // the clamp modifier has floating point semantics. - field bit FPClamp = 0; - // This bit indicates that this is a VOP3 opcode which supports op_sel // modifier (gfx9 only). field bit VOP3_OPSEL = 0; @@ -94,10 +90,22 @@ // unused bits in dst. Note that new GFX9 opcodes preserve unused bits. field bit F16_ZFILL = 0; + // This bit indicates that this has a floating point result type, so + // the clamp modifier has floating point semantics. 
+ field bit FPClamp = 0; + // This bit indicates that instruction may support integer clamping // which depends on GPU features. field bit IntClamp = 0; + // This field indicates that the clamp applies to the low component + // of a packed output register. + field bit ClampLo = 0; + + // This field indicates that the clamp applies to the high component + // of a packed output register. + field bit ClampHi = 0; + // These need to be kept in sync with the enum in SIInstrFlags. let TSFlags{0} = SALU; let TSFlags{1} = VALU; @@ -141,12 +149,15 @@ let TSFlags{39} = ScalarStore; let TSFlags{40} = FixedSize; let TSFlags{41} = VOPAsmPrefer32Bit; - let TSFlags{42} = FPClamp; - let TSFlags{43} = VOP3_OPSEL; + let TSFlags{42} = VOP3_OPSEL; + + let TSFlags{43} = maybeAtomic; + let TSFlags{44} = F16_ZFILL; - let TSFlags{44} = maybeAtomic; - let TSFlags{45} = F16_ZFILL; + let TSFlags{45} = FPClamp; let TSFlags{46} = IntClamp; + let TSFlags{47} = ClampLo; + let TSFlags{48} = ClampHi; let SchedRW = [Write32Bit]; Index: lib/Target/AMDGPU/SIInstrInfo.h =================================================================== --- lib/Target/AMDGPU/SIInstrInfo.h +++ lib/Target/AMDGPU/SIInstrInfo.h @@ -548,11 +548,23 @@ } static bool hasFPClamp(const MachineInstr &MI) { - return MI.getDesc().TSFlags & SIInstrFlags::HasFPClamp; + return MI.getDesc().TSFlags & SIInstrFlags::FPClamp; } bool hasFPClamp(uint16_t Opcode) const { - return get(Opcode).TSFlags & SIInstrFlags::HasFPClamp; + return get(Opcode).TSFlags & SIInstrFlags::FPClamp; + } + + static bool hasIntClamp(const MachineInstr &MI) { + return MI.getDesc().TSFlags & SIInstrFlags::IntClamp; + } + + uint64_t getClampMask(const MachineInstr &MI) const { + const uint64_t ClampFlags = SIInstrFlags::FPClamp | + SIInstrFlags::IntClamp | + SIInstrFlags::ClampLo | + SIInstrFlags::ClampHi; + return MI.getDesc().TSFlags & ClampFlags; } bool isVGPRCopy(const MachineInstr &MI) const { Index: lib/Target/AMDGPU/SIInstrInfo.td 
=================================================================== --- lib/Target/AMDGPU/SIInstrInfo.td +++ lib/Target/AMDGPU/SIInstrInfo.td @@ -1529,6 +1529,8 @@ field bit HasSDWAClamp = EmitDst; field bit HasFPClamp = BitAnd<isFloatType<DstVT>.ret, HasClamp>.ret; field bit HasIntClamp = !if(isFloatType<DstVT>.ret, 0, HasClamp); + field bit HasClampLo = HasClamp; + field bit HasClampHi = BitAnd<isPackedType<DstVT>.ret, HasClamp>.ret; field bit HasHigh = 0; field bit IsPacked = isPackedType<DstVT>.ret; Index: lib/Target/AMDGPU/VOPInstructions.td =================================================================== --- lib/Target/AMDGPU/VOPInstructions.td +++ lib/Target/AMDGPU/VOPInstructions.td @@ -106,6 +106,10 @@ let VOP3 = 1; let VALU = 1; let FPClamp = P.HasFPClamp; + let IntClamp = P.HasIntClamp; + let ClampLo = P.HasClampLo; + let ClampHi = P.HasClampHi; + let Uses = [EXEC]; let AsmVariantName = AMDGPUAsmVariants.VOP3; Index: test/CodeGen/AMDGPU/clamp-modifier.ll =================================================================== --- test/CodeGen/AMDGPU/clamp-modifier.ll +++ test/CodeGen/AMDGPU/clamp-modifier.ll @@ -1,8 +1,9 @@ -; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s -; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s +; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s +; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX89 %s +; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX89,GFX9 %s ; GCN-LABEL: {{^}}v_clamp_add_src_f32: -; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat|global}}_load_dword [[A:v[0-9]+]] ; GCN-NOT: [[A]] ; GCN: v_add_f32_e64 v{{[0-9]+}}, [[A]], 1.0 clamp{{$}} define amdgpu_kernel void @v_clamp_add_src_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
@@ -18,7 +19,7 @@ } ; GCN-LABEL: {{^}}v_clamp_multi_use_src_f32: -; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat|global}}_load_dword [[A:v[0-9]+]] ; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, [[A]]{{$}} ; GCN: v_max_f32_e64 v{{[0-9]+}}, [[ADD]], [[ADD]] clamp{{$}} define amdgpu_kernel void @v_clamp_multi_use_src_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 { @@ -35,7 +36,7 @@ } ; GCN-LABEL: {{^}}v_clamp_dbg_use_src_f32: -; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat|global}}_load_dword [[A:v[0-9]+]] ; GCN-NOT: [[A]] ; GCN: v_add_f32_e64 v{{[0-9]+}}, [[A]], 1.0 clamp{{$}} define amdgpu_kernel void @v_clamp_dbg_use_src_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 { @@ -52,7 +53,7 @@ } ; GCN-LABEL: {{^}}v_clamp_add_neg_src_f32: -; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat|global}}_load_dword [[A:v[0-9]+]] ; GCN: v_floor_f32_e32 [[FLOOR:v[0-9]+]], [[A]] ; GCN: v_max_f32_e64 v{{[0-9]+}}, -[[FLOOR]], -[[FLOOR]] clamp{{$}} define amdgpu_kernel void @v_clamp_add_neg_src_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 { @@ -69,7 +70,7 @@ } ; GCN-LABEL: {{^}}v_non_clamp_max_f32: -; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat|global}}_load_dword [[A:v[0-9]+]] ; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, [[A]]{{$}} ; GCN: v_max_f32_e32 v{{[0-9]+}}, 0, [[ADD]]{{$}} define amdgpu_kernel void @v_non_clamp_max_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 { @@ -84,7 +85,7 @@ } ; GCN-LABEL: {{^}}v_clamp_add_src_f32_denormals: -; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat|global}}_load_dword [[A:v[0-9]+]] ; GCN: v_add_f32_e64 [[ADD:v[0-9]+]], [[A]], 1.0 clamp{{$}} define amdgpu_kernel void @v_clamp_add_src_f32_denormals(float addrspace(1)* %out, float addrspace(1)* %aptr) #2 { %tid = call i32 @llvm.amdgcn.workitem.id.x() @@ -99,8 +100,8 @@ } ; GCN-LABEL: {{^}}v_clamp_add_src_f16_denorm: -; GCN: 
{{buffer|flat}}_load_ushort [[A:v[0-9]+]] -; VI: v_add_f16_e64 [[ADD:v[0-9]+]], [[A]], 1.0 clamp{{$}} +; GCN: {{buffer|flat|global}}_load_ushort [[A:v[0-9]+]] +; GFX89: v_add_f16_e64 [[ADD:v[0-9]+]], [[A]], 1.0 clamp{{$}} ; SI: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], [[A]] ; SI: v_add_f32_e64 [[ADD:v[0-9]+]], [[CVT]], 1.0 clamp{{$}} @@ -118,9 +119,9 @@ } ; GCN-LABEL: {{^}}v_clamp_add_src_f16_no_denormals: -; GCN: {{buffer|flat}}_load_ushort [[A:v[0-9]+]] -; VI-NOT: [[A]] -; VI: v_add_f16_e64 v{{[0-9]+}}, [[A]], 1.0 clamp{{$}} +; GCN: {{buffer|flat|global}}_load_ushort [[A:v[0-9]+]] +; GFX89-NOT: [[A]] +; GFX89: v_add_f16_e64 v{{[0-9]+}}, [[A]], 1.0 clamp{{$}} ; SI: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], [[A]] ; SI: v_add_f32_e64 [[ADD:v[0-9]+]], [[CVT]], 1.0 clamp{{$}} @@ -138,7 +139,7 @@ } ; GCN-LABEL: {{^}}v_clamp_add_src_v2f32: -; GCN: {{buffer|flat}}_load_dwordx2 v{{\[}}[[A:[0-9]+]]:[[B:[0-9]+]]{{\]}} +; GCN: {{buffer|flat|global}}_load_dwordx2 v{{\[}}[[A:[0-9]+]]:[[B:[0-9]+]]{{\]}} ; GCN-DAG: v_add_f32_e64 v{{[0-9]+}}, v[[A]], 1.0 clamp{{$}} ; GCN-DAG: v_add_f32_e64 v{{[0-9]+}}, v[[B]], 1.0 clamp{{$}} define amdgpu_kernel void @v_clamp_add_src_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %aptr) #0 { @@ -154,7 +155,7 @@ } ; GCN-LABEL: {{^}}v_clamp_add_src_f64: -; GCN: {{buffer|flat}}_load_dwordx2 [[A:v\[[0-9]+:[0-9]+\]]] +; GCN: {{buffer|flat|global}}_load_dwordx2 [[A:v\[[0-9]+:[0-9]+\]]] ; GCN: v_add_f64 v{{\[[0-9]+:[0-9]+\]}}, [[A]], 1.0 clamp{{$}} define amdgpu_kernel void @v_clamp_add_src_f64(double addrspace(1)* %out, double addrspace(1)* %aptr) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() @@ -185,6 +186,152 @@ ret void } + +; GCN-LABEL: {{^}}v_clamp_add_src_v2f16_denorm: +; GCN-DAG: {{buffer|flat|global}}_load_dword [[A:v[0-9]+]] +; GFX9-DAG: s_mov_b32 [[ONE:s[0-9]+]], 0x3c003c00 +; GFX9: v_pk_add_f16 [[ADD:v[0-9]+]], [[A]], [[ONE]] clamp{{$}} +define amdgpu_kernel void @v_clamp_add_src_v2f16_denorm(<2 x half> addrspace(1)* %out, <2 x half> 
addrspace(1)* %aptr) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %gep0 = getelementptr <2 x half>, <2 x half> addrspace(1)* %aptr, i32 %tid + %out.gep = getelementptr <2 x half>, <2 x half> addrspace(1)* %out, i32 %tid + %a = load <2 x half>, <2 x half> addrspace(1)* %gep0 + %add = fadd <2 x half> %a, <half 1.0, half 1.0> + %max = call <2 x half> @llvm.maxnum.v2f16(<2 x half> %add, <2 x half> zeroinitializer) + %clamp = call <2 x half> @llvm.minnum.v2f16(<2 x half> %max, <2 x half> <half 1.0, half 1.0>) + store <2 x half> %clamp, <2 x half> addrspace(1)* %out.gep + ret void +} + +; GCN-LABEL: {{^}}v_clamp_add_src_v2f16_no_denormals: +; GCN-DAG: {{buffer|flat|global}}_load_dword [[A:v[0-9]+]] +; GFX9-DAG: s_mov_b32 [[ONE:s[0-9]+]], 0x3c003c00 +; GFX9: v_pk_add_f16 [[ADD:v[0-9]+]], [[A]], [[ONE]] clamp{{$}} +define amdgpu_kernel void @v_clamp_add_src_v2f16_no_denormals(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %aptr) #3 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %gep0 = getelementptr <2 x half>, <2 x half> addrspace(1)* %aptr, i32 %tid + %out.gep = getelementptr <2 x half>, <2 x half> addrspace(1)* %out, i32 %tid + %a = load <2 x half>, <2 x half> addrspace(1)* %gep0 + %add = fadd <2 x half> %a, <half 1.0, half 1.0> + %max = call <2 x half> @llvm.maxnum.v2f16(<2 x half> %add, <2 x half> zeroinitializer) + %clamp = call <2 x half> @llvm.minnum.v2f16(<2 x half> %max, <2 x half> <half 1.0, half 1.0>) + store <2 x half> %clamp, <2 x half> addrspace(1)* %out.gep + ret void +} + +; GCN-LABEL: {{^}}v_clamp_add_src_v2f16_denorm_neg: +; GCN-DAG: {{buffer|flat|global}}_load_dword [[A:v[0-9]+]] +; GFX9-DAG: s_mov_b32 [[ONE:s[0-9]+]], 0x3c003c00 +; GFX9: v_pk_add_f16 [[ADD:v[0-9]+]], [[A]], [[ONE]]{{$}} +; GFX9: v_pk_max_f16 [[MAX:v[0-9]+]], [[ADD]], [[ADD]] neg_lo:[1,1] neg_hi:[1,1] clamp{{$}} +define amdgpu_kernel void @v_clamp_add_src_v2f16_denorm_neg(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %aptr) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %gep0 = getelementptr <2 x half>, <2 x half> addrspace(1)*
%aptr, i32 %tid + %out.gep = getelementptr <2 x half>, <2 x half> addrspace(1)* %out, i32 %tid + %a = load <2 x half>, <2 x half> addrspace(1)* %gep0 + %add = fadd <2 x half> %a, <half 1.0, half 1.0> + %neg.add = fsub <2 x half> <half -0.0, half -0.0>, %add + %max = call <2 x half> @llvm.maxnum.v2f16(<2 x half> %neg.add, <2 x half> zeroinitializer) + %clamp = call <2 x half> @llvm.minnum.v2f16(<2 x half> %max, <2 x half> <half 1.0, half 1.0>) + store <2 x half> %clamp, <2 x half> addrspace(1)* %out.gep + ret void +} + +; GCN-LABEL: {{^}}v_clamp_add_src_v2f16_denorm_neg_lo: +; GCN-DAG: {{buffer|flat|global}}_load_dword [[A:v[0-9]+]] +; GFX9-DAG: s_mov_b32 [[ONE:s[0-9]+]], 0x3c003c00 +; GFX9: v_pk_add_f16 [[ADD:v[0-9]+]], [[A]], [[ONE]]{{$}} +; GFX9: v_pk_max_f16 [[MAX:v[0-9]+]], [[ADD]], [[ADD]] neg_lo:[1,1] clamp{{$}} +define amdgpu_kernel void @v_clamp_add_src_v2f16_denorm_neg_lo(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %aptr) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %gep0 = getelementptr <2 x half>, <2 x half> addrspace(1)* %aptr, i32 %tid + %out.gep = getelementptr <2 x half>, <2 x half> addrspace(1)* %out, i32 %tid + %a = load <2 x half>, <2 x half> addrspace(1)* %gep0 + %add = fadd <2 x half> %a, <half 1.0, half 1.0> + %lo = extractelement <2 x half> %add, i32 0 + %neg.lo = fsub half -0.0, %lo + %neg.lo.add = insertelement <2 x half> %add, half %neg.lo, i32 0 + %max = call <2 x half> @llvm.maxnum.v2f16(<2 x half> %neg.lo.add, <2 x half> zeroinitializer) + %clamp = call <2 x half> @llvm.minnum.v2f16(<2 x half> %max, <2 x half> <half 1.0, half 1.0>) + store <2 x half> %clamp, <2 x half> addrspace(1)* %out.gep + ret void +} + +; GCN-LABEL: {{^}}v_clamp_add_src_v2f16_denorm_neg_hi: +; GCN-DAG: {{buffer|flat|global}}_load_dword [[A:v[0-9]+]] +; GFX9-DAG: s_mov_b32 [[ONE:s[0-9]+]], 0x3c003c00 +; GFX9: v_pk_add_f16 [[ADD:v[0-9]+]], [[A]], [[ONE]]{{$}} +; GFX9: v_pk_max_f16 [[MAX:v[0-9]+]], [[ADD]], [[ADD]] neg_hi:[1,1] clamp{{$}} +define amdgpu_kernel void @v_clamp_add_src_v2f16_denorm_neg_hi(<2 x half> addrspace(1)* %out, <2 x half>
addrspace(1)* %aptr) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %gep0 = getelementptr <2 x half>, <2 x half> addrspace(1)* %aptr, i32 %tid + %out.gep = getelementptr <2 x half>, <2 x half> addrspace(1)* %out, i32 %tid + %a = load <2 x half>, <2 x half> addrspace(1)* %gep0 + %add = fadd <2 x half> %a, <half 1.0, half 1.0> + %hi = extractelement <2 x half> %add, i32 1 + %neg.hi = fsub half -0.0, %hi + %neg.hi.add = insertelement <2 x half> %add, half %neg.hi, i32 1 + %max = call <2 x half> @llvm.maxnum.v2f16(<2 x half> %neg.hi.add, <2 x half> zeroinitializer) + %clamp = call <2 x half> @llvm.minnum.v2f16(<2 x half> %max, <2 x half> <half 1.0, half 1.0>) + store <2 x half> %clamp, <2 x half> addrspace(1)* %out.gep + ret void +} + +; GCN-LABEL: {{^}}v_clamp_add_src_v2f16_denorm_shuf: +; GCN-DAG: {{buffer|flat|global}}_load_dword [[A:v[0-9]+]] +; GFX9-DAG: s_mov_b32 [[ONE:s[0-9]+]], 0x3c003c00 +; GFX9: v_pk_add_f16 [[ADD:v[0-9]+]], [[A]], [[ONE]]{{$}} +; GFX9: v_pk_max_f16 [[MAX:v[0-9]+]], [[ADD]], [[ADD]] op_sel:[1,1] op_sel_hi:[0,0] clamp{{$}} +define amdgpu_kernel void @v_clamp_add_src_v2f16_denorm_shuf(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %aptr) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %gep0 = getelementptr <2 x half>, <2 x half> addrspace(1)* %aptr, i32 %tid + %out.gep = getelementptr <2 x half>, <2 x half> addrspace(1)* %out, i32 %tid + %a = load <2 x half>, <2 x half> addrspace(1)* %gep0 + %add = fadd <2 x half> %a, <half 1.0, half 1.0> + %shuf = shufflevector <2 x half> %add, <2 x half> undef, <2 x i32> <i32 1, i32 0> + + %max = call <2 x half> @llvm.maxnum.v2f16(<2 x half> %shuf, <2 x half> zeroinitializer) + %clamp = call <2 x half> @llvm.minnum.v2f16(<2 x half> %max, <2 x half> <half 1.0, half 1.0>) + store <2 x half> %clamp, <2 x half> addrspace(1)* %out.gep + ret void +} + +; GCN-LABEL: {{^}}v_no_clamp_add_src_v2f16_not_packed_src: +; GCN-DAG: {{buffer|flat|global}}_load_dword [[A:v[0-9]+]] +; GFX9: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, [[A]]{{$}} +; GFX9: v_pk_max_f16 [[CLAMP:v[0-9]+]], [[ADD]], [[ADD]]
clamp{{$}} +define amdgpu_kernel void @v_no_clamp_add_src_v2f16_not_packed_src(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %aptr) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %gep0 = getelementptr <2 x half>, <2 x half> addrspace(1)* %aptr, i32 %tid + %out.gep = getelementptr <2 x half>, <2 x half> addrspace(1)* %out, i32 %tid + %a = load <2 x half>, <2 x half> addrspace(1)* %gep0 + %bc = bitcast <2 x half> %a to float + %f32.op = fadd float %bc, 1.0 + %f32.op.cast = bitcast float %f32.op to <2 x half> + %max = call <2 x half> @llvm.maxnum.v2f16(<2 x half> %f32.op.cast, <2 x half> zeroinitializer) + %clamp = call <2 x half> @llvm.minnum.v2f16(<2 x half> %max, <2 x half> <half 1.0, half 1.0>) + store <2 x half> %clamp, <2 x half> addrspace(1)* %out.gep + ret void +} + +; GCN-LABEL: {{^}}v_no_clamp_add_packed_src_f32: +; GCN-DAG: {{buffer|flat|global}}_load_dword [[A:v[0-9]+]] +; GFX9-DAG: s_mov_b32 [[ONE:s[0-9]+]], 0x3c003c00 +; GFX9: v_pk_add_f16 [[ADD:v[0-9]+]], [[A]], [[ONE]]{{$}} +; GFX9: v_max_f32_e64 [[CLAMP:v[0-9]+]], [[ADD]], [[ADD]] clamp{{$}} +define amdgpu_kernel void @v_no_clamp_add_packed_src_f32(float addrspace(1)* %out, <2 x half> addrspace(1)* %aptr) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %gep0 = getelementptr <2 x half>, <2 x half> addrspace(1)* %aptr, i32 %tid + %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid + %a = load <2 x half>, <2 x half> addrspace(1)* %gep0 + %add = fadd <2 x half> %a, <half 1.0, half 1.0> + %bc.add = bitcast <2 x half> %add to float + %max = call float @llvm.maxnum.f32(float %bc.add, float 0.0) + %clamp = call float @llvm.minnum.f32(float %max, float 1.0) + store float %clamp, float addrspace(1)* %out.gep + ret void +} + declare i32 @llvm.amdgcn.workitem.id.x() #1 declare float @llvm.fabs.f32(float) #1 declare float @llvm.floor.f32(float) #1 @@ -197,8 +344,12 @@ declare half @llvm.fabs.f16(half) #1 declare half @llvm.minnum.f16(half, half) #1 declare half @llvm.maxnum.f16(half, half) #1 +declare <2 x half>
@llvm.minnum.v2f16(<2 x half>, <2 x half>) #1 +declare <2 x half> @llvm.maxnum.v2f16(<2 x half>, <2 x half>) #1 declare <2 x float> @llvm.minnum.v2f32(<2 x float>, <2 x float>) #1 declare <2 x float> @llvm.maxnum.v2f32(<2 x float>, <2 x float>) #1 + + declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #1 attributes #0 = { nounwind }