Index: lib/Target/AMDGPU/SIISelLowering.cpp
===================================================================
--- lib/Target/AMDGPU/SIISelLowering.cpp
+++ lib/Target/AMDGPU/SIISelLowering.cpp
@@ -207,6 +207,8 @@
   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
+  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
+  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f16, Custom);
   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2i16, Custom);
   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom);
 
@@ -3699,6 +3701,69 @@
   return DAG.getMergeValues({ Adjusted, Load.getValue(1) }, DL);
 }
 
+static SDValue lowerICMPIntrinsic(const SITargetLowering &TLI,
+                                  SDNode *N, SelectionDAG &DAG) {
+  EVT VT = N->getValueType(0);
+  const auto *CD = dyn_cast<ConstantSDNode>(N->getOperand(3));
+  if (!CD)
+    return DAG.getUNDEF(VT);
+
+  int CondCode = CD->getSExtValue();
+  if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE ||
+      CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE)
+    return DAG.getUNDEF(VT);
+
+  ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);
+
+
+  SDValue LHS = N->getOperand(1);
+  SDValue RHS = N->getOperand(2);
+
+  SDLoc DL(N);
+
+  EVT CmpVT = LHS.getValueType();
+  if (CmpVT == MVT::i16 && !TLI.isTypeLegal(MVT::i16)) {
+    unsigned PromoteOp = ICmpInst::isSigned(IcInput) ?
+      ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
+    LHS = DAG.getNode(PromoteOp, DL, MVT::i32, LHS);
+    RHS = DAG.getNode(PromoteOp, DL, MVT::i32, RHS);
+  }
+
+  ISD::CondCode CCOpcode = getICmpCondCode(IcInput);
+
+  return DAG.getNode(AMDGPUISD::SETCC, DL, VT, LHS, RHS,
+                     DAG.getCondCode(CCOpcode));
+}
+
+static SDValue lowerFCMPIntrinsic(const SITargetLowering &TLI,
+                                  SDNode *N, SelectionDAG &DAG) {
+  EVT VT = N->getValueType(0);
+  const auto *CD = dyn_cast<ConstantSDNode>(N->getOperand(3));
+  if (!CD)
+    return DAG.getUNDEF(VT);
+
+  int CondCode = CD->getSExtValue();
+  if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE ||
+      CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE) {
+    return DAG.getUNDEF(VT);
+  }
+
+  SDValue Src0 = N->getOperand(1);
+  SDValue Src1 = N->getOperand(2);
+  EVT CmpVT = Src0.getValueType();
+  SDLoc SL(N);
+
+  if (CmpVT == MVT::f16 && !TLI.isTypeLegal(CmpVT)) {
+    Src0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
+    Src1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
+  }
+
+  FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode);
+  ISD::CondCode CCOpcode = getFCmpCondCode(IcInput);
+  return DAG.getNode(AMDGPUISD::SETCC, SL, VT, Src0,
+                     Src1, DAG.getCondCode(CCOpcode));
+}
+
 void SITargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) const {
@@ -4999,34 +5064,10 @@
                        Denominator, Numerator);
   }
   case Intrinsic::amdgcn_icmp: {
-    const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3));
-    if (!CD)
-      return DAG.getUNDEF(VT);
-
-    int CondCode = CD->getSExtValue();
-    if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE ||
-        CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE)
-      return DAG.getUNDEF(VT);
-
-    ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);
-    ISD::CondCode CCOpcode = getICmpCondCode(IcInput);
-    return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1),
-                       Op.getOperand(2), DAG.getCondCode(CCOpcode));
+    return lowerICMPIntrinsic(*this, Op.getNode(), DAG);
  }
   case Intrinsic::amdgcn_fcmp: {
-    const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3));
-    if (!CD)
-      return DAG.getUNDEF(VT);
-
-    int CondCode = CD->getSExtValue();
-    if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE ||
-        CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE)
-      return DAG.getUNDEF(VT);
-
-    FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode);
-    ISD::CondCode CCOpcode = getFCmpCondCode(IcInput);
-    return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1),
-                       Op.getOperand(2), DAG.getCondCode(CCOpcode));
+    return lowerFCMPIntrinsic(*this, Op.getNode(), DAG);
   }
   case Intrinsic::amdgcn_fmed3:
     return DAG.getNode(AMDGPUISD::FMED3, DL, VT,
Index: lib/Target/AMDGPU/VOPCInstructions.td
===================================================================
--- lib/Target/AMDGPU/VOPCInstructions.td
+++ lib/Target/AMDGPU/VOPCInstructions.td
@@ -635,6 +635,17 @@
 def : ICMP_Pattern <COND_SLT, V_CMP_LT_I64_e64, i64>;
 def : ICMP_Pattern <COND_SLE, V_CMP_LE_I64_e64, i64>;
 
+def : ICMP_Pattern <COND_EQ, V_CMP_EQ_U16_e64, i16>;
+def : ICMP_Pattern <COND_NE, V_CMP_NE_U16_e64, i16>;
+def : ICMP_Pattern <COND_UGT, V_CMP_GT_U16_e64, i16>;
+def : ICMP_Pattern <COND_UGE, V_CMP_GE_U16_e64, i16>;
+def : ICMP_Pattern <COND_ULT, V_CMP_LT_U16_e64, i16>;
+def : ICMP_Pattern <COND_ULE, V_CMP_LE_U16_e64, i16>;
+def : ICMP_Pattern <COND_SGT, V_CMP_GT_I16_e64, i16>;
+def : ICMP_Pattern <COND_SGE, V_CMP_GE_I16_e64, i16>;
+def : ICMP_Pattern <COND_SLT, V_CMP_LT_I16_e64, i16>;
+def : ICMP_Pattern <COND_SLE, V_CMP_LE_I16_e64, i16>;
+
 class FCMP_Pattern <PatLeaf cond, Instruction inst, ValueType vt> : GCNPat <
   (i64 (AMDGPUsetcc (vt (VOP3Mods vt:$src0, i32:$src0_modifiers)),
                     (vt (VOP3Mods vt:$src1, i32:$src1_modifiers)), cond)),
@@ -656,6 +667,14 @@
 def : FCMP_Pattern <COND_OLT, V_CMP_LT_F32_e64, f32>;
 def : FCMP_Pattern <COND_OLE, V_CMP_LE_F32_e64, f32>;
 
+def : FCMP_Pattern <COND_OEQ, V_CMP_EQ_F16_e64, f16>;
+def : FCMP_Pattern <COND_ONE, V_CMP_NEQ_F16_e64, f16>;
+def : FCMP_Pattern <COND_OGT, V_CMP_GT_F16_e64, f16>;
+def : FCMP_Pattern <COND_OGE, V_CMP_GE_F16_e64, f16>;
+def : FCMP_Pattern <COND_OLT, V_CMP_LT_F16_e64, f16>;
+def : FCMP_Pattern <COND_OLE, V_CMP_LE_F16_e64, f16>;
+
+
 def : FCMP_Pattern <COND_OEQ, V_CMP_EQ_F64_e64, f64>;
 def : FCMP_Pattern <COND_ONE, V_CMP_NEQ_F64_e64, f64>;
 def : FCMP_Pattern <COND_OGT, V_CMP_GT_F64_e64, f64>;
@@ -670,6 +689,13 @@
 def : FCMP_Pattern <COND_ULT, V_CMP_NGE_F64_e64, f64>;
 def : FCMP_Pattern <COND_ULE, V_CMP_NGT_F64_e64, f64>;
 
+def : FCMP_Pattern <COND_UEQ, V_CMP_NLG_F16_e64, f16>;
+def : FCMP_Pattern <COND_UNE, V_CMP_NEQ_F16_e64, f16>;
+def : FCMP_Pattern <COND_UGT, V_CMP_NLE_F16_e64, f16>;
+def : FCMP_Pattern <COND_UGE, V_CMP_NLT_F16_e64, f16>;
+def : FCMP_Pattern <COND_ULT, V_CMP_NGE_F16_e64, f16>;
+def : FCMP_Pattern <COND_ULE, V_CMP_NGT_F16_e64, f16>;
+
 //===----------------------------------------------------------------------===//
 // Target
 //===----------------------------------------------------------------------===//
Index: test/CodeGen/AMDGPU/llvm.amdgcn.fcmp.ll
===================================================================
--- test/CodeGen/AMDGPU/llvm.amdgcn.fcmp.ll
+++ test/CodeGen/AMDGPU/llvm.amdgcn.fcmp.ll
@@ -1,10 +1,13 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI %s
 
 declare i64 @llvm.amdgcn.fcmp.f32(float, float, i32) #0
 declare i64 @llvm.amdgcn.fcmp.f64(double, double, i32) #0
 declare float @llvm.fabs.f32(float) #0
 
+declare i64 @llvm.amdgcn.fcmp.f16(half, half, i32) #0
+declare half @llvm.fabs.f16(half) #0
+
 ; GCN-LABEL: {{^}}v_fcmp_f32_dynamic_cc:
 ; GCN: s_endpgm
 define amdgpu_kernel void @v_fcmp_f32_dynamic_cc(i64 addrspace(1)* %out, float %src0, float %src1, i32 %cc) {
@@ -32,9 +35,9 @@
   ret void
 }
 
-; GCN-LABEL: {{^}}v_fcmp:
+; GCN-LABEL: {{^}}v_fcmp_f32:
 ; GCN-NOT: v_cmp_eq_f32_e64
-define amdgpu_kernel void @v_fcmp(i64 addrspace(1)* %out, float %src) {
+define amdgpu_kernel void @v_fcmp_f32(i64 addrspace(1)* %out, float %src) {
   %result = call i64 @llvm.amdgcn.fcmp.f32(float %src, float 100.00, i32 -1)
   store i64 %result, i64 addrspace(1)* %out
   ret void
@@ -233,4 +236,183 @@
   ret void
 }
 
+; GCN-LABEL: {{^}}v_fcmp_f16_oeq_with_fabs:
+; VI: v_cmp_eq_f16_e64 {{s\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}}, |{{v[0-9]+}}|
+
+; SI: v_cvt_f32_f16_e32 [[CVT0:v[0-9]+]], s{{[0-9]+}}
+; SI: v_cvt_f32_f16_e64 [[CVT1:v[0-9]+]], |s{{[0-9]+}}|
+; SI: v_cmp_eq_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT0]], [[CVT1]]
+define amdgpu_kernel void @v_fcmp_f16_oeq_with_fabs(i64 addrspace(1)* %out, half %src, half %a) {
+  %temp = call half @llvm.fabs.f16(half %a)
+  %result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half %temp, i32 1)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fcmp_f16_oeq_both_operands_with_fabs:
+; VI: v_cmp_eq_f16_e64 {{s\[[0-9]+:[0-9]+\]}}, |{{s[0-9]+}}|, |{{v[0-9]+}}|
+
+; SI: v_cvt_f32_f16_e64 [[CVT0:v[0-9]+]], |s{{[0-9]+}}|
+; SI: v_cvt_f32_f16_e64 [[CVT1:v[0-9]+]], |s{{[0-9]+}}|
+; SI: v_cmp_eq_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT0]], [[CVT1]]
+define amdgpu_kernel void @v_fcmp_f16_oeq_both_operands_with_fabs(i64 addrspace(1)* %out, half %src, half %a) {
+  %temp = call half @llvm.fabs.f16(half %a)
+  %src_input = call half @llvm.fabs.f16(half %src)
+  %result = call i64 @llvm.amdgcn.fcmp.f16(half %src_input, half %temp, i32 1)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fcmp_f16:
+; GCN-NOT: v_cmp_eq_
+define amdgpu_kernel void @v_fcmp_f16(i64 addrspace(1)* %out, half %src) {
+  %result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 -1)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fcmp_f16_oeq:
+; VI: v_cmp_eq_f16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
+; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
+; SI: v_cmp_eq_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_fcmp_f16_oeq(i64 addrspace(1)* %out, half %src) {
+  %result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 1)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fcmp_f16_one:
+; VI: v_cmp_neq_f16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
+; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
+; SI: v_cmp_neq_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_fcmp_f16_one(i64 addrspace(1)* %out, half %src) {
+  %result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 6)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fcmp_f16_ogt:
+; VI: v_cmp_gt_f16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
+; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
+; SI: v_cmp_gt_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_fcmp_f16_ogt(i64 addrspace(1)* %out, half %src) {
+  %result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 2)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fcmp_f16_oge:
+; VI: v_cmp_ge_f16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
+; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
+; SI: v_cmp_ge_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_fcmp_f16_oge(i64 addrspace(1)* %out, half %src) {
+  %result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 3)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fcmp_f16_olt:
+; VI: v_cmp_lt_f16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
+; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
+; SI: v_cmp_lt_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_fcmp_f16_olt(i64 addrspace(1)* %out, half %src) {
+  %result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 4)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fcmp_f16_ole:
+; VI: v_cmp_le_f16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
+; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
+; SI: v_cmp_le_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_fcmp_f16_ole(i64 addrspace(1)* %out, half %src) {
+  %result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 5)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fcmp_f16_ueq:
+; VI: v_cmp_nlg_f16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
+; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
+; SI: v_cmp_nlg_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_fcmp_f16_ueq(i64 addrspace(1)* %out, half %src) {
+  %result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 9)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fcmp_f16_une:
+; VI: v_cmp_neq_f16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
+; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
+; SI: v_cmp_neq_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_fcmp_f16_une(i64 addrspace(1)* %out, half %src) {
+  %result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 14)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fcmp_f16_ugt:
+; VI: v_cmp_nle_f16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
+; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
+; SI: v_cmp_nle_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_fcmp_f16_ugt(i64 addrspace(1)* %out, half %src) {
+  %result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 10)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fcmp_f16_uge:
+; VI: v_cmp_nlt_f16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
+; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
+; SI: v_cmp_nlt_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_fcmp_f16_uge(i64 addrspace(1)* %out, half %src) {
+  %result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 11)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fcmp_f16_ult:
+; VI: v_cmp_nge_f16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
+; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
+; SI: v_cmp_nge_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_fcmp_f16_ult(i64 addrspace(1)* %out, half %src) {
+  %result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 12)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fcmp_f16_ule:
+; VI: v_cmp_ngt_f16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
+; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
+; SI: v_cmp_ngt_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_fcmp_f16_ule(i64 addrspace(1)* %out, half %src) {
+  %result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 13)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
 attributes #0 = { nounwind readnone convergent }
Index: test/CodeGen/AMDGPU/llvm.amdgcn.icmp.ll
===================================================================
--- test/CodeGen/AMDGPU/llvm.amdgcn.icmp.ll
+++ test/CodeGen/AMDGPU/llvm.amdgcn.icmp.ll
@@ -1,8 +1,9 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI %s
 
 declare i64 @llvm.amdgcn.icmp.i32(i32, i32, i32) #0
 declare i64 @llvm.amdgcn.icmp.i64(i64, i64, i32) #0
+declare i64 @llvm.amdgcn.icmp.i16(i16, i16, i32) #0
 
 ; No crash on invalid input
 ; GCN-LABEL: {{^}}v_icmp_i32_dynamic_cc:
@@ -21,13 +22,14 @@
   ret void
 }
 
-; GCN-LABEL: {{^}}v_icmp:
+; GCN-LABEL: {{^}}v_icmp_i32:
 ; GCN-NOT: v_cmp_eq_u32_e64
-define amdgpu_kernel void @v_icmp(i64 addrspace(1)* %out, i32 %src) {
+define amdgpu_kernel void @v_icmp_i32(i64 addrspace(1)* %out, i32 %src) {
   %result = call i64 @llvm.amdgcn.icmp.i32(i32 %src, i32 100, i32 30)
   store i64 %result, i64 addrspace(1)* %out
   ret void
 }
+
 ; GCN-LABEL: {{^}}v_icmp_i32_ne:
 ; GCN: v_cmp_ne_u32_e64
 define amdgpu_kernel void @v_icmp_i32_ne(i64 addrspace(1)* %out, i32 %src) {
   %result = call i64 @llvm.amdgcn.icmp.i32(i32 %src, i32 100, i32 33)
   store i64 %result, i64 addrspace(1)* %out
   ret void
 }
@@ -36,33 +38,33 @@
-; GCN-LABEL: {{^}}v_icmp_u32_ugt:
+; GCN-LABEL: {{^}}v_icmp_i32_ugt:
 ; GCN: v_cmp_gt_u32_e64
-define amdgpu_kernel void @v_icmp_u32_ugt(i64 addrspace(1)* %out, i32 %src) {
+define amdgpu_kernel void @v_icmp_i32_ugt(i64 addrspace(1)* %out, i32 %src) {
   %result = call i64 @llvm.amdgcn.icmp.i32(i32 %src, i32 100, i32 34)
   store i64 %result, i64 addrspace(1)* %out
   ret void
 }
 
-; GCN-LABEL: {{^}}v_icmp_u32_uge:
+; GCN-LABEL: {{^}}v_icmp_i32_uge:
 ; GCN: v_cmp_ge_u32_e64
-define amdgpu_kernel void @v_icmp_u32_uge(i64 addrspace(1)* %out, i32 %src) {
+define amdgpu_kernel void @v_icmp_i32_uge(i64 addrspace(1)* %out, i32 %src) {
   %result = call i64 @llvm.amdgcn.icmp.i32(i32 %src, i32 100, i32 35)
   store i64 %result, i64 addrspace(1)* %out
   ret void
 }
 
-; GCN-LABEL: {{^}}v_icmp_u32_ult:
+; GCN-LABEL: {{^}}v_icmp_i32_ult:
 ; GCN: v_cmp_lt_u32_e64
-define amdgpu_kernel void @v_icmp_u32_ult(i64 addrspace(1)* %out, i32 %src) {
+define amdgpu_kernel void @v_icmp_i32_ult(i64 addrspace(1)* %out, i32 %src) {
   %result = call i64 @llvm.amdgcn.icmp.i32(i32 %src, i32 100, i32 36)
   store i64 %result, i64 addrspace(1)* %out
   ret void
 }
 
-; GCN-LABEL: {{^}}v_icmp_u32_ule:
+; GCN-LABEL: {{^}}v_icmp_i32_ule:
 ; GCN: v_cmp_le_u32_e64
-define amdgpu_kernel void @v_icmp_u32_ule(i64 addrspace(1)* %out, i32 %src) {
+define amdgpu_kernel void @v_icmp_i32_ule(i64 addrspace(1)* %out, i32 %src) {
   %result = call i64 @llvm.amdgcn.icmp.i32(i32 %src, i32 100, i32 37)
   store i64 %result, i64 addrspace(1)* %out
   ret void
@@ -178,4 +180,138 @@
   ret void
 }
 
+; GCN-LABEL: {{^}}v_icmp_i16_dynamic_cc:
+; GCN: s_endpgm
+define amdgpu_kernel void @v_icmp_i16_dynamic_cc(i64 addrspace(1)* %out, i16 %src, i32 %cc) {
+  %result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 %cc)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_icmp_i16_eq:
+; VI: v_cmp_eq_u16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x64
+; SI-DAG: s_and_b32 [[CVT:s[0-9]+]], s{{[0-9]+}}, 0xffff{{$}}
+; SI: v_cmp_eq_u32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_icmp_i16_eq(i64 addrspace(1)* %out, i16 %src) {
+  %result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 32)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_icmp_i16:
+; GCN-NOT: v_cmp_eq_
+define amdgpu_kernel void @v_icmp_i16(i64 addrspace(1)* %out, i16 %src) {
+  %result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 30)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+; GCN-LABEL: {{^}}v_icmp_i16_ne:
+; VI: v_cmp_ne_u16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x64
+; SI-DAG: s_and_b32 [[CVT:s[0-9]+]], s{{[0-9]+}}, 0xffff{{$}}
+; SI: v_cmp_ne_u32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_icmp_i16_ne(i64 addrspace(1)* %out, i16 %src) {
+  %result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 33)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_icmp_i16_ugt:
+; VI: v_cmp_gt_u16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x64
+; SI-DAG: s_and_b32 [[CVT:s[0-9]+]], s{{[0-9]+}}, 0xffff{{$}}
+; SI: v_cmp_gt_u32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_icmp_i16_ugt(i64 addrspace(1)* %out, i16 %src) {
+  %result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 34)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_icmp_i16_uge:
+; VI: v_cmp_ge_u16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x64
+; SI-DAG: s_and_b32 [[CVT:s[0-9]+]], s{{[0-9]+}}, 0xffff{{$}}
+; SI: v_cmp_ge_u32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_icmp_i16_uge(i64 addrspace(1)* %out, i16 %src) {
+  %result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 35)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_icmp_i16_ult:
+; VI: v_cmp_lt_u16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x64
+; SI-DAG: s_and_b32 [[CVT:s[0-9]+]], s{{[0-9]+}}, 0xffff{{$}}
+; SI: v_cmp_lt_u32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_icmp_i16_ult(i64 addrspace(1)* %out, i16 %src) {
+  %result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 36)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_icmp_i16_ule:
+; VI: v_cmp_le_u16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x64
+; SI-DAG: s_and_b32 [[CVT:s[0-9]+]], s{{[0-9]+}}, 0xffff{{$}}
+; SI: v_cmp_le_u32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_icmp_i16_ule(i64 addrspace(1)* %out, i16 %src) {
+  %result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 37)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_icmp_i16_sgt:
+; VI: v_cmp_gt_i16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x64
+; SI-DAG: s_sext_i32_i16 [[CVT:s[0-9]+]], s{{[0-9]+}}
+; SI: v_cmp_gt_i32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_icmp_i16_sgt(i64 addrspace(1)* %out, i16 %src) {
+  %result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 38)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_icmp_i16_sge:
+; VI: v_cmp_ge_i16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x64
+; SI-DAG: s_sext_i32_i16 [[CVT:s[0-9]+]], s{{[0-9]+}}
+; SI: v_cmp_ge_i32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_icmp_i16_sge(i64 addrspace(1)* %out, i16 %src) {
+  %result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 39)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_icmp_i16_slt:
+; VI: v_cmp_lt_i16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x64
+; SI-DAG: s_sext_i32_i16 [[CVT:s[0-9]+]], s{{[0-9]+}}
+; SI: v_cmp_lt_i32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_icmp_i16_slt(i64 addrspace(1)* %out, i16 %src) {
+  %result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 40)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+; GCN-LABEL: {{^}}v_icmp_i16_sle:
+; VI: v_cmp_le_i16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x64
+; SI-DAG: s_sext_i32_i16 [[CVT:s[0-9]+]], s{{[0-9]+}}
+; SI: v_cmp_le_i32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_icmp_i16_sle(i64 addrspace(1)* %out, i16 %src) {
+  %result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 41)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
 attributes #0 = { nounwind readnone convergent }
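
Usage sketch (illustrative; not part of the patch). With this change the legacy
compare intrinsics also accept 16-bit operands. On subtargets where i16/f16 are
not legal (SI), lowerICMPIntrinsic/lowerFCMPIntrinsic promote the operands to
32 bits before emitting AMDGPUISD::SETCC; on VI the new 16-bit VOPC patterns
select directly. The %a/%b/%x/%y values below are placeholders:

  declare i64 @llvm.amdgcn.icmp.i16(i16, i16, i32)
  declare i64 @llvm.amdgcn.fcmp.f16(half, half, i32)

  ; Condition code 40 = icmp slt. Signed predicates are sign-extended on SI
  ; (unsigned ones zero-extended), selecting v_cmp_lt_i32; VI selects
  ; v_cmp_lt_i16.
  %r0 = call i64 @llvm.amdgcn.icmp.i16(i16 %a, i16 %b, i32 40)

  ; Condition code 1 = fcmp oeq. Operands are fp-extended on SI, selecting
  ; v_cmp_eq_f32; VI selects v_cmp_eq_f16.
  %r1 = call i64 @llvm.amdgcn.fcmp.f16(half %x, half %y, i32 1)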