Index: llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp =================================================================== --- llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp +++ llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp @@ -5135,6 +5135,35 @@ SDValue LHS = N->getOperand(0); SDValue RHS = N->getOperand(1); EVT VT = LHS.getValueType(); + ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); + + auto CRHS = dyn_cast<ConstantSDNode>(RHS); + if (!CRHS) { + CRHS = dyn_cast<ConstantSDNode>(LHS); + if (CRHS) { + std::swap(LHS, RHS); + CC = getSetCCSwappedOperands(CC); + } + } + + if (CRHS && VT == MVT::i32 && LHS.getOpcode() == ISD::SIGN_EXTEND && + isBoolSGPR(LHS.getOperand(0))) { + // setcc (sext from i1 cc), -1, ne|sgt|ult) => not cc => xor cc, -1 + // setcc (sext from i1 cc), -1, eq|sle|uge) => cc + // setcc (sext from i1 cc), 0, eq|sge|ule) => not cc => xor cc, -1 + // setcc (sext from i1 cc), 0, ne|ugt|slt) => cc + if ((CRHS->isAllOnesValue() && + (CC == ISD::SETNE || CC == ISD::SETGT || CC == ISD::SETULT)) || + (CRHS->isNullValue() && + (CC == ISD::SETEQ || CC == ISD::SETGE || CC == ISD::SETULE))) + return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0), + DAG.getConstant(-1, SL, MVT::i1)); + if ((CRHS->isAllOnesValue() && + (CC == ISD::SETEQ || CC == ISD::SETLE || CC == ISD::SETUGE)) || + (CRHS->isNullValue() && + (CC == ISD::SETNE || CC == ISD::SETUGT || CC == ISD::SETLT))) + return LHS.getOperand(0); + } if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() && VT != MVT::f16)) @@ -5142,7 +5171,6 @@ // Match isinf pattern // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity)) - ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); if (CC == ISD::SETOEQ && LHS.getOpcode() == ISD::FABS) { const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS); if (!CRHS) Index: llvm/trunk/test/CodeGen/AMDGPU/setcc-sext.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/setcc-sext.ll +++ llvm/trunk/test/CodeGen/AMDGPU/setcc-sext.ll @@ 
-0,0 +1,292 @@ +; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s + +; GCN-LABEL: {{^}}setcc_sgt_true_sext: +; GCN: v_cmp_le_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}} +; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]] +; GCN-NOT: v_cndmask_ + +define amdgpu_kernel void @setcc_sgt_true_sext(i32 addrspace(1)* nocapture %arg) { +bb: + %x = tail call i32 @llvm.amdgcn.workitem.id.x() + %y = tail call i32 @llvm.amdgcn.workitem.id.y() + %cmp = icmp ugt i32 %x, %y + %ext = sext i1 %cmp to i32 + %cond = icmp sgt i32 %ext, -1 + br i1 %cond, label %then, label %endif + +then: + store i32 1, i32 addrspace(1)* %arg, align 4 + br label %endif + +endif: + ret void +} + +; GCN-LABEL: {{^}}setcc_sgt_true_sext_swap: +; GCN: v_cmp_le_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}} +; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]] +; GCN-NOT: v_cndmask_ + +define amdgpu_kernel void @setcc_sgt_true_sext_swap(i32 addrspace(1)* nocapture %arg) { +bb: + %x = tail call i32 @llvm.amdgcn.workitem.id.x() + %y = tail call i32 @llvm.amdgcn.workitem.id.y() + %cmp = icmp ugt i32 %x, %y + %ext = sext i1 %cmp to i32 + %cond = icmp slt i32 -1, %ext + br i1 %cond, label %then, label %endif + +then: + store i32 1, i32 addrspace(1)* %arg, align 4 + br label %endif + +endif: + ret void +} + +; GCN-LABEL: {{^}}setcc_ne_true_sext: +; GCN: v_cmp_le_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}} +; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]] +; GCN-NOT: v_cndmask_ + +define amdgpu_kernel void @setcc_ne_true_sext(i32 addrspace(1)* nocapture %arg) { +bb: + %x = tail call i32 @llvm.amdgcn.workitem.id.x() + %y = tail call i32 @llvm.amdgcn.workitem.id.y() + %cmp = icmp ugt i32 %x, %y + %ext = sext i1 %cmp to i32 + %cond = icmp ne i32 %ext, -1 + br i1 %cond, label %then, label %endif + +then: + store i32 1, i32 addrspace(1)* %arg, align 4 + br label %endif + +endif: + ret void +} + +; GCN-LABEL: {{^}}setcc_ult_true_sext: +; GCN: v_cmp_le_u32_e{{32|64}} 
[[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}} +; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]] +; GCN-NOT: v_cndmask_ + +define amdgpu_kernel void @setcc_ult_true_sext(i32 addrspace(1)* nocapture %arg) { +bb: + %x = tail call i32 @llvm.amdgcn.workitem.id.x() + %y = tail call i32 @llvm.amdgcn.workitem.id.y() + %cmp = icmp ugt i32 %x, %y + %ext = sext i1 %cmp to i32 + %cond = icmp ult i32 %ext, -1 + br i1 %cond, label %then, label %endif + +then: + store i32 1, i32 addrspace(1)* %arg, align 4 + br label %endif + +endif: + ret void +} + +; GCN-LABEL: {{^}}setcc_eq_true_sext: +; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}} +; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]] +; GCN-NOT: v_cndmask_ + +define amdgpu_kernel void @setcc_eq_true_sext(i32 addrspace(1)* nocapture %arg) { +bb: + %x = tail call i32 @llvm.amdgcn.workitem.id.x() + %y = tail call i32 @llvm.amdgcn.workitem.id.y() + %cmp = icmp ugt i32 %x, %y + %ext = sext i1 %cmp to i32 + %cond = icmp eq i32 %ext, -1 + br i1 %cond, label %then, label %endif + +then: + store i32 1, i32 addrspace(1)* %arg, align 4 + br label %endif + +endif: + ret void +} + +; GCN-LABEL: {{^}}setcc_sle_true_sext: +; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}} +; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]] +; GCN-NOT: v_cndmask_ + +define amdgpu_kernel void @setcc_sle_true_sext(i32 addrspace(1)* nocapture %arg) { +bb: + %x = tail call i32 @llvm.amdgcn.workitem.id.x() + %y = tail call i32 @llvm.amdgcn.workitem.id.y() + %cmp = icmp ugt i32 %x, %y + %ext = sext i1 %cmp to i32 + %cond = icmp sle i32 %ext, -1 + br i1 %cond, label %then, label %endif + +then: + store i32 1, i32 addrspace(1)* %arg, align 4 + br label %endif + +endif: + ret void +} + +; GCN-LABEL: {{^}}setcc_uge_true_sext: +; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}} +; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]] +; GCN-NOT: v_cndmask_ + +define amdgpu_kernel void @setcc_uge_true_sext(i32 addrspace(1)* 
nocapture %arg) { +bb: + %x = tail call i32 @llvm.amdgcn.workitem.id.x() + %y = tail call i32 @llvm.amdgcn.workitem.id.y() + %cmp = icmp ugt i32 %x, %y + %ext = sext i1 %cmp to i32 + %cond = icmp uge i32 %ext, -1 + br i1 %cond, label %then, label %endif + +then: + store i32 1, i32 addrspace(1)* %arg, align 4 + br label %endif + +endif: + ret void +} + +; GCN-LABEL: {{^}}setcc_eq_false_sext: +; GCN: v_cmp_le_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}} +; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]] +; GCN-NOT: v_cndmask_ + +define amdgpu_kernel void @setcc_eq_false_sext(i32 addrspace(1)* nocapture %arg) { +bb: + %x = tail call i32 @llvm.amdgcn.workitem.id.x() + %y = tail call i32 @llvm.amdgcn.workitem.id.y() + %cmp = icmp ugt i32 %x, %y + %ext = sext i1 %cmp to i32 + %cond = icmp eq i32 %ext, 0 + br i1 %cond, label %then, label %endif + +then: + store i32 1, i32 addrspace(1)* %arg, align 4 + br label %endif + +endif: + ret void +} + +; GCN-LABEL: {{^}}setcc_sge_false_sext: +; GCN: v_cmp_le_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}} +; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]] +; GCN-NOT: v_cndmask_ + +define amdgpu_kernel void @setcc_sge_false_sext(i32 addrspace(1)* nocapture %arg) { +bb: + %x = tail call i32 @llvm.amdgcn.workitem.id.x() + %y = tail call i32 @llvm.amdgcn.workitem.id.y() + %cmp = icmp ugt i32 %x, %y + %ext = sext i1 %cmp to i32 + %cond = icmp sge i32 %ext, 0 + br i1 %cond, label %then, label %endif + +then: + store i32 1, i32 addrspace(1)* %arg, align 4 + br label %endif + +endif: + ret void +} + +; GCN-LABEL: {{^}}setcc_ule_false_sext: +; GCN: v_cmp_le_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}} +; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]] +; GCN-NOT: v_cndmask_ + +define amdgpu_kernel void @setcc_ule_false_sext(i32 addrspace(1)* nocapture %arg) { +bb: + %x = tail call i32 @llvm.amdgcn.workitem.id.x() + %y = tail call i32 @llvm.amdgcn.workitem.id.y() + %cmp = icmp ugt i32 %x, %y + %ext = sext i1 %cmp to i32 
+ %cond = icmp ule i32 %ext, 0 + br i1 %cond, label %then, label %endif + +then: + store i32 1, i32 addrspace(1)* %arg, align 4 + br label %endif + +endif: + ret void +} + +; GCN-LABEL: {{^}}setcc_ne_false_sext: +; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}} +; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]] +; GCN-NOT: v_cndmask_ + +define amdgpu_kernel void @setcc_ne_false_sext(i32 addrspace(1)* nocapture %arg) { +bb: + %x = tail call i32 @llvm.amdgcn.workitem.id.x() + %y = tail call i32 @llvm.amdgcn.workitem.id.y() + %cmp = icmp ugt i32 %x, %y + %ext = sext i1 %cmp to i32 + %cond = icmp ne i32 %ext, 0 + br i1 %cond, label %then, label %endif + +then: + store i32 1, i32 addrspace(1)* %arg, align 4 + br label %endif + +endif: + ret void +} +; GCN-LABEL: {{^}}setcc_ugt_false_sext: +; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}} +; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]] +; GCN-NOT: v_cndmask_ + +define amdgpu_kernel void @setcc_ugt_false_sext(i32 addrspace(1)* nocapture %arg) { +bb: + %x = tail call i32 @llvm.amdgcn.workitem.id.x() + %y = tail call i32 @llvm.amdgcn.workitem.id.y() + %cmp = icmp ugt i32 %x, %y + %ext = sext i1 %cmp to i32 + %cond = icmp ugt i32 %ext, 0 + br i1 %cond, label %then, label %endif + +then: + store i32 1, i32 addrspace(1)* %arg, align 4 + br label %endif + +endif: + ret void +} +; GCN-LABEL: {{^}}setcc_slt_false_sext: +; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}} +; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]] +; GCN-NOT: v_cndmask_ + +define amdgpu_kernel void @setcc_slt_false_sext(i32 addrspace(1)* nocapture %arg) { +bb: + %x = tail call i32 @llvm.amdgcn.workitem.id.x() + %y = tail call i32 @llvm.amdgcn.workitem.id.y() + %cmp = icmp ugt i32 %x, %y + %ext = sext i1 %cmp to i32 + %cond = icmp slt i32 %ext, 0 + br i1 %cond, label %then, label %endif + +then: + store i32 1, i32 addrspace(1)* %arg, align 4 + br label %endif + +endif: + ret void +} + + +declare 
i32 @llvm.amdgcn.workitem.id.x() #0 + +declare i32 @llvm.amdgcn.workitem.id.y() #0 + +attributes #0 = { nounwind readnone speculatable }