diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -857,41 +857,52 @@
     case Intrinsic::riscv_vmsge: {
       SDValue Src1 = Node->getOperand(1);
       SDValue Src2 = Node->getOperand(2);
+      bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
+      bool IsCmpUnsignedZero = false;
       // Only custom select scalar second operand.
       if (Src2.getValueType() != XLenVT)
         break;
       // Small constants are handled with patterns.
       if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
         int64_t CVal = C->getSExtValue();
-        if (CVal >= -15 && CVal <= 16)
-          break;
+        if (CVal >= -15 && CVal <= 16) {
+          if (!IsUnsigned || CVal != 0)
+            break;
+          IsCmpUnsignedZero = true;
+        }
       }
-      bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
       MVT Src1VT = Src1.getSimpleValueType();
-      unsigned VMSLTOpcode, VMNANDOpcode;
+      unsigned VMSLTOpcode, VMNANDOpcode, VMSetOpcode;
       switch (RISCVTargetLowering::getLMUL(Src1VT)) {
       default:
        llvm_unreachable("Unexpected LMUL!");
-#define CASE_VMSLT_VMNAND_OPCODES(lmulenum, suffix)                            \
-  case RISCVII::VLMUL::lmulenum:                                               \
-    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
-                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
-    VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix;                            \
-    break;
-        CASE_VMSLT_VMNAND_OPCODES(LMUL_F8, MF8)
-        CASE_VMSLT_VMNAND_OPCODES(LMUL_F4, MF4)
-        CASE_VMSLT_VMNAND_OPCODES(LMUL_F2, MF2)
-        CASE_VMSLT_VMNAND_OPCODES(LMUL_1, M1)
-        CASE_VMSLT_VMNAND_OPCODES(LMUL_2, M2)
-        CASE_VMSLT_VMNAND_OPCODES(LMUL_4, M4)
-        CASE_VMSLT_VMNAND_OPCODES(LMUL_8, M8)
-#undef CASE_VMSLT_VMNAND_OPCODES
+#define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b)            \
+  case RISCVII::VLMUL::lmulenum:                                               \
+    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
+                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
+    VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix;                            \
+    VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b;                             \
+    break;
+        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F8, MF8, B1)
+        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F4, MF4, B2)
+        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F2, MF2, B4)
+        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_1, M1, B8)
+        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_2, M2, B16)
+        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_4, M4, B32)
+        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_8, M8, B64)
+#undef CASE_VMSLT_VMNAND_VMSET_OPCODES
       }
       SDValue SEW = CurDAG->getTargetConstant(
           Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
       SDValue VL;
       selectVLOp(Node->getOperand(3), VL);
+      // If vmsgeu with 0 immediate, expand it to vmset.
+      if (IsCmpUnsignedZero) {
+        ReplaceNode(Node, CurDAG->getMachineNode(VMSetOpcode, DL, VT, VL, SEW));
+        return;
+      }
+
       // Expand to
       // vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
       SDValue Cmp = SDValue(
@@ -905,54 +916,61 @@
     case Intrinsic::riscv_vmsge_mask: {
       SDValue Src1 = Node->getOperand(2);
       SDValue Src2 = Node->getOperand(3);
+      bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
+      bool IsCmpUnsignedZero = false;
       // Only custom select scalar second operand.
       if (Src2.getValueType() != XLenVT)
         break;
       // Small constants are handled with patterns.
       if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
         int64_t CVal = C->getSExtValue();
-        if (CVal >= -15 && CVal <= 16)
-          break;
+        if (CVal >= -15 && CVal <= 16) {
+          if (!IsUnsigned || CVal != 0)
+            break;
+          IsCmpUnsignedZero = true;
+        }
       }
-      bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
       MVT Src1VT = Src1.getSimpleValueType();
-      unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode;
+      unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode,
+          VMSetOpcode, VMANDOpcode;
       switch (RISCVTargetLowering::getLMUL(Src1VT)) {
       default:
         llvm_unreachable("Unexpected LMUL!");
-#define CASE_VMSLT_OPCODES(lmulenum, suffix)                                   \
-  case RISCVII::VLMUL::lmulenum:                                               \
-    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
-                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
-    VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK      \
-                                 : RISCV::PseudoVMSLT_VX_##suffix##_MASK;      \
-    break;
-        CASE_VMSLT_OPCODES(LMUL_F8, MF8)
-        CASE_VMSLT_OPCODES(LMUL_F4, MF4)
-        CASE_VMSLT_OPCODES(LMUL_F2, MF2)
-        CASE_VMSLT_OPCODES(LMUL_1, M1)
-        CASE_VMSLT_OPCODES(LMUL_2, M2)
-        CASE_VMSLT_OPCODES(LMUL_4, M4)
-        CASE_VMSLT_OPCODES(LMUL_8, M8)
-#undef CASE_VMSLT_OPCODES
+#define CASE_VMSLT_VMSET_OPCODES(lmulenum, suffix, suffix_b)                   \
+  case RISCVII::VLMUL::lmulenum:                                               \
+    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
+                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
+    VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK      \
+                                 : RISCV::PseudoVMSLT_VX_##suffix##_MASK;      \
+    VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b;                             \
+    break;
+        CASE_VMSLT_VMSET_OPCODES(LMUL_F8, MF8, B1)
+        CASE_VMSLT_VMSET_OPCODES(LMUL_F4, MF4, B2)
+        CASE_VMSLT_VMSET_OPCODES(LMUL_F2, MF2, B4)
+        CASE_VMSLT_VMSET_OPCODES(LMUL_1, M1, B8)
+        CASE_VMSLT_VMSET_OPCODES(LMUL_2, M2, B16)
+        CASE_VMSLT_VMSET_OPCODES(LMUL_4, M4, B32)
+        CASE_VMSLT_VMSET_OPCODES(LMUL_8, M8, B64)
+#undef CASE_VMSLT_VMSET_OPCODES
       }
       // Mask operations use the LMUL from the mask type.
       switch (RISCVTargetLowering::getLMUL(VT)) {
       default:
         llvm_unreachable("Unexpected LMUL!");
-#define CASE_VMXOR_VANDN_OPCODES(lmulenum, suffix)                             \
-  case RISCVII::VLMUL::lmulenum:                                               \
-    VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix;                              \
-    VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix;                            \
-    break;
-        CASE_VMXOR_VANDN_OPCODES(LMUL_F8, MF8)
-        CASE_VMXOR_VANDN_OPCODES(LMUL_F4, MF4)
-        CASE_VMXOR_VANDN_OPCODES(LMUL_F2, MF2)
-        CASE_VMXOR_VANDN_OPCODES(LMUL_1, M1)
-        CASE_VMXOR_VANDN_OPCODES(LMUL_2, M2)
-        CASE_VMXOR_VANDN_OPCODES(LMUL_4, M4)
-        CASE_VMXOR_VANDN_OPCODES(LMUL_8, M8)
-#undef CASE_VMXOR_VANDN_OPCODES
+#define CASE_VMXOR_VMANDN_VMAND_OPCODES(lmulenum, suffix)                      \
+  case RISCVII::VLMUL::lmulenum:                                               \
+    VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix;                              \
+    VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix;                            \
+    VMANDOpcode = RISCV::PseudoVMAND_MM_##suffix;                              \
+    break;
+        CASE_VMXOR_VMANDN_VMAND_OPCODES(LMUL_F8, MF8)
+        CASE_VMXOR_VMANDN_VMAND_OPCODES(LMUL_F4, MF4)
+        CASE_VMXOR_VMANDN_VMAND_OPCODES(LMUL_F2, MF2)
+        CASE_VMXOR_VMANDN_VMAND_OPCODES(LMUL_1, M1)
+        CASE_VMXOR_VMANDN_VMAND_OPCODES(LMUL_2, M2)
+        CASE_VMXOR_VMANDN_VMAND_OPCODES(LMUL_4, M4)
+        CASE_VMXOR_VMANDN_VMAND_OPCODES(LMUL_8, M8)
+#undef CASE_VMXOR_VMANDN_VMAND_OPCODES
       }
       SDValue SEW = CurDAG->getTargetConstant(
           Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
@@ -961,6 +979,16 @@
       selectVLOp(Node->getOperand(5), VL);
       SDValue MaskedOff = Node->getOperand(1);
       SDValue Mask = Node->getOperand(4);
+
+      // If vmsgeu_mask with 0 immediate, expand it to {vmset, vmand}.
+      if (IsCmpUnsignedZero) {
+        SDValue VMSet =
+            SDValue(CurDAG->getMachineNode(VMSetOpcode, DL, VT, VL, SEW), 0);
+        ReplaceNode(Node, CurDAG->getMachineNode(VMANDOpcode, DL, VT,
+                                                 {Mask, VMSet, VL, MaskSEW}));
+        return;
+      }
+
       // If the MaskedOff value and the Mask are the same value use
       // vmslt{u}.vx vt, va, x; vmandn.mm vd, vd, vt
       // This avoids needing to copy v0 to vd before starting the next sequence.
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -3722,26 +3722,6 @@
   }
 }
 
-multiclass VPatCompareUnsignedZero<string intrinsic, string inst> {
-  foreach vti = AllIntegerVectors in {
-    defvar Intr = !cast<Intrinsic>(intrinsic);
-    defvar Pseudo = !cast<Instruction>(inst#"_VV_"#vti.LMul.MX);
-    def : Pat<(vti.Mask (Intr (vti.Vector vti.RegClass:$rs1),
-                              (vti.Scalar 0), VLOpFrag)),
-              (Pseudo vti.RegClass:$rs1, vti.RegClass:$rs1,
-               GPR:$vl, vti.Log2SEW)>;
-    defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
-    defvar PseudoMask = !cast<Instruction>(inst#"_VV_"#vti.LMul.MX#"_MASK");
-    def : Pat<(vti.Mask (IntrMask (vti.Mask VR:$merge),
-                                  (vti.Vector vti.RegClass:$rs1),
-                                  (vti.Scalar 0),
-                                  (vti.Mask V0),
-                                  VLOpFrag)),
-              (PseudoMask VR:$merge, vti.RegClass:$rs1, vti.RegClass:$rs1,
-               (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
-  }
-}
-
 //===----------------------------------------------------------------------===//
 // Pseudo instructions
 //===----------------------------------------------------------------------===//
@@ -4526,9 +4506,6 @@
 defm : VPatCompare_VI<"int_riscv_vmsge", "PseudoVMSGT">;
 defm : VPatCompare_VI<"int_riscv_vmsgeu", "PseudoVMSGTU", simm5_plus1_nonzero>;
 
-// Special cases for vmsgeu.vi 0 (always true). Instead match to vmsne.vv.
-// FIXME: We could match this to vmset.m or vmset.m+vmand.mm.
-defm : VPatCompareUnsignedZero<"int_riscv_vmsgeu", "PseudoVMSEQ">;
 
 //===----------------------------------------------------------------------===//
 // 12.9. Vector Integer Min/Max Instructions
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
@@ -2097,11 +2097,9 @@
 define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmseq.vv v10, v8, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
+; CHECK-NEXT:    vmset.m v8
+; CHECK-NEXT:    vmand.mm v0, v9, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
@@ -2118,7 +2116,7 @@
 ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v8
+; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
@@ -2064,11 +2062,9 @@
 define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmseq.vv v10, v8, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
+; CHECK-NEXT:    vmset.m v8
+; CHECK-NEXT:    vmand.mm v0, v9, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
@@ -2085,7 +2083,7 @@
 ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v8
+; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
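
Note (not part of the patch): the sketch below is a standalone way to exercise the new
lowering, mirroring the intrinsic_vmsgeu_vi_nxv4i16_i16 test above. The function name,
the RUN line flags, and the explicit declare are assumptions based on the conventions of
the existing rvv tests and on the operand order used by the selection code (vector,
scalar, vl); depending on the LLVM revision the vl type may also be mangled into the
intrinsic name. The CHECK lines reflect the post-patch output shown in the test diff:
an unsigned >= 0 compare is always true, so it selects to a single vmset.m instead of
the old vmseq.vv vd, vs, vs sequence.

; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s

declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(<vscale x 4 x i16>, i16, i64)

define <vscale x 4 x i1> @vmsgeu_vx_zero(<vscale x 4 x i16> %v, i64 %vl) nounwind {
; CHECK-LABEL: vmsgeu_vx_zero:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vmset.m v0
; CHECK-NEXT:    ret
entry:
  ; Unsigned x >= 0 holds for every element, so no compare is emitted.
  %m = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
    <vscale x 4 x i16> %v,
    i16 0,
    i64 %vl)
  ret <vscale x 4 x i1> %m
}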