Index: llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
===================================================================
--- llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -182,7 +182,7 @@
   bool doPeepholeMaskedRVV(SDNode *Node);
   bool doPeepholeMergeVVMFold();
   bool performVMergeToVMv(SDNode *N);
-  bool performCombineVMergeAndVOps(SDNode *N, bool IsTA);
+  bool performCombineVMergeAndVOps(SDNode *N);
 };
 
 namespace RISCV {
Index: llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
===================================================================
--- llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -3201,20 +3201,17 @@
 // not the pseudo name. That is, a TA VMERGE_VVM can be either the _TU pseudo
 // form with an IMPLICIT_DEF passthrough operand or the unsuffixed (TA) pseudo
 // form.
-bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N, bool IsTA) {
-
-  SDValue Merge;
-  if (!IsTA)
-    Merge = N->getOperand(0);
-  unsigned Offset = IsTA ? 0 : 1;
-  SDValue False = N->getOperand(0 + Offset);
-  SDValue True = N->getOperand(1 + Offset);
-  SDValue Mask = N->getOperand(2 + Offset);
-  SDValue VL = N->getOperand(3 + Offset);
-
-  // For the _TU psuedo form, we require that either merge and false
-  // are the same, or that merge is undefined.
-  if (!IsTA && Merge != False && !isImplicitDef(Merge))
+bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) {
+
+  SDValue Merge = N->getOperand(0);
+  SDValue False = N->getOperand(1);
+  SDValue True = N->getOperand(2);
+  SDValue Mask = N->getOperand(3);
+  SDValue VL = N->getOperand(4);
+
+  // We require that either merge and false are the same, or that merge
+  // is undefined.
+  if (Merge != False && !isImplicitDef(Merge))
     return false;
 
   assert(True.getResNo() == 0 &&
@@ -3247,7 +3244,7 @@
     // The vmerge instruction must be TU.
     // FIXME: This could be relaxed, but we need to handle the policy for the
    // resulting op correctly.
-    if (IsTA || isImplicitDef(Merge))
+    if (isImplicitDef(Merge))
      return false;
    SDValue MergeOpTrue = True->getOperand(0);
    // Both the vmerge instruction and the True instruction must have the same
@@ -3259,7 +3256,7 @@
   if (IsMasked) {
     assert(HasTiedDest && "Expected tied dest");
     // The vmerge instruction must be TU.
-    if (IsTA || isImplicitDef(Merge))
+    if (isImplicitDef(Merge))
       return false;
     // The vmerge instruction must have an all 1s mask since we're going to keep
     // the mask from the True instruction.
@@ -3325,7 +3322,7 @@
          "Expected instructions with mask have a tied dest.");
 #endif
 
-  uint64_t Policy = (IsTA || isImplicitDef(N->getOperand(0))) ?
+  uint64_t Policy = isImplicitDef(N->getOperand(0)) ?
     RISCVII::TAIL_AGNOSTIC : /*TUMU*/ 0;
   SDValue PolicyOp =
     CurDAG->getTargetConstant(Policy, DL, Subtarget->getXLenVT());
@@ -3367,17 +3364,17 @@
   return true;
 }
 
-// Transform (VMERGE_VVM_<LMUL>_TU false, false, true, allones, vl, sew) to
-// (VMV_V_V_<LMUL>_TU false, true, vl, sew). It may decrease uses of VMSET.
+// Transform (VMERGE_VVM_<LMUL> false, false, true, allones, vl, sew) to
+// (VMV_V_V_<LMUL> false, true, vl, sew). It may decrease uses of VMSET.
 bool RISCVDAGToDAGISel::performVMergeToVMv(SDNode *N) {
 #define CASE_VMERGE_TO_VMV(lmul)                                               \
-  case RISCV::PseudoVMERGE_VVM_##lmul##_TU:                                    \
+  case RISCV::PseudoVMERGE_VVM_##lmul:                                         \
     NewOpc = RISCV::PseudoVMV_V_V_##lmul;                                      \
     break;
   unsigned NewOpc;
   switch (N->getMachineOpcode()) {
   default:
-    llvm_unreachable("Expected VMERGE_VVM_<LMUL>_TU instruction.");
+    llvm_unreachable("Expected VMERGE_VVM_<LMUL> instruction.");
   CASE_VMERGE_TO_VMV(MF8)
   CASE_VMERGE_TO_VMV(MF4)
   CASE_VMERGE_TO_VMV(MF2)
@@ -3410,17 +3407,7 @@
     if (N->use_empty() || !N->isMachineOpcode())
       continue;
 
-    auto IsVMergeTU = [](unsigned Opcode) {
-      return Opcode == RISCV::PseudoVMERGE_VVM_MF8_TU ||
-             Opcode == RISCV::PseudoVMERGE_VVM_MF4_TU ||
-             Opcode == RISCV::PseudoVMERGE_VVM_MF2_TU ||
-             Opcode == RISCV::PseudoVMERGE_VVM_M1_TU ||
-             Opcode == RISCV::PseudoVMERGE_VVM_M2_TU ||
-             Opcode == RISCV::PseudoVMERGE_VVM_M4_TU ||
-             Opcode == RISCV::PseudoVMERGE_VVM_M8_TU;
-    };
-
-    auto IsVMergeTA = [](unsigned Opcode) {
+    auto IsVMerge = [](unsigned Opcode) {
       return Opcode == RISCV::PseudoVMERGE_VVM_MF8 ||
              Opcode == RISCV::PseudoVMERGE_VVM_MF4 ||
             Opcode == RISCV::PseudoVMERGE_VVM_MF2 ||
@@ -3431,9 +3418,9 @@
     };
 
     unsigned Opc = N->getMachineOpcode();
-    if (IsVMergeTU(Opc) || IsVMergeTA(Opc))
-      MadeChange |= performCombineVMergeAndVOps(N, IsVMergeTA(Opc));
-    if (IsVMergeTU(Opc) && N->getOperand(0) == N->getOperand(1))
+    if (IsVMerge(Opc))
+      MadeChange |= performCombineVMergeAndVOps(N);
+    if (IsVMerge(Opc) && N->getOperand(0) == N->getOperand(1))
       MadeChange |= performVMergeToVMv(N);
   }
   return MadeChange;
Index: llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -2108,7 +2108,7 @@
 }
 
 multiclass VPseudoTiedBinaryV_VM<LMULInfo m> {
-  def "_VVM" # "_" # m.MX # "_TU" :
+  def "_VVM" # "_" # m.MX:
     VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
                              m.vrclass, m.vrclass, m, 1, "">;
 }
@@ -2123,7 +2123,7 @@
 }
 
 multiclass VPseudoTiedBinaryV_XM<LMULInfo m> {
-  def "_VXM" # "_" # m.MX # "_TU":
+  def "_VXM" # "_" # m.MX:
     VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
                              m.vrclass, GPR, m, 1, "">;
 }
@@ -2136,12 +2136,7 @@
       defvar ReadVFMergeV_MX = !cast<SchedRead>("ReadVFMergeV_" # mx);
       defvar ReadVFMergeF_MX = !cast<SchedRead>("ReadVFMergeF_" # mx);
 
-      def "_V" # f.FX # "M_" # mx :
-        VPseudoBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
-                             m.vrclass, f.fprclass, m, /*CarryIn=*/1, "">,
-        Sched<[WriteVFMergeV_MX, ReadVFMergeV_MX, ReadVFMergeF_MX, ReadVMask]>;
-      // Tied version to allow codegen control over the tail elements
-      def "_V" # f.FX # "M_" # mx # "_TU":
+      def "_V" # f.FX # "M_" # mx:
         VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
                                  m.vrclass, f.fprclass, m, /*CarryIn=*/1, "">,
         Sched<[WriteVFMergeV_MX, ReadVFMergeV_MX, ReadVFMergeF_MX, ReadVMask]>;
@@ -2159,7 +2154,7 @@
 }
 
 multiclass VPseudoTiedBinaryV_IM<LMULInfo m> {
-  def "_VIM" # "_" # m.MX # "_TU":
+  def "_VIM" # "_" # m.MX:
     VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
                              m.vrclass, simm5, m, 1, "">;
 }
@@ -2833,28 +2828,15 @@
       defvar ReadVIMergeV_MX = !cast<SchedRead>("ReadVIMergeV_" # mx);
       defvar ReadVIMergeX_MX = !cast<SchedRead>("ReadVIMergeX_" # mx);
 
-      def "_VVM" # "_" # m.MX :
-        VPseudoBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
-                             m.vrclass, m.vrclass, m, 1, "">,
-        Sched<[WriteVIMergeV_MX, ReadVIMergeV_MX, ReadVIMergeV_MX, ReadVMask]>;
-      def "_VXM" # "_" # m.MX :
-        VPseudoBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
-                             m.vrclass, GPR, m, 1, "">,
-        Sched<[WriteVIMergeX_MX, ReadVIMergeV_MX, ReadVIMergeX_MX, ReadVMask]>;
-      def "_VIM" # "_" # m.MX :
-        VPseudoBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
-                             m.vrclass, simm5, m, 1, "">,
-        Sched<[WriteVIMergeI_MX, ReadVIMergeV_MX, ReadVMask]>;
-      // Tied versions to allow codegen control over the tail elements
-      def "_VVM" # "_" # m.MX # "_TU" :
+      def "_VVM" # "_" # m.MX:
         VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
                                  m.vrclass, m.vrclass, m, 1, "">,
         Sched<[WriteVIMergeV_MX, ReadVIMergeV_MX, ReadVIMergeV_MX, ReadVMask]>;
-      def "_VXM" # "_" # m.MX # "_TU":
+      def "_VXM" # "_" # m.MX:
         VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
                                  m.vrclass, GPR, m, 1, "">,
         Sched<[WriteVIMergeX_MX, ReadVIMergeV_MX, ReadVIMergeX_MX, ReadVMask]>;
-      def "_VIM" # "_" # m.MX # "_TU":
+      def "_VIM" # "_" # m.MX:
         VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
                                  m.vrclass, simm5, m, 1, "">,
         Sched<[WriteVIMergeI_MX, ReadVIMergeV_MX, ReadVMask]>;
@@ -2870,13 +2852,6 @@
       defvar ReadVICALUV_MX = !cast<SchedRead>("ReadVICALUV_" # mx);
       defvar ReadVICALUX_MX = !cast<SchedRead>("ReadVICALUX_" # mx);
 
-      defm "" : VPseudoBinaryV_VM<m>,
-                Sched<[WriteVICALUV_MX, ReadVICALUV_MX, ReadVICALUV_MX, ReadVMask]>;
-      defm "" : VPseudoBinaryV_XM<m>,
-                Sched<[WriteVICALUX_MX, ReadVICALUV_MX, ReadVICALUX_MX, ReadVMask]>;
-      defm "" : VPseudoBinaryV_IM<m>,
-                Sched<[WriteVICALUI_MX, ReadVICALUV_MX, ReadVMask]>;
-      // Tied versions to allow codegen control over the tail elements
       defm "" : VPseudoTiedBinaryV_VM<m>,
                 Sched<[WriteVICALUV_MX, ReadVICALUV_MX, ReadVICALUV_MX, ReadVMask]>;
       defm "" : VPseudoTiedBinaryV_XM<m>,
@@ -2894,11 +2869,6 @@
       defvar ReadVICALUV_MX = !cast<SchedRead>("ReadVICALUV_" # mx);
       defvar ReadVICALUX_MX = !cast<SchedRead>("ReadVICALUX_" # mx);
 
-      defm "" : VPseudoBinaryV_VM<m>,
-                Sched<[WriteVICALUV_MX, ReadVICALUV_MX, ReadVICALUV_MX, ReadVMask]>;
-      defm "" : VPseudoBinaryV_XM<m>,
-                Sched<[WriteVICALUX_MX, ReadVICALUV_MX, ReadVICALUX_MX, ReadVMask]>;
-      // Tied versions to allow codegen control over the tail elements
       defm "" : VPseudoTiedBinaryV_VM<m>,
                 Sched<[WriteVICALUV_MX, ReadVICALUV_MX, ReadVICALUV_MX, ReadVMask]>;
       defm "" : VPseudoTiedBinaryV_XM<m>,
@@ -4410,23 +4380,13 @@
                                  VReg result_reg_class,
                                  VReg op1_reg_class,
                                  DAGOperand op2_kind> {
-  def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
-                          (result_type undef),
-                          (op1_type op1_reg_class:$rs1),
-                          (op2_type op2_kind:$rs2),
-                          (mask_type V0),
-                          VLOpFrag)),
-            (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
-                     (op1_type op1_reg_class:$rs1),
-                     (op2_type op2_kind:$rs2),
-                     (mask_type V0), GPR:$vl, sew)>;
   def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
                           (result_type result_reg_class:$merge),
                           (op1_type op1_reg_class:$rs1),
                           (op2_type op2_kind:$rs2),
                           (mask_type V0),
                           VLOpFrag)),
-            (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_TU")
+            (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                      (result_type result_reg_class:$merge),
                      (op1_type op1_reg_class:$rs1),
                      (op2_type op2_kind:$rs2),
@@ -6486,19 +6446,12 @@
 foreach fvti = AllFloatVectors in {
   defvar instr = !cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX);
   let Predicates = GetVTypePredicates<fvti>.Predicates in
-  def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector undef),
-                                             (fvti.Vector fvti.RegClass:$rs2),
-                                             (fvti.Scalar (fpimm0)),
-                                             (fvti.Mask V0), VLOpFrag)),
-            (instr fvti.RegClass:$rs2, 0, (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
-  defvar instr_tu = !cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX#"_TU");
-  let Predicates = GetVTypePredicates<fvti>.Predicates in
   def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector fvti.RegClass:$merge),
                                              (fvti.Vector fvti.RegClass:$rs2),
                                              (fvti.Scalar (fpimm0)),
                                              (fvti.Mask V0), VLOpFrag)),
-            (instr_tu fvti.RegClass:$merge, fvti.RegClass:$rs2, 0,
-                      (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
+            (instr fvti.RegClass:$merge, fvti.RegClass:$rs2, 0,
+                   (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
 }
 
 //===----------------------------------------------------------------------===//
Index: llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -902,20 +902,20 @@
   let Predicates = GetVTypePredicates<vti>.Predicates in {
     def : Pat<(vti.Vector (vselect (vti.Mask V0), vti.RegClass:$rs1,
                                                   vti.RegClass:$rs2)),
-              (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX#"_TU")
+              (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
                    (vti.Vector (IMPLICIT_DEF)),
                    vti.RegClass:$rs2, vti.RegClass:$rs1, (vti.Mask V0),
                    vti.AVL, vti.Log2SEW)>;
     def : Pat<(vti.Vector (vselect (vti.Mask V0), (SplatPat XLenVT:$rs1),
                                                   vti.RegClass:$rs2)),
-              (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX#"_TU")
+              (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
                    (vti.Vector (IMPLICIT_DEF)),
                    vti.RegClass:$rs2, GPR:$rs1, (vti.Mask V0),
                    vti.AVL, vti.Log2SEW)>;
     def : Pat<(vti.Vector (vselect (vti.Mask V0), (SplatPat_simm5 simm5:$rs1),
                                                   vti.RegClass:$rs2)),
-              (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX#"_TU")
+              (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
                    (vti.Vector (IMPLICIT_DEF)),
                    vti.RegClass:$rs2, simm5:$rs1, (vti.Mask V0),
                    vti.AVL, vti.Log2SEW)>;
   }
@@ -1125,7 +1125,7 @@
   let Predicates = GetVTypePredicates<fvti>.Predicates in {
     def : Pat<(fvti.Vector (vselect (fvti.Mask V0), fvti.RegClass:$rs1,
                                                     fvti.RegClass:$rs2)),
-              (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX#"_TU")
+              (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
                    (fvti.Vector (IMPLICIT_DEF)),
                    fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0),
                    fvti.AVL, fvti.Log2SEW)>;
@@ -1133,7 +1133,7 @@
     def : Pat<(fvti.Vector (vselect (fvti.Mask V0),
                                     (SplatFPOp fvti.ScalarRegClass:$rs1),
                                     fvti.RegClass:$rs2)),
-              (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX#"_TU")
+              (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
                    (fvti.Vector (IMPLICIT_DEF)),
                    fvti.RegClass:$rs2,
                    (fvti.Scalar fvti.ScalarRegClass:$rs1),
@@ -1142,7 +1142,7 @@
     def : Pat<(fvti.Vector (vselect (fvti.Mask V0),
                                     (SplatFPOp (fvti.Scalar fpimm0)),
                                     fvti.RegClass:$rs2)),
-              (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX#"_TU")
+              (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
                    (fvti.Vector (IMPLICIT_DEF)),
                    fvti.RegClass:$rs2, 0,
                    (fvti.Mask V0), fvti.AVL, fvti.Log2SEW)>;
   }
Index: llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -1757,7 +1757,7 @@
                                               vti.RegClass:$rs1,
                                               vti.RegClass:$rs2,
                                               VLOpFrag)),
-              (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX#"_TU")
+              (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
                    (vti.Vector (IMPLICIT_DEF)),
                    vti.RegClass:$rs2, vti.RegClass:$rs1, (vti.Mask V0),
                    GPR:$vl, vti.Log2SEW)>;
@@ -1766,7 +1766,7 @@
                                               (SplatPat XLenVT:$rs1),
                                               vti.RegClass:$rs2,
                                               VLOpFrag)),
-              (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX#"_TU")
+              (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
                    (vti.Vector (IMPLICIT_DEF)),
                    vti.RegClass:$rs2, GPR:$rs1, (vti.Mask V0),
                    GPR:$vl, vti.Log2SEW)>;
@@ -1774,7 +1774,7 @@
                                               (SplatPat_simm5 simm5:$rs1),
                                               vti.RegClass:$rs2,
                                               VLOpFrag)),
-              (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX#"_TU")
+              (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
                    (vti.Vector (IMPLICIT_DEF)),
                    vti.RegClass:$rs2, simm5:$rs1, (vti.Mask V0),
                    GPR:$vl, vti.Log2SEW)>;
@@ -1782,7 +1782,7 @@
                                               vti.RegClass:$rs1,
                                               vti.RegClass:$rs2,
                                               VLOpFrag)),
-              (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX#"_TU")
+              (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
                    vti.RegClass:$rs2, vti.RegClass:$rs2,
                    vti.RegClass:$rs1, (vti.Mask V0),
                    GPR:$vl, vti.Log2SEW)>;
@@ -1790,7 +1790,7 @@
                                               (SplatPat XLenVT:$rs1),
                                               vti.RegClass:$rs2,
                                               VLOpFrag)),
-              (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX#"_TU")
+              (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
                    vti.RegClass:$rs2,
                    vti.RegClass:$rs2, GPR:$rs1,
                    (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
@@ -1798,7 +1798,7 @@
                                               (SplatPat_simm5 simm5:$rs1),
                                               vti.RegClass:$rs2,
                                               VLOpFrag)),
-              (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX#"_TU")
+              (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
                    vti.RegClass:$rs2, vti.RegClass:$rs2, simm5:$rs1,
                    (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
 }
@@ -1969,7 +1969,7 @@
                                               fvti.RegClass:$rs1,
                                               fvti.RegClass:$rs2,
                                               VLOpFrag)),
-              (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX#"_TU")
+              (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
                    (fvti.Vector (IMPLICIT_DEF)),
                    fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0),
                    GPR:$vl, fvti.Log2SEW)>;
@@ -1978,7 +1978,7 @@
                                               (SplatFPOp fvti.ScalarRegClass:$rs1),
                                               fvti.RegClass:$rs2,
                                               VLOpFrag)),
-              (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX#"_TU")
+              (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
                    (fvti.Vector (IMPLICIT_DEF)),
                    fvti.RegClass:$rs2,
                    (fvti.Scalar fvti.ScalarRegClass:$rs1),
@@ -1988,7 +1988,7 @@
                                               (SplatFPOp (SelectFPImm (XLenVT GPR:$imm))),
                                               fvti.RegClass:$rs2,
                                               VLOpFrag)),
-              (!cast<Instruction>("PseudoVMERGE_VXM_"#fvti.LMul.MX#"_TU")
+              (!cast<Instruction>("PseudoVMERGE_VXM_"#fvti.LMul.MX)
                    (fvti.Vector (IMPLICIT_DEF)),
                    fvti.RegClass:$rs2,
                    GPR:$imm,
@@ -1998,7 +1998,7 @@
                                               (SplatFPOp (fvti.Scalar fpimm0)),
                                               fvti.RegClass:$rs2,
                                               VLOpFrag)),
-              (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX#"_TU")
+              (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
                    (fvti.Vector (IMPLICIT_DEF)),
                    fvti.RegClass:$rs2, 0,
                    (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
@@ -2006,7 +2006,7 @@
                                               fvti.RegClass:$rs1,
                                               fvti.RegClass:$rs2,
                                               VLOpFrag)),
-              (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX#"_TU")
+              (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
                    fvti.RegClass:$rs2, fvti.RegClass:$rs2,
                    fvti.RegClass:$rs1, (fvti.Mask V0),
                    GPR:$vl, fvti.Log2SEW)>;
@@ -2014,7 +2014,7 @@
                                               (SplatFPOp fvti.ScalarRegClass:$rs1),
                                               fvti.RegClass:$rs2,
                                               VLOpFrag)),
-              (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX#"_TU")
+              (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
                    fvti.RegClass:$rs2, fvti.RegClass:$rs2,
                    (fvti.Scalar fvti.ScalarRegClass:$rs1),
                    (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
@@ -2023,7 +2023,7 @@
                                               (SplatFPOp (fvti.Scalar fpimm0)),
                                               fvti.RegClass:$rs2,
                                               VLOpFrag)),
-              (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX#"_TU")
+              (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
                    fvti.RegClass:$rs2,
                    fvti.RegClass:$rs2, 0,
                    (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
Index: llvm/test/CodeGen/RISCV/rvv/mask-reg-alloc.mir
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/mask-reg-alloc.mir
+++ llvm/test/CodeGen/RISCV/rvv/mask-reg-alloc.mir
@@ -17,9 +17,9 @@
     ; CHECK: liveins: $v0, $v1, $v2, $v3
     ; CHECK-NEXT: {{ $}}
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 1, 192 /* e8, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: renamable $v8 = PseudoVMERGE_VIM_M1 killed renamable $v2, 1, killed renamable $v0, 1, 3 /* e8 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: renamable $v8 = PseudoVMERGE_VIM_M1 undef renamable $v8, killed renamable $v2, 1, killed renamable $v0, 1, 3 /* e8 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: renamable $v0 = COPY killed renamable $v1
-    ; CHECK-NEXT: renamable $v9 = PseudoVMERGE_VIM_M1 killed renamable $v3, 1, killed renamable $v0, 1, 3 /* e8 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: renamable $v9 = PseudoVMERGE_VIM_M1 undef renamable $v9, killed renamable $v3, 1, killed renamable $v0, 1, 3 /* e8 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: renamable $v0 = PseudoVADD_VV_M1 undef renamable $v0, killed renamable $v8, killed renamable $v9, 1, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: PseudoRET implicit $v0
     %0:vr = COPY $v0
@@ -27,9 +27,11 @@
     %2:vr = COPY $v2
     %3:vr = COPY $v3
     %4:vmv0 = COPY %0
-    %5:vrnov0 = PseudoVMERGE_VIM_M1 killed %2, 1, %4, 1, 3
+    %pt1:vrnov0 = IMPLICIT_DEF
+    %5:vrnov0 = PseudoVMERGE_VIM_M1 %pt1, killed %2, 1, %4, 1, 3
     %6:vmv0 = COPY %1
-    %7:vrnov0 = PseudoVMERGE_VIM_M1 killed %3, 1, %6, 1, 3
+    %pt2:vrnov0 = IMPLICIT_DEF
+    %7:vrnov0 = PseudoVMERGE_VIM_M1 %pt2, killed %3, 1, %6, 1, 3
    %pt:vr = IMPLICIT_DEF
    %8:vr = PseudoVADD_VV_M1 %pt, killed %5, killed %7, 1, 3, 0
    $v0 = COPY %8
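
Note (illustration, not part of the patch): with the _TU variants folded away, the tail policy of a vmerge is carried entirely by the passthru (merge) operand of the single remaining pseudo, mirroring the Policy computation in performCombineVMergeAndVOps. In a minimal MIR sketch with hypothetical registers (%src, %mask, %old),

    %pt:vrnov0 = IMPLICIT_DEF
    %v1:vrnov0 = PseudoVMERGE_VIM_M1 %pt, %src, 1, %mask, 1, 3

expresses a tail-agnostic merge (undef passthru), while

    %v2:vrnov0 = PseudoVMERGE_VIM_M1 %old, %src, 1, %mask, 1, 3

keeps the tail undisturbed by supplying the prior destination value. The operand order (passthru, source vector, immediate, mask, AVL, log2 SEW) matches the updated mask-reg-alloc.mir test above.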