diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -126,36 +126,88 @@
 // Helpers to define the different pseudo instructions.
 //===----------------------------------------------------------------------===//
 
-multiclass pseudo_binary<VReg result_reg_class,
-                         VReg op1_reg_class,
-                         DAGOperand op2_kind,
-                         LMULInfo vlmul> {
-  let Constraints = "$rd = $merge",
-      Uses = [VL, VTYPE], VLIndex = 5, SEWIndex = 6, MergeOpIndex = 1,
-      BaseInstr = !cast<Instruction>(!subst("Pseudo", "", NAME)) in
-  def "_"# vlmul.MX : Pseudo<(outs result_reg_class:$rd),
-                             (ins result_reg_class:$merge,
-                                  op1_reg_class:$rs2, op2_kind:$rs1,
-                                  VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),
-                             []>,
-                      RISCVVPseudo;
+class PseudoToVInst<string PseudoInst> {
+  string VInst = !subst("_M8", "",
+                 !subst("_M4", "",
+                 !subst("_M2", "",
+                 !subst("_M1", "",
+                 !subst("_MF2", "",
+                 !subst("_MF4", "",
+                 !subst("_MF8", "",
+                 !subst("_MASK", "",
+                 !subst("Pseudo", "", PseudoInst)))))))));
 }
 
-multiclass pseudo_binary_v_vv_vx_vi {
+class VPseudoBinary<VReg RetClass,
+                    VReg Op1Class,
+                    DAGOperand Op2Class> :
+        Pseudo<(outs RetClass:$rd),
+               (ins Op1Class:$rs2, Op2Class:$rs1, GPR:$vl, ixlenimm:$sew), []>,
+        RISCVVPseudo {
+  let Uses = [VL, VTYPE];
+  let VLIndex = 3;
+  let SEWIndex = 4;
+  let MergeOpIndex = -1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoBinaryMask<VReg RetClass,
+                        VReg Op1Class,
+                        DAGOperand Op2Class> :
+        Pseudo<(outs RetClass:$rd),
+               (ins RetClass:$merge,
+                    Op1Class:$rs2, Op2Class:$rs1,
+                    VMaskOp:$vm, GPR:$vl, ixlenimm:$sew), []>,
+        RISCVVPseudo {
+  let Constraints = "$rd = $merge";
+  let Uses = [VL, VTYPE];
+  let VLIndex = 5;
+  let SEWIndex = 6;
+  let MergeOpIndex = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+multiclass VPseudoBinary<VReg RetClass,
+                         VReg Op1Class,
+                         DAGOperand Op2Class,
+                         LMULInfo MInfo> {
+  def "_" # MInfo.MX : VPseudoBinary<RetClass, Op1Class, Op2Class>;
+  def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMask<RetClass, Op1Class, Op2Class>;
+}
+
+multiclass VPseudoBinaryV_VV {
   let mayLoad = 0, mayStore = 0, hasSideEffects = 0, usesCustomInserter = 1 in
   foreach m = MxList.m in
   {
     let VLMul = m.value in
-    {
-      defvar evr = m.vrclass;
-      defm _VV : pseudo_binary<evr, evr, evr, m>;
-      defm _VX : pseudo_binary<evr, evr, GPR, m>;
-      defm _VI : pseudo_binary<evr, evr, simm5, m>;
-    }
+    defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m>;
+  }
+}
+
+multiclass VPseudoBinaryV_VX {
+  let mayLoad = 0, mayStore = 0, hasSideEffects = 0, usesCustomInserter = 1 in
+  foreach m = MxList.m in
+  {
+    let VLMul = m.value in
+    defm _VX : VPseudoBinary<m.vrclass, m.vrclass, GPR, m>;
+  }
+}
+
+multiclass VPseudoBinaryV_VI {
+  let mayLoad = 0, mayStore = 0, hasSideEffects = 0, usesCustomInserter = 1 in
+  foreach m = MxList.m in
+  {
+    let VLMul = m.value in
+    defm _VI : VPseudoBinary<m.vrclass, m.vrclass, simm5, m>;
   }
 }
 
+multiclass VPseudoBinary_VV_VX_VI {
+  defm "" : VPseudoBinaryV_VV;
+  defm "" : VPseudoBinaryV_VX;
+  defm "" : VPseudoBinaryV_VI;
+}
+
 //===----------------------------------------------------------------------===//
 // Helpers to define the different patterns.
 //===----------------------------------------------------------------------===//
@@ -167,7 +219,7 @@
                            ValueType mask_type,
                            int sew,
                            LMULInfo vlmul,
-                          VReg result_reg_class,
+                          VReg RetClass,
                            VReg op_reg_class,
                            bit swap = 0>
 {
@@ -175,10 +227,8 @@
 
   def : Pat<(result_type (vop (op_type op_reg_class:$rs1),
                               (op_type op_reg_class:$rs2))),
-            (instruction (result_type (IMPLICIT_DEF)),
-                         op_reg_class:$rs1,
+            (instruction op_reg_class:$rs1,
                          op_reg_class:$rs2,
-                         (mask_type zero_reg),
                          VLMax, sew)>;
 }
 
@@ -300,7 +350,7 @@
 //===----------------------------------------------------------------------===//
 
 // Pseudo instructions.
-defm PseudoVADD : pseudo_binary_v_vv_vx_vi;
+defm PseudoVADD : VPseudoBinary_VV_VX_VI;
 
 // Whole-register vector patterns.
defm "" : pat_vop_binary_common; diff --git a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp --- a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp +++ b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp @@ -176,6 +176,12 @@ } OutMI.addOperand(MCOp); } + + // Unmasked pseudo instructions define MergeOpIndex to -1. + // Append dummy mask operand to V instructions. + if (RVV->getMergeOpIndex() == -1) + OutMI.addOperand(MCOperand::createReg(RISCV::NoRegister)); + return true; } diff --git a/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir --- a/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir +++ b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir @@ -31,8 +31,7 @@ %4:vr = PseudoVLE64_V_M1 %5, %1, $noreg, %3, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8) %7:vr = IMPLICIT_DEF %6:vr = PseudoVLE64_V_M1 %7, %2, $noreg, %3, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8) - %9:vr = IMPLICIT_DEF - %8:vr = PseudoVADD_VV_M1 %9, killed %4, killed %6, $noreg, %3, 64, implicit $vl, implicit $vtype + %8:vr = PseudoVADD_VV_M1 killed %4, killed %6, %3, 64, implicit $vl, implicit $vtype PseudoVSE64_V_M1 killed %8, %0, $noreg, %3, 64, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8) PseudoRET @@ -40,16 +39,15 @@ # POST-INSERTER: %0:gpr = COPY $x13 # POST-INSERTER: %4:vr = IMPLICIT_DEF -# POST-INSERTER: dead %10:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype +# POST-INSERTER: dead %9:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype # POST-INSERTER: %5:vr = PseudoVLE64_V_M1 %4, %2, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8) # POST-INSERTER: %6:vr = IMPLICIT_DEF -# POST-INSERTER: dead %11:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype +# POST-INSERTER: dead %10:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype # POST-INSERTER: %7:vr = PseudoVLE64_V_M1 %6, %1, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8) -# POST-INSERTER: %8:vr = IMPLICIT_DEF +# POST-INSERTER: dead %11:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype +# POST-INSERTER: %8:vr = PseudoVADD_VV_M1 killed %5, killed %7, $noreg, -1, implicit $vl, implicit $vtype # POST-INSERTER: dead %12:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype -# POST-INSERTER: %9:vr = PseudoVADD_VV_M1 %8, killed %5, killed %7, $noreg, $noreg, -1, implicit $vl, implicit $vtype -# POST-INSERTER: dead %13:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype -# POST-INSERTER: PseudoVSE64_V_M1 killed %9, %3, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8) +# POST-INSERTER: PseudoVSE64_V_M1 killed %8, %3, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8) # CODEGEN: vsetvli a4, a3, e64,m1,ta,mu # CODEGEN-NEXT: vle64.v v25, (a1) diff --git a/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll --- a/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll +++ b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll @@ -24,18 +24,16 @@ ; PRE-INSERTER: %3:vr = PseudoVLE64_V_M1 %4, %1, $noreg, $x0, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8) ; PRE-INSERTER: %6:vr = IMPLICIT_DEF ; PRE-INSERTER: %5:vr = PseudoVLE64_V_M1 %6, %2, $noreg, $x0, 64, implicit $vl, 
implicit $vtype :: (load unknown-size from %ir.pb, align 8) -; PRE-INSERTER: %8:vr = IMPLICIT_DEF -; PRE-INSERTER: %7:vr = PseudoVADD_VV_M1 %8, killed %3, killed %5, $noreg, $x0, 64, implicit $vl, implicit $vtype +; PRE-INSERTER: %7:vr = PseudoVADD_VV_M1 killed %3, killed %5, $x0, 64, implicit $vl, implicit $vtype ; PRE-INSERTER: PseudoVSE64_V_M1 killed %7, %0, $noreg, $x0, 64, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8) ; POST-INSERTER: %4:vr = IMPLICIT_DEF -; POST-INSERTER: dead %9:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype +; POST-INSERTER: dead %8:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype ; POST-INSERTER: %3:vr = PseudoVLE64_V_M1 %4, %1, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8) ; POST-INSERTER: %6:vr = IMPLICIT_DEF -; POST-INSERTER: dead %10:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype +; POST-INSERTER: dead %9:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype ; POST-INSERTER: %5:vr = PseudoVLE64_V_M1 %6, %2, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8) -; POST-INSERTER: %8:vr = IMPLICIT_DEF +; POST-INSERTER: dead %10:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype +; POST-INSERTER: %7:vr = PseudoVADD_VV_M1 killed %3, killed %5, $noreg, -1, implicit $vl, implicit $vtype ; POST-INSERTER: dead %11:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype -; POST-INSERTER: %7:vr = PseudoVADD_VV_M1 %8, killed %3, killed %5, $noreg, $noreg, -1, implicit $vl, implicit $vtype -; POST-INSERTER: dead %12:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype ; POST-INSERTER: PseudoVSE64_V_M1 killed %7, %0, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)