diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -277,6 +277,68 @@
   let VLMul = m.value;
 }
 
+class VPseudoUSLoadNoMask<VReg RetClass>:
+      Pseudo<(outs RetClass:$rd),
+             (ins GPR:$rs1, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 2;
+  let SEWIndex = 3;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoUSLoadMask<VReg RetClass>:
+      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
+              (ins GetVRegNoV0<RetClass>.R:$merge,
+                   GPR:$rs1,
+                   VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Constraints = "$rd = $merge";
+  let Uses = [VL, VTYPE];
+  let VLIndex = 4;
+  let SEWIndex = 5;
+  let MergeOpIndex = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoUSStoreNoMask<VReg StClass>:
+      Pseudo<(outs),
+              (ins StClass:$rd, GPR:$rs1, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 1;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 2;
+  let SEWIndex = 3;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoUSStoreMask<VReg StClass>:
+      Pseudo<(outs),
+              (ins StClass:$rd, GPR:$rs1, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 1;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 3;
+  let SEWIndex = 4;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
 class VPseudoBinaryNoMask<VReg RetClass,
@@ ... @@
+multiclass VPseudoUSLoad {
+  foreach lmul = MxList.m in {
+    defvar LInfo = lmul.MX;
+    defvar vreg = lmul.vrclass;
+    let VLMul = lmul.value in {
+      def "_V_" # LInfo : VPseudoUSLoadNoMask<vreg>;
+      def "_V_" # LInfo # "_MASK" : VPseudoUSLoadMask<vreg>;
+    }
+  }
+}
+
+multiclass VPseudoUSStore {
+  foreach lmul = MxList.m in {
+    defvar LInfo = lmul.MX;
+    defvar vreg = lmul.vrclass;
+    let VLMul = lmul.value in {
+      def "_V_" # LInfo : VPseudoUSStoreNoMask<vreg>;
+      def "_V_" # LInfo # "_MASK" : VPseudoUSStoreMask<vreg>;
+    }
+  }
+}
+
 multiclass VPseudoBinary<VReg RetClass,
@@ ... @@
+multiclass VPatUSLoadStoreSDNode<LLVMType type,
+                                 LLVMType mask_type,
+                                 int sew,
+                                 LMULInfo vlmul,
+                                 RegisterClass reg_rs1,
+                                 VReg reg_class>
+{
+  defvar load_instr = !cast<Instruction>("PseudoVLE"#sew#"_V_"#vlmul.MX);
+  defvar store_instr = !cast<Instruction>("PseudoVSE"#sew#"_V_"#vlmul.MX);
+  // Load
+  def : Pat<(type (load reg_rs1:$rs1)),
+            (load_instr reg_rs1:$rs1, VLMax, sew)>;
+  // Store
+  def : Pat<(store type:$rs2, reg_rs1:$rs1),
+            (store_instr reg_class:$rs2, reg_rs1:$rs1, VLMax, sew)>;
+}
+
+multiclass VPatUSLoadStoreSDNodes<RegisterClass reg_rs1> {
+  foreach vti = AllVectors in
+    defm "" : VPatUSLoadStoreSDNode<vti.Vector, vti.Mask, vti.SEW, vti.LMul,
+                                    reg_rs1, vti.RegClass>;
+}
+
 class VPatBinarySDNode<SDNode vop, string instruction_name> :
@@ ... @@
+//===----------------------------------------------------------------------===//
+// Helpers to define the intrinsic patterns.
+//===----------------------------------------------------------------------===//
 class VPatBinaryNoMask<string intrinsic_name,
@@ ... @@
 // Pseudos.
 foreach eew = EEWList in {
-  foreach lmul = MxList.m in {
-    defvar LInfo = lmul.MX;
-    defvar vreg = lmul.vrclass;
-    defvar vlmul = lmul.value;
-    defvar constraint = "$rd = $merge";
-
-    let mayLoad = 1, mayStore = 0, hasSideEffects = 0,
-        usesCustomInserter = 1,
-        Constraints = constraint,
-        VLMul = vlmul in
-    {
-      let Uses = [VL, VTYPE], VLIndex = 4, SEWIndex = 5, MergeOpIndex = 1,
-          BaseInstr = !cast<Instruction>("VLE" # eew # "_V") in
-      def "PseudoVLE" # eew # "_V_" # LInfo
-          : Pseudo<(outs vreg:$rd),
-                   (ins vreg:$merge, GPR:$rs1, VMaskOp:$mask, GPR:$vl,
-                        ixlenimm:$sew),
-                   []>,
-            RISCVVPseudo;
-    }
-
-    let mayLoad = 0, mayStore = 1, hasSideEffects = 0,
-        usesCustomInserter = 1,
-        VLMul = vlmul in
-    {
-      // Masked stores do not have a merge operand as merge is done in memory
-      let Uses = [VL, VTYPE],
-          VLIndex = 3, SEWIndex = 4, MergeOpIndex = -1,
-          BaseInstr = !cast<Instruction>("VSE" # eew # "_V") in
-      def "PseudoVSE" # eew # "_V_" # LInfo
-          : Pseudo<(outs),
-                   (ins vreg:$rd, GPR:$rs1, VMaskOp:$mask, GPR:$vl,
-                        ixlenimm:$sew),
-                   []>,
-            RISCVVPseudo;
-    }
-  }
-}
-
-// Patterns.
-multiclass pat_load_store<LLVMType type,
-                          LLVMType mask_type,
-                          int sew,
-                          LMULInfo vlmul,
-                          VReg reg_class>
-{
-  defvar load_instr = !cast<Instruction>("PseudoVLE" # sew # "_V_"# vlmul.MX);
-  defvar store_instr = !cast<Instruction>("PseudoVSE" # sew # "_V_"# vlmul.MX);
-  // Load
-  def : Pat<(type (load GPR:$rs1)),
-            (load_instr (type (IMPLICIT_DEF)),
-             GPR:$rs1,
-             (mask_type zero_reg),
-             VLMax, sew)>;
-  def : Pat<(type (load AddrFI:$rs1)),
-            (load_instr (type (IMPLICIT_DEF)),
-             AddrFI:$rs1,
-             (mask_type zero_reg),
-             VLMax, sew)>;
-
-  // Store
-  def : Pat<(store type:$rs2, GPR:$rs1),
-            (store_instr reg_class:$rs2, GPR:$rs1,
-             (mask_type zero_reg),
-             VLMax, sew)>;
-  def : Pat<(store type:$rs2, AddrFI:$rs1),
-            (store_instr reg_class:$rs2, AddrFI:$rs1,
-             (mask_type zero_reg),
-             VLMax, sew)>;
-}
-
-foreach vti = AllVectors in
-{
-  defm : pat_load_store<vti.Vector, vti.Mask, vti.SEW,
-                        vti.LMul, vti.RegClass>;
+  defm PseudoVLE # eew : VPseudoUSLoad;
+  defm PseudoVSE # eew : VPseudoUSStore;
 }
 
 //===----------------------------------------------------------------------===//
@@ -1078,6 +1116,12 @@ let Predicates = [HasStdExtV] in {
 
 // Whole-register vector patterns.
+
+// 7.4. Vector Unit-Stride Instructions
+defm "" : VPatUSLoadStoreSDNodes<GPR>;
+defm "" : VPatUSLoadStoreSDNodes<AddrFI>;
+
+// 12.1. Vector Single-Width Integer Add and Subtract
 defm "" : VPatBinarySDNode<add, "PseudoVADD">;
 
 //===----------------------------------------------------------------------===//
diff --git a/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir
--- a/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir
@@ -27,27 +27,26 @@
     %2:gpr = COPY $x12
     %1:gpr = COPY $x11
     %0:gpr = COPY $x10
-    %5:vr = IMPLICIT_DEF
-    %4:vr = PseudoVLE64_V_M1 %5, %1, $noreg, %3, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
-    %7:vr = IMPLICIT_DEF
-    %6:vr = PseudoVLE64_V_M1 %7, %2, $noreg, %3, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
-    %8:vr = PseudoVADD_VV_M1 killed %4, killed %6, %3, 64, implicit $vl, implicit $vtype
-    PseudoVSE64_V_M1 killed %8, %0, $noreg, %3, 64, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
+    %4:vr = PseudoVLE64_V_M1 %1, %3, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
+    %5:vr = PseudoVLE64_V_M1 %2, %3, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
+    %6:vr = PseudoVADD_VV_M1 killed %4, killed %5, %3, 64, implicit $vl, implicit $vtype
+    PseudoVSE64_V_M1 killed %6, %0, %3, 64, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
     PseudoRET
 
 ...
 
 # POST-INSERTER: %0:gpr = COPY $x13
-# POST-INSERTER: %4:vr = IMPLICIT_DEF
+# POST-INSERTER: %1:gpr = COPY $x12
+# POST-INSERTER: %2:gpr = COPY $x11
+# POST-INSERTER: %3:gpr = COPY $x10
+# POST-INSERTER: dead %7:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype
+# POST-INSERTER: %4:vr = PseudoVLE64_V_M1 %2, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
+# POST-INSERTER: dead %8:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype
+# POST-INSERTER: %5:vr = PseudoVLE64_V_M1 %1, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
 # POST-INSERTER: dead %9:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype
-# POST-INSERTER: %5:vr = PseudoVLE64_V_M1 %4, %2, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
-# POST-INSERTER: %6:vr = IMPLICIT_DEF
+# POST-INSERTER: %6:vr = PseudoVADD_VV_M1 killed %4, killed %5, $noreg, -1, implicit $vl, implicit $vtype
 # POST-INSERTER: dead %10:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype
-# POST-INSERTER: %7:vr = PseudoVLE64_V_M1 %6, %1, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
-# POST-INSERTER: dead %11:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype
-# POST-INSERTER: %8:vr = PseudoVADD_VV_M1 killed %5, killed %7, $noreg, -1, implicit $vl, implicit $vtype
-# POST-INSERTER: dead %12:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype
-# POST-INSERTER: PseudoVSE64_V_M1 killed %8, %3, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
+# POST-INSERTER: PseudoVSE64_V_M1 killed %6, %3, $noreg, -1, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
 
 # CODEGEN: vsetvli a3, a3, e64,m1,ta,mu
 # CODEGEN-NEXT: vle64.v v25, (a1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll
--- a/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll
@@ -20,20 +20,16 @@
   ret void
 }
 
-; PRE-INSERTER: %4:vr = IMPLICIT_DEF
-; PRE-INSERTER: %3:vr = PseudoVLE64_V_M1 %4, %1, $noreg, $x0, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
-; PRE-INSERTER: %6:vr = IMPLICIT_DEF
-; PRE-INSERTER: %5:vr = PseudoVLE64_V_M1 %6, %2, $noreg, $x0, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
-; PRE-INSERTER: %7:vr = PseudoVADD_VV_M1 killed %3, killed %5, $x0, 64, implicit $vl, implicit $vtype
-; PRE-INSERTER: PseudoVSE64_V_M1 killed %7, %0, $noreg, $x0, 64, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
+; PRE-INSERTER: %3:vr = PseudoVLE64_V_M1 %1, $x0, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
+; PRE-INSERTER: %4:vr = PseudoVLE64_V_M1 %2, $x0, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
+; PRE-INSERTER: %5:vr = PseudoVADD_VV_M1 killed %3, killed %4, $x0, 64, implicit $vl, implicit $vtype
+; PRE-INSERTER: PseudoVSE64_V_M1 killed %5, %0, $x0, 64, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
 
-; POST-INSERTER: %4:vr = IMPLICIT_DEF
+; POST-INSERTER: dead %6:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype
+; POST-INSERTER: %3:vr = PseudoVLE64_V_M1 %1, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
+; POST-INSERTER: dead %7:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype
+; POST-INSERTER: %4:vr = PseudoVLE64_V_M1 %2, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
 ; POST-INSERTER: dead %8:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype
-; POST-INSERTER: %3:vr = PseudoVLE64_V_M1 %4, %1, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
-; POST-INSERTER: %6:vr = IMPLICIT_DEF
+; POST-INSERTER: %5:vr = PseudoVADD_VV_M1 killed %3, killed %4, $noreg, -1, implicit $vl, implicit $vtype
 ; POST-INSERTER: dead %9:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype
-; POST-INSERTER: %5:vr = PseudoVLE64_V_M1 %6, %2, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
-; POST-INSERTER: dead %10:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype
-; POST-INSERTER: %7:vr = PseudoVADD_VV_M1 killed %3, killed %5, $noreg, -1, implicit $vl, implicit $vtype
-; POST-INSERTER: dead %11:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype
-; POST-INSERTER: PseudoVSE64_V_M1 killed %7, %0, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
+; POST-INSERTER: PseudoVSE64_V_M1 killed %5, %0, $noreg, -1, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
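
For reference, below is a minimal sketch of the kind of scalable-vector IR the
new VPatUSLoadStoreSDNodes patterns select. It mirrors the function exercised
by add-vsetvli-vlmax.ll; the function and value names here are illustrative,
not taken from the patch. A plain load/add/store of <vscale x 1 x i64> now
selects directly to PseudoVLE64_V_M1 / PseudoVADD_VV_M1 / PseudoVSE64_V_M1
carrying only address, VL, and SEW operands, with no IMPLICIT_DEF merge value
and no $noreg dummy mask operand in the pre-inserter MIR:

; Illustrative input (names are hypothetical, not from the test file).
define void @vadd_vint64m1(<vscale x 1 x i64>* %pc,
                           <vscale x 1 x i64>* %pa,
                           <vscale x 1 x i64>* %pb) nounwind {
  %va = load <vscale x 1 x i64>, <vscale x 1 x i64>* %pa   ; -> PseudoVLE64_V_M1
  %vb = load <vscale x 1 x i64>, <vscale x 1 x i64>* %pb   ; -> PseudoVLE64_V_M1
  %vc = add <vscale x 1 x i64> %va, %vb                    ; -> PseudoVADD_VV_M1
  store <vscale x 1 x i64> %vc, <vscale x 1 x i64>* %pc    ; -> PseudoVSE64_V_M1
  ret void
}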