diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -137,6 +137,9 @@
   LMULInfo LMul = M;
   ValueType Scalar = Scal;
   RegisterClass ScalarRegClass = ScalarReg;
+  // The pattern fragment which produces the AVL operand, representing the
+  // "natural" vector length for this type. For scalable vectors this is VLMax.
+  OutPatFrag AVL = VLMax;
 }

 class GroupVTypeInfo AllMasks = {

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -47,6 +47,7 @@
                                  LLVMType mask_type,
                                  int sew,
                                  LMULInfo vlmul,
+                                 OutPatFrag avl,
                                  RegisterClass reg_rs1,
                                  VReg reg_class>
 {
@@ -54,16 +55,16 @@
   defvar store_instr = !cast<Instruction>("PseudoVSE"#sew#"_V_"#vlmul.MX);
   // Load
   def : Pat<(type (load reg_rs1:$rs1)),
-            (load_instr reg_rs1:$rs1, VLMax, sew)>;
+            (load_instr reg_rs1:$rs1, avl, sew)>;
   // Store
   def : Pat<(store type:$rs2, reg_rs1:$rs1),
-            (store_instr reg_class:$rs2, reg_rs1:$rs1, VLMax, sew)>;
+            (store_instr reg_class:$rs2, reg_rs1:$rs1, avl, sew)>;
 }

 multiclass VPatUSLoadStoreSDNodes {
   foreach vti = AllVectors in
     defm "" : VPatUSLoadStoreSDNode;
+                                    vti.AVL, reg_rs1, vti.RegClass>;
 }

 class VPatBinarySDNode_VV :
     Pat<(result_type (vop
@@ -81,7 +83,7 @@
         (!cast<Instruction>(instruction_name#"_VV_"# vlmul.MX)
                      op_reg_class:$rs1,
                      op_reg_class:$rs2,
-                     VLMax, sew)>;
+                     avl, sew)>;

 class VPatBinarySDNode_XI(instruction_name#_#suffix#_# vlmul.MX)
                      vop_reg_class:$rs1,
                      xop_kind:$rs2,
-                     VLMax, sew)>;
+                     avl, sew)>;

 multiclass VPatBinarySDNode_VV_VX {
   foreach vti = AllIntegerVectors in {
     def : VPatBinarySDNode_VV;
+                              vti.LMul, vti.AVL, vti.RegClass, vti.RegClass>;
     def : VPatBinarySDNode_XI;
   }
 }
@@ -123,14 +126,14 @@
   foreach vti = AllIntegerVectors in {
     def : VPatBinarySDNode_VV;
+                              vti.LMul, vti.AVL, vti.RegClass, vti.RegClass>;
     def : VPatBinarySDNode_XI;
     def : VPatBinarySDNode_XI(SplatPat#_#ImmType), ImmType>;
   }
@@ -144,6 +147,7 @@
                             ValueType mask_type,
                             int sew,
                             LMULInfo vlmul,
+                            OutPatFrag avl,
                             VReg RetClass,
                             VReg vop_reg_class,
                             DAGOperand xop_kind> :
@@ -152,16 +156,16 @@
         (!cast<Instruction>(instruction_name#"_VF_"#vlmul.MX)
                      vop_reg_class:$rs1,
                      ToFPR32.ret,
-                     VLMax, sew)>;
+                     avl, sew)>;

 multiclass VPatBinaryFPSDNode_VV_VF {
   foreach vti = AllFloatVectors in {
     def : VPatBinarySDNode_VV;
+                              vti.LMul, vti.AVL, vti.RegClass, vti.RegClass>;
     def : VPatBinarySDNode_VF;
   }
 }
@@ -173,7 +177,7 @@
               (!cast<Instruction>(instruction_name#"_VF_"#fvti.LMul.MX)
                            fvti.RegClass:$rs1,
                            ToFPR32.ret,
-                           VLMax, fvti.SEW)>;
+                           fvti.AVL, fvti.SEW)>;
 }

 multiclass VPatIntegerSetCCSDNode_VV.Value>; } }
@@ -204,7 +208,7 @@
               SwapHelper<(instruction),
                          (instruction vti.RegClass:$rs1),
                          (instruction xop_kind:$rs2),
-                         (instruction VLMax, vti.SEW),
+                         (instruction vti.AVL, vti.SEW),
                          swap>.Value>;
   }
 }
@@ -242,7 +246,7 @@
                                 (fvti.Vector fvti.RegClass:$rs2),
                                 cc)),
               (!cast<Instruction>(instruction_name#"_VV_"#fvti.LMul.MX)
-                  fvti.RegClass:$rs1, fvti.RegClass:$rs2, VLMax, fvti.SEW)>;
+                  fvti.RegClass:$rs1, fvti.RegClass:$rs2, fvti.AVL, fvti.SEW)>;
 }

 multiclass VPatFPSetCCSDNode_VF {
@@ -253,7 +257,7 @@
               (!cast<Instruction>(instruction_name#"_VF_"#fvti.LMul.MX)
                             fvti.RegClass:$rs1,
                             ToFPR32.ret,
-                            VLMax, fvti.SEW)>;
+                            fvti.AVL, fvti.SEW)>;
 }

 multiclass VPatFPSetCCSDNode_FV {
@@ -264,7 +268,7 @@
               (!cast<Instruction>(swapped_op_instruction_name#"_VF_"#fvti.LMul.MX)
                             fvti.RegClass:$rs1,
                             ToFPR32.ret,
-                            VLMax, fvti.SEW)>;
+                            fvti.AVL, fvti.SEW)>;
 multiclass VPatFPSetCCSDNode_VV_VF_FV(inst_name#"_"#suffix#"_"#vti.LMul.MX)
-                fti.RegClass:$rs2, VLMax, vti.SEW)>;
+                fti.RegClass:$rs2, fti.AVL, vti.SEW)>;
   }
 }
@@ -306,11 +310,11 @@
   def : Pat<(sub (vti.Vector (SplatPat XLenVT:$rs2)),
                  (vti.Vector vti.RegClass:$rs1)),
             (!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX)
-                 vti.RegClass:$rs1, GPR:$rs2, VLMax, vti.SEW)>;
+                 vti.RegClass:$rs1, GPR:$rs2, vti.AVL, vti.SEW)>;
   def : Pat<(sub (vti.Vector (SplatPat_simm5 XLenVT:$rs2)),
                  (vti.Vector vti.RegClass:$rs1)),
             (!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX)
-                 vti.RegClass:$rs1, simm5:$rs2, VLMax, vti.SEW)>;
+                 vti.RegClass:$rs1, simm5:$rs2, vti.AVL, vti.SEW)>;
 }

 // 12.3. Vector Integer Extension
@@ -343,7 +347,7 @@
     defvar fti = vtiTofti.Fti;
     def : Pat<(fti.Vector (riscv_trunc_vector (vti.Vector vti.RegClass:$rs1))),
               (!cast<Instruction>("PseudoVNSRL_WI_"#fti.LMul.MX)
-                  vti.RegClass:$rs1, 0, VLMax, fti.SEW)>;
+                  vti.RegClass:$rs1, 0, fti.AVL, fti.SEW)>;
   }

 // 12.8. Vector Integer Comparison Instructions
@@ -390,47 +394,48 @@
   def : Pat<(vti.Vector (vselect (vti.Mask VMV0:$vm), vti.RegClass:$rs1,
                                  vti.RegClass:$rs2)),
             (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
-                 vti.RegClass:$rs2, vti.RegClass:$rs1, VMV0:$vm, VLMax, vti.SEW)>;
+                 vti.RegClass:$rs2, vti.RegClass:$rs1, VMV0:$vm,
+                 vti.AVL, vti.SEW)>;
   def : Pat<(vti.Vector (vselect (vti.Mask VMV0:$vm), (SplatPat XLenVT:$rs1),
                                  vti.RegClass:$rs2)),
             (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
-                 vti.RegClass:$rs2, GPR:$rs1, VMV0:$vm, VLMax, vti.SEW)>;
+                 vti.RegClass:$rs2, GPR:$rs1, VMV0:$vm, vti.AVL, vti.SEW)>;
   def : Pat<(vti.Vector (vselect (vti.Mask VMV0:$vm), (SplatPat_simm5 simm5:$rs1),
                                  vti.RegClass:$rs2)),
             (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
-                 vti.RegClass:$rs2, simm5:$rs1, VMV0:$vm, VLMax, vti.SEW)>;
+                 vti.RegClass:$rs2, simm5:$rs1, VMV0:$vm, vti.AVL, vti.SEW)>;
 }

 // 16.1. Vector Mask-Register Logical Instructions
 foreach mti = AllMasks in {
   def : Pat<(mti.Mask (and VR:$rs1, VR:$rs2)),
             (!cast<Instruction>("PseudoVMAND_MM_"#mti.LMul.MX)
-                 VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+                 VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
   def : Pat<(mti.Mask (or VR:$rs1, VR:$rs2)),
             (!cast<Instruction>("PseudoVMOR_MM_"#mti.LMul.MX)
-                 VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+                 VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
   def : Pat<(mti.Mask (xor VR:$rs1, VR:$rs2)),
             (!cast<Instruction>("PseudoVMXOR_MM_"#mti.LMul.MX)
-                 VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+                 VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
   def : Pat<(mti.Mask (vnot (and VR:$rs1, VR:$rs2))),
             (!cast<Instruction>("PseudoVMNAND_MM_"#mti.LMul.MX)
-                 VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+                 VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
   def : Pat<(mti.Mask (vnot (or VR:$rs1, VR:$rs2))),
             (!cast<Instruction>("PseudoVMNOR_MM_"#mti.LMul.MX)
-                 VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+                 VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
   def : Pat<(mti.Mask (vnot (xor VR:$rs1, VR:$rs2))),
             (!cast<Instruction>("PseudoVMXNOR_MM_"#mti.LMul.MX)
-                 VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+                 VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
   def : Pat<(mti.Mask (and VR:$rs1, (vnot VR:$rs2))),
             (!cast<Instruction>("PseudoVMANDNOT_MM_"#mti.LMul.MX)
-                 VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+                 VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
   def : Pat<(mti.Mask (or VR:$rs1, (vnot VR:$rs2))),
             (!cast<Instruction>("PseudoVMORNOT_MM_"#mti.LMul.MX)
-                 VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+                 VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
 }
 } // Predicates = [HasStdExtV]
@@ -468,7 +473,7 @@
                                   fvti.RegClass:$rs2)),
             (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
                  fvti.RegClass:$rs2, fvti.RegClass:$rs1, VMV0:$vm,
-                 VLMax, fvti.SEW)>;
+                 fvti.AVL, fvti.SEW)>;
   def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm),
                                   (splat_vector fvti.ScalarRegClass:$rs1),
@@ -476,13 +481,13 @@
             (!cast<Instruction>("PseudoVFMERGE_VFM_"#fvti.LMul.MX)
                  fvti.RegClass:$rs2,
                  ToFPR32.ret,
-                 VMV0:$vm, VLMax, fvti.SEW)>;
+                 VMV0:$vm, fvti.AVL, fvti.SEW)>;
   def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm),
                                   (splat_vector (fvti.Scalar fpimm0)),
                                   fvti.RegClass:$rs2)),
             (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
-                 fvti.RegClass:$rs2, 0, VMV0:$vm, VLMax, fvti.SEW)>;
+                 fvti.RegClass:$rs2, 0, VMV0:$vm, fvti.AVL, fvti.SEW)>;
 }
 } // Predicates = [HasStdExtV, HasStdExtF]
@@ -494,17 +499,17 @@
 foreach vti = AllIntegerVectors in {
   def : Pat<(vti.Vector (splat_vector GPR:$rs1)),
             (!cast<Instruction>("PseudoVMV_V_X_" # vti.LMul.MX)
-                GPR:$rs1, VLMax, vti.SEW)>;
+                GPR:$rs1, vti.AVL, vti.SEW)>;
   def : Pat<(vti.Vector (splat_vector simm5:$rs1)),
             (!cast<Instruction>("PseudoVMV_V_I_" # vti.LMul.MX)
-                simm5:$rs1, VLMax, vti.SEW)>;
+                simm5:$rs1, vti.AVL, vti.SEW)>;
 }

 foreach mti = AllMasks in {
   def : Pat<(mti.Mask immAllOnesV),
-            (!cast<Instruction>("PseudoVMSET_M_"#mti.BX) VLMax, mti.SEW)>;
+            (!cast<Instruction>("PseudoVMSET_M_"#mti.BX) mti.AVL, mti.SEW)>;
   def : Pat<(mti.Mask immAllZerosV),
-            (!cast<Instruction>("PseudoVMCLR_M_"#mti.BX) VLMax, mti.SEW)>;
+            (!cast<Instruction>("PseudoVMCLR_M_"#mti.BX) mti.AVL, mti.SEW)>;
 }
 } // Predicates = [HasStdExtV]
@@ -513,10 +518,10 @@
     if !eq(vti.SEW, 64) then {
       def : Pat<(vti.Vector (rv32_splat_i64 GPR:$rs1)),
                 (!cast<Instruction>("PseudoVMV_V_X_" # vti.LMul.MX)
-                    GPR:$rs1, VLMax, vti.SEW)>;
+                    GPR:$rs1, vti.AVL, vti.SEW)>;
       def : Pat<(vti.Vector (rv32_splat_i64 simm5:$rs1)),
                 (!cast<Instruction>("PseudoVMV_V_I_" # vti.LMul.MX)
-                    simm5:$rs1, VLMax, vti.SEW)>;
+                    simm5:$rs1, vti.AVL, vti.SEW)>;
     }
   }
 } // Predicates = [HasStdExtV, IsRV32]
@@ -526,10 +531,10 @@
   def : Pat<(fvti.Vector (splat_vector fvti.ScalarRegClass:$rs1)),
             (!cast<Instruction>("PseudoVFMV_V_F_"#fvti.LMul.MX)
                 ToFPR32.ret,
-                VLMax, fvti.SEW)>;
+                fvti.AVL, fvti.SEW)>;
   def : Pat<(fvti.Vector (splat_vector (fvti.Scalar fpimm0))),
(!cast("PseudoVMV_V_I_"#fvti.LMul.MX) - 0, VLMax, fvti.SEW)>; + 0, fvti.AVL, fvti.SEW)>; } } // Predicates = [HasStdExtV, HasStdExtF]