diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -107,6 +107,9 @@
   LMULInfo LMul = M;
   ValueType Scalar = Scal;
   RegisterClass ScalarRegClass = ScalarReg;
+  // The pattern fragment which produces the AVL operand, representing the
+  // "natural" vector length for this type. For scalable vectors this is VLMax.
+  OutPatFrag AVL = VLMax;
 }
 class GroupVTypeInfo AllMasks = {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -43,6 +43,7 @@
                                  LLVMType mask_type,
                                  int sew,
                                  LMULInfo vlmul,
+                                 OutPatFrag avl,
                                  RegisterClass reg_rs1,
                                  VReg reg_class>
 {
@@ -50,16 +51,16 @@
   defvar store_instr = !cast("PseudoVSE"#sew#"_V_"#vlmul.MX);
   // Load
   def : Pat<(type (load reg_rs1:$rs1)),
-            (load_instr reg_rs1:$rs1, VLMax, sew)>;
+            (load_instr reg_rs1:$rs1, avl, sew)>;
   // Store
   def : Pat<(store type:$rs2, reg_rs1:$rs1),
-            (store_instr reg_class:$rs2, reg_rs1:$rs1, VLMax, sew)>;
+            (store_instr reg_class:$rs2, reg_rs1:$rs1, avl, sew)>;
 }
 multiclass VPatUSLoadStoreSDNodes {
   foreach vti = AllVectors in
     defm "" : VPatUSLoadStoreSDNode;
+                                    vti.AVL, reg_rs1, vti.RegClass>;
 }
 class VPatBinarySDNode_VV :
     Pat<(result_type (vop
@@ -77,7 +79,7 @@
         (!cast(instruction_name#"_VV_"# vlmul.MX)
                      op_reg_class:$rs1,
                      op_reg_class:$rs2,
-                     VLMax, sew)>;
+                     avl, sew)>;
 class VPatBinarySDNode_XI(instruction_name#_#suffix#_# vlmul.MX)
                      vop_reg_class:$rs1,
                      xop_kind:$rs2,
-                     VLMax, sew)>;
+                     avl, sew)>;
 multiclass VPatBinarySDNode_VV_VX {
   foreach vti = AllIntegerVectors in {
     def : VPatBinarySDNode_VV;
+                              vti.LMul, vti.AVL, vti.RegClass, vti.RegClass>;
     def : VPatBinarySDNode_XI;
   }
 }
@@ -119,14 +122,14 @@
   foreach vti = AllIntegerVectors in {
     def : VPatBinarySDNode_VV;
+                              vti.LMul, vti.AVL, vti.RegClass, vti.RegClass>;
     def : VPatBinarySDNode_XI;
     def : VPatBinarySDNode_XI(SplatPat#_#ImmType),
                               ImmType>;
   }
@@ -140,6 +143,7 @@
                           ValueType mask_type,
                           int sew,
                           LMULInfo vlmul,
+                          OutPatFrag avl,
                           VReg RetClass,
                           VReg vop_reg_class,
                           DAGOperand xop_kind> :
@@ -148,16 +152,16 @@
         (!cast(instruction_name#"_VF_"#vlmul.MX)
                      vop_reg_class:$rs1,
                      ToFPR32.ret,
-                     VLMax, sew)>;
+                     avl, sew)>;
 multiclass VPatBinaryFPSDNode_VV_VF {
   foreach vti = AllFloatVectors in {
     def : VPatBinarySDNode_VV;
+                              vti.LMul, vti.AVL, vti.RegClass, vti.RegClass>;
     def : VPatBinarySDNode_VF;
   }
 }
@@ -169,7 +173,7 @@
               (!cast(instruction_name#"_VF_"#fvti.LMul.MX)
                            fvti.RegClass:$rs1,
                            ToFPR32.ret,
-                           VLMax, fvti.SEW)>;
+                           fvti.AVL, fvti.SEW)>;
 }
 multiclass VPatIntegerSetCCSDNode_VV.Value>;
   }
 }
@@ -200,7 +204,7 @@
               SwapHelper<(instruction),
                          (instruction vti.RegClass:$rs1),
                          (instruction xop_kind:$rs2),
-                         (instruction VLMax, vti.SEW),
+                         (instruction vti.AVL, vti.SEW),
                          swap>.Value>;
   }
 }
@@ -238,7 +242,7 @@
                               (fvti.Vector fvti.RegClass:$rs2),
                               cc)),
             (!cast(instruction_name#"_VV_"#fvti.LMul.MX)
-                fvti.RegClass:$rs1, fvti.RegClass:$rs2, VLMax, fvti.SEW)>;
+                fvti.RegClass:$rs1, fvti.RegClass:$rs2, fvti.AVL, fvti.SEW)>;
 }
 multiclass VPatFPSetCCSDNode_VF {
@@ -249,7 +253,7 @@
             (!cast(instruction_name#"_VF_"#fvti.LMul.MX)
                         fvti.RegClass:$rs1,
                         ToFPR32.ret,
-                        VLMax, fvti.SEW)>;
+                        fvti.AVL, fvti.SEW)>;
 }
 multiclass VPatFPSetCCSDNode_FV {
@@ -260,7 +264,7 @@
             (!cast(swapped_op_instruction_name#"_VF_"#fvti.LMul.MX)
                         fvti.RegClass:$rs1,
                         ToFPR32.ret,
-                        VLMax, fvti.SEW)>;
+                        fvti.AVL, fvti.SEW)>;
 multiclass VPatFPSetCCSDNode_VV_VF_FV("PseudoVRSUB_VX_"# vti.LMul.MX)
-                 vti.RegClass:$rs1, GPR:$rs2, VLMax, vti.SEW)>;
+                 vti.RegClass:$rs1, GPR:$rs2, vti.AVL, vti.SEW)>;
   def : Pat<(sub (vti.Vector (SplatPat_simm5 XLenVT:$rs2)),
                  (vti.Vector vti.RegClass:$rs1)),
             (!cast("PseudoVRSUB_VI_"# vti.LMul.MX)
-                 vti.RegClass:$rs1, simm5:$rs2, VLMax, vti.SEW)>;
+                 vti.RegClass:$rs1, simm5:$rs2, vti.AVL, vti.SEW)>;
 }
 // 12.5. Vector Bitwise Logical Instructions
@@ -351,47 +355,48 @@
   def : Pat<(vti.Vector (vselect (vti.Mask VMV0:$vm), vti.RegClass:$rs1,
                                                       vti.RegClass:$rs2)),
             (!cast("PseudoVMERGE_VVM_"#vti.LMul.MX)
-                 vti.RegClass:$rs2, vti.RegClass:$rs1, VMV0:$vm, VLMax, vti.SEW)>;
+                 vti.RegClass:$rs2, vti.RegClass:$rs1, VMV0:$vm,
+                 vti.AVL, vti.SEW)>;
   def : Pat<(vti.Vector (vselect (vti.Mask VMV0:$vm), (SplatPat XLenVT:$rs1),
                                                       vti.RegClass:$rs2)),
             (!cast("PseudoVMERGE_VXM_"#vti.LMul.MX)
-                 vti.RegClass:$rs2, GPR:$rs1, VMV0:$vm, VLMax, vti.SEW)>;
+                 vti.RegClass:$rs2, GPR:$rs1, VMV0:$vm, vti.AVL, vti.SEW)>;
   def : Pat<(vti.Vector (vselect (vti.Mask VMV0:$vm), (SplatPat_simm5 simm5:$rs1),
                                                       vti.RegClass:$rs2)),
             (!cast("PseudoVMERGE_VIM_"#vti.LMul.MX)
-                 vti.RegClass:$rs2, simm5:$rs1, VMV0:$vm, VLMax, vti.SEW)>;
+                 vti.RegClass:$rs2, simm5:$rs1, VMV0:$vm, vti.AVL, vti.SEW)>;
 }
 // 16.1. Vector Mask-Register Logical Instructions
 foreach mti = AllMasks in {
   def : Pat<(mti.Mask (and VR:$rs1, VR:$rs2)),
             (!cast("PseudoVMAND_MM_"#mti.LMul.MX)
-                 VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+                 VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
   def : Pat<(mti.Mask (or VR:$rs1, VR:$rs2)),
             (!cast("PseudoVMOR_MM_"#mti.LMul.MX)
-                 VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+                 VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
   def : Pat<(mti.Mask (xor VR:$rs1, VR:$rs2)),
             (!cast("PseudoVMXOR_MM_"#mti.LMul.MX)
-                 VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+                 VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
   def : Pat<(mti.Mask (vnot (and VR:$rs1, VR:$rs2))),
             (!cast("PseudoVMNAND_MM_"#mti.LMul.MX)
-                 VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+                 VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
   def : Pat<(mti.Mask (vnot (or VR:$rs1, VR:$rs2))),
             (!cast("PseudoVMNOR_MM_"#mti.LMul.MX)
-                 VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+                 VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
   def : Pat<(mti.Mask (vnot (xor VR:$rs1, VR:$rs2))),
             (!cast("PseudoVMXNOR_MM_"#mti.LMul.MX)
-                 VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+                 VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
   def : Pat<(mti.Mask (and VR:$rs1, (vnot VR:$rs2))),
             (!cast("PseudoVMANDNOT_MM_"#mti.LMul.MX)
-                 VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+                 VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
   def : Pat<(mti.Mask (or VR:$rs1, (vnot VR:$rs2))),
             (!cast("PseudoVMORNOT_MM_"#mti.LMul.MX)
-                 VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+                 VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
 }
 } // Predicates = [HasStdExtV]
@@ -429,7 +434,7 @@
                                   fvti.RegClass:$rs2)),
             (!cast("PseudoVMERGE_VVM_"#fvti.LMul.MX)
                  fvti.RegClass:$rs2, fvti.RegClass:$rs1, VMV0:$vm,
-                 VLMax, fvti.SEW)>;
+                 fvti.AVL, fvti.SEW)>;
   def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm),
                                   (splat_vector fvti.ScalarRegClass:$rs1),
@@ -437,13 +442,13 @@
             (!cast("PseudoVFMERGE_VFM_"#fvti.LMul.MX)
                  fvti.RegClass:$rs2,
                  ToFPR32.ret,
-                 VMV0:$vm, VLMax, fvti.SEW)>;
+                 VMV0:$vm, fvti.AVL, fvti.SEW)>;
   def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm),
                                   (splat_vector (fvti.Scalar fpimm0)),
                                   fvti.RegClass:$rs2)),
             (!cast("PseudoVMERGE_VIM_"#fvti.LMul.MX)
-                 fvti.RegClass:$rs2, 0, VMV0:$vm, VLMax, fvti.SEW)>;
+                 fvti.RegClass:$rs2, 0, VMV0:$vm, fvti.AVL, fvti.SEW)>;
 }
 } // Predicates = [HasStdExtV, HasStdExtF]
@@ -455,17 +460,17 @@
 foreach vti = AllIntegerVectors in {
   def : Pat<(vti.Vector (splat_vector GPR:$rs1)),
             (!cast("PseudoVMV_V_X_" # vti.LMul.MX)
-                 GPR:$rs1, VLMax, vti.SEW)>;
+                 GPR:$rs1, vti.AVL, vti.SEW)>;
   def : Pat<(vti.Vector (splat_vector simm5:$rs1)),
             (!cast("PseudoVMV_V_I_" # vti.LMul.MX)
-                 simm5:$rs1, VLMax, vti.SEW)>;
+                 simm5:$rs1, vti.AVL, vti.SEW)>;
 }
 foreach mti = AllMasks in {
   def : Pat<(mti.Mask immAllOnesV),
-            (!cast("PseudoVMSET_M_"#mti.BX) VLMax, mti.SEW)>;
+            (!cast("PseudoVMSET_M_"#mti.BX) mti.AVL, mti.SEW)>;
   def : Pat<(mti.Mask immAllZerosV),
-            (!cast("PseudoVMCLR_M_"#mti.BX) VLMax, mti.SEW)>;
+            (!cast("PseudoVMCLR_M_"#mti.BX) mti.AVL, mti.SEW)>;
 }
 } // Predicates = [HasStdExtV]
@@ -474,10 +479,10 @@
     if !eq(vti.SEW, 64) then {
       def : Pat<(vti.Vector (rv32_splat_i64 GPR:$rs1)),
                 (!cast("PseudoVMV_V_X_" # vti.LMul.MX)
-                     GPR:$rs1, VLMax, vti.SEW)>;
+                     GPR:$rs1, vti.AVL, vti.SEW)>;
       def : Pat<(vti.Vector (rv32_splat_i64 simm5:$rs1)),
                 (!cast("PseudoVMV_V_I_" # vti.LMul.MX)
-                     simm5:$rs1, VLMax, vti.SEW)>;
+                     simm5:$rs1, vti.AVL, vti.SEW)>;
     }
   }
 } // Predicates = [HasStdExtV, IsRV32]
@@ -487,10 +492,10 @@
   def : Pat<(fvti.Vector (splat_vector fvti.ScalarRegClass:$rs1)),
             (!cast("PseudoVFMV_V_F_"#fvti.LMul.MX)
                  ToFPR32.ret,
-                 VLMax, fvti.SEW)>;
+                 fvti.AVL, fvti.SEW)>;
   def : Pat<(fvti.Vector (splat_vector (fvti.Scalar fpimm0))),
             (!cast("PseudoVMV_V_I_"#fvti.LMul.MX)
-                 0, VLMax, fvti.SEW)>;
+                 0, fvti.AVL, fvti.SEW)>;
 }
 } // Predicates = [HasStdExtV, HasStdExtF]
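
Note (illustrative, not part of the patch): the new AVL field defaults to VLMax, so for the scalable types covered here the rewritten patterns (vti.AVL, fvti.AVL, mti.AVL) still select the same VLMAX-sized operations as the old hard-coded VLMax operand. The point of the indirection is that a type-info record can now supply a different AVL fragment without touching any pattern. The sketch below shows that idea under stated assumptions: FixedAVL8 and HypotheticalVI8M1 are made-up names, and the VTypeInfo constructor arguments are assumed rather than taken from this diff.

// Hypothetical sketch only -- not from this patch.
// An AVL fragment that materializes the immediate 8 instead of the VLMAX
// sentinel produced by the stock VLMax fragment.
def FixedAVL8 : OutPatFrag<(ops), (XLenVT 8)>;

// Overriding the field on a type-info record (constructor arguments assumed):
// every pattern that now selects on vti.AVL rather than a hard-coded VLMax
// would emit an AVL of 8 for this type, with no change to the patterns.
let AVL = FixedAVL8 in
def HypotheticalVI8M1 : VTypeInfo<vint8m1_t, vbool8_t, 8, VR, V_M1>;

This is only meant to show why the AVL operand is routed through the type info; any real fixed-length vector support may structure the override differently.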