diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -1963,26 +1963,10 @@ VReg Op1Class, DAGOperand Op2Class, LMULInfo MInfo, - string Constraint = ""> { + string Constraint = "", + int sew = 0> { let VLMul = MInfo.value in { - def "_" # MInfo.MX : VPseudoBinaryNoMask; - def "_" # MInfo.MX # "_TU" : VPseudoBinaryNoMaskTU; - def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicy, - RISCVMaskedPseudo; - } -} - -multiclass VPseudoBinary_E { - let VLMul = MInfo.value in { - defvar suffix = "_" # MInfo.MX # "_E" # sew; + defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX); def suffix : VPseudoBinaryNoMask; def suffix # "_TU" : VPseudoBinaryNoMaskTU { + string Constraint = "", + int sew = 0> { let VLMul = lmul.value in { - def "_" # lmul.MX # "_" # emul.MX : VPseudoBinaryNoMask; - def "_" # lmul.MX # "_" # emul.MX # "_TU": VPseudoBinaryNoMaskTU; - def "_" # lmul.MX # "_" # emul.MX # "_MASK" : VPseudoBinaryMaskPolicy, RISCVMaskedPseudo; } } -multiclass VPseudoBinaryEmul_E { - let VLMul = lmul.value in { - defvar suffix = "_" # lmul.MX # "_E" # sew # "_" # emul.MX; - def suffix : VPseudoBinaryNoMask; - def suffix # "_TU" : VPseudoBinaryNoMaskTU; - def suffix # "_MASK" : VPseudoBinaryMaskPolicy, - RISCVMaskedPseudo; - } -} - multiclass VPseudoTiedBinary { - defm _VV : VPseudoBinary; -} - -multiclass VPseudoBinaryV_VV_E { - defm _VV : VPseudoBinary_E; +multiclass VPseudoBinaryV_VV { + defm _VV : VPseudoBinary; } // Similar to VPseudoBinaryV_VV, but uses MxListF. -multiclass VPseudoBinaryFV_VV { - defm _VV : VPseudoBinary; -} - -multiclass VPseudoBinaryFV_VV_E { - defm _VV : VPseudoBinary_E; +multiclass VPseudoBinaryFV_VV { + defm _VV : VPseudoBinary; } multiclass VPseudoVGTR_VV_EEW { @@ -2088,7 +2047,7 @@ defvar WriteVRGatherVV_MX_E = !cast("WriteVRGatherVV_" # mx # "_E" # e); defvar ReadVRGatherVV_data_MX_E = !cast("ReadVRGatherVV_data_" # mx # "_E" # e); defvar ReadVRGatherVV_index_MX_E = !cast("ReadVRGatherVV_index_" # mx # "_E" # e); - defm _VV : VPseudoBinaryEmul_E, + defm _VV : VPseudoBinaryEmul, Sched<[WriteVRGatherVV_MX_E, ReadVRGatherVV_data_MX_E, ReadVRGatherVV_index_MX_E]>; } } @@ -2096,12 +2055,8 @@ } } -multiclass VPseudoBinaryV_VX { - defm "_VX" : VPseudoBinary; -} - -multiclass VPseudoBinaryV_VX_E { - defm "_VX" : VPseudoBinary_E; +multiclass VPseudoBinaryV_VX { + defm "_VX" : VPseudoBinary; } multiclass VPseudoVSLD1_VX { @@ -2116,15 +2071,9 @@ } } -multiclass VPseudoBinaryV_VF { +multiclass VPseudoBinaryV_VF { defm "_V" # f.FX : VPseudoBinary; -} - -multiclass VPseudoBinaryV_VF_E { - defm "_V" # f.FX : VPseudoBinary_E; + f.fprclass, m, Constraint, sew>; } multiclass VPseudoVSLD1_VF { @@ -2520,7 +2469,7 @@ defvar WriteVRGatherVV_MX_E = !cast("WriteVRGatherVV_" # mx # "_E" # e); defvar ReadVRGatherVV_data_MX_E = !cast("ReadVRGatherVV_data_" # mx # "_E" # e); defvar ReadVRGatherVV_index_MX_E = !cast("ReadVRGatherVV_index_" # mx # "_E" # e); - defm "" : VPseudoBinaryV_VV_E, + defm "" : VPseudoBinaryV_VV, Sched<[WriteVRGatherVV_MX_E, ReadVRGatherVV_data_MX_E, ReadVRGatherVV_index_MX_E, ReadVMask]>; } @@ -2685,9 +2634,9 @@ defvar ReadVIDivV_MX_E = !cast("ReadVIDivV_" # mx # "_E" # e); defvar ReadVIDivX_MX_E = !cast("ReadVIDivX_" # mx # "_E" # e); - defm "" : VPseudoBinaryV_VV_E, + defm "" : VPseudoBinaryV_VV, Sched<[WriteVIDivV_MX_E, ReadVIDivV_MX_E, ReadVIDivV_MX_E, ReadVMask]>; - defm "" : 
VPseudoBinaryV_VX_E, + defm "" : VPseudoBinaryV_VX, Sched<[WriteVIDivX_MX_E, ReadVIDivV_MX_E, ReadVIDivX_MX_E, ReadVMask]>; } } @@ -2724,7 +2673,7 @@ defvar WriteVFDivV_MX_E = !cast("WriteVFDivV_" # mx # "_E" # e); defvar ReadVFDivV_MX_E = !cast("ReadVFDivV_" # mx # "_E" # e); - defm "" : VPseudoBinaryFV_VV_E, + defm "" : VPseudoBinaryFV_VV, Sched<[WriteVFDivV_MX_E, ReadVFDivV_MX_E, ReadVFDivV_MX_E, ReadVMask]>; } } @@ -2738,7 +2687,7 @@ defvar ReadVFDivV_MX_E = !cast("ReadVFDivV_" # mx # "_E" # e); defvar ReadVFDivF_MX_E = !cast("ReadVFDivF_" # mx # "_E" # e); - defm "" : VPseudoBinaryV_VF_E, + defm "" : VPseudoBinaryV_VF, Sched<[WriteVFDivF_MX_E, ReadVFDivV_MX_E, ReadVFDivF_MX_E, ReadVMask]>; } } @@ -2755,7 +2704,7 @@ defvar ReadVFDivV_MX_E = !cast("ReadVFDivV_" # mx # "_E" # e); defvar ReadVFDivF_MX_E = !cast("ReadVFDivF_" # mx # "_E" # e); - defm "" : VPseudoBinaryV_VF_E, + defm "" : VPseudoBinaryV_VF, Sched<[WriteVFDivF_MX_E, ReadVFDivV_MX_E, ReadVFDivF_MX_E, ReadVMask]>; } } @@ -3182,7 +3131,7 @@ } } -multiclass VPseudoTernaryWithTailPolicy_E.val in { defvar WriteVIRedV_From_MX_E = !cast("WriteVIRedV_From_" # mx # "_E" # e); - defm _VS : VPseudoTernaryWithTailPolicy_E, + defm _VS : VPseudoTernaryWithTailPolicy, Sched<[WriteVIRedV_From_MX_E, ReadVIRedV, ReadVIRedV, ReadVIRedV, ReadVMask]>; } @@ -3477,7 +3426,7 @@ defvar mx = m.MX; foreach e = SchedSEWSet.val in { defvar WriteVIWRedV_From_MX_E = !cast("WriteVIWRedV_From_" # mx # "_E" # e); - defm _VS : VPseudoTernaryWithTailPolicy_E, + defm _VS : VPseudoTernaryWithTailPolicy, Sched<[WriteVIWRedV_From_MX_E, ReadVIWRedV, ReadVIWRedV, ReadVIWRedV, ReadVMask]>; } @@ -3489,7 +3438,7 @@ defvar mx = m.MX; foreach e = SchedSEWSet.val in { defvar WriteVFRedV_From_MX_E = !cast("WriteVFRedV_From_" # mx # "_E" # e); - defm _VS : VPseudoTernaryWithTailPolicy_E, + defm _VS : VPseudoTernaryWithTailPolicy, Sched<[WriteVFRedV_From_MX_E, ReadVFRedV, ReadVFRedV, ReadVFRedV, ReadVMask]>; } @@ -3501,7 +3450,7 @@ defvar mx = m.MX; foreach e = SchedSEWSet.val in { defvar WriteVFRedOV_From_MX_E = !cast("WriteVFRedOV_From_" # mx # "_E" # e); - defm _VS : VPseudoTernaryWithTailPolicy_E, + defm _VS : VPseudoTernaryWithTailPolicy, Sched<[WriteVFRedOV_From_MX_E, ReadVFRedOV, ReadVFRedOV, ReadVFRedOV, ReadVMask]>; } @@ -3513,7 +3462,7 @@ defvar mx = m.MX; foreach e = SchedSEWSet.val in { defvar WriteVFWRedV_From_MX_E = !cast("WriteVFWRedV_From_" # mx # "_E" # e); - defm _VS : VPseudoTernaryWithTailPolicy_E, + defm _VS : VPseudoTernaryWithTailPolicy, Sched<[WriteVFWRedV_From_MX_E, ReadVFWRedV, ReadVFWRedV, ReadVFWRedV, ReadVMask]>; } @@ -3903,31 +3852,18 @@ string kind, ValueType result_type, ValueType op2_type, - int sew, + int log2sew, LMULInfo vlmul, - VReg op2_reg_class> : + VReg op2_reg_class, + bit isSEWAware = 0> : Pat<(result_type (!cast(intrinsic_name) (result_type undef), (op2_type op2_reg_class:$rs2), VLOpFrag)), - (!cast(inst#"_"#kind#"_"#vlmul.MX) - (op2_type op2_reg_class:$rs2), - GPR:$vl, sew)>; - -class VPatUnaryNoMask_E : - Pat<(result_type (!cast(intrinsic_name) - (result_type undef), - (op2_type op2_reg_class:$rs2), - VLOpFrag)), - (!cast(inst#"_"#kind#"_"#vlmul.MX#"_E"#sew) + (!cast( + !if(isSEWAware, + inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew), + inst#"_"#kind#"_"#vlmul.MX)) (op2_type op2_reg_class:$rs2), GPR:$vl, log2sew)>; @@ -3936,34 +3872,19 @@ string kind, ValueType result_type, ValueType op2_type, - int sew, + int log2sew, LMULInfo vlmul, VReg result_reg_class, - VReg op2_reg_class> : - Pat<(result_type (!cast(intrinsic_name) - 
(result_type result_reg_class:$merge), - (op2_type op2_reg_class:$rs2), - VLOpFrag)), - (!cast(inst#"_"#kind#"_"#vlmul.MX#"_TU") - (result_type result_reg_class:$merge), - (op2_type op2_reg_class:$rs2), - GPR:$vl, sew)>; - -class VPatUnaryNoMaskTU_E : + VReg op2_reg_class, + bit isSEWAware = 0> : Pat<(result_type (!cast(intrinsic_name) (result_type result_reg_class:$merge), (op2_type op2_reg_class:$rs2), VLOpFrag)), - (!cast(inst#"_"#kind#"_"#vlmul.MX#"_E"#sew#"_TU") + (!cast( + !if(isSEWAware, + inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_TU", + inst#"_"#kind#"_"#vlmul.MX#"_TU")) (result_type result_reg_class:$merge), (op2_type op2_reg_class:$rs2), GPR:$vl, log2sew)>; @@ -3994,37 +3915,20 @@ ValueType result_type, ValueType op2_type, ValueType mask_type, - int sew, + int log2sew, LMULInfo vlmul, VReg result_reg_class, - VReg op2_reg_class> : - Pat<(result_type (!cast(intrinsic_name#"_mask") - (result_type result_reg_class:$merge), - (op2_type op2_reg_class:$rs2), - (mask_type V0), - VLOpFrag, (XLenVT timm:$policy))), - (!cast(inst#"_"#kind#"_"#vlmul.MX#"_MASK") - (result_type result_reg_class:$merge), - (op2_type op2_reg_class:$rs2), - (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>; - -class VPatUnaryMaskTA_E : + VReg op2_reg_class, + bit isSEWAware = 0> : Pat<(result_type (!cast(intrinsic_name#"_mask") (result_type result_reg_class:$merge), (op2_type op2_reg_class:$rs2), (mask_type V0), VLOpFrag, (XLenVT timm:$policy))), - (!cast(inst#"_"#kind#"_"#vlmul.MX#"_E"#sew#"_MASK") + (!cast( + !if(isSEWAware, + inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", + inst#"_"#kind#"_"#vlmul.MX#"_MASK")) (result_type result_reg_class:$merge), (op2_type op2_reg_class:$rs2), (mask_type V0), GPR:$vl, log2sew, (XLenVT timm:$policy))>; @@ -4058,7 +3962,7 @@ ValueType result_type, ValueType op1_type, ValueType mask_type, - int sew, + int log2sew, LMULInfo vlmul, VReg result_reg_class, VReg op1_reg_class> : @@ -4067,29 +3971,7 @@ (op1_type op1_reg_class:$rs1), (mask_type VR:$rs2), VLOpFrag)), - (!cast(inst#"_"#kind#"_"#vlmul.MX) - (result_type result_reg_class:$merge), - (op1_type op1_reg_class:$rs1), - (mask_type VR:$rs2), - GPR:$vl, sew)>; - -class VPatUnaryAnyMask_E : - Pat<(result_type (!cast(intrinsic) - (result_type result_reg_class:$merge), - (op1_type op1_reg_class:$rs1), - (mask_type VR:$rs2), - VLOpFrag)), - (!cast(inst#"_"#kind#"_"#vlmul.MX#"_E"#sew) + (!cast(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)) (result_type result_reg_class:$merge), (op1_type op1_reg_class:$rs1), (mask_type VR:$rs2), @@ -4310,24 +4192,23 @@ op2_kind:$rs2, GPR:$vl, sew)>; -class VPatTernaryNoMaskTA_E : +class VPatTernaryNoMaskTA : Pat<(result_type (!cast(intrinsic) (result_type result_reg_class:$rs3), (op1_type op1_reg_class:$rs1), (op2_type op2_kind:$rs2), VLOpFrag)), - (!cast(inst#"_"#kind#"_"#vlmul.MX#"_E"#sew) + (!cast(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)) result_reg_class:$rs3, (op1_type op1_reg_class:$rs1), op2_kind:$rs2, @@ -4405,26 +4286,25 @@ (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>; -class VPatTernaryMaskTA_E : +class VPatTernaryMaskTA : Pat<(result_type (!cast(intrinsic#"_mask") (result_type result_reg_class:$rs3), (op1_type op1_reg_class:$rs1), (op2_type op2_kind:$rs2), (mask_type V0), VLOpFrag)), - (!cast(inst#"_"#kind#"_"#vlmul.MX#"_E"#sew# "_MASK") + (!cast(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)# "_MASK") result_reg_class:$rs3, (op1_type op1_reg_class:$rs1), op2_kind:$rs2, @@ -4446,14 +4326,13 @@ } } -multiclass VPatUnaryV_V_AnyMask_E vtilist> { 
+multiclass VPatUnaryV_V_AnyMask vtilist> { foreach vti = vtilist in { let Predicates = GetVTypePredicates.Predicates in - def : VPatUnaryAnyMask_E; + vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass>; } } @@ -4503,37 +4382,18 @@ } multiclass VPatUnaryV_V vtilist> { + list vtilist, bit isSEWAware = 0> { foreach vti = vtilist in { let Predicates = GetVTypePredicates.Predicates in { def : VPatUnaryNoMask; + vti.Vector, vti.Vector, vti.Log2SEW, + vti.LMul, vti.RegClass, isSEWAware>; def : VPatUnaryNoMaskTU; + vti.Vector, vti.Vector, vti.Log2SEW, + vti.LMul, vti.RegClass, vti.RegClass, isSEWAware>; def : VPatUnaryMaskTA; - } - } -} - -multiclass VPatUnaryV_V_E vtilist> { - foreach vti = vtilist in { - let Predicates = GetVTypePredicates.Predicates in { - def : VPatUnaryNoMask_E; - def : VPatUnaryNoMaskTU_E; - def : VPatUnaryMaskTA_E; + vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW, + vti.LMul, vti.RegClass, vti.RegClass, isSEWAware>; } } } @@ -4726,28 +4586,20 @@ } multiclass VPatBinaryV_VV vtilist> { - foreach vti = vtilist in - let Predicates = GetVTypePredicates.Predicates in - defm : VPatBinaryTA; -} - -multiclass VPatBinaryV_VV_E vtilist> { + list vtilist, bit isSEWAware = 0> { foreach vti = vtilist in let Predicates = GetVTypePredicates.Predicates in defm : VPatBinaryTA; } -multiclass VPatBinaryV_VV_INT_E vtilist> { +multiclass VPatBinaryV_VV_INT vtilist> { foreach vti = vtilist in { defvar ivti = GetIntVTypeInfo.Vti; let Predicates = GetVTypePredicates.Predicates in @@ -4759,7 +4611,7 @@ } } -multiclass VPatBinaryV_VV_INT_E_EEW vtilist> { foreach vti = vtilist in { // emul = lmul * eew / sew @@ -4781,24 +4633,14 @@ } multiclass VPatBinaryV_VX vtilist> { - foreach vti = vtilist in { - defvar kind = "V"#vti.ScalarSuffix; - let Predicates = GetVTypePredicates.Predicates in - defm : VPatBinaryTA; - } -} - -multiclass VPatBinaryV_VX_E vtilist> { + list vtilist, bit isSEWAware = 0> { foreach vti = vtilist in { defvar kind = "V"#vti.ScalarSuffix; let Predicates = GetVTypePredicates.Predicates in defm : VPatBinaryTA; @@ -5097,14 +4939,9 @@ VPatBinaryV_VI; multiclass VPatBinaryV_VV_VX vtilist> - : VPatBinaryV_VV, - VPatBinaryV_VX; - -multiclass VPatBinaryV_VV_VX_E vtilist> - : VPatBinaryV_VV_E, - VPatBinaryV_VX_E; + list vtilist, bit isSEWAware = 0> + : VPatBinaryV_VV, + VPatBinaryV_VX; multiclass VPatBinaryV_VX_VI vtilist> @@ -5214,25 +5051,24 @@ op2_kind>; } -multiclass VPatTernaryTA_E { - def : VPatTernaryNoMaskTA_E; - def : VPatTernaryMaskTA_E; +multiclass VPatTernaryTA { + def : VPatTernaryNoMaskTA; + def : VPatTernaryMaskTA; } multiclass VPatTernaryV_VV_AAXA vtilist, Operand ImmType = simm5> - : VPatBinaryV_VV_INT_E, + : VPatBinaryV_VV_INT, VPatBinaryV_VX_INT, VPatBinaryV_VI; @@ -5348,20 +5184,20 @@ { defvar vectorM1 = !cast(!if(IsFloat, "VF", "VI") # vti.SEW # "M1"); let Predicates = GetVTypePredicates.Predicates in - defm : VPatTernaryTA_E; + defm : VPatTernaryTA; } foreach gvti = !if(IsFloat, GroupFloatVectors, GroupIntegerVectors) in { let Predicates = GetVTypePredicates.Predicates in - defm : VPatTernaryTA_E; + defm : VPatTernaryTA; } } @@ -5372,12 +5208,12 @@ if !le(wtiSEW, 64) then { defvar wtiM1 = !cast(!if(IsFloat, "VF", "VI") # wtiSEW # "M1"); let Predicates = GetVTypePredicates.Predicates in - defm : VPatTernaryTA_E; + defm : VPatTernaryTA; } } } @@ -6450,10 +6286,10 @@ //===----------------------------------------------------------------------===// // 11.11. 
Vector Integer Divide Instructions
 //===----------------------------------------------------------------------===//
-defm : VPatBinaryV_VV_VX_E<"int_riscv_vdivu", "PseudoVDIVU", AllIntegerVectors>;
-defm : VPatBinaryV_VV_VX_E<"int_riscv_vdiv", "PseudoVDIV", AllIntegerVectors>;
-defm : VPatBinaryV_VV_VX_E<"int_riscv_vremu", "PseudoVREMU", AllIntegerVectors>;
-defm : VPatBinaryV_VV_VX_E<"int_riscv_vrem", "PseudoVREM", AllIntegerVectors>;
+defm : VPatBinaryV_VV_VX<"int_riscv_vdivu", "PseudoVDIVU", AllIntegerVectors, /*isSEWAware*/ 1>;
+defm : VPatBinaryV_VV_VX<"int_riscv_vdiv", "PseudoVDIV", AllIntegerVectors, /*isSEWAware*/ 1>;
+defm : VPatBinaryV_VV_VX<"int_riscv_vremu", "PseudoVREMU", AllIntegerVectors, /*isSEWAware*/ 1>;
+defm : VPatBinaryV_VV_VX<"int_riscv_vrem", "PseudoVREM", AllIntegerVectors, /*isSEWAware*/ 1>;

 //===----------------------------------------------------------------------===//
 // 11.12. Vector Widening Integer Multiply Instructions
@@ -6568,8 +6404,8 @@
 // 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
 //===----------------------------------------------------------------------===//
 defm : VPatBinaryV_VV_VX<"int_riscv_vfmul", "PseudoVFMUL", AllFloatVectors>;
-defm : VPatBinaryV_VV_VX_E<"int_riscv_vfdiv", "PseudoVFDIV", AllFloatVectors>;
-defm : VPatBinaryV_VX_E<"int_riscv_vfrdiv", "PseudoVFRDIV", AllFloatVectors>;
+defm : VPatBinaryV_VV_VX<"int_riscv_vfdiv", "PseudoVFDIV", AllFloatVectors, /*isSEWAware*/ 1>;
+defm : VPatBinaryV_VX<"int_riscv_vfrdiv", "PseudoVFRDIV", AllFloatVectors, /*isSEWAware*/ 1>;

 //===----------------------------------------------------------------------===//
 // 13.5. Vector Widening Floating-Point Multiply
@@ -6599,7 +6435,7 @@
 //===----------------------------------------------------------------------===//
 // 13.8. Vector Floating-Point Square-Root Instruction
 //===----------------------------------------------------------------------===//
-defm : VPatUnaryV_V_E<"int_riscv_vfsqrt", "PseudoVFSQRT", AllFloatVectors>;
+defm : VPatUnaryV_V<"int_riscv_vfsqrt", "PseudoVFSQRT", AllFloatVectors, /*isSEWAware*/ 1>;

 //===----------------------------------------------------------------------===//
 // 13.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
@@ -6851,22 +6687,22 @@
 //===----------------------------------------------------------------------===//
 defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
                                 AllIntegerVectors, uimm5>;
-defm : VPatBinaryV_VV_INT_E_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
-                                /* eew */ 16, AllIntegerVectors>;
+defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
+                              /* eew */ 16, AllIntegerVectors>;
 defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
                                 AllFloatVectors, uimm5>;
-defm : VPatBinaryV_VV_INT_E_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
-                                /* eew */ 16, AllFloatVectors>;
+defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
+                              /* eew */ 16, AllFloatVectors>;

 //===----------------------------------------------------------------------===//
 // 16.5. Vector Compress Instruction
 //===----------------------------------------------------------------------===//
-defm : VPatUnaryV_V_AnyMask_E<"int_riscv_vcompress", "PseudoVCOMPRESS", AllIntegerVectors>;
-defm : VPatUnaryV_V_AnyMask_E<"int_riscv_vcompress", "PseudoVCOMPRESS", AllIntegerVectors>;
-defm : VPatUnaryV_V_AnyMask_E<"int_riscv_vcompress", "PseudoVCOMPRESS", AllIntegerVectors>;
-defm : VPatUnaryV_V_AnyMask_E<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFloatVectors>;
-defm : VPatUnaryV_V_AnyMask_E<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFloatVectors>;
-defm : VPatUnaryV_V_AnyMask_E<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFloatVectors>;
+defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllIntegerVectors>;
+defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllIntegerVectors>;
+defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllIntegerVectors>;
+defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFloatVectors>;
+defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFloatVectors>;
+defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFloatVectors>;

 // Include the non-intrinsic ISel patterns
 include "RISCVInstrInfoVVLPatterns.td"
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -76,31 +76,18 @@
                           string instruction_name,
                           ValueType result_type,
                           ValueType op_type,
-                          int sew,
+                          int log2sew,
                           LMULInfo vlmul,
                           OutPatFrag avl,
-                          VReg op_reg_class> :
+                          VReg op_reg_class,
+                          bit isSEWAware = 0> :
     Pat<(result_type (vop
                      (op_type op_reg_class:$rs1),
                      (op_type op_reg_class:$rs2))),
-        (!cast(instruction_name#"_VV_"# vlmul.MX)
-                     op_reg_class:$rs1,
-                     op_reg_class:$rs2,
-                     avl, sew)>;
-
-class VPatBinarySDNode_VV_E :
-    Pat<(result_type (vop
-                     (op_type op_reg_class:$rs1),
-                     (op_type op_reg_class:$rs2))),
-        (!cast(instruction_name#"_VV_"# vlmul.MX#"_E"#sew)
+        (!cast(
+          !if(isSEWAware,
+              instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew),
+              instruction_name#"_VV_"# vlmul.MX))
                      op_reg_class:$rs1,
                      op_reg_class:$rs2,
                      avl, log2sew)>;
@@ -110,66 +97,36 @@
                           string suffix,
                           ValueType result_type,
                           ValueType vop_type,
-                          int sew,
+                          int log2sew,
                           LMULInfo vlmul,
                           OutPatFrag avl,
                           VReg vop_reg_class,
                           ComplexPattern SplatPatKind,
-                          DAGOperand xop_kind> :
-    Pat<(result_type (vop
-                     (vop_type vop_reg_class:$rs1),
-                     (vop_type (SplatPatKind xop_kind:$rs2)))),
-        (!cast(instruction_name#_#suffix#_# vlmul.MX)
-                     vop_reg_class:$rs1,
-                     xop_kind:$rs2,
-                     avl, sew)>;
-
-class VPatBinarySDNode_XI_E :
+                          DAGOperand xop_kind,
+                          bit isSEWAware = 0> :
     Pat<(result_type (vop
                      (vop_type vop_reg_class:$rs1),
                      (vop_type (SplatPatKind xop_kind:$rs2)))),
-        (!cast(instruction_name#_#suffix#_# vlmul.MX#"_E"#sew)
+        (!cast(
+          !if(isSEWAware,
+              instruction_name#_#suffix#_# vlmul.MX#"_E"#!shl(1, log2sew),
+              instruction_name#_#suffix#_# vlmul.MX))
                      vop_reg_class:$rs1,
                      xop_kind:$rs2,
                      avl, log2sew)>;

 multiclass VPatBinarySDNode_VV_VX vtilist = AllIntegerVectors> {
+                                  list vtilist = AllIntegerVectors,
+                                  bit isSEWAware = 0> {
   foreach vti = vtilist in {
     let Predicates = GetVTypePredicates.Predicates in {
       def : VPatBinarySDNode_VV;
+                                vti.LMul, vti.AVL, vti.RegClass, isSEWAware>;
       def : VPatBinarySDNode_XI;
-    }
-  }
-}
-
-multiclass VPatBinarySDNode_VV_VX_E {
-  foreach vti = AllIntegerVectors in {
-    let Predicates = GetVTypePredicates.Predicates in {
-      def : 
VPatBinarySDNode_VV_E; - def : VPatBinarySDNode_XI_E; + SplatPat, GPR, isSEWAware>; } } } @@ -192,83 +149,47 @@ ValueType result_type, ValueType vop_type, ValueType xop_type, - int sew, + int log2sew, LMULInfo vlmul, OutPatFrag avl, VReg vop_reg_class, - DAGOperand xop_kind> : - Pat<(result_type (vop (vop_type vop_reg_class:$rs1), - (vop_type (SplatFPOp xop_kind:$rs2)))), - (!cast(instruction_name#"_"#vlmul.MX) - vop_reg_class:$rs1, - (xop_type xop_kind:$rs2), - avl, sew)>; - -class VPatBinarySDNode_VF_E : + DAGOperand xop_kind, + bit isSEWAware = 0> : Pat<(result_type (vop (vop_type vop_reg_class:$rs1), (vop_type (SplatFPOp xop_kind:$rs2)))), - (!cast(instruction_name#"_"#vlmul.MX#"_E"#sew) + (!cast( + !if(isSEWAware, + instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew), + instruction_name#"_"#vlmul.MX)) vop_reg_class:$rs1, (xop_type xop_kind:$rs2), avl, log2sew)>; -multiclass VPatBinaryFPSDNode_VV_VF { +multiclass VPatBinaryFPSDNode_VV_VF { foreach vti = AllFloatVectors in { let Predicates = GetVTypePredicates.Predicates in { def : VPatBinarySDNode_VV; + vti.LMul, vti.AVL, vti.RegClass, isSEWAware>; def : VPatBinarySDNode_VF; - } - } -} - -multiclass VPatBinaryFPSDNode_VV_VF_E { - foreach vti = AllFloatVectors in { - let Predicates = GetVTypePredicates.Predicates in { - def : VPatBinarySDNode_VV_E; - def : VPatBinarySDNode_VF_E; + vti.ScalarRegClass, isSEWAware>; } } } -multiclass VPatBinaryFPSDNode_R_VF { - foreach fvti = AllFloatVectors in - let Predicates = GetVTypePredicates.Predicates in - def : Pat<(fvti.Vector (vop (fvti.Vector (SplatFPOp fvti.Scalar:$rs2)), - (fvti.Vector fvti.RegClass:$rs1))), - (!cast(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX) - fvti.RegClass:$rs1, - (fvti.Scalar fvti.ScalarRegClass:$rs2), - fvti.AVL, fvti.Log2SEW)>; -} - -multiclass VPatBinaryFPSDNode_R_VF_E { +multiclass VPatBinaryFPSDNode_R_VF { foreach fvti = AllFloatVectors in let Predicates = GetVTypePredicates.Predicates in def : Pat<(fvti.Vector (vop (fvti.Vector (SplatFPOp fvti.Scalar:$rs2)), (fvti.Vector fvti.RegClass:$rs1))), - (!cast(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW) + (!cast( + !if(isSEWAware, + instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW, + instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)) fvti.RegClass:$rs1, (fvti.Scalar fvti.ScalarRegClass:$rs2), fvti.AVL, fvti.Log2SEW)>; @@ -895,10 +816,10 @@ } // 11.11. Vector Integer Divide Instructions -defm : VPatBinarySDNode_VV_VX_E; -defm : VPatBinarySDNode_VV_VX_E; -defm : VPatBinarySDNode_VV_VX_E; -defm : VPatBinarySDNode_VV_VX_E; +defm : VPatBinarySDNode_VV_VX; +defm : VPatBinarySDNode_VV_VX; +defm : VPatBinarySDNode_VV_VX; +defm : VPatBinarySDNode_VV_VX; // 11.12. Vector Widening Integer Multiply Instructions defm : VPatWidenBinarySDNode_VV_VX; -defm : VPatBinaryFPSDNode_VV_VF_E; -defm : VPatBinaryFPSDNode_R_VF_E; +defm : VPatBinaryFPSDNode_VV_VF; +defm : VPatBinaryFPSDNode_R_VF; // 13.5. 
Vector Widening Floating-Point Multiply Instructions defm : VPatWidenBinaryFPSDNode_VV_VF; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td @@ -563,43 +563,22 @@ ValueType op1_type, ValueType op2_type, ValueType mask_type, - int sew, + int log2sew, LMULInfo vlmul, VReg result_reg_class, VReg op1_reg_class, - VReg op2_reg_class> + VReg op2_reg_class, + bit isSEWAware = 0> : Pat<(result_type (vop (op1_type op1_reg_class:$rs1), (op2_type op2_reg_class:$rs2), (result_type result_reg_class:$merge), (mask_type V0), VLOpFrag)), - (!cast(instruction_name#"_"#suffix#"_"# vlmul.MX#"_MASK") - result_reg_class:$merge, - op1_reg_class:$rs1, - op2_reg_class:$rs2, - (mask_type V0), GPR:$vl, sew, TAIL_AGNOSTIC)>; - -class VPatBinaryVL_V_E - : Pat<(result_type (vop - (op1_type op1_reg_class:$rs1), - (op2_type op2_reg_class:$rs2), - (result_type result_reg_class:$merge), - (mask_type V0), - VLOpFrag)), - (!cast(instruction_name#"_"#suffix#"_"# vlmul.MX#"_E"# sew#"_MASK") + (!cast( + !if(isSEWAware, + instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", + instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK")) result_reg_class:$merge, op1_reg_class:$rs1, op2_reg_class:$rs2, @@ -646,78 +625,41 @@ ValueType vop1_type, ValueType vop2_type, ValueType mask_type, - int sew, + int log2sew, LMULInfo vlmul, VReg result_reg_class, VReg vop_reg_class, ComplexPattern SplatPatKind, - DAGOperand xop_kind> - : Pat<(result_type (vop - (vop1_type vop_reg_class:$rs1), - (vop2_type (SplatPatKind (XLenVT xop_kind:$rs2))), - (result_type result_reg_class:$merge), - (mask_type V0), - VLOpFrag)), - (!cast(instruction_name#_#suffix#_# vlmul.MX#"_MASK") - result_reg_class:$merge, - vop_reg_class:$rs1, - xop_kind:$rs2, - (mask_type V0), GPR:$vl, sew, TAIL_AGNOSTIC)>; - -class VPatBinaryVL_XI_E + DAGOperand xop_kind, + bit isSEWAware = 0> : Pat<(result_type (vop (vop1_type vop_reg_class:$rs1), (vop2_type (SplatPatKind (XLenVT xop_kind:$rs2))), (result_type result_reg_class:$merge), (mask_type V0), VLOpFrag)), - (!cast(instruction_name#_#suffix#_# vlmul.MX#"_E"# sew#"_MASK") + (!cast( + !if(isSEWAware, + instruction_name#_#suffix#_#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", + instruction_name#_#suffix#_#vlmul.MX#"_MASK")) result_reg_class:$merge, vop_reg_class:$rs1, xop_kind:$rs2, (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>; multiclass VPatBinaryVL_VV_VX vtilist = AllIntegerVectors> { + list vtilist = AllIntegerVectors, + bit isSEWAware = 0> { foreach vti = vtilist in { let Predicates = GetVTypePredicates.Predicates in { def : VPatBinaryVL_V; + vti.RegClass, isSEWAware>; def : VPatBinaryVL_XI; - } - } -} - -multiclass VPatBinaryVL_VV_VX_E { - foreach vti = AllIntegerVectors in { - let Predicates = GetVTypePredicates.Predicates in { - def : VPatBinaryVL_V_E; - def : VPatBinaryVL_XI_E; + SplatPat, GPR, isSEWAware>; } } } @@ -805,92 +747,44 @@ ValueType vop1_type, ValueType vop2_type, ValueType mask_type, - int sew, + int log2sew, LMULInfo vlmul, VReg result_reg_class, VReg vop_reg_class, - RegisterClass scalar_reg_class> + RegisterClass scalar_reg_class, + bit isSEWAware = 0> : Pat<(result_type (vop (vop1_type vop_reg_class:$rs1), (vop2_type (SplatFPOp scalar_reg_class:$rs2)), (result_type result_reg_class:$merge), (mask_type V0), VLOpFrag)), - (!cast(instruction_name#"_"#vlmul.MX#"_MASK") - result_reg_class:$merge, - vop_reg_class:$rs1, - 
scalar_reg_class:$rs2, - (mask_type V0), GPR:$vl, sew, TAIL_AGNOSTIC)>; - -class VPatBinaryVL_VF_E - : Pat<(result_type (vop (vop_type vop_reg_class:$rs1), - (vop_type (SplatFPOp scalar_reg_class:$rs2)), - (result_type result_reg_class:$merge), - (mask_type V0), - VLOpFrag)), - (!cast(instruction_name#"_"#vlmul.MX#"_E"#sew#"_MASK") + (!cast( + !if(isSEWAware, + instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", + instruction_name#"_"#vlmul.MX#"_MASK")) result_reg_class:$merge, vop_reg_class:$rs1, scalar_reg_class:$rs2, (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>; -multiclass VPatBinaryFPVL_VV_VF { +multiclass VPatBinaryFPVL_VV_VF { foreach vti = AllFloatVectors in { let Predicates = GetVTypePredicates.Predicates in { def : VPatBinaryVL_V; + vti.RegClass, isSEWAware>; def : VPatBinaryVL_VF; - } - } -} - -multiclass VPatBinaryFPVL_VV_VF_E { - foreach vti = AllFloatVectors in { - let Predicates = GetVTypePredicates.Predicates in { - def : VPatBinaryVL_V_E; - def : VPatBinaryVL_VF_E; + vti.ScalarRegClass, isSEWAware>; } } } -multiclass VPatBinaryFPVL_R_VF { - foreach fvti = AllFloatVectors in { - let Predicates = GetVTypePredicates.Predicates in - def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2), - fvti.RegClass:$rs1, - (fvti.Vector fvti.RegClass:$merge), - (fvti.Mask V0), - VLOpFrag)), - (!cast(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK") - fvti.RegClass:$merge, - fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, - (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>; - } -} - -multiclass VPatBinaryFPVL_R_VF_E { +multiclass VPatBinaryFPVL_R_VF { foreach fvti = AllFloatVectors in { let Predicates = GetVTypePredicates.Predicates in def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2), @@ -898,7 +792,10 @@ (fvti.Vector fvti.RegClass:$merge), (fvti.Mask V0), VLOpFrag)), - (!cast(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK") + (!cast( + !if(isSEWAware, + instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK", + instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")) fvti.RegClass:$merge, fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>; @@ -1781,10 +1678,10 @@ } // 11.11. Vector Integer Divide Instructions -defm : VPatBinaryVL_VV_VX_E; -defm : VPatBinaryVL_VV_VX_E; -defm : VPatBinaryVL_VV_VX_E; -defm : VPatBinaryVL_VV_VX_E; +defm : VPatBinaryVL_VV_VX; +defm : VPatBinaryVL_VV_VX; +defm : VPatBinaryVL_VV_VX; +defm : VPatBinaryVL_VV_VX; // 11.12. Vector Widening Integer Multiply Instructions defm : VPatBinaryWVL_VV_VX; @@ -1912,8 +1809,8 @@ // 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions defm : VPatBinaryFPVL_VV_VF; -defm : VPatBinaryFPVL_VV_VF_E; -defm : VPatBinaryFPVL_R_VF_E; +defm : VPatBinaryFPVL_VV_VF; +defm : VPatBinaryFPVL_R_VF; // 13.5. Vector Widening Floating-Point Multiply Instructions defm : VPatBinaryFPWVL_VV_VF;
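
Note on the idiom used throughout this patch: each `*_E` class/multiclass is folded into its non-`_E` counterpart by adding a defaulted parameter (`int sew = 0` on the pseudo side, `bit isSEWAware = 0` on the pattern side) and selecting the record name with `!if(...)`, using `!shl(1, log2sew)` to turn a log2 SEW back into the SEW that appears in the `_E<sew>` suffix. The standalone sketch below only illustrates that naming scheme; the `Toy*` classes and records are hypothetical and not part of the LLVM tree.

// Minimal TableGen sketch of the suffix selection used above.
// ToyPseudo/ToyBinary/ToyPatName are illustrative names; only the
// !if / !shl idiom mirrors the real VPseudoBinary / VPat* definitions.
class ToyPseudo<string name> {
  string PseudoName = name;
}

multiclass ToyBinary<string mx, int sew = 0> {
  // sew == 0 keeps the legacy "_<MX>" name; a nonzero sew opts the
  // record into the SEW-aware "_<MX>_E<sew>" name.
  defvar suffix = !if(sew, "_" # mx # "_E" # sew, "_" # mx);
  def suffix : ToyPseudo<NAME # suffix>;
}

// Pattern side: a bit flag picks the SEW-aware pseudo name, converting
// log2sew (e.g. 5) back to sew (32) with !shl(1, log2sew).
class ToyPatName<string inst, string mx, int log2sew, bit isSEWAware> {
  string Selected = !if(isSEWAware,
                        inst # "_" # mx # "_E" # !shl(1, log2sew),
                        inst # "_" # mx);
}

defm PseudoToyDIV : ToyBinary<"M1", 32>;  // defines record PseudoToyDIV_M1_E32
defm PseudoToyADD : ToyBinary<"M1">;      // defines record PseudoToyADD_M1
def  ToyDivPat    : ToyPatName<"PseudoToyDIV", "M1", 5, 1>;  // Selected = "PseudoToyDIV_M1_E32"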