diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -528,6 +528,14 @@
   Instruction BaseInstr = !cast(PseudoToVInst.VInst);
   // SEW = 0 is used to denote that the Pseudo is not SEW specific (or unknown).
   bits<8> SEW = 0;
+  // TargetOverlapConstraintType indicates whether the source and destination
+  // operands of these instructions may overlap.
+  // 1 -> default value, keep the current constraint
+  // 2 -> narrowing case
+  // 3 -> widening case
+  // TODO: Add TargetOverlapConstraintType into PseudosTable for further
+  // query.
+  bits<2> TargetOverlapConstraintType = 1;
 }
 
 // The actual table.
@@ -855,7 +863,7 @@
 }
 
 class VPseudoILoadNoMask LMUL,
-                         bit Ordered, bit EarlyClobber>:
+                         bit Ordered, bit EarlyClobber, int TargetConstraintType = 1>:
       Pseudo<(outs RetClass:$rd),
              (ins RetClass:$dest, GPRMem:$rs1, IdxClass:$rs2, AVL:$vl,
                   ixlenimm:$sew, ixlenimm:$policy),[]>,
@@ -868,10 +876,11 @@
   let HasSEWOp = 1;
   let HasVecPolicyOp = 1;
   let Constraints = !if(!eq(EarlyClobber, 1), "@earlyclobber $rd, $rd = $dest", "$rd = $dest");
+  let TargetOverlapConstraintType = TargetConstraintType;
 }
 
 class VPseudoILoadMask LMUL,
-                       bit Ordered, bit EarlyClobber>:
+                       bit Ordered, bit EarlyClobber, int TargetConstraintType = 1>:
      Pseudo<(outs GetVRegNoV0.R:$rd),
             (ins GetVRegNoV0.R:$merge,
                  GPRMem:$rs1, IdxClass:$rs2,
@@ -882,6 +891,7 @@
   let mayStore = 0;
   let hasSideEffects = 0;
   let Constraints = !if(!eq(EarlyClobber, 1), "@earlyclobber $rd, $rd = $merge", "$rd = $merge");
+  let TargetOverlapConstraintType = TargetConstraintType;
   let HasVLOp = 1;
   let HasSEWOp = 1;
   let HasVecPolicyOp = 1;
@@ -979,7 +989,7 @@
 }
 
 class VPseudoUnaryNoMask :
+                          string Constraint = "", int TargetConstraintType = 1> :
       Pseudo<(outs RetClass:$rd),
              (ins RetClass:$merge, OpClass:$rs2, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
@@ -988,13 +998,14 @@
   let mayStore = 0;
   let hasSideEffects = 0;
   let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
+  let TargetOverlapConstraintType = TargetConstraintType;
   let HasVLOp = 1;
   let HasSEWOp = 1;
   let HasVecPolicyOp = 1;
 }
 
 class VPseudoUnaryNoMaskRoundingMode :
+                                      string Constraint = "", int TargetConstraintType = 1> :
       Pseudo<(outs RetClass:$rd),
              (ins RetClass:$merge, OpClass:$rs2, ixlenimm:$rm,
                   AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
@@ -1003,6 +1014,7 @@
   let mayStore = 0;
   let hasSideEffects = 0;
   let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
+  let TargetOverlapConstraintType = TargetConstraintType;
   let HasVLOp = 1;
   let HasSEWOp = 1;
   let HasVecPolicyOp = 1;
@@ -1010,7 +1022,7 @@
   let UsesVXRM = 0;
 }
 
-class VPseudoUnaryMask :
+class VPseudoUnaryMask :
       Pseudo<(outs GetVRegNoV0.R:$rd),
              (ins GetVRegNoV0.R:$merge, OpClass:$rs2,
                   VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
@@ -1019,6 +1031,7 @@
   let mayStore = 0;
   let hasSideEffects = 0;
   let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
+  let TargetOverlapConstraintType = TargetConstraintType;
   let HasVLOp = 1;
   let HasSEWOp = 1;
   let HasVecPolicyOp = 1;
@@ -1129,7 +1142,8 @@
 class VPseudoBinaryNoMask :
+                           string Constraint,
+                           int TargetConstraintType = 1> :
       Pseudo<(outs RetClass:$rd),
              (ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>,
       RISCVVPseudo {
@@ -1137,6 +1151,7 @@
   let mayStore = 0;
   let hasSideEffects = 0;
   let Constraints = Constraint;
+  let TargetOverlapConstraintType = TargetConstraintType;
   let HasVLOp = 1;
   let
HasSEWOp = 1; } @@ -1144,7 +1159,8 @@ class VPseudoBinaryNoMaskTU : + string Constraint, + int TargetConstraintType = 1> : Pseudo<(outs RetClass:$rd), (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, @@ -1153,6 +1169,7 @@ let mayStore = 0; let hasSideEffects = 0; let Constraints = !interleave([Constraint, "$rd = $merge"], ","); + let TargetOverlapConstraintType = TargetConstraintType; let HasVLOp = 1; let HasSEWOp = 1; let HasVecPolicyOp = 1; @@ -1162,7 +1179,8 @@ VReg Op1Class, DAGOperand Op2Class, string Constraint, - int UsesVXRM_ = 1> : + int UsesVXRM_ = 1, + int TargetConstraintType = 1> : Pseudo<(outs RetClass:$rd), (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, ixlenimm:$rm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, @@ -1170,6 +1188,7 @@ let mayLoad = 0; let mayStore = 0; let Constraints = !interleave([Constraint, "$rd = $merge"], ","); + let TargetOverlapConstraintType = TargetConstraintType; let HasVLOp = 1; let HasSEWOp = 1; let HasVecPolicyOp = 1; @@ -1181,7 +1200,8 @@ RegisterClass Op1Class, DAGOperand Op2Class, string Constraint, - int UsesVXRM_> : + int UsesVXRM_, + int TargetConstraintType = 1> : Pseudo<(outs GetVRegNoV0.R:$rd), (ins GetVRegNoV0.R:$merge, Op1Class:$rs2, Op2Class:$rs1, @@ -1191,6 +1211,7 @@ let mayLoad = 0; let mayStore = 0; let Constraints = !interleave([Constraint, "$rd = $merge"], ","); + let TargetOverlapConstraintType = TargetConstraintType; let HasVLOp = 1; let HasSEWOp = 1; let HasVecPolicyOp = 1; @@ -1204,7 +1225,8 @@ // This allows maskedoff and rs2 to be the same register. class VPseudoTiedBinaryNoMask : + string Constraint, + int TargetConstraintType = 1> : Pseudo<(outs RetClass:$rd), (ins RetClass:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, @@ -1213,6 +1235,7 @@ let mayStore = 0; let hasSideEffects = 0; let Constraints = !interleave([Constraint, "$rd = $rs2"], ","); + let TargetOverlapConstraintType = TargetConstraintType; let HasVLOp = 1; let HasSEWOp = 1; let HasVecPolicyOp = 1; @@ -1288,7 +1311,8 @@ class VPseudoBinaryMaskPolicy : + string Constraint, + int TargetConstraintType = 1> : Pseudo<(outs GetVRegNoV0.R:$rd), (ins GetVRegNoV0.R:$merge, Op1Class:$rs2, Op2Class:$rs1, @@ -1298,6 +1322,7 @@ let mayStore = 0; let hasSideEffects = 0; let Constraints = !interleave([Constraint, "$rd = $merge"], ","); + let TargetOverlapConstraintType = TargetConstraintType; let HasVLOp = 1; let HasSEWOp = 1; let HasVecPolicyOp = 1; @@ -1348,7 +1373,8 @@ class VPseudoBinaryMOutNoMask : + string Constraint, + int TargetConstraintType = 1> : Pseudo<(outs RetClass:$rd), (ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>, RISCVVPseudo { @@ -1356,6 +1382,7 @@ let mayStore = 0; let hasSideEffects = 0; let Constraints = Constraint; + let TargetOverlapConstraintType = TargetConstraintType; let HasVLOp = 1; let HasSEWOp = 1; } @@ -1364,7 +1391,8 @@ class VPseudoBinaryMOutMask : + string Constraint, + int TargetConstraintType = 1> : Pseudo<(outs RetClass:$rd), (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, @@ -1374,6 +1402,7 @@ let mayStore = 0; let hasSideEffects = 0; let Constraints = !interleave([Constraint, "$rd = $merge"], ","); + let TargetOverlapConstraintType = TargetConstraintType; let HasVLOp = 1; let HasSEWOp = 1; let UsesMaskPolicy = 1; @@ -1384,7 +1413,8 @@ // This allows maskedoff and rs2 to be the same register. 
class VPseudoTiedBinaryMask : + string Constraint, + int TargetConstraintType = 1> : Pseudo<(outs GetVRegNoV0.R:$rd), (ins GetVRegNoV0.R:$merge, Op2Class:$rs1, @@ -1394,6 +1424,7 @@ let mayStore = 0; let hasSideEffects = 0; let Constraints = !interleave([Constraint, "$rd = $merge"], ","); + let TargetOverlapConstraintType = TargetConstraintType; let HasVLOp = 1; let HasSEWOp = 1; let HasVecPolicyOp = 1; @@ -1429,7 +1460,8 @@ DAGOperand Op2Class, LMULInfo MInfo, bit CarryIn, - string Constraint> : + string Constraint, + int TargetConstraintType = 1> : Pseudo<(outs RetClass:$rd), !if(CarryIn, (ins Op1Class:$rs2, Op2Class:$rs1, VMV0:$carry, AVL:$vl, @@ -1440,6 +1472,7 @@ let mayStore = 0; let hasSideEffects = 0; let Constraints = Constraint; + let TargetOverlapConstraintType = TargetConstraintType; let HasVLOp = 1; let HasSEWOp = 1; let VLMul = MInfo.value; @@ -1450,7 +1483,8 @@ DAGOperand Op2Class, LMULInfo MInfo, bit CarryIn, - string Constraint> : + string Constraint, + int TargetConstraintType = 1> : Pseudo<(outs RetClass:$rd), !if(CarryIn, (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, VMV0:$carry, AVL:$vl, @@ -1461,6 +1495,7 @@ let mayStore = 0; let hasSideEffects = 0; let Constraints = !interleave([Constraint, "$rd = $merge"], ","); + let TargetOverlapConstraintType = TargetConstraintType; let HasVLOp = 1; let HasSEWOp = 1; let HasVecPolicyOp = 0; @@ -1487,7 +1522,8 @@ class VPseudoTernaryNoMaskWithPolicy : + string Constraint, + int TargetConstraintType = 1> : Pseudo<(outs RetClass:$rd), (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), @@ -1497,6 +1533,7 @@ let mayStore = 0; let hasSideEffects = 0; let Constraints = !interleave([Constraint, "$rd = $rs3"], ","); + let TargetOverlapConstraintType = TargetConstraintType; let HasVecPolicyOp = 1; let HasVLOp = 1; let HasSEWOp = 1; @@ -1505,7 +1542,8 @@ class VPseudoTernaryNoMaskWithPolicyRoundingMode : + string Constraint, + int TargetConstraintType = 1> : Pseudo<(outs RetClass:$rd), (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2, ixlenimm:$rm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), @@ -1515,6 +1553,7 @@ let mayStore = 0; let hasSideEffects = 0; let Constraints = !interleave([Constraint, "$rd = $rs3"], ","); + let TargetOverlapConstraintType = TargetConstraintType; let HasVecPolicyOp = 1; let HasVLOp = 1; let HasSEWOp = 1; @@ -1796,7 +1835,7 @@ } } -multiclass VPseudoILoad { +multiclass VPseudoILoad { foreach idxEEW = EEWList in { foreach dataEEW = EEWList in { foreach dataEMUL = MxSet.m in { @@ -1811,12 +1850,14 @@ defvar Vreg = dataEMUL.vrclass; defvar IdxVreg = idxEMUL.vrclass; defvar HasConstraint = !ne(dataEEW, idxEEW); + defvar NewTypeConstraints = !if(!or(!eq(dataEMULOctuple, dataEMUL.octuple), !eq(IdxLInfo, "MF2")), 1, !if(!ge(dataEMULOctuple, dataEMUL.octuple), 2, 3)); + defvar UseNewTypeConstraints = !if(!eq(TargetConstraintType, 1), 1, NewTypeConstraints); let VLMul = dataEMUL.value in { def "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo : - VPseudoILoadNoMask, + VPseudoILoadNoMask, VLXSched; def "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo # "_MASK" : - VPseudoILoadMask, + VPseudoILoadMask, RISCVMaskedPseudo, VLXSched; } @@ -1994,13 +2035,14 @@ DAGOperand Op2Class, LMULInfo MInfo, string Constraint = "", - int sew = 0> { + int sew = 0, + int TargetConstraintType = 1> { let VLMul = MInfo.value, SEW=sew in { defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX); def suffix : VPseudoBinaryNoMaskTU; + Constraint, TargetConstraintType>; def suffix # 
"_MASK" : VPseudoBinaryMaskPolicy, + Constraint, TargetConstraintType>, RISCVMaskedPseudo; } } @@ -2024,16 +2066,19 @@ LMULInfo MInfo, string Constraint = "", int sew = 0, - int UsesVXRM = 1> { + int UsesVXRM = 1, + int TargetConstraintType = 1> { let VLMul = MInfo.value, SEW=sew in { defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX); def suffix : VPseudoBinaryNoMaskRoundingMode; + Constraint, UsesVXRM, + TargetConstraintType>; def suffix # "_MASK" : VPseudoBinaryMaskPolicyRoundingMode, + UsesVXRM, + TargetConstraintType>, RISCVMaskedPseudo; } } @@ -2043,13 +2088,14 @@ VReg Op1Class, DAGOperand Op2Class, LMULInfo MInfo, - string Constraint = ""> { + string Constraint = "", + int TargetConstraintType = 1> { let VLMul = MInfo.value in { def "_" # MInfo.MX : VPseudoBinaryMOutNoMask; + Constraint, TargetConstraintType>; let ForceTailAgnostic = true in def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMOutMask, + Op2Class, Constraint, TargetConstraintType>, RISCVMaskedPseudo; } } @@ -2074,12 +2120,13 @@ multiclass VPseudoTiedBinary { + string Constraint = "", + int TargetConstraintType = 1> { let VLMul = MInfo.value in { def "_" # MInfo.MX # "_TIED": VPseudoTiedBinaryNoMask; + Constraint, TargetConstraintType>; def "_" # MInfo.MX # "_MASK_TIED" : VPseudoTiedBinaryMask; + Constraint, TargetConstraintType>; } } @@ -2201,19 +2248,20 @@ // * The destination EEW is greater than the source EEW, the source EMUL is // at least 1, and the overlap is in the highest-numbered part of the // destination register group is legal. Otherwise, it is illegal. -multiclass VPseudoBinaryW_VV { +multiclass VPseudoBinaryW_VV { defm _VV : VPseudoBinary; + "@earlyclobber $rd", TargetConstraintType=TargetConstraintType>; } -multiclass VPseudoBinaryW_VV_RM { +multiclass VPseudoBinaryW_VV_RM { defm _VV : VPseudoBinaryRoundingMode; + "@earlyclobber $rd", UsesVXRM=0, + TargetConstraintType=TargetConstraintType>; } -multiclass VPseudoBinaryW_VX { +multiclass VPseudoBinaryW_VX { defm "_VX" : VPseudoBinary; + "@earlyclobber $rd", TargetConstraintType=TargetConstraintType>; } multiclass VPseudoBinaryW_VI { @@ -2227,40 +2275,44 @@ "@earlyclobber $rd">; } -multiclass VPseudoBinaryW_VF_RM { +multiclass VPseudoBinaryW_VF_RM { defm "_V" # f.FX : VPseudoBinaryRoundingMode; + UsesVXRM=0, + TargetConstraintType=TargetConstraintType>; } -multiclass VPseudoBinaryW_WV { +multiclass VPseudoBinaryW_WV { defm _WV : VPseudoBinary; + "@earlyclobber $rd", TargetConstraintType=TargetConstraintType>; defm _WV : VPseudoTiedBinary; + "@earlyclobber $rd", TargetConstraintType>; } -multiclass VPseudoBinaryW_WV_RM { +multiclass VPseudoBinaryW_WV_RM { defm _WV : VPseudoBinaryRoundingMode; + "@earlyclobber $rd", UsesVXRM=0, TargetConstraintType=TargetConstraintType>; defm _WV : VPseudoTiedBinaryRoundingMode; } -multiclass VPseudoBinaryW_WX { - defm "_WX" : VPseudoBinary; +multiclass VPseudoBinaryW_WX { + defm "_WX" : VPseudoBinary; } -multiclass VPseudoBinaryW_WF { +multiclass VPseudoBinaryW_WF { defm "_W" # f.FX : VPseudoBinary; + f.fprclass, m, /*Constraint*/ "", TargetConstraintType=TargetConstraintType>; } -multiclass VPseudoBinaryW_WF_RM { +multiclass VPseudoBinaryW_WF_RM { defm "_W" # f.FX : VPseudoBinaryRoundingMode; + Constraint="", + sew=0, + UsesVXRM=0, + TargetConstraintType=TargetConstraintType>; } // Narrowing instructions like vnsrl/vnsra/vnclip(u) don't need @earlyclobber @@ -2268,9 +2320,9 @@ // exception from the spec. 
// "The destination EEW is smaller than the source EEW and the overlap is in the // lowest-numbered part of the source register group." -multiclass VPseudoBinaryV_WV { +multiclass VPseudoBinaryV_WV { defm _WV : VPseudoBinary; + !if(!ge(m.octuple, 8), "@earlyclobber $rd", ""), TargetConstraintType=TargetConstraintType>; } multiclass VPseudoBinaryV_WV_RM { @@ -2279,9 +2331,9 @@ "@earlyclobber $rd", "")>; } -multiclass VPseudoBinaryV_WX { +multiclass VPseudoBinaryV_WX { defm _WX : VPseudoBinary; + !if(!ge(m.octuple, 8), "@earlyclobber $rd", ""), TargetConstraintType=TargetConstraintType>; } multiclass VPseudoBinaryV_WX_RM { @@ -2290,9 +2342,9 @@ "@earlyclobber $rd", "")>; } -multiclass VPseudoBinaryV_WI { +multiclass VPseudoBinaryV_WI { defm _WI : VPseudoBinary; + !if(!ge(m.octuple, 8), "@earlyclobber $rd", ""), TargetConstraintType=TargetConstraintType>; } multiclass VPseudoBinaryV_WI_RM { @@ -2305,33 +2357,35 @@ // vector register is v0. // For vadc and vsbc, CarryIn == 1 and CarryOut == 0 multiclass VPseudoBinaryV_VM { + string Constraint = "", int TargetConstraintType = 1> { def "_VV" # !if(CarryIn, "M", "") # "_" # m.MX : VPseudoBinaryCarryIn.R, m.vrclass)), - m.vrclass, m.vrclass, m, CarryIn, Constraint>; + m.vrclass, m.vrclass, m, CarryIn, Constraint, TargetConstraintType>; } -multiclass VPseudoTiedBinaryV_VM { +multiclass VPseudoTiedBinaryV_VM { def "_VVM" # "_" # m.MX: VPseudoTiedBinaryCarryIn.R, - m.vrclass, m.vrclass, m, 1, "">; + m.vrclass, m.vrclass, m, 1, "", + TargetConstraintType>; } multiclass VPseudoBinaryV_XM { + string Constraint = "", int TargetConstraintType = 1> { def "_VX" # !if(CarryIn, "M", "") # "_" # m.MX : VPseudoBinaryCarryIn.R, m.vrclass)), - m.vrclass, GPR, m, CarryIn, Constraint>; + m.vrclass, GPR, m, CarryIn, Constraint, TargetConstraintType>; } -multiclass VPseudoTiedBinaryV_XM { +multiclass VPseudoTiedBinaryV_XM { def "_VXM" # "_" # m.MX: VPseudoTiedBinaryCarryIn.R, - m.vrclass, GPR, m, 1, "">; + m.vrclass, GPR, m, 1, "", + TargetConstraintType>; } multiclass VPseudoVMRG_FM { @@ -2349,12 +2403,12 @@ } multiclass VPseudoBinaryV_IM { + string Constraint = "", int TargetConstraintType = 1> { def "_VI" # !if(CarryIn, "M", "") # "_" # m.MX : VPseudoBinaryCarryIn.R, m.vrclass)), - m.vrclass, simm5, m, CarryIn, Constraint>; + m.vrclass, simm5, m, CarryIn, Constraint, TargetConstraintType>; } multiclass VPseudoTiedBinaryV_IM { @@ -2462,45 +2516,45 @@ } } -multiclass PseudoVEXT_VF2 { +multiclass PseudoVEXT_VF2 { defvar constraints = "@earlyclobber $rd"; foreach m = MxListVF2 in { defvar mx = m.MX; let VLMul = m.value in { - def "_" # mx : VPseudoUnaryNoMask, + def "_" # mx : VPseudoUnaryNoMask, SchedUnary<"WriteVExtV", "ReadVExtV", mx, forceMergeOpRead=true>; def "_" # mx # "_MASK" : - VPseudoUnaryMask, + VPseudoUnaryMask, RISCVMaskedPseudo, SchedUnary<"WriteVExtV", "ReadVExtV", mx, forceMergeOpRead=true>; } } } -multiclass PseudoVEXT_VF4 { +multiclass PseudoVEXT_VF4 { defvar constraints = "@earlyclobber $rd"; foreach m = MxListVF4 in { defvar mx = m.MX; let VLMul = m.value in { - def "_" # mx : VPseudoUnaryNoMask, + def "_" # mx : VPseudoUnaryNoMask, SchedUnary<"WriteVExtV", "ReadVExtV", mx, forceMergeOpRead=true>; def "_" # mx # "_MASK" : - VPseudoUnaryMask, + VPseudoUnaryMask, RISCVMaskedPseudo, SchedUnary<"WriteVExtV", "ReadVExtV", mx, forceMergeOpRead=true>; } } } -multiclass PseudoVEXT_VF8 { +multiclass PseudoVEXT_VF8 { defvar constraints = "@earlyclobber $rd"; foreach m = MxListVF8 in { defvar mx = m.MX; let VLMul = m.value in { - def "_" # mx : 
VPseudoUnaryNoMask, + def "_" # mx : VPseudoUnaryNoMask, SchedUnary<"WriteVExtV", "ReadVExtV", mx, forceMergeOpRead=true>; def "_" # mx # "_MASK" : - VPseudoUnaryMask, + VPseudoUnaryMask, RISCVMaskedPseudo, SchedUnary<"WriteVExtV", "ReadVExtV", mx, forceMergeOpRead=true>; } @@ -2518,26 +2572,26 @@ // lowest-numbered part of the source register group". // With LMUL<=1 the source and dest occupy a single register so any overlap // is in the lowest-numbered part. -multiclass VPseudoBinaryM_VV { +multiclass VPseudoBinaryM_VV { defm _VV : VPseudoBinaryM; + !if(!ge(m.octuple, 16), "@earlyclobber $rd", ""), TargetConstraintType>; } -multiclass VPseudoBinaryM_VX { +multiclass VPseudoBinaryM_VX { defm "_VX" : VPseudoBinaryM; + !if(!ge(m.octuple, 16), "@earlyclobber $rd", ""), TargetConstraintType>; } -multiclass VPseudoBinaryM_VF { +multiclass VPseudoBinaryM_VF { defm "_V" # f.FX : VPseudoBinaryM; + !if(!ge(m.octuple, 16), "@earlyclobber $rd", ""), TargetConstraintType>; } -multiclass VPseudoBinaryM_VI { +multiclass VPseudoBinaryM_VI { defm _VI : VPseudoBinaryM; + !if(!ge(m.octuple, 16), "@earlyclobber $rd", ""), TargetConstraintType>; } multiclass VPseudoVGTR_VV_VX_VI { @@ -2837,13 +2891,13 @@ } } -multiclass VPseudoVWALU_VV_VX { +multiclass VPseudoVWALU_VV_VX { foreach m = MxListW in { defvar mx = m.MX; - defm "" : VPseudoBinaryW_VV, + defm "" : VPseudoBinaryW_VV, SchedBinary<"WriteVIWALUV", "ReadVIWALUV", "ReadVIWALUV", mx, forceMergeOpRead=true>; - defm "" : VPseudoBinaryW_VX, + defm "" : VPseudoBinaryW_VX, SchedBinary<"WriteVIWALUX", "ReadVIWALUV", "ReadVIWALUX", mx, forceMergeOpRead=true>; } @@ -2855,71 +2909,71 @@ } } -multiclass VPseudoVWMUL_VV_VX { +multiclass VPseudoVWMUL_VV_VX { foreach m = MxListW in { defvar mx = m.MX; - defm "" : VPseudoBinaryW_VV, + defm "" : VPseudoBinaryW_VV, SchedBinary<"WriteVIWMulV", "ReadVIWMulV", "ReadVIWMulV", mx, forceMergeOpRead=true>; - defm "" : VPseudoBinaryW_VX, + defm "" : VPseudoBinaryW_VX, SchedBinary<"WriteVIWMulX", "ReadVIWMulV", "ReadVIWMulX", mx, forceMergeOpRead=true>; } } -multiclass VPseudoVWMUL_VV_VF_RM { +multiclass VPseudoVWMUL_VV_VF_RM { foreach m = MxListFW in { - defm "" : VPseudoBinaryW_VV_RM, + defm "" : VPseudoBinaryW_VV_RM, SchedBinary<"WriteVFWMulV", "ReadVFWMulV", "ReadVFWMulV", m.MX, forceMergeOpRead=true>; } foreach f = FPListW in { foreach m = f.MxListFW in { - defm "" : VPseudoBinaryW_VF_RM, + defm "" : VPseudoBinaryW_VF_RM, SchedBinary<"WriteVFWMulF", "ReadVFWMulV", "ReadVFWMulF", m.MX, forceMergeOpRead=true>; } } } -multiclass VPseudoVWALU_WV_WX { +multiclass VPseudoVWALU_WV_WX { foreach m = MxListW in { defvar mx = m.MX; - defm "" : VPseudoBinaryW_WV, + defm "" : VPseudoBinaryW_WV, SchedBinary<"WriteVIWALUV", "ReadVIWALUV", "ReadVIWALUV", mx, forceMergeOpRead=true>; - defm "" : VPseudoBinaryW_WX, + defm "" : VPseudoBinaryW_WX, SchedBinary<"WriteVIWALUX", "ReadVIWALUV", "ReadVIWALUX", mx, forceMergeOpRead=true>; } } -multiclass VPseudoVFWALU_VV_VF_RM { +multiclass VPseudoVFWALU_VV_VF_RM { foreach m = MxListFW in { - defm "" : VPseudoBinaryW_VV_RM, + defm "" : VPseudoBinaryW_VV_RM, SchedBinary<"WriteVFWALUV", "ReadVFWALUV", "ReadVFWALUV", m.MX, forceMergeOpRead=true>; } foreach f = FPListW in { foreach m = f.MxListFW in { - defm "" : VPseudoBinaryW_VF_RM, + defm "" : VPseudoBinaryW_VF_RM, SchedBinary<"WriteVFWALUF", "ReadVFWALUV", "ReadVFWALUF", m.MX, forceMergeOpRead=true>; } } } -multiclass VPseudoVFWALU_WV_WF_RM { +multiclass VPseudoVFWALU_WV_WF_RM { foreach m = MxListFW in { - defm "" : VPseudoBinaryW_WV_RM, + defm "" : 
VPseudoBinaryW_WV_RM, SchedBinary<"WriteVFWALUV", "ReadVFWALUV", "ReadVFWALUV", m.MX, forceMergeOpRead=true>; } foreach f = FPListW in { foreach m = f.MxListFW in { - defm "" : VPseudoBinaryW_WF_RM, + defm "" : VPseudoBinaryW_WF_RM, SchedBinary<"WriteVFWALUF", "ReadVFWALUV", "ReadVFWALUF", m.MX, forceMergeOpRead=true>; } @@ -2962,52 +3016,52 @@ } } -multiclass VPseudoVCALU_VM_XM { +multiclass VPseudoVCALU_VM_XM { foreach m = MxList in { defvar mx = m.MX; - defm "" : VPseudoTiedBinaryV_VM, + defm "" : VPseudoTiedBinaryV_VM, SchedBinary<"WriteVICALUV", "ReadVICALUV", "ReadVICALUV", mx, forceMergeOpRead=true>; - defm "" : VPseudoTiedBinaryV_XM, + defm "" : VPseudoTiedBinaryV_XM, SchedBinary<"WriteVICALUX", "ReadVICALUV", "ReadVICALUX", mx, forceMergeOpRead=true>; } } -multiclass VPseudoVCALUM_VM_XM_IM { +multiclass VPseudoVCALUM_VM_XM_IM { foreach m = MxList in { defvar mx = m.MX; - defm "" : VPseudoBinaryV_VM, + defm "" : VPseudoBinaryV_VM, SchedBinary<"WriteVICALUV", "ReadVICALUV", "ReadVICALUV", mx, forceMasked=1, forceMergeOpRead=true>; - defm "" : VPseudoBinaryV_XM, + defm "" : VPseudoBinaryV_XM, SchedBinary<"WriteVICALUX", "ReadVICALUV", "ReadVICALUX", mx, forceMasked=1, forceMergeOpRead=true>; - defm "" : VPseudoBinaryV_IM, + defm "" : VPseudoBinaryV_IM, SchedUnary<"WriteVICALUI", "ReadVICALUV", mx, forceMasked=1, forceMergeOpRead=true>; } } -multiclass VPseudoVCALUM_VM_XM { +multiclass VPseudoVCALUM_VM_XM { foreach m = MxList in { defvar mx = m.MX; - defm "" : VPseudoBinaryV_VM, + defm "" : VPseudoBinaryV_VM, SchedBinary<"WriteVICALUV", "ReadVICALUV", "ReadVICALUV", mx, forceMasked=1, forceMergeOpRead=true>; - defm "" : VPseudoBinaryV_XM, + defm "" : VPseudoBinaryV_XM, SchedBinary<"WriteVICALUX", "ReadVICALUV", "ReadVICALUX", mx, forceMasked=1, forceMergeOpRead=true>; } } -multiclass VPseudoVCALUM_V_X_I { +multiclass VPseudoVCALUM_V_X_I { foreach m = MxList in { defvar mx = m.MX; - defm "" : VPseudoBinaryV_VM, + defm "" : VPseudoBinaryV_VM, SchedBinary<"WriteVICALUV", "ReadVICALUV", "ReadVICALUV", mx, forceMergeOpRead=true>; - defm "" : VPseudoBinaryV_XM, + defm "" : VPseudoBinaryV_XM, SchedBinary<"WriteVICALUX", "ReadVICALUV", "ReadVICALUX", mx, forceMergeOpRead=true>; defm "" : VPseudoBinaryV_IM, @@ -3016,13 +3070,13 @@ } } -multiclass VPseudoVCALUM_V_X { +multiclass VPseudoVCALUM_V_X { foreach m = MxList in { defvar mx = m.MX; - defm "" : VPseudoBinaryV_VM, + defm "" : VPseudoBinaryV_VM, SchedBinary<"WriteVICALUV", "ReadVICALUV", "ReadVICALUV", mx, forceMergeOpRead=true>; - defm "" : VPseudoBinaryV_XM, + defm "" : VPseudoBinaryV_XM, SchedBinary<"WriteVICALUX", "ReadVICALUV", "ReadVICALUX", mx, forceMergeOpRead=true>; } @@ -3043,16 +3097,16 @@ } } -multiclass VPseudoVNSHT_WV_WX_WI { +multiclass VPseudoVNSHT_WV_WX_WI { foreach m = MxListW in { defvar mx = m.MX; - defm "" : VPseudoBinaryV_WV, + defm "" : VPseudoBinaryV_WV, SchedBinary<"WriteVNShiftV", "ReadVNShiftV", "ReadVNShiftV", mx, forceMergeOpRead=true>; - defm "" : VPseudoBinaryV_WX, + defm "" : VPseudoBinaryV_WX, SchedBinary<"WriteVNShiftX", "ReadVNShiftV", "ReadVNShiftX", mx, forceMergeOpRead=true>; - defm "" : VPseudoBinaryV_WI, + defm "" : VPseudoBinaryV_WI, SchedUnary<"WriteVNShiftI", "ReadVNShiftV", mx, forceMergeOpRead=true>; } @@ -3097,11 +3151,12 @@ DAGOperand Op2Class, LMULInfo MInfo, string Constraint = "", - bit Commutable = 0> { + bit Commutable = 0, + int TargetConstraintType = 1> { let VLMul = MInfo.value in { let isCommutable = Commutable in - def "_" # MInfo.MX : VPseudoTernaryNoMaskWithPolicy; - def "_" # 
MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicy, + def "_" # MInfo.MX : VPseudoTernaryNoMaskWithPolicy; + def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicy, RISCVMaskedPseudo; } } @@ -3111,16 +3166,19 @@ DAGOperand Op2Class, LMULInfo MInfo, string Constraint = "", - bit Commutable = 0> { + bit Commutable = 0, + int TargetConstraintType = 1> { let VLMul = MInfo.value in { let isCommutable = Commutable in def "_" # MInfo.MX : VPseudoTernaryNoMaskWithPolicyRoundingMode; + Op2Class, Constraint, + TargetConstraintType>; def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicyRoundingMode, + UsesVXRM_=0, + TargetConstraintType=TargetConstraintType>, RISCVMaskedPseudo; } } @@ -3152,34 +3210,37 @@ Commutable=1>; } -multiclass VPseudoTernaryW_VV { +multiclass VPseudoTernaryW_VV { defvar constraint = "@earlyclobber $rd"; defm _VV : VPseudoTernaryWithPolicy; + constraint, /*Commutable*/ 0, TargetConstraintType>; } -multiclass VPseudoTernaryW_VV_RM { +multiclass VPseudoTernaryW_VV_RM { defvar constraint = "@earlyclobber $rd"; defm _VV : VPseudoTernaryWithPolicyRoundingMode; + constraint, /* Commutable */ 0, + TargetConstraintType>; } -multiclass VPseudoTernaryW_VX { +multiclass VPseudoTernaryW_VX { defvar constraint = "@earlyclobber $rd"; defm "_VX" : VPseudoTernaryWithPolicy; + constraint, /*Commutable*/ 0, TargetConstraintType>; } -multiclass VPseudoTernaryW_VF { +multiclass VPseudoTernaryW_VF { defvar constraint = "@earlyclobber $rd"; defm "_V" # f.FX : VPseudoTernaryWithPolicy; + m.vrclass, m, constraint, /*Commutable*/ 0, TargetConstraintType>; } -multiclass VPseudoTernaryW_VF_RM { +multiclass VPseudoTernaryW_VF_RM { defvar constraint = "@earlyclobber $rd"; defm "_V" # f.FX : VPseudoTernaryWithPolicyRoundingMode; + m.vrclass, m, constraint, + /* Commutable */ 0, + TargetConstraintType>; } multiclass VPseudoVSLDVWithPolicy { foreach m = MxListW in { defvar mx = m.MX; - defm "" : VPseudoTernaryW_VV, + defm "" : VPseudoTernaryW_VV, SchedTernary<"WriteVIWMulAddV", "ReadVIWMulAddV", "ReadVIWMulAddV", "ReadVIWMulAddV", mx>; - defm "" : VPseudoTernaryW_VX, + defm "" : VPseudoTernaryW_VX, SchedTernary<"WriteVIWMulAddX", "ReadVIWMulAddV", "ReadVIWMulAddX", "ReadVIWMulAddV", mx>; } } -multiclass VPseudoVWMAC_VX { +multiclass VPseudoVWMAC_VX { foreach m = MxListW in { - defm "" : VPseudoTernaryW_VX, + defm "" : VPseudoTernaryW_VX, SchedTernary<"WriteVIWMulAddX", "ReadVIWMulAddV", "ReadVIWMulAddX", "ReadVIWMulAddV", m.MX>; } } -multiclass VPseudoVWMAC_VV_VF_RM { +multiclass VPseudoVWMAC_VV_VF_RM { foreach m = MxListFW in { - defm "" : VPseudoTernaryW_VV_RM, + defm "" : VPseudoTernaryW_VV_RM, SchedTernary<"WriteVFWMulAddV", "ReadVFWMulAddV", "ReadVFWMulAddV", "ReadVFWMulAddV", m.MX>; } foreach f = FPListW in { foreach m = f.MxListFW in { - defm "" : VPseudoTernaryW_VF_RM, + defm "" : VPseudoTernaryW_VF_RM, SchedTernary<"WriteVFWMulAddF", "ReadVFWMulAddV", "ReadVFWMulAddF", "ReadVFWMulAddV", m.MX>; } @@ -3319,57 +3380,57 @@ } } -multiclass VPseudoVCMPM_VV_VX_VI { +multiclass VPseudoVCMPM_VV_VX_VI { foreach m = MxList in { defvar mx = m.MX; - defm "" : VPseudoBinaryM_VV, + defm "" : VPseudoBinaryM_VV, SchedBinary<"WriteVICmpV", "ReadVICmpV", "ReadVICmpV", mx>; - defm "" : VPseudoBinaryM_VX, + defm "" : VPseudoBinaryM_VX, SchedBinary<"WriteVICmpX", "ReadVICmpV", "ReadVICmpX", mx>; - defm "" : VPseudoBinaryM_VI, + defm "" : VPseudoBinaryM_VI, SchedUnary<"WriteVICmpI", "ReadVICmpV", mx>; } } -multiclass VPseudoVCMPM_VV_VX { +multiclass VPseudoVCMPM_VV_VX { foreach m = MxList in { defvar mx = m.MX; - defm "" : 
VPseudoBinaryM_VV, + defm "" : VPseudoBinaryM_VV, SchedBinary<"WriteVICmpV", "ReadVICmpV", "ReadVICmpV", mx>; - defm "" : VPseudoBinaryM_VX, + defm "" : VPseudoBinaryM_VX, SchedBinary<"WriteVICmpX", "ReadVICmpV", "ReadVICmpX", mx>; } } -multiclass VPseudoVCMPM_VV_VF { +multiclass VPseudoVCMPM_VV_VF { foreach m = MxListF in { - defm "" : VPseudoBinaryM_VV, + defm "" : VPseudoBinaryM_VV, SchedBinary<"WriteVFCmpV", "ReadVFCmpV", "ReadVFCmpV", m.MX>; } foreach f = FPList in { foreach m = f.MxList in { - defm "" : VPseudoBinaryM_VF, + defm "" : VPseudoBinaryM_VF, SchedBinary<"WriteVFCmpF", "ReadVFCmpV", "ReadVFCmpF", m.MX>; } } } -multiclass VPseudoVCMPM_VF { +multiclass VPseudoVCMPM_VF { foreach f = FPList in { foreach m = f.MxList in { - defm "" : VPseudoBinaryM_VF, + defm "" : VPseudoBinaryM_VF, SchedBinary<"WriteVFCmpF", "ReadVFCmpV", "ReadVFCmpF", m.MX>; } } } -multiclass VPseudoVCMPM_VX_VI { +multiclass VPseudoVCMPM_VX_VI { foreach m = MxList in { defvar mx = m.MX; - defm "" : VPseudoBinaryM_VX, + defm "" : VPseudoBinaryM_VX, SchedBinary<"WriteVICmpX", "ReadVICmpV", "ReadVICmpX", mx>; - defm "" : VPseudoBinaryM_VI, + defm "" : VPseudoBinaryM_VI, SchedUnary<"WriteVICmpI", "ReadVICmpV", mx>; } } @@ -3452,11 +3513,12 @@ multiclass VPseudoConversion { + string Constraint = "", + int TargetConstraintType = 1> { let VLMul = MInfo.value in { - def "_" # MInfo.MX : VPseudoUnaryNoMask; + def "_" # MInfo.MX : VPseudoUnaryNoMask; def "_" # MInfo.MX # "_MASK" : VPseudoUnaryMask, + Constraint, TargetConstraintType>, RISCVMaskedPseudo; } } @@ -3464,9 +3526,10 @@ multiclass VPseudoConversionRoundingMode { + string Constraint = "", + int TargetConstraintType = 1> { let VLMul = MInfo.value in { - def "_" # MInfo.MX : VPseudoUnaryNoMaskRoundingMode; + def "_" # MInfo.MX : VPseudoUnaryNoMaskRoundingMode; def "_" # MInfo.MX # "_MASK" : VPseudoUnaryMaskRoundingMode, RISCVMaskedPseudo; @@ -3544,19 +3607,19 @@ } } -multiclass VPseudoVWCVTI_V { +multiclass VPseudoVWCVTI_V { defvar constraint = "@earlyclobber $rd"; foreach m = MxListFW in { - defm _V : VPseudoConversion, + defm _V : VPseudoConversion, SchedUnary<"WriteVFWCvtFToIV", "ReadVFWCvtFToIV", m.MX, forceMergeOpRead=true>; } } -multiclass VPseudoVWCVTI_V_RM { +multiclass VPseudoVWCVTI_V_RM { defvar constraint = "@earlyclobber $rd"; foreach m = MxListFW in { - defm _V : VPseudoConversionRoundingMode, + defm _V : VPseudoConversionRoundingMode, SchedUnary<"WriteVFWCvtFToIV", "ReadVFWCvtFToIV", m.MX, forceMergeOpRead=true>; } @@ -3571,37 +3634,37 @@ } } -multiclass VPseudoVWCVTF_V { +multiclass VPseudoVWCVTF_V { defvar constraint = "@earlyclobber $rd"; foreach m = MxListW in { - defm _V : VPseudoConversion, + defm _V : VPseudoConversion, SchedUnary<"WriteVFWCvtIToFV", "ReadVFWCvtIToFV", m.MX, forceMergeOpRead=true>; } } -multiclass VPseudoVWCVTD_V { +multiclass VPseudoVWCVTD_V { defvar constraint = "@earlyclobber $rd"; foreach m = MxListFW in { - defm _V : VPseudoConversion, + defm _V : VPseudoConversion, SchedUnary<"WriteVFWCvtFToFV", "ReadVFWCvtFToFV", m.MX, forceMergeOpRead=true>; } } -multiclass VPseudoVNCVTI_W { +multiclass VPseudoVNCVTI_W { defvar constraint = "@earlyclobber $rd"; foreach m = MxListW in { - defm _W : VPseudoConversion, + defm _W : VPseudoConversion, SchedUnary<"WriteVFNCvtFToIV", "ReadVFNCvtFToIV", m.MX, forceMergeOpRead=true>; } } -multiclass VPseudoVNCVTI_W_RM { +multiclass VPseudoVNCVTI_W_RM { defvar constraint = "@earlyclobber $rd"; foreach m = MxListW in { - defm _W : VPseudoConversionRoundingMode, + defm _W : 
VPseudoConversionRoundingMode, SchedUnary<"WriteVFNCvtFToIV", "ReadVFNCvtFToIV", m.MX, forceMergeOpRead=true>; } @@ -3616,10 +3679,10 @@ } } -multiclass VPseudoVNCVTF_W_RM { +multiclass VPseudoVNCVTF_W_RM { defvar constraint = "@earlyclobber $rd"; foreach m = MxListFW in { - defm _W : VPseudoConversionRoundingMode, + defm _W : VPseudoConversionRoundingMode, SchedUnary<"WriteVFNCvtIToFV", "ReadVFNCvtIToFV", m.MX, forceMergeOpRead=true>; } @@ -3634,19 +3697,19 @@ } } -multiclass VPseudoVNCVTD_W { +multiclass VPseudoVNCVTD_W { defvar constraint = "@earlyclobber $rd"; foreach m = MxListFW in { - defm _W : VPseudoConversion, + defm _W : VPseudoConversion, SchedUnary<"WriteVFNCvtFToFV", "ReadVFNCvtFToFV", m.MX, forceMergeOpRead=true>; } } -multiclass VPseudoVNCVTD_W_RM { +multiclass VPseudoVNCVTD_W_RM { defvar constraint = "@earlyclobber $rd"; foreach m = MxListFW in { - defm _W : VPseudoConversionRoundingMode, + defm _W : VPseudoConversionRoundingMode, SchedUnary<"WriteVFNCvtFToFV", "ReadVFNCvtFToFV", m.MX, forceMergeOpRead=true>; } @@ -6016,8 +6079,8 @@ //===----------------------------------------------------------------------===// // Vector Indexed Loads and Stores -defm PseudoVLUX : VPseudoILoad; -defm PseudoVLOX : VPseudoILoad; +defm PseudoVLUX : VPseudoILoad; +defm PseudoVLOX : VPseudoILoad; defm PseudoVSOX : VPseudoIStore; defm PseudoVSUX : VPseudoIStore; @@ -6118,35 +6181,35 @@ //===----------------------------------------------------------------------===// // 11.2. Vector Widening Integer Add/Subtract //===----------------------------------------------------------------------===// -defm PseudoVWADDU : VPseudoVWALU_VV_VX; -defm PseudoVWSUBU : VPseudoVWALU_VV_VX; -defm PseudoVWADD : VPseudoVWALU_VV_VX; -defm PseudoVWSUB : VPseudoVWALU_VV_VX; -defm PseudoVWADDU : VPseudoVWALU_WV_WX; -defm PseudoVWSUBU : VPseudoVWALU_WV_WX; -defm PseudoVWADD : VPseudoVWALU_WV_WX; -defm PseudoVWSUB : VPseudoVWALU_WV_WX; +defm PseudoVWADDU : VPseudoVWALU_VV_VX; +defm PseudoVWSUBU : VPseudoVWALU_VV_VX; +defm PseudoVWADD : VPseudoVWALU_VV_VX; +defm PseudoVWSUB : VPseudoVWALU_VV_VX; +defm PseudoVWADDU : VPseudoVWALU_WV_WX; +defm PseudoVWSUBU : VPseudoVWALU_WV_WX; +defm PseudoVWADD : VPseudoVWALU_WV_WX; +defm PseudoVWSUB : VPseudoVWALU_WV_WX; //===----------------------------------------------------------------------===// // 11.3. Vector Integer Extension //===----------------------------------------------------------------------===// -defm PseudoVZEXT_VF2 : PseudoVEXT_VF2; -defm PseudoVZEXT_VF4 : PseudoVEXT_VF4; -defm PseudoVZEXT_VF8 : PseudoVEXT_VF8; -defm PseudoVSEXT_VF2 : PseudoVEXT_VF2; -defm PseudoVSEXT_VF4 : PseudoVEXT_VF4; -defm PseudoVSEXT_VF8 : PseudoVEXT_VF8; +defm PseudoVZEXT_VF2 : PseudoVEXT_VF2; +defm PseudoVZEXT_VF4 : PseudoVEXT_VF4; +defm PseudoVZEXT_VF8 : PseudoVEXT_VF8; +defm PseudoVSEXT_VF2 : PseudoVEXT_VF2; +defm PseudoVSEXT_VF4 : PseudoVEXT_VF4; +defm PseudoVSEXT_VF8 : PseudoVEXT_VF8; //===----------------------------------------------------------------------===// // 11.4. 
Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions //===----------------------------------------------------------------------===// defm PseudoVADC : VPseudoVCALU_VM_XM_IM; -defm PseudoVMADC : VPseudoVCALUM_VM_XM_IM<"@earlyclobber $rd">; -defm PseudoVMADC : VPseudoVCALUM_V_X_I<"@earlyclobber $rd">; +defm PseudoVMADC : VPseudoVCALUM_VM_XM_IM<"@earlyclobber $rd", /*TargetConstraintType*/ 2>; +defm PseudoVMADC : VPseudoVCALUM_V_X_I<"@earlyclobber $rd", /*TargetConstraintType*/ 2>; defm PseudoVSBC : VPseudoVCALU_VM_XM; -defm PseudoVMSBC : VPseudoVCALUM_VM_XM<"@earlyclobber $rd">; -defm PseudoVMSBC : VPseudoVCALUM_V_X<"@earlyclobber $rd">; +defm PseudoVMSBC : VPseudoVCALUM_VM_XM<"@earlyclobber $rd", /*TargetConstraintType*/ 2>; +defm PseudoVMSBC : VPseudoVCALUM_V_X<"@earlyclobber $rd", /*TargetConstraintType*/ 2>; //===----------------------------------------------------------------------===// // 11.5. Vector Bitwise Logical Instructions @@ -6165,20 +6228,20 @@ //===----------------------------------------------------------------------===// // 11.7. Vector Narrowing Integer Right Shift Instructions //===----------------------------------------------------------------------===// -defm PseudoVNSRL : VPseudoVNSHT_WV_WX_WI; -defm PseudoVNSRA : VPseudoVNSHT_WV_WX_WI; +defm PseudoVNSRL : VPseudoVNSHT_WV_WX_WI; +defm PseudoVNSRA : VPseudoVNSHT_WV_WX_WI; //===----------------------------------------------------------------------===// // 11.8. Vector Integer Comparison Instructions //===----------------------------------------------------------------------===// -defm PseudoVMSEQ : VPseudoVCMPM_VV_VX_VI; -defm PseudoVMSNE : VPseudoVCMPM_VV_VX_VI; -defm PseudoVMSLTU : VPseudoVCMPM_VV_VX; -defm PseudoVMSLT : VPseudoVCMPM_VV_VX; -defm PseudoVMSLEU : VPseudoVCMPM_VV_VX_VI; -defm PseudoVMSLE : VPseudoVCMPM_VV_VX_VI; -defm PseudoVMSGTU : VPseudoVCMPM_VX_VI; -defm PseudoVMSGT : VPseudoVCMPM_VX_VI; +defm PseudoVMSEQ : VPseudoVCMPM_VV_VX_VI; +defm PseudoVMSNE : VPseudoVCMPM_VV_VX_VI; +defm PseudoVMSLTU : VPseudoVCMPM_VV_VX; +defm PseudoVMSLT : VPseudoVCMPM_VV_VX; +defm PseudoVMSLEU : VPseudoVCMPM_VV_VX_VI; +defm PseudoVMSLE : VPseudoVCMPM_VV_VX_VI; +defm PseudoVMSGTU : VPseudoVCMPM_VX_VI; +defm PseudoVMSGT : VPseudoVCMPM_VX_VI; //===----------------------------------------------------------------------===// // 11.9. Vector Integer Min/Max Instructions @@ -6207,9 +6270,9 @@ //===----------------------------------------------------------------------===// // 11.12. Vector Widening Integer Multiply Instructions //===----------------------------------------------------------------------===// -defm PseudoVWMUL : VPseudoVWMUL_VV_VX; -defm PseudoVWMULU : VPseudoVWMUL_VV_VX; -defm PseudoVWMULSU : VPseudoVWMUL_VV_VX; +defm PseudoVWMUL : VPseudoVWMUL_VV_VX; +defm PseudoVWMULU : VPseudoVWMUL_VV_VX; +defm PseudoVWMULSU : VPseudoVWMUL_VV_VX; //===----------------------------------------------------------------------===// // 11.13. Vector Single-Width Integer Multiply-Add Instructions @@ -6222,10 +6285,10 @@ //===----------------------------------------------------------------------===// // 11.14. 
Vector Widening Integer Multiply-Add Instructions //===----------------------------------------------------------------------===// -defm PseudoVWMACCU : VPseudoVWMAC_VV_VX; -defm PseudoVWMACC : VPseudoVWMAC_VV_VX; -defm PseudoVWMACCSU : VPseudoVWMAC_VV_VX; -defm PseudoVWMACCUS : VPseudoVWMAC_VX; +defm PseudoVWMACCU : VPseudoVWMAC_VV_VX; +defm PseudoVWMACC : VPseudoVWMAC_VV_VX; +defm PseudoVWMACCSU : VPseudoVWMAC_VV_VX; +defm PseudoVWMACCUS : VPseudoVWMAC_VX; //===----------------------------------------------------------------------===// // 11.15. Vector Integer Merge Instructions @@ -6300,10 +6363,10 @@ // 13.3. Vector Widening Floating-Point Add/Subtract Instructions //===----------------------------------------------------------------------===// let mayRaiseFPException = true, hasSideEffects = 0, hasPostISelHook = 1 in { -defm PseudoVFWADD : VPseudoVFWALU_VV_VF_RM; -defm PseudoVFWSUB : VPseudoVFWALU_VV_VF_RM; -defm PseudoVFWADD : VPseudoVFWALU_WV_WF_RM; -defm PseudoVFWSUB : VPseudoVFWALU_WV_WF_RM; +defm PseudoVFWADD : VPseudoVFWALU_VV_VF_RM; +defm PseudoVFWSUB : VPseudoVFWALU_VV_VF_RM; +defm PseudoVFWADD : VPseudoVFWALU_WV_WF_RM; +defm PseudoVFWSUB : VPseudoVFWALU_WV_WF_RM; } //===----------------------------------------------------------------------===// @@ -6319,7 +6382,7 @@ // 13.5. Vector Widening Floating-Point Multiply //===----------------------------------------------------------------------===// let mayRaiseFPException = true, hasSideEffects = 0 in { -defm PseudoVFWMUL : VPseudoVWMUL_VV_VF_RM; +defm PseudoVFWMUL : VPseudoVWMUL_VV_VF_RM; } //===----------------------------------------------------------------------===// @@ -6340,10 +6403,10 @@ // 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions //===----------------------------------------------------------------------===// let mayRaiseFPException = true, hasSideEffects = 0, hasPostISelHook = 1 in { -defm PseudoVFWMACC : VPseudoVWMAC_VV_VF_RM; -defm PseudoVFWNMACC : VPseudoVWMAC_VV_VF_RM; -defm PseudoVFWMSAC : VPseudoVWMAC_VV_VF_RM; -defm PseudoVFWNMSAC : VPseudoVWMAC_VV_VF_RM; +defm PseudoVFWMACC : VPseudoVWMAC_VV_VF_RM; +defm PseudoVFWNMACC : VPseudoVWMAC_VV_VF_RM; +defm PseudoVFWMSAC : VPseudoVWMAC_VV_VF_RM; +defm PseudoVFWNMSAC : VPseudoVWMAC_VV_VF_RM; let Predicates = [HasStdExtZvfbfwma] in defm PseudoVFWMACCBF16 : VPseudoVWMAC_VV_VF_BF_RM; } @@ -6385,12 +6448,12 @@ // 13.13. 
Vector Floating-Point Compare Instructions //===----------------------------------------------------------------------===// let mayRaiseFPException = true in { -defm PseudoVMFEQ : VPseudoVCMPM_VV_VF; -defm PseudoVMFNE : VPseudoVCMPM_VV_VF; -defm PseudoVMFLT : VPseudoVCMPM_VV_VF; -defm PseudoVMFLE : VPseudoVCMPM_VV_VF; -defm PseudoVMFGT : VPseudoVCMPM_VF; -defm PseudoVMFGE : VPseudoVCMPM_VF; +defm PseudoVMFEQ : VPseudoVCMPM_VV_VF; +defm PseudoVMFNE : VPseudoVCMPM_VV_VF; +defm PseudoVMFLT : VPseudoVCMPM_VV_VF; +defm PseudoVMFLE : VPseudoVCMPM_VV_VF; +defm PseudoVMFGT : VPseudoVCMPM_VF; +defm PseudoVMFGE : VPseudoVCMPM_VF; } //===----------------------------------------------------------------------===// @@ -6437,19 +6500,19 @@ //===----------------------------------------------------------------------===// let mayRaiseFPException = true in { let hasSideEffects = 0, hasPostISelHook = 1 in { -defm PseudoVFWCVT_XU_F : VPseudoVWCVTI_V_RM; -defm PseudoVFWCVT_X_F : VPseudoVWCVTI_V_RM; +defm PseudoVFWCVT_XU_F : VPseudoVWCVTI_V_RM; +defm PseudoVFWCVT_X_F : VPseudoVWCVTI_V_RM; } defm PseudoVFWCVT_RM_XU_F : VPseudoVWCVTI_RM_V; defm PseudoVFWCVT_RM_X_F : VPseudoVWCVTI_RM_V; -defm PseudoVFWCVT_RTZ_XU_F : VPseudoVWCVTI_V; -defm PseudoVFWCVT_RTZ_X_F : VPseudoVWCVTI_V; +defm PseudoVFWCVT_RTZ_XU_F : VPseudoVWCVTI_V; +defm PseudoVFWCVT_RTZ_X_F : VPseudoVWCVTI_V; -defm PseudoVFWCVT_F_XU : VPseudoVWCVTF_V; -defm PseudoVFWCVT_F_X : VPseudoVWCVTF_V; +defm PseudoVFWCVT_F_XU : VPseudoVWCVTF_V; +defm PseudoVFWCVT_F_X : VPseudoVWCVTF_V; -defm PseudoVFWCVT_F_F : VPseudoVWCVTD_V; +defm PseudoVFWCVT_F_F : VPseudoVWCVTD_V; defm PseudoVFWCVTBF16_F_F : VPseudoVWCVTD_V; } // mayRaiseFPException = true @@ -6458,27 +6521,27 @@ //===----------------------------------------------------------------------===// let mayRaiseFPException = true in { let hasSideEffects = 0, hasPostISelHook = 1 in { -defm PseudoVFNCVT_XU_F : VPseudoVNCVTI_W_RM; -defm PseudoVFNCVT_X_F : VPseudoVNCVTI_W_RM; +defm PseudoVFNCVT_XU_F : VPseudoVNCVTI_W_RM; +defm PseudoVFNCVT_X_F : VPseudoVNCVTI_W_RM; } defm PseudoVFNCVT_RM_XU_F : VPseudoVNCVTI_RM_W; defm PseudoVFNCVT_RM_X_F : VPseudoVNCVTI_RM_W; -defm PseudoVFNCVT_RTZ_XU_F : VPseudoVNCVTI_W; -defm PseudoVFNCVT_RTZ_X_F : VPseudoVNCVTI_W; +defm PseudoVFNCVT_RTZ_XU_F : VPseudoVNCVTI_W; +defm PseudoVFNCVT_RTZ_X_F : VPseudoVNCVTI_W; let hasSideEffects = 0, hasPostISelHook = 1 in { -defm PseudoVFNCVT_F_XU : VPseudoVNCVTF_W_RM; -defm PseudoVFNCVT_F_X : VPseudoVNCVTF_W_RM; +defm PseudoVFNCVT_F_XU : VPseudoVNCVTF_W_RM; +defm PseudoVFNCVT_F_X : VPseudoVNCVTF_W_RM; } defm PseudoVFNCVT_RM_F_XU : VPseudoVNCVTF_RM_W; defm PseudoVFNCVT_RM_F_X : VPseudoVNCVTF_RM_W; let hasSideEffects = 0, hasPostISelHook = 1 in -defm PseudoVFNCVT_F_F : VPseudoVNCVTD_W_RM; +defm PseudoVFNCVT_F_F : VPseudoVNCVTD_W_RM; defm PseudoVFNCVTBF16_F_F : VPseudoVNCVTD_W_RM; -defm PseudoVFNCVT_ROD_F_F : VPseudoVNCVTD_W; +defm PseudoVFNCVT_ROD_F_F : VPseudoVNCVTD_W; } // mayRaiseFPException = true } // Predicates = [HasVInstructionsAnyF]
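
The field is threaded through the hierarchy the same way in every hunk above: a class gains an int TargetConstraintType template parameter defaulting to 1 and copies it into the record with "let TargetOverlapConstraintType = TargetConstraintType;", and instantiations opt in by passing 2 or 3. The standalone TableGen sketch below only illustrates that shape; the ExampleOverlapBase, ExamplePseudo, and ExampleWidenPseudo names are hypothetical and are not part of this patch.

// Self-contained sketch of the plumbing: an int template parameter copied
// into a bits<2> record field that later queries can read.
class ExampleOverlapBase {
  // 1 = keep the current constraint, 2 = narrowing case, 3 = widening case.
  bits<2> TargetOverlapConstraintType = 1;
}

class ExamplePseudo<int TargetConstraintType = 1> : ExampleOverlapBase {
  let TargetOverlapConstraintType = TargetConstraintType;
}

// A widening pseudo would opt in with 3, mirroring how this patch passes
// /*TargetConstraintType*/ 2 (the narrowing case) to the vmadc/vmsbc mask
// pseudos above.
def ExampleWidenPseudo : ExamplePseudo<3>;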