diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
@@ -18,12 +18,28 @@
 #include "llvm/ADT/APInt.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/ADT/StringSwitch.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineOperand.h"
 #include "llvm/MC/MCInstrDesc.h"
 #include "llvm/Support/RISCVISAInfo.h"
 #include "llvm/TargetParser/SubtargetFeature.h"

 namespace llvm {

+namespace RISCVVPseudosTable {
+
+struct PseudoInfo {
+  uint16_t Pseudo;
+  uint16_t BaseInstr;
+  uint8_t VLMul;
+  uint8_t SEW;
+};
+
+#define GET_RISCVVPseudosTable_DECL
+#include "RISCVGenSearchableTables.inc"
+
+} // end namespace RISCVVPseudosTable
+
 // RISCVII - This namespace holds all of the target specific flags that
 // instruction info tracks. All definitions must match RISCVInstrFormats.td.
 namespace RISCVII {
@@ -153,6 +169,15 @@
 static inline bool hasSEWOp(uint64_t TSFlags) {
   return TSFlags & HasSEWOpMask;
 }
+/// \returns true if the instruction has a SEW, either as an explicit operand
+/// or recoverable from the pseudo table.
+static inline bool hasSEW(const MachineInstr &MI) {
+  if (RISCVII::hasSEWOp(MI.getDesc().TSFlags))
+    return true;
+
+  const RISCVVPseudosTable::PseudoInfo *RVV =
+      RISCVVPseudosTable::getPseudoInfo(MI.getOpcode());
+  return RVV && RVV->SEW != 0;
+}
 /// \returns true if there is a VL operand for the instruction.
 static inline bool hasVLOp(uint64_t TSFlags) {
   return TSFlags & HasVLOpMask;
@@ -180,12 +205,12 @@
 static inline unsigned getVLOpNum(const MCInstrDesc &Desc) {
   const uint64_t TSFlags = Desc.TSFlags;
-  // This method is only called if we expect to have a VL operand, and all
-  // instructions with VL also have SEW.
-  assert(hasSEWOp(TSFlags) && hasVLOp(TSFlags));
-  unsigned Offset = 2;
+  // This method is only called if we expect to have a VL operand.
+  assert(hasVLOp(TSFlags));
+  // Some instructions don't have a SEW operand.
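+  // E.g. with trailing operands (..., vl, sew, policy) the VL index is
+  // getNumOperands() - 3; without the SEW operand it is getNumOperands() - 2.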
+  unsigned Offset = 1 + hasSEWOp(TSFlags);
   if (hasVecPolicyOp(TSFlags))
-    Offset = 3;
+    Offset += 1;
   return Desc.getNumOperands() - Offset;
 }
@@ -198,6 +223,16 @@
   return Desc.getNumOperands() - Offset;
 }

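+/// \returns the SEW operand of \p MI if it has one, otherwise an immediate
+/// operand holding log2(SEW) recovered from the pseudo table.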
+static inline MachineOperand getSEW(const MachineInstr &MI) {
+  if (RISCVII::hasSEWOp(MI.getDesc().TSFlags))
+    return MI.getOperand(RISCVII::getSEWOpNum(MI.getDesc()));
+
+  const RISCVVPseudosTable::PseudoInfo *RVV =
+      RISCVVPseudosTable::getPseudoInfo(MI.getOpcode());
+  assert(RVV && "Can't find PseudoInfo!");
+  return MachineOperand::CreateImm(Log2_32(RVV->SEW));
+}
+
 static inline unsigned getVecPolicyOpNum(const MCInstrDesc &Desc) {
   assert(hasVecPolicyOp(Desc.TSFlags));
   return Desc.getNumOperands() - 1;
@@ -444,20 +479,6 @@
 #include "RISCVGenSearchableTables.inc"

 } // end namespace RISCVInsnOpcode

-namespace RISCVVPseudosTable {
-
-struct PseudoInfo {
-  uint16_t Pseudo;
-  uint16_t BaseInstr;
-  uint8_t VLMul;
-  uint8_t SEW;
-};
-
-#define GET_RISCVVPseudosTable_DECL
-#include "RISCVGenSearchableTables.inc"
-
-} // end namespace RISCVVPseudosTable
-
 namespace RISCVABI {

 enum ABI {
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -48,8 +48,14 @@
   return RISCVII::getVLOpNum(MI.getDesc());
 }

-static unsigned getSEWOpNum(const MachineInstr &MI) {
-  return RISCVII::getSEWOpNum(MI.getDesc());
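+// Returns log2(SEW) for MI, falling back to the pseudo table for SEW-aware
+// pseudos that no longer carry an explicit SEW operand.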
+static unsigned getLog2SEW(const MachineInstr &MI) {
+  if (RISCVII::hasSEWOp(MI.getDesc().TSFlags))
+    return MI.getOperand(RISCVII::getSEWOpNum(MI.getDesc())).getImm();
+
+  const RISCVVPseudosTable::PseudoInfo *RVV =
+      RISCVVPseudosTable::getPseudoInfo(MI.getOpcode());
+  assert(RVV && "Can't find PseudoInfo!");
+  return Log2_32(RVV->SEW);
 }

 static bool isVectorConfigInstr(const MachineInstr &MI) {
@@ -162,7 +168,7 @@
 static bool isMaskRegOp(const MachineInstr &MI) {
   if (!RISCVII::hasSEWOp(MI.getDesc().TSFlags))
     return false;
-  const unsigned Log2SEW = MI.getOperand(getSEWOpNum(MI)).getImm();
+  const unsigned Log2SEW = getLog2SEW(MI);
   // A Log2SEW of 0 is an operation on mask registers only.
   return Log2SEW == 0;
 }
@@ -343,7 +349,7 @@
   Res.demandVTYPE();
   // Start conservative on the unlowered form too
   uint64_t TSFlags = MI.getDesc().TSFlags;
-  if (RISCVII::hasSEWOp(TSFlags)) {
+  if (RISCVII::hasSEW(MI)) {
     Res.demandVTYPE();
     if (RISCVII::hasVLOp(TSFlags))
       Res.demandVL();
@@ -365,7 +371,7 @@
   }

   // Store instructions don't use the policy fields.
-  if (RISCVII::hasSEWOp(TSFlags) && MI.getNumExplicitDefs() == 0) {
+  if (RISCVII::hasSEW(MI) && MI.getNumExplicitDefs() == 0) {
     Res.TailPolicy = false;
     Res.MaskPolicy = false;
   }
@@ -788,7 +794,7 @@

   RISCVII::VLMUL VLMul = RISCVII::getLMul(TSFlags);

-  unsigned Log2SEW = MI.getOperand(getSEWOpNum(MI)).getImm();
+  unsigned Log2SEW = getLog2SEW(MI);
   // A Log2SEW of 0 is an operation on mask registers only.
   unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
   assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
@@ -1006,7 +1012,7 @@
 void RISCVInsertVSETVLI::transferBefore(VSETVLIInfo &Info,
                                         const MachineInstr &MI) const {
   uint64_t TSFlags = MI.getDesc().TSFlags;
-  if (!RISCVII::hasSEWOp(TSFlags))
+  if (!RISCVII::hasSEW(MI))
     return;

   const VSETVLIInfo NewInfo = computeInfoForInstr(MI, TSFlags, MRI);
@@ -1089,7 +1095,7 @@
   for (const MachineInstr &MI : MBB) {
     transferBefore(Info, MI);

-    if (isVectorConfigInstr(MI) || RISCVII::hasSEWOp(MI.getDesc().TSFlags))
+    if (isVectorConfigInstr(MI) || RISCVII::hasSEW(MI))
       HadVectorOp = true;

     transferAfter(Info, MI);
@@ -1220,8 +1226,7 @@
         PrefixTransparent = false;
       }

-      uint64_t TSFlags = MI.getDesc().TSFlags;
-      if (RISCVII::hasSEWOp(TSFlags)) {
+      if (RISCVII::hasSEW(MI)) {
         if (PrevInfo != CurInfo) {
           // If this is the first implicit state change, and the state change
           // requested can be proven to produce the same register contents, we
@@ -1235,7 +1240,7 @@
           PrefixTransparent = false;
         }

-        if (RISCVII::hasVLOp(TSFlags)) {
+        if (RISCVII::hasVLOp(MI.getDesc().TSFlags)) {
           MachineOperand &VLOp = MI.getOperand(getVLOpNum(MI));
           if (VLOp.isReg()) {
             // Erase the AVL operand from the instruction.
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -269,7 +269,7 @@

     // If the producing instruction does not depend on vsetvli, do not
     // convert COPY to vmv.v.v. For example, VL1R_V or PseudoVRELOAD.
-    if (!RISCVII::hasSEWOp(TSFlags) || !RISCVII::hasVLOp(TSFlags))
+    if (!RISCVII::hasSEW(*MBBI) || !RISCVII::hasVLOp(TSFlags))
       return false;

     // Found the definition.
@@ -456,7 +456,7 @@
   if (UseVMV_V_V) {
     const MCInstrDesc &Desc = DefMBBI->getDesc();
     MIB.add(DefMBBI->getOperand(RISCVII::getVLOpNum(Desc))); // AVL
-    MIB.add(DefMBBI->getOperand(RISCVII::getSEWOpNum(Desc))); // SEW
+    MIB.add(RISCVII::getSEW(*DefMBBI)); // SEW
     MIB.addImm(0); // tu, mu
     MIB.addReg(RISCV::VL, RegState::Implicit);
     MIB.addReg(RISCV::VTYPE, RegState::Implicit);
@@ -1832,10 +1832,6 @@
         return false;
       }
     }
-    if (!RISCVII::hasSEWOp(TSFlags)) {
-      ErrInfo = "VL operand w/o SEW operand?";
-      return false;
-    }
   }
   if (RISCVII::hasSEWOp(TSFlags)) {
     unsigned OpIdx = RISCVII::getSEWOpNum(Desc);
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1014,17 +1014,21 @@
 class VPseudoUnaryNoMaskRoundingMode<DAGOperand RetClass,
                                      DAGOperand OpClass,
-                                     string Constraint = ""> :
+                                     string Constraint = "",
+                                     bit hasSEWOp = 1> :
     Pseudo<(outs RetClass:$rd),
-           (ins RetClass:$merge, OpClass:$rs2, ixlenimm:$rm,
-                AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
+           !if(hasSEWOp,
+               (ins RetClass:$merge, OpClass:$rs2, ixlenimm:$rm,
+                    AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),
+               (ins RetClass:$merge, OpClass:$rs2, ixlenimm:$rm,
+                    AVL:$vl, ixlenimm:$policy)), []>,
    RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasSEWOp = hasSEWOp;
  let HasVecPolicyOp = 1;
  let HasRoundModeOp = 1;
  let UsesVXRM = 0;
@@ -1049,18 +1053,23 @@
 class VPseudoUnaryMaskRoundingMode<VReg RetClass,
                                    VReg OpClass,
-                                   string Constraint = ""> :
+                                   string Constraint = "",
+                                   bit hasSEWOp = 1> :
    Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
-           (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
-                VMaskOp:$vm, ixlenimm:$rm,
-                AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
+           !if(hasSEWOp,
+               (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
+                    VMaskOp:$vm, ixlenimm:$rm,
+                    AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),
+               (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
+                    VMaskOp:$vm, ixlenimm:$rm,
+                    AVL:$vl, ixlenimm:$policy)), []>,
    RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasSEWOp = hasSEWOp;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
  let HasRoundModeOp = 1;
@@ -1147,14 +1156,13 @@
                            VReg Op1Class> :
    Pseudo<(outs RetClass:$rd),
           (ins RetClass:$merge, Op1Class:$rs2,
-                VR:$vm, AVL:$vl, ixlenimm:$sew), []>,
+                VR:$vm, AVL:$vl), []>,
    RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = "@earlyclobber $rd, $rd = $merge";
  let HasVLOp = 1;
-  let HasSEWOp = 1;
 }

 class VPseudoBinaryNoMask<VReg RetClass,
                           VReg Op1Class,
                           DAGOperand Op2Class,
-                          string Constraint> :
+                          string Constraint,
+                          bit hasSEWOp = 1> :
    Pseudo<(outs RetClass:$rd),
-           (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl,
-                ixlenimm:$sew, ixlenimm:$policy), []>,
+           !if(hasSEWOp,
+               (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl,
+                    ixlenimm:$sew, ixlenimm:$policy),
+               (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl,
+                    ixlenimm:$policy)), []>,
    RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasSEWOp = hasSEWOp;
  let HasVecPolicyOp = 1;
 }

@@ -1193,16 +1205,20 @@
                                       VReg Op1Class,
                                       DAGOperand Op2Class,
                                       string Constraint,
-                                      int UsesVXRM_ = 1> :
+                                      int UsesVXRM_ = 1,
+                                      bit hasSEWOp = 1> :
    Pseudo<(outs RetClass:$rd),
-           (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, ixlenimm:$rm,
-                AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
+           !if(hasSEWOp,
+               (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, ixlenimm:$rm,
+                    AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),
+               (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, ixlenimm:$rm,
+                    AVL:$vl, ixlenimm:$policy)), []>,
    RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasSEWOp = hasSEWOp;
  let HasVecPolicyOp = 1;
  let HasRoundModeOp = 1;
  let UsesVXRM = UsesVXRM_;
@@ -1212,18 +1228,24 @@
                                     RegisterClass Op1Class,
                                     DAGOperand Op2Class,
                                     string Constraint,
-                                    int UsesVXRM_> :
+                                    int UsesVXRM_,
+                                    bit hasSEWOp = 1> :
    Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
-           (ins GetVRegNoV0<RetClass>.R:$merge,
-                Op1Class:$rs2, Op2Class:$rs1,
-                VMaskOp:$vm, ixlenimm:$rm, AVL:$vl,
-                ixlenimm:$sew, ixlenimm:$policy), []>,
+           !if(hasSEWOp,
+               (ins GetVRegNoV0<RetClass>.R:$merge,
+                    Op1Class:$rs2, Op2Class:$rs1,
+                    VMaskOp:$vm, ixlenimm:$rm, AVL:$vl,
+                    ixlenimm:$sew, ixlenimm:$policy),
+               (ins GetVRegNoV0<RetClass>.R:$merge,
+                    Op1Class:$rs2, Op2Class:$rs1,
+                    VMaskOp:$vm, ixlenimm:$rm, AVL:$vl,
+                    ixlenimm:$policy)), []>,
    RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasSEWOp = hasSEWOp;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
  let HasRoundModeOp = 1;
@@ -1321,18 +1343,23 @@
 class VPseudoBinaryMaskPolicy<VReg RetClass,
                               RegisterClass Op1Class,
                               DAGOperand Op2Class,
-                              string Constraint> :
+                              string Constraint,
+                              bit hasSEWOp = 1> :
    Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
-           (ins GetVRegNoV0<RetClass>.R:$merge,
-                Op1Class:$rs2, Op2Class:$rs1,
-                VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
+           !if(hasSEWOp,
+               (ins GetVRegNoV0<RetClass>.R:$merge,
+                    Op1Class:$rs2, Op2Class:$rs1,
+                    VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),
+               (ins GetVRegNoV0<RetClass>.R:$merge,
+                    Op1Class:$rs2, Op2Class:$rs1,
+                    VMaskOp:$vm, AVL:$vl, ixlenimm:$policy)), []>,
    RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasSEWOp = hasSEWOp;
  let HasVecPolicyOp = 1;
  let UsesMaskPolicy = 1;
 }

@@ -1340,38 +1367,50 @@
 class VPseudoTernaryMaskPolicy<VReg RetClass,
                                RegisterClass Op1Class,
                                DAGOperand Op2Class,
-                               string Constraint> :
+                               string Constraint,
+                               bit hasSEWOp = 1> :
    Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
-           (ins GetVRegNoV0<RetClass>.R:$merge,
-                Op1Class:$rs2, Op2Class:$rs1,
-                VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
+           !if(hasSEWOp,
+               (ins GetVRegNoV0<RetClass>.R:$merge,
+                    Op1Class:$rs2, Op2Class:$rs1,
+                    VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),
+               (ins GetVRegNoV0<RetClass>.R:$merge,
+                    Op1Class:$rs2, Op2Class:$rs1,
+                    VMaskOp:$vm, AVL:$vl, ixlenimm:$policy)), []>,
    RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasSEWOp = hasSEWOp;
  let HasVecPolicyOp = 1;
 }

 class VPseudoTernaryMaskPolicyRoundingMode<VReg RetClass,
                                            RegisterClass Op1Class,
                                            DAGOperand Op2Class,
-                                           string Constraint> :
+                                           string Constraint,
+                                           bit hasSEWOp = 1> :
    Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
-           (ins GetVRegNoV0<RetClass>.R:$merge,
-                Op1Class:$rs2, Op2Class:$rs1,
-                VMaskOp:$vm,
-                ixlenimm:$rm,
-                AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
+           !if(hasSEWOp,
+               (ins GetVRegNoV0<RetClass>.R:$merge,
+                    Op1Class:$rs2, Op2Class:$rs1,
+                    VMaskOp:$vm,
+                    ixlenimm:$rm,
+                    AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),
+               (ins GetVRegNoV0<RetClass>.R:$merge,
+                    Op1Class:$rs2, Op2Class:$rs1,
+                    VMaskOp:$vm,
+                    ixlenimm:$rm,
+                    AVL:$vl, ixlenimm:$policy)), []>,
    RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
  let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasSEWOp = hasSEWOp;
  let HasVecPolicyOp = 1;
  let HasRoundModeOp = 1;
  let UsesVXRM = 0;
@@ -1521,10 +1560,14 @@
 class VPseudoTernaryNoMaskWithPolicy<VReg RetClass,
                                      RegisterClass Op1Class,
                                      DAGOperand Op2Class,
-                                     string Constraint> :
+                                     string Constraint,
+                                     bit hasSEWOp = 1> :
    Pseudo<(outs RetClass:$rd),
-           (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
-                AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
+           !if(hasSEWOp,
+               (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
+                    AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),
+               (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
+                    AVL:$vl, ixlenimm:$policy)), []>,
    RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
@@ -1532,16 +1575,20 @@
  let Constraints = !interleave([Constraint, "$rd = $rs3"], ",");
  let HasVecPolicyOp = 1;
  let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasSEWOp = hasSEWOp;
 }

 class VPseudoTernaryNoMaskWithPolicyRoundingMode<VReg RetClass,
                                                  RegisterClass Op1Class,
                                                  DAGOperand Op2Class,
-                                                 string Constraint> :
+                                                 string Constraint,
+                                                 bit hasSEWOp = 1> :
    Pseudo<(outs RetClass:$rd),
-           (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
-                ixlenimm:$rm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
+           !if(hasSEWOp,
+               (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
+                    ixlenimm:$rm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),
+               (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
+                    ixlenimm:$rm, AVL:$vl, ixlenimm:$policy)), []>,
    RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
@@ -1549,7 +1596,7 @@
  let Constraints = !interleave([Constraint, "$rd = $rs3"], ",");
  let HasVecPolicyOp = 1;
  let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasSEWOp = hasSEWOp;
  let HasRoundModeOp = 1;
  let UsesVXRM = 0;
 }
@@ -2066,10 +2113,11 @@
                          int sew = 0> {
  let VLMul = MInfo.value, SEW=sew in {
    defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
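+    // SEW-aware (_E<sew>) pseudos encode SEW in the opcode name, so they
+    // drop the explicit SEW operand.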
+    defvar hasSEWOp = !eq(sew, 0);
    def suffix : VPseudoBinaryNoMaskTU<RetClass, Op1Class, Op2Class,
-                                       Constraint>;
+                                       Constraint, hasSEWOp=hasSEWOp>;
    def suffix # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class,
-                                                   Constraint>,
+                                                   Constraint, hasSEWOp=hasSEWOp>,
                           RISCVMaskedPseudo;
  }
 }

@@ -2082,8 +2130,9 @@
                          int sew = 0> {
  let VLMul = MInfo.value, SEW=sew in {
    defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
+    defvar hasSEWOp = !eq(sew, 0);
    def suffix : VPseudoBinaryNoMaskTU<RetClass, Op1Class, Op2Class,
-                                       Constraint>;
+                                       Constraint, hasSEWOp=hasSEWOp>;
  }
 }

@@ -2096,13 +2145,15 @@
                                      int UsesVXRM = 1> {
  let VLMul = MInfo.value, SEW=sew in {
    defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
+    defvar hasSEWOp = !eq(sew, 0);
    def suffix : VPseudoBinaryNoMaskRoundingMode<RetClass, Op1Class, Op2Class,
-                                                 Constraint, UsesVXRM>;
+                                                 Constraint, UsesVXRM, hasSEWOp>;
    def suffix # "_MASK" : VPseudoBinaryMaskPolicyRoundingMode<RetClass, Op1Class,
                                                               Op2Class, Constraint,
-                                                               UsesVXRM>,
+                                                               UsesVXRM,
+                                                               hasSEWOp>,
                           RISCVMaskedPseudo;
  }
 }

@@ -2132,10 +2183,11 @@
                           int sew = 0> {
  let VLMul = lmul.value, SEW=sew in {
    defvar suffix = !if(sew, "_" # lmul.MX # "_E" # sew, "_" # lmul.MX);
+    defvar hasSEWOp = !eq(sew, 0);
    def suffix # "_" # emul.MX : VPseudoBinaryNoMaskTU<RetClass, Op1Class, Op2Class,
-                                                       Constraint>;
+                                                       Constraint, hasSEWOp>;
    def suffix # "_" # emul.MX # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class,
-                                                                   Constraint>,
+                                                                   Constraint, hasSEWOp>,
                                           RISCVMaskedPseudo;
  }
 }

@@ -2490,11 +2542,11 @@
  foreach e = sews in {
    defvar suffix = "_" # mx # "_E" # e;
    let SEW = e in {
-      def "_V" # suffix : VPseudoUnaryNoMaskRoundingMode<m.vrclass, m.vrclass>,
+      def "_V" # suffix : VPseudoUnaryNoMaskRoundingMode<m.vrclass, m.vrclass, hasSEWOp=0>,
                          SchedUnary<"WriteVFSqrtV", "ReadVFSqrtV", mx, e,
                                     forceMergeOpRead=true>;
      def "_V" #suffix # "_MASK"
-          : VPseudoUnaryMaskRoundingMode<m.vrclass, m.vrclass>,
+          : VPseudoUnaryMaskRoundingMode<m.vrclass, m.vrclass, hasSEWOp=0>,
            RISCVMaskedPseudo,
            SchedUnary<"WriteVFSqrtV", "ReadVFSqrtV", mx, e,
                       forceMergeOpRead=true>;

@@ -3141,8 +3193,12 @@
  let VLMul = MInfo.value, SEW=sew in {
    defvar mx = MInfo.MX;
    let isCommutable = Commutable in
-    def "_" # mx # "_E" # sew : VPseudoTernaryNoMaskWithPolicy<RetClass, Op1Class, Op2Class, Constraint>;
-    def "_" # mx # "_E" # sew # "_MASK" : VPseudoTernaryMaskPolicy<RetClass, Op1Class, Op2Class, Constraint>;
+    def "_" # mx # "_E" # sew : VPseudoTernaryNoMaskWithPolicy<RetClass, Op1Class, Op2Class,
+                                                               Constraint, hasSEWOp=0>;
+    def "_" # mx # "_E" # sew # "_MASK" : VPseudoTernaryMaskPolicy<RetClass, Op1Class, Op2Class,
+                                                                   Constraint, hasSEWOp=0>;
  }
 }

@@ -3158,10 +3214,12 @@
    let isCommutable = Commutable in
    def "_" # mx # "_E" # sew
        : VPseudoTernaryNoMaskWithPolicyRoundingMode<RetClass, Op1Class,
-                                                     Op2Class, Constraint>;
+                                                     Op2Class, Constraint,
+                                                     hasSEWOp=0>;
    def "_" # mx # "_E" # sew # "_MASK"
        : VPseudoTernaryMaskPolicyRoundingMode<RetClass, Op1Class,
-                                               Op2Class, Constraint>;
+                                               Op2Class, Constraint,
+                                               hasSEWOp=0>;
  }
 }

@@ -3884,13 +3942,15 @@
                      (result_type result_reg_class:$merge),
                      (op2_type op2_reg_class:$rs2),
                      VLOpFrag)),
-        (!cast<Instruction>(
-           !if(isSEWAware,
-               inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
-               inst#"_"#kind#"_"#vlmul.MX))
-         (result_type result_reg_class:$merge),
-         (op2_type op2_reg_class:$rs2),
-         GPR:$vl, log2sew, TU_MU)>;
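+        // SEW-aware opcodes bake SEW into the name (_E<sew>) and drop the
+        // log2sew operand.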
inst#"_"#kind#"_"#vlmul.MX#"_MASK")) - (result_type result_reg_class:$merge), - (op2_type op2_reg_class:$rs2), - (mask_type V0), GPR:$vl, log2sew, (XLenVT timm:$policy))>; + !if(isSEWAware, + (!cast(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK") + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + (mask_type V0), GPR:$vl, (XLenVT timm:$policy)), + (!cast(inst#"_"#kind#"_"#vlmul.MX#"_MASK") + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + (mask_type V0), GPR:$vl, log2sew, (XLenVT timm:$policy)))>; class VPatUnaryMaskRoundingMode( - !if(isSEWAware, - inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", - inst#"_"#kind#"_"#vlmul.MX#"_MASK")) - (result_type result_reg_class:$merge), - (op2_type op2_reg_class:$rs2), - (mask_type V0), - (XLenVT timm:$round), - GPR:$vl, log2sew, (XLenVT timm:$policy))>; + !if(isSEWAware, + (!cast(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK") + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + (mask_type V0), + (XLenVT timm:$round), + GPR:$vl, (XLenVT timm:$policy)), + (!cast(inst#"_"#kind#"_"#vlmul.MX#"_MASK") + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + (mask_type V0), + (XLenVT timm:$round), + GPR:$vl, log2sew, (XLenVT timm:$policy)))>; class VPatMaskUnaryNoMask; + GPR:$vl)>; class VPatBinaryM : + DAGOperand op2_kind, + bit hasSEWOp = 1> : Pat<(result_type (!cast(intrinsic_name) (result_type result_reg_class:$merge), (op1_type op1_reg_class:$rs1), (op2_type op2_kind:$rs2), VLOpFrag)), - (!cast(inst) - (result_type result_reg_class:$merge), - (op1_type op1_reg_class:$rs1), - (op2_type op2_kind:$rs2), - GPR:$vl, sew, TU_MU)>; + !if(hasSEWOp, + (!cast(inst) + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + GPR:$vl, sew, TU_MU), + (!cast(inst) + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + GPR:$vl, TU_MU))>; class VPatBinaryNoMaskRoundingMode : + DAGOperand op2_kind, + bit hasSEWOp = 1> : Pat<(result_type (!cast(intrinsic_name) (result_type (undef)), (op1_type op1_reg_class:$rs1), (op2_type op2_kind:$rs2), (XLenVT timm:$round), VLOpFrag)), - (!cast(inst) - (result_type (IMPLICIT_DEF)), - (op1_type op1_reg_class:$rs1), - (op2_type op2_kind:$rs2), - (XLenVT timm:$round), - GPR:$vl, sew, TA_MA)>; + !if(hasSEWOp, + (!cast(inst) + (result_type (IMPLICIT_DEF)), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (XLenVT timm:$round), + GPR:$vl, sew, TA_MA), + (!cast(inst) + (result_type (IMPLICIT_DEF)), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (XLenVT timm:$round), + GPR:$vl, TA_MA))>; class VPatBinaryNoMaskTURoundingMode : + DAGOperand op2_kind, + bit hasSEWOp = 1> : Pat<(result_type (!cast(intrinsic_name) (result_type result_reg_class:$merge), (op1_type op1_reg_class:$rs1), (op2_type op2_kind:$rs2), (XLenVT timm:$round), VLOpFrag)), - (!cast(inst) - (result_type result_reg_class:$merge), - (op1_type op1_reg_class:$rs1), - (op2_type op2_kind:$rs2), - (XLenVT timm:$round), - GPR:$vl, sew, TU_MU)>; + !if(hasSEWOp, + (!cast(inst) + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (XLenVT timm:$round), + GPR:$vl, sew, TU_MU), + (!cast(inst) + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (XLenVT timm:$round), + GPR:$vl, TU_MU))>; // Same as above but source operands are swapped. 
@@ -4144,18 +4236,25 @@ int sew, VReg result_reg_class, VReg op1_reg_class, - DAGOperand op2_kind> : + DAGOperand op2_kind, + bit hasSEWOp = 1> : Pat<(result_type (!cast(intrinsic_name#"_mask") (result_type result_reg_class:$merge), (op1_type op1_reg_class:$rs1), (op2_type op2_kind:$rs2), (mask_type V0), VLOpFrag, (XLenVT timm:$policy))), - (!cast(inst#"_MASK") - (result_type result_reg_class:$merge), - (op1_type op1_reg_class:$rs1), - (op2_type op2_kind:$rs2), - (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>; + !if(hasSEWOp, + (!cast(inst#"_MASK") + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy)), + (!cast(inst#"_MASK") + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (mask_type V0), GPR:$vl, (XLenVT timm:$policy)))>; class VPatBinaryMaskTARoundingMode : + DAGOperand op2_kind, + bit hasSEWOp = 1> : Pat<(result_type (!cast(intrinsic_name#"_mask") (result_type result_reg_class:$merge), (op1_type op1_reg_class:$rs1), @@ -4174,13 +4274,21 @@ (mask_type V0), (XLenVT timm:$round), VLOpFrag, (XLenVT timm:$policy))), - (!cast(inst#"_MASK") - (result_type result_reg_class:$merge), - (op1_type op1_reg_class:$rs1), - (op2_type op2_kind:$rs2), - (mask_type V0), - (XLenVT timm:$round), - GPR:$vl, sew, (XLenVT timm:$policy))>; + !if(hasSEWOp, + (!cast(inst#"_MASK") + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (mask_type V0), + (XLenVT timm:$round), + GPR:$vl, sew, (XLenVT timm:$policy)), + (!cast(inst#"_MASK") + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (mask_type V0), + (XLenVT timm:$round), + GPR:$vl, (XLenVT timm:$policy)))>; // Same as above but source operands are swapped. 
class VPatBinaryMaskSwapped; + GPR:$vl, TAIL_AGNOSTIC)>; class VPatTernaryNoMaskTARoundingMode; + GPR:$vl, TAIL_AGNOSTIC)>; class VPatTernaryNoMaskWithPolicy; + GPR:$vl, TAIL_AGNOSTIC)>; class VPatTernaryMaskTARoundingMode; + GPR:$vl, TAIL_AGNOSTIC)>; multiclass VPatUnaryS_M { @@ -4701,12 +4809,13 @@ int sew, VReg result_reg_class, VReg op1_reg_class, - DAGOperand op2_kind> { + DAGOperand op2_kind, + bit hasSEWOp = 1> { def : VPatBinaryNoMaskTU; + sew, result_reg_class, op1_reg_class, op2_kind, hasSEWOp>; def : VPatBinaryMaskTA; + op2_kind, hasSEWOp>; } multiclass VPatBinaryRoundingMode { + DAGOperand op2_kind, + bit hasSEWOp = 1> { def : VPatBinaryNoMaskRoundingMode; + sew, op1_reg_class, op2_kind, hasSEWOp>; def : VPatBinaryNoMaskTURoundingMode; + sew, result_reg_class, op1_reg_class, op2_kind, hasSEWOp>; def : VPatBinaryMaskTARoundingMode; + op2_kind, hasSEWOp>; } multiclass VPatBinarySwapped; + vti.RegClass, vti.RegClass, + hasSEWOp=!not(isSEWAware)>; } multiclass VPatBinaryV_VV_RM; + vti.RegClass, vti.RegClass, + hasSEWOp=!not(isSEWAware)>; } multiclass VPatBinaryV_VV_INT; + vti.RegClass, vti.RegClass, + hasSEWOp=0>; } } @@ -4899,7 +5012,8 @@ defm : VPatBinary; + vti.RegClass, ivti.RegClass, + hasSEWOp=0>; } } } @@ -4915,7 +5029,8 @@ instruction#"_"#kind#"_"#vti.LMul.MX), vti.Vector, vti.Vector, vti.Scalar, vti.Mask, vti.Log2SEW, vti.RegClass, - vti.RegClass, vti.ScalarRegClass>; + vti.RegClass, vti.ScalarRegClass, + hasSEWOp=!not(isSEWAware)>; } } @@ -4930,7 +5045,8 @@ instruction#"_"#kind#"_"#vti.LMul.MX), vti.Vector, vti.Vector, vti.Scalar, vti.Mask, vti.Log2SEW, vti.RegClass, - vti.RegClass, vti.ScalarRegClass>; + vti.RegClass, vti.ScalarRegClass, + hasSEWOp=!not(isSEWAware)>; } } diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td @@ -83,14 +83,17 @@ Pat<(result_type (vop (op_type op_reg_class:$rs1), (op_type op_reg_class:$rs2))), - (!cast( - !if(isSEWAware, - instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew), - instruction_name#"_VV_"# vlmul.MX)) + !if(isSEWAware, + (!cast(instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew)) (result_type (IMPLICIT_DEF)), op_reg_class:$rs1, op_reg_class:$rs2, - avl, log2sew, TA_MA)>; + avl, TA_MA), + (!cast(instruction_name#"_VV_"# vlmul.MX) + (result_type (IMPLICIT_DEF)), + op_reg_class:$rs1, + op_reg_class:$rs2, + avl, log2sew, TA_MA))>; class VPatBinarySDNode_VV_RM( - !if(isSEWAware, - instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew), - instruction_name#"_VV_"# vlmul.MX)) - (result_type (IMPLICIT_DEF)), - op_reg_class:$rs1, - op_reg_class:$rs2, - // Value to indicate no rounding mode change in - // RISCVInsertReadWriteCSR - FRM_DYN, - avl, log2sew, TA_MA)>; + !if(isSEWAware, + (!cast(instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew)) + (result_type (IMPLICIT_DEF)), + op_reg_class:$rs1, + op_reg_class:$rs2, + // Value to indicate no rounding mode change in + // RISCVInsertReadWriteCSR + FRM_DYN, + avl, TA_MA), + (!cast(instruction_name#"_VV_"# vlmul.MX) + (result_type (IMPLICIT_DEF)), + op_reg_class:$rs1, + op_reg_class:$rs2, + FRM_DYN, + avl, log2sew, TA_MA))>; class VPatBinarySDNode_XI( - !if(isSEWAware, - instruction_name#_#suffix#_# vlmul.MX#"_E"#!shl(1, log2sew), - instruction_name#_#suffix#_# vlmul.MX)) - (result_type (IMPLICIT_DEF)), - vop_reg_class:$rs1, - xop_kind:$rs2, - avl, log2sew, TA_MA)>; + !if(isSEWAware, + 
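+  // Plumb hasSEWOp through so SEW-aware pseudos are selected without the
+  // sew immediate.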
  def : VPatBinaryNoMaskTU<intrinsic, inst, result_type, op1_type, op2_type,
-                           sew, result_reg_class, op1_reg_class, op2_kind>;
+                           sew, result_reg_class, op1_reg_class, op2_kind, hasSEWOp>;
  def : VPatBinaryMaskTA<intrinsic, inst, result_type, op1_type, op2_type,
                         mask_type, sew, result_reg_class, op1_reg_class,
-                         op2_kind>;
+                         op2_kind, hasSEWOp>;
 }

 multiclass VPatBinaryRoundingMode
-                                  DAGOperand op2_kind> {
+                                  DAGOperand op2_kind,
+                                  bit hasSEWOp = 1> {
  def : VPatBinaryNoMaskRoundingMode<intrinsic, inst, result_type, op1_type, op2_type,
-                                     sew, op1_reg_class, op2_kind>;
+                                     sew, op1_reg_class, op2_kind, hasSEWOp>;
  def : VPatBinaryNoMaskTURoundingMode<intrinsic, inst, result_type, op1_type, op2_type,
-                                       sew, result_reg_class, op1_reg_class, op2_kind>;
+                                       sew, result_reg_class, op1_reg_class, op2_kind, hasSEWOp>;
  def : VPatBinaryMaskTARoundingMode<intrinsic, inst, result_type, op1_type, op2_type,
                                     mask_type, sew, result_reg_class, op1_reg_class,
-                                     op2_kind>;
+                                     op2_kind, hasSEWOp>;
 }

 multiclass VPatBinarySwapped
-                    vti.RegClass, vti.RegClass>;
+                    vti.RegClass, vti.RegClass,
+                    hasSEWOp=!not(isSEWAware)>;
 }

 multiclass VPatBinaryV_VV_RM
-                    vti.RegClass, vti.RegClass>;
+                    vti.RegClass, vti.RegClass,
+                    hasSEWOp=!not(isSEWAware)>;
 }

 multiclass VPatBinaryV_VV_INT
-                    vti.RegClass, vti.RegClass>;
+                    vti.RegClass, vti.RegClass,
+                    hasSEWOp=0>;
  }
 }

@@ -4899,7 +5012,8 @@
      defm : VPatBinary
-                        vti.RegClass, ivti.RegClass>;
+                        vti.RegClass, ivti.RegClass,
+                        hasSEWOp=0>;
    }
  }
 }

@@ -4915,7 +5029,8 @@
                      instruction#"_"#kind#"_"#vti.LMul.MX),
                      vti.Vector, vti.Vector, vti.Scalar, vti.Mask,
                      vti.Log2SEW, vti.RegClass,
-                      vti.RegClass, vti.ScalarRegClass>;
+                      vti.RegClass, vti.ScalarRegClass,
+                      hasSEWOp=!not(isSEWAware)>;
  }
 }

@@ -4930,7 +5045,8 @@
                      instruction#"_"#kind#"_"#vti.LMul.MX),
                      vti.Vector, vti.Vector, vti.Scalar, vti.Mask,
                      vti.Log2SEW, vti.RegClass,
-                      vti.RegClass, vti.ScalarRegClass>;
+                      vti.RegClass, vti.ScalarRegClass,
+                      hasSEWOp=!not(isSEWAware)>;
  }
 }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -83,14 +83,17 @@
    Pat<(result_type (vop (op_type op_reg_class:$rs1),
                          (op_type op_reg_class:$rs2))),
-        (!cast<Instruction>(
-           !if(isSEWAware,
-               instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew),
-               instruction_name#"_VV_"# vlmul.MX))
+        !if(isSEWAware,
+            (!cast<Instruction>(instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew))
             (result_type (IMPLICIT_DEF)),
             op_reg_class:$rs1,
             op_reg_class:$rs2,
-             avl, log2sew, TA_MA)>;
+             avl, TA_MA),
+            (!cast<Instruction>(instruction_name#"_VV_"# vlmul.MX)
+             (result_type (IMPLICIT_DEF)),
+             op_reg_class:$rs1,
+             op_reg_class:$rs2,
+             avl, log2sew, TA_MA))>;

 class VPatBinarySDNode_VV_RM
-        (!cast<Instruction>(
-           !if(isSEWAware,
-               instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew),
-               instruction_name#"_VV_"# vlmul.MX))
-         (result_type (IMPLICIT_DEF)),
-         op_reg_class:$rs1,
-         op_reg_class:$rs2,
-         // Value to indicate no rounding mode change in
-         // RISCVInsertReadWriteCSR
-         FRM_DYN,
-         avl, log2sew, TA_MA)>;
+        !if(isSEWAware,
+            (!cast<Instruction>(instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew))
+             (result_type (IMPLICIT_DEF)),
+             op_reg_class:$rs1,
+             op_reg_class:$rs2,
+             // Value to indicate no rounding mode change in
+             // RISCVInsertReadWriteCSR
+             FRM_DYN,
+             avl, TA_MA),
+            (!cast<Instruction>(instruction_name#"_VV_"# vlmul.MX)
+             (result_type (IMPLICIT_DEF)),
+             op_reg_class:$rs1,
+             op_reg_class:$rs2,
+             FRM_DYN,
+             avl, log2sew, TA_MA))>;

 class VPatBinarySDNode_XI
-        (!cast<Instruction>(
-           !if(isSEWAware,
-               instruction_name#_#suffix#_# vlmul.MX#"_E"#!shl(1, log2sew),
-               instruction_name#_#suffix#_# vlmul.MX))
-         (result_type (IMPLICIT_DEF)),
-         vop_reg_class:$rs1,
-         xop_kind:$rs2,
-         avl, log2sew, TA_MA)>;
+        !if(isSEWAware,
+            (!cast<Instruction>(instruction_name#_#suffix#_# vlmul.MX#"_E"#!shl(1, log2sew))
+             (result_type (IMPLICIT_DEF)),
+             vop_reg_class:$rs1,
+             xop_kind:$rs2,
+             avl, TA_MA),
+            (!cast<Instruction>(instruction_name#_#suffix#_# vlmul.MX)
+             (result_type (IMPLICIT_DEF)),
+             vop_reg_class:$rs1,
+             xop_kind:$rs2,
+             avl, log2sew, TA_MA))>;

 multiclass VPatBinarySDNode_VV_VX

@@ -182,14 +192,17 @@
                             bit isSEWAware = 0> :
    Pat<(result_type (vop (vop_type vop_reg_class:$rs1),
                          (vop_type (SplatFPOp xop_kind:$rs2)))),
-        (!cast<Instruction>(
-           !if(isSEWAware,
-               instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
-               instruction_name#"_"#vlmul.MX))
-         (result_type (IMPLICIT_DEF)),
-         vop_reg_class:$rs1,
-         (xop_type xop_kind:$rs2),
-         avl, log2sew, TA_MA)>;
+        !if(isSEWAware,
+            (!cast<Instruction>(instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew))
+             (result_type (IMPLICIT_DEF)),
+             vop_reg_class:$rs1,
+             (xop_type xop_kind:$rs2),
+             avl, TA_MA),
+            (!cast<Instruction>(instruction_name#"_"#vlmul.MX)
+             (result_type (IMPLICIT_DEF)),
+             vop_reg_class:$rs1,
+             (xop_type xop_kind:$rs2),
+             avl, log2sew, TA_MA))>;

 class VPatBinarySDNode_VF_RM :
    Pat<(result_type (vop (vop_type vop_reg_class:$rs1),
                          (vop_type (SplatFPOp xop_kind:$rs2)))),
-        (!cast<Instruction>(
-           !if(isSEWAware,
-               instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
-               instruction_name#"_"#vlmul.MX))
-         (result_type (IMPLICIT_DEF)),
-         vop_reg_class:$rs1,
-         (xop_type xop_kind:$rs2),
-         // Value to indicate no rounding mode change in
-         // RISCVInsertReadWriteCSR
-         FRM_DYN,
-         avl, log2sew, TA_MA)>;
+        !if(isSEWAware,
+            (!cast<Instruction>(instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew))
+             (result_type (IMPLICIT_DEF)),
+             vop_reg_class:$rs1,
+             (xop_type xop_kind:$rs2),
+             // Value to indicate no rounding mode change in
+             // RISCVInsertReadWriteCSR
+             FRM_DYN,
+             avl, TA_MA),
+            (!cast<Instruction>(instruction_name#"_"#vlmul.MX)
+             (result_type (IMPLICIT_DEF)),
+             vop_reg_class:$rs1,
+             (xop_type xop_kind:$rs2),
+             FRM_DYN,
+             avl, log2sew, TA_MA))>;

 multiclass VPatBinaryFPSDNode_VV_VF {

@@ -252,14 +269,17 @@
    let Predicates = GetVTypePredicates<fvti>.Predicates in
    def : Pat<(fvti.Vector (vop (fvti.Vector (SplatFPOp fvti.Scalar:$rs2)),
                                (fvti.Vector fvti.RegClass:$rs1))),
-              (!cast<Instruction>(
-                 !if(isSEWAware,
-                     instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW,
-                     instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX))
-               (fvti.Vector (IMPLICIT_DEF)),
-               fvti.RegClass:$rs1,
-               (fvti.Scalar fvti.ScalarRegClass:$rs2),
-               fvti.AVL, fvti.Log2SEW, TA_MA)>;
+              !if(isSEWAware,
+                  (!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW)
+                   (fvti.Vector (IMPLICIT_DEF)),
+                   fvti.RegClass:$rs1,
+                   (fvti.Scalar fvti.ScalarRegClass:$rs2),
+                   fvti.AVL, TA_MA),
+                  (!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
+                   (fvti.Vector (IMPLICIT_DEF)),
+                   fvti.RegClass:$rs1,
+                   (fvti.Scalar fvti.ScalarRegClass:$rs2),
+                   fvti.AVL, fvti.Log2SEW, TA_MA))>;
 }

 multiclass VPatBinaryFPSDNode_R_VF_RM
    let Predicates = GetVTypePredicates<fvti>.Predicates in
    def : Pat<(fvti.Vector (vop (fvti.Vector (SplatFPOp fvti.Scalar:$rs2)),
                                (fvti.Vector fvti.RegClass:$rs1))),
-              (!cast<Instruction>(
-                 !if(isSEWAware,
-                     instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW,
-                     instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX))
-               (fvti.Vector (IMPLICIT_DEF)),
-               fvti.RegClass:$rs1,
-               (fvti.Scalar fvti.ScalarRegClass:$rs2),
-               // Value to indicate no rounding mode change in
-               // RISCVInsertReadWriteCSR
-               FRM_DYN,
-               fvti.AVL, fvti.Log2SEW, TA_MA)>;
+              !if(isSEWAware,
+                  (!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW)
+                   (fvti.Vector (IMPLICIT_DEF)),
+                   fvti.RegClass:$rs1,
+                   (fvti.Scalar fvti.ScalarRegClass:$rs2),
+                   // Value to indicate no rounding mode change in
+                   // RISCVInsertReadWriteCSR
+                   FRM_DYN,
+                   fvti.AVL, TA_MA),
+                  (!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
+                   (fvti.Vector (IMPLICIT_DEF)),
+                   fvti.RegClass:$rs1,
+                   (fvti.Scalar fvti.ScalarRegClass:$rs2),
+                   FRM_DYN,
+                   fvti.AVL, fvti.Log2SEW, TA_MA))>;
 }

 multiclass VPatIntegerSetCCSDNode_VV

      (!cast<Instruction>("PseudoVREM_VV_"#vti.LMul.MX#"_E"#!shl(1, vti.Log2SEW))
       (vti.Vector (IMPLICIT_DEF)),
-       vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
+       vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, TA_MA)>;
  }
 }

@@ -1300,7 +1324,7 @@
                // Value to indicate no rounding mode change in
                // RISCVInsertReadWriteCSR
                FRM_DYN,
-                vti.AVL, vti.Log2SEW, TA_MA)>;
+                vti.AVL, TA_MA)>;

  // 13.12. Vector Floating-Point Sign-Injection Instructions
  def : Pat<(fabs (vti.Vector vti.RegClass:$rs)),
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -615,14 +615,17 @@
                   (result_type result_reg_class:$merge),
                   (mask_type V0), VLOpFrag)),
-        (!cast<Instruction>(
-           !if(isSEWAware,
-               instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
-               instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK"))
-         result_reg_class:$merge,
-         op1_reg_class:$rs1,
-         op2_reg_class:$rs2,
-         (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
+        !if(isSEWAware,
+            (!cast<Instruction>(instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK")
+             result_reg_class:$merge,
+             op1_reg_class:$rs1,
+             op2_reg_class:$rs2,
+             (mask_type V0), GPR:$vl, TAIL_AGNOSTIC),
+            (!cast<Instruction>(instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK")
+             result_reg_class:$merge,
+             op1_reg_class:$rs1,
+             op2_reg_class:$rs2,
+             (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC))>;

 class VPatBinaryVL_V_RM
-        (!cast<Instruction>(
-           !if(isSEWAware,
-               instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
-               instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK"))
-         result_reg_class:$merge,
-         op1_reg_class:$rs1,
-         op2_reg_class:$rs2,
-         (mask_type V0),
-         // Value to indicate no rounding mode change in
-         // RISCVInsertReadWriteCSR
-         FRM_DYN,
-         GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
+        !if(isSEWAware,
+            (!cast<Instruction>(instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK")
+             result_reg_class:$merge,
+             op1_reg_class:$rs1,
+             op2_reg_class:$rs2,
+             (mask_type V0),
+             // Value to indicate no rounding mode change in
+             // RISCVInsertReadWriteCSR
+             FRM_DYN,
+             GPR:$vl, TAIL_AGNOSTIC),
+            (!cast<Instruction>(instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK")
+             result_reg_class:$merge,
+             op1_reg_class:$rs1,
+             op2_reg_class:$rs2,
+             (mask_type V0),
+             // Value to indicate no rounding mode change in
+             // RISCVInsertReadWriteCSR
+             FRM_DYN,
+             GPR:$vl, log2sew, TAIL_AGNOSTIC))>;

 multiclass VPatTiedBinaryNoMaskVL_V
-        (!cast<Instruction>(
-           !if(isSEWAware,
-               instruction_name#_#suffix#_#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
-               instruction_name#_#suffix#_#vlmul.MX#"_MASK"))
-         result_reg_class:$merge,
-         vop_reg_class:$rs1,
-         xop_kind:$rs2,
-         (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
+        !if(isSEWAware,
+            (!cast<Instruction>(instruction_name#_#suffix#_#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK")
+             result_reg_class:$merge,
+             vop_reg_class:$rs1,
+             xop_kind:$rs2,
+             (mask_type V0), GPR:$vl, TAIL_AGNOSTIC),
+            (!cast<Instruction>(instruction_name#_#suffix#_#vlmul.MX#"_MASK")
+             result_reg_class:$merge,
+             vop_reg_class:$rs1,
+             xop_kind:$rs2,
+             (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC))>;

 multiclass VPatBinaryVL_VV_VX vtilist = AllIntegerVectors,

@@ -870,14 +883,17 @@
                   (result_type result_reg_class:$merge),
                   (mask_type V0), VLOpFrag)),
-        (!cast<Instruction>(
-           !if(isSEWAware,
-               instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
-               instruction_name#"_"#vlmul.MX#"_MASK"))
-         result_reg_class:$merge,
-         vop_reg_class:$rs1,
-         scalar_reg_class:$rs2,
-         (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
+        !if(isSEWAware,
+            (!cast<Instruction>(instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK")
+             result_reg_class:$merge,
+             vop_reg_class:$rs1,
+             scalar_reg_class:$rs2,
+             (mask_type V0), GPR:$vl, TAIL_AGNOSTIC),
+            (!cast<Instruction>(instruction_name#"_"#vlmul.MX#"_MASK")
+             result_reg_class:$merge,
+             vop_reg_class:$rs1,
+             scalar_reg_class:$rs2,
+             (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC))>;

 class VPatBinaryVL_VF_RM
-        (!cast<Instruction>(
-           !if(isSEWAware,
-               instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
-               instruction_name#"_"#vlmul.MX#"_MASK"))
-         result_reg_class:$merge,
-         vop_reg_class:$rs1,
-         scalar_reg_class:$rs2,
-         (mask_type V0),
-         // Value to indicate no rounding mode change in
-         // RISCVInsertReadWriteCSR
-         FRM_DYN,
-         GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
+        !if(isSEWAware,
+            (!cast<Instruction>(instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK")
+             result_reg_class:$merge,
+             vop_reg_class:$rs1,
+             scalar_reg_class:$rs2,
+             (mask_type V0),
+             // Value to indicate no rounding mode change in
+             // RISCVInsertReadWriteCSR
+             FRM_DYN,
+             GPR:$vl, TAIL_AGNOSTIC),
+            (!cast<Instruction>(instruction_name#"_"#vlmul.MX#"_MASK")
+             result_reg_class:$merge,
+             vop_reg_class:$rs1,
+             scalar_reg_class:$rs2,
+             (mask_type V0),
+             // Value to indicate no rounding mode change in
+             // RISCVInsertReadWriteCSR
+             FRM_DYN,
+             GPR:$vl, log2sew, TAIL_AGNOSTIC))>;

 multiclass VPatBinaryFPVL_VV_VF {

@@ -950,13 +973,15 @@
                             (fvti.Vector fvti.RegClass:$merge),
                             (fvti.Mask V0),
                             VLOpFrag)),
-          (!cast<Instruction>(
-             !if(isSEWAware,
-                 instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK",
-                 instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK"))
-           fvti.RegClass:$merge,
-           fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
-           (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
+          !if(isSEWAware,
+              (!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK")
+               fvti.RegClass:$merge,
+               fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
+               (fvti.Mask V0), GPR:$vl, TAIL_AGNOSTIC),
+              (!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")
+               fvti.RegClass:$merge,
+               fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
+               (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC))>;
  }
 }

@@ -969,17 +994,23 @@
                             (fvti.Vector fvti.RegClass:$merge),
                             (fvti.Mask V0),
                             VLOpFrag)),
-          (!cast<Instruction>(
-             !if(isSEWAware,
-                 instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK",
-                 instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK"))
-           fvti.RegClass:$merge,
-           fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
-           (fvti.Mask V0),
-           // Value to indicate no rounding mode change in
-           // RISCVInsertReadWriteCSR
-           FRM_DYN,
-           GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
+          !if(isSEWAware,
+              (!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK")
+               fvti.RegClass:$merge,
+               fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
+               (fvti.Mask V0),
+               // Value to indicate no rounding mode change in
+               // RISCVInsertReadWriteCSR
+               FRM_DYN,
+               GPR:$vl, TAIL_AGNOSTIC),
+              (!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")
+               fvti.RegClass:$merge,
+               fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
+               (fvti.Mask V0),
+               // Value to indicate no rounding mode change in
+               // RISCVInsertReadWriteCSR
+               FRM_DYN,
+               GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC))>;
  }
 }

@@ -1388,7 +1419,7 @@
                (vti_m1.Vector VR:$merge),
                (vti.Vector vti.RegClass:$rs1),
                (vti_m1.Vector VR:$rs2),
-                GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
+                GPR:$vl, (XLenVT timm:$policy))>;
      def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge),
                                   (vti.Vector vti.RegClass:$rs1), VR:$rs2,
@@ -1398,7 +1429,7 @@
                (vti_m1.Vector VR:$merge),
                (vti.Vector vti.RegClass:$rs1),
                (vti_m1.Vector VR:$rs2),
-                (vti.Mask V0), GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
+                (vti.Mask V0), GPR:$vl, (XLenVT timm:$policy))>;
    }
  }
 }
@@ -1418,7 +1449,7 @@
                // Value to indicate no rounding mode change in
                // RISCVInsertReadWriteCSR
                FRM_DYN,
-                GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
+                GPR:$vl, (XLenVT timm:$policy))>;
      def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge),
                                   (vti.Vector vti.RegClass:$rs1), VR:$rs2,
@@ -1432,7 +1463,7 @@
                // Value to indicate no rounding mode change in
                // RISCVInsertReadWriteCSR
                FRM_DYN,
-                GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
+                GPR:$vl, (XLenVT timm:$policy))>;
    }
  }
 }
@@ -1491,7 +1522,7 @@
                               (XLenVT timm:$policy))),
          (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW)
           (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
-           (wti_m1.Vector VR:$rs2), GPR:$vl, vti.Log2SEW,
+           (wti_m1.Vector VR:$rs2), GPR:$vl,
           (XLenVT timm:$policy))>;
      def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge),
                                   (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))),
@@ -1499,7 +1530,7 @@
                               (XLenVT timm:$policy))),
          (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
           (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
-           (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW,
+           (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl,
           (XLenVT timm:$policy))>;
  }
 }
@@ -1522,7 +1553,7 @@
           // Value to indicate no rounding mode change in
           // RISCVInsertReadWriteCSR
           FRM_DYN,
-           GPR:$vl, vti.Log2SEW,
+           GPR:$vl,
           (XLenVT timm:$policy))>;
      def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge),
                                   (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))),
@@ -1534,7 +1565,7 @@
           // Value to indicate no rounding mode change in
           // RISCVInsertReadWriteCSR
           FRM_DYN,
-           GPR:$vl, vti.Log2SEW,
+           GPR:$vl,
           (XLenVT timm:$policy))>;
  }
 }
@@ -1553,7 +1584,7 @@
                               (XLenVT timm:$policy))),
          (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW)
           (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
-           (wti_m1.Vector VR:$rs2), GPR:$vl, vti.Log2SEW,
+           (wti_m1.Vector VR:$rs2), GPR:$vl,
           (XLenVT timm:$policy))>;
      def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge),
                                   (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)),
@@ -1561,7 +1592,7 @@
                               (XLenVT timm:$policy))),
          (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
           (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
-           (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW,
+           (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl,
           (XLenVT timm:$policy))>;
  }
 }
@@ -1584,7 +1615,7 @@
           // Value to indicate no rounding mode change in
           // RISCVInsertReadWriteCSR
           FRM_DYN,
-           GPR:$vl, vti.Log2SEW,
+           GPR:$vl,
           (XLenVT timm:$policy))>;
      def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge),
                                   (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)),
@@ -1596,7 +1627,7 @@
           // Value to indicate no rounding mode change in
           // RISCVInsertReadWriteCSR
           FRM_DYN,
-           GPR:$vl, vti.Log2SEW,
+           GPR:$vl,
           (XLenVT timm:$policy))>;
  }
 }
@@ -2410,7 +2441,7 @@
                // Value to indicate no rounding mode change in
                // RISCVInsertReadWriteCSR
                FRM_DYN,
-                GPR:$vl, vti.Log2SEW, TA_MA)>;
+                GPR:$vl, TA_MA)>;

  // 13.12. Vector Floating-Point Sign-Injection Instructions
  def : Pat<(riscv_fabs_vl (vti.Vector vti.RegClass:$rs), (vti.Mask V0),
@@ -2785,7 +2816,7 @@
                                             VLOpFrag)),
              (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_E"# vti.SEW#"_MASK")
               vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
-               (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+               (vti.Mask V0), GPR:$vl, TAIL_AGNOSTIC)>;
    def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2,
                                                GPR:$rs1,
                                                vti.RegClass:$merge,
                                                (vti.Mask V0),
@@ -2820,7 +2851,7 @@
                                             VLOpFrag)),
              (!cast<Instruction>(inst#"_MASK")
               vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1,
-               (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+               (vti.Mask V0), GPR:$vl, TAIL_AGNOSTIC)>;
  }
 }

@@ -2855,7 +2886,7 @@
                                             VLOpFrag)),
              (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_E"# vti.SEW#"_MASK")
               vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
-               (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+               (vti.Mask V0), GPR:$vl, TAIL_AGNOSTIC)>;
    def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2,
                                                GPR:$rs1,
                                                vti.RegClass:$merge,
                                                (vti.Mask V0),
@@ -2891,7 +2922,7 @@
                                             VLOpFrag)),
              (!cast<Instruction>(inst#"_MASK")
               vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1,
-               (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+               (vti.Mask V0), GPR:$vl, TAIL_AGNOSTIC)>;
  }
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
@@ -312,8 +312,10 @@
 define <vscale x 2 x float> @vpmerge_vfsqrt(<vscale x 2 x float> %passthru, <vscale x 2 x float> %vf, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vfsqrt:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vfsqrt.v v9, v9
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, ma
+; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
 ; CHECK-NEXT:    ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %vf, i64 7, i64 %1)
@@ -756,8 +758,9 @@
 define <vscale x 2 x float> @vpselect_vfsqrt(<vscale x 2 x float> %passthru, <vscale x 2 x float> %vf, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpselect_vfsqrt:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vfsqrt.v v9, v9
+; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
 ; CHECK-NEXT:    ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %vf, i64 7, i64 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
@@ -47,7 +47,6 @@
    ret void
  }

-  ; Function Attrs: nounwind readnone
  declare i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64>) #1

  define i64 @vmv_x_s(i8 zeroext %cond, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) #0 {
@@ -69,7 +68,6 @@
    ret i64 %d
  }

-  ; Function Attrs: nounwind
  declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg) #2

  define <vscale x 1 x i64> @vsetvli_add_or_sub(i8 zeroext %cond, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %avl) #0 {
@@ -133,28 +131,20 @@
    ret void
  }

-  ; Function Attrs: nofree nosync nounwind readnone willreturn
  declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)

-  ; Function Attrs: nounwind readnone
  declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64) #1

-  ; Function Attrs: nounwind readnone
  declare <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64) #1

-  ; Function Attrs: nounwind readonly
  declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>* nocapture, i64) #3

-  ; Function Attrs: nounwind readonly
  declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32>, <vscale x 1 x i32>* nocapture, i64) #3

-  ; Function Attrs: nounwind writeonly
  declare void @llvm.riscv.vse.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>* nocapture, i64) #4

-  ; Function Attrs: nounwind readnone
  declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>, <vscale x 1 x i32>, i64) #1

-  ; Function Attrs: nounwind readnone
  declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>, <vscale x 1 x i32>, i64) #1

  attributes #0 = { "target-features"="+v" }
@@ -818,7 +808,8 @@
    ; CHECK-NEXT:   [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; CHECK-NEXT:   [[PseudoVMV_S_X_M1_:%[0-9]+]]:vr = PseudoVMV_S_X_M1 [[DEF]], [[COPY5]], 1, 5 /* e32 */, implicit $vl, implicit $vtype
    ; CHECK-NEXT:   [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; CHECK-NEXT:   [[PseudoVREDSUM_VS_M1_E8_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1_E8 [[DEF1]], [[PseudoVADD_VV_M1_]], killed [[PseudoVMV_S_X_M1_]], 4, 5 /* e32 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT:   dead $x0 = PseudoVSETIVLI 4, 192 /* e8, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
+    ; CHECK-NEXT:   [[PseudoVREDSUM_VS_M1_E8_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1_E8 [[DEF1]], [[PseudoVADD_VV_M1_]], killed [[PseudoVMV_S_X_M1_]], 4, 1 /* ta, mu */, implicit $vl, implicit $vtype
    ; CHECK-NEXT:   dead $x0 = PseudoVSETIVLI 1, 208 /* e32, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT:   PseudoVSE32_V_M1 killed [[PseudoVREDSUM_VS_M1_E8_]], [[COPY]], 1, 5 /* e32 */, implicit $vl, implicit $vtype :: (store (s32) into %ir.res)
    ; CHECK-NEXT:   PseudoRET
@@ -855,7 +846,7 @@
    %21:vr = IMPLICIT_DEF
    %20:vr = PseudoVMV_S_X_M1 %21, %19, 1, 5
    %24:vr = IMPLICIT_DEF
-    %23:vr = PseudoVREDSUM_VS_M1_E8 %24, %16, killed %20, 4, 5, 1
+    %23:vr = PseudoVREDSUM_VS_M1_E8 %24, %16, killed %20, 4, 1
    PseudoVSE32_V_M1 killed %23, %8, 1, 5 :: (store (s32) into %ir.res)
    PseudoRET
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
@@ -319,8 +319,9 @@
    ; CHECK-NEXT: dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 $x0, 152 /* e64, m1, tu, ma */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 undef $v2, 0, -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_E8_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1_E8 [[DEF]], killed [[PseudoVLE64_V_M1_]], killed [[PseudoVMV_V_I_M1_]], 2, 6 /* e64 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 192 /* e8, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
+    ; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_E8_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1_E8 [[DEF]], killed [[PseudoVLE64_V_M1_]], killed [[PseudoVMV_V_I_M1_]], 2, 1 /* ta, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 1, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: [[PseudoVMV_X_S_M1_:%[0-9]+]]:gpr = PseudoVMV_X_S_M1 killed [[PseudoVREDSUM_VS_M1_E8_]], 6 /* e64 */, implicit $vtype
    ; CHECK-NEXT: $x10 = COPY [[PseudoVMV_X_S_M1_]]
    ; CHECK-NEXT: PseudoRET implicit $x10
    %1:vr = PseudoVLE64_V_M1 %pt, %0, 2, 6, 0 :: (load (s128) from %ir.x)
    %2:vr = PseudoVMV_V_I_M1 undef $v2, 0, -1, 6, 0
    %4:vr = IMPLICIT_DEF
-    %3:vr = PseudoVREDSUM_VS_M1_E8 %4, killed %1, killed %2, 2, 6, 1
+    %3:vr = PseudoVREDSUM_VS_M1_E8 %4, killed %1, killed %2, 2, 1
    %5:gpr = PseudoVMV_X_S_M1 killed %3, 6
    $x10 = COPY %5
    PseudoRET implicit $x10