diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h --- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h +++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h @@ -18,6 +18,8 @@ #include "llvm/ADT/APInt.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/StringSwitch.h" +#include "llvm/CodeGen/MachineInstr.h" +#include "llvm/CodeGen/MachineOperand.h" #include "llvm/MC/MCInstrDesc.h" #include "llvm/Support/RISCVISAInfo.h" #include "llvm/TargetParser/SubtargetFeature.h" @@ -113,6 +115,12 @@ UsesVXRMShift = HasRoundModeOpShift + 1, UsesVXRMMask = 1 << UsesVXRMShift, + + HasImplictSEWShift = UsesVXRMShift + 1, + HasImplictSEWMask = 1 << HasImplictSEWShift, + + VSEWShift = HasImplictSEWShift + 1, + VSEWMask = 0b11 << VSEWShift, }; enum VLMUL : uint8_t { @@ -126,6 +134,13 @@ LMUL_F2 }; +enum VSEW : uint8_t { + SEW_8 = 0, + SEW_16, + SEW_32, + SEW_64, +}; + enum { TAIL_UNDISTURBED_MASK_UNDISTURBED = 0, TAIL_AGNOSTIC = 1, @@ -178,14 +193,29 @@ /// \returns true if this instruction uses vxrm static inline bool usesVXRM(uint64_t TSFlags) { return TSFlags & UsesVXRMMask; } +/// \returns true if this instruction has implict SEW value. +static inline bool hasImplictSEW(uint64_t TSFlags) { + return TSFlags & HasImplictSEWMask; +} + +/// \returns the VSEW for the instruction. +static inline VSEW getVSEW(uint64_t TSFlags) { + return static_cast((TSFlags & VSEWMask) >> VSEWShift); +} + +/// \returns true if there is a SEW value for the instruction. +static inline bool hasSEW(uint64_t TSFlags) { + return hasSEWOp(TSFlags) || hasImplictSEW(TSFlags); +} + static inline unsigned getVLOpNum(const MCInstrDesc &Desc) { const uint64_t TSFlags = Desc.TSFlags; - // This method is only called if we expect to have a VL operand, and all - // instructions with VL also have SEW. - assert(hasSEWOp(TSFlags) && hasVLOp(TSFlags)); - unsigned Offset = 2; + // This method is only called if we expect to have a VL operand. + assert(hasVLOp(TSFlags)); + // Some instructions don't have SEW operand. + unsigned Offset = 1 + hasSEWOp(TSFlags); if (hasVecPolicyOp(TSFlags)) - Offset = 3; + Offset = Offset + 1; return Desc.getNumOperands() - Offset; } @@ -198,6 +228,24 @@ return Desc.getNumOperands() - Offset; } +static inline MachineOperand getSEWOp(const MachineInstr &MI) { + uint64_t TSFlags = MI.getDesc().TSFlags; + assert(hasSEW(TSFlags) && "The instruction doesn't have SEW value!"); + if (hasSEWOp(TSFlags)) + return MI.getOperand(getSEWOpNum(MI.getDesc())); + + return MachineOperand::CreateImm(3 + getVSEW(TSFlags)); +} + +static inline unsigned getLog2SEW(const MachineInstr &MI) { + uint64_t TSFlags = MI.getDesc().TSFlags; + assert(RISCVII::hasSEW(TSFlags) && "The instruction doesn't have SEW value!"); + if (RISCVII::hasSEWOp(TSFlags)) + return MI.getOperand(RISCVII::getSEWOpNum(MI.getDesc())).getImm(); + + return 3 + RISCVII::getVSEW(TSFlags); +} + static inline unsigned getVecPolicyOpNum(const MCInstrDesc &Desc) { assert(hasVecPolicyOp(Desc.TSFlags)); return Desc.getNumOperands() - 1; diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp @@ -306,8 +306,11 @@ Operands.push_back(VL); MVT XLenVT = Subtarget->getXLenVT(); - SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT); - Operands.push_back(SEWOp); + // Add SEW operand if it is indexed or mask load/store instruction. 
+ if (Log2SEW == 0 || IndexVT) { + SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT); + Operands.push_back(SEWOp); + } // At the IR layer, all the masked load intrinsics have policy operands, // none of the others do. All have passthru operands. For our pseudos, @@ -2094,7 +2097,6 @@ selectVLOp(Node->getOperand(2), VL); unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits()); - SDValue SEW = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT); // If VL=1, then we don't need to do a strided load and can just do a // regular load. @@ -2110,7 +2112,7 @@ Operands.push_back(CurDAG->getRegister(RISCV::X0, XLenVT)); uint64_t Policy = RISCVII::MASK_AGNOSTIC | RISCVII::TAIL_AGNOSTIC; SDValue PolicyOp = CurDAG->getTargetConstant(Policy, DL, XLenVT); - Operands.append({VL, SEW, PolicyOp, Ld->getChain()}); + Operands.append({VL, PolicyOp, Ld->getChain()}); RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT); const RISCV::VLEPseudo *P = RISCV::getVLEPseudo( @@ -3431,8 +3433,8 @@ // The vector policy operand may be present for masked intrinsics bool HasVecPolicyOp = RISCVII::hasVecPolicyOp(TrueTSFlags); - unsigned TrueVLIndex = - True.getNumOperands() - HasVecPolicyOp - HasChainOp - HasGlueOp - 2; + unsigned TrueVLIndex = True.getNumOperands() - HasVecPolicyOp - HasChainOp - + HasGlueOp - 1 - RISCVII::hasSEWOp(TrueTSFlags); SDValue TrueVL = True.getOperand(TrueVLIndex); SDValue SEW = True.getOperand(TrueVLIndex + 1); @@ -3532,7 +3534,10 @@ if (HasRoundingMode) Ops.push_back(True->getOperand(TrueVLIndex - 1)); - Ops.append({VL, SEW, PolicyOp}); + Ops.push_back(VL); + if (RISCVII::hasSEWOp(TrueTSFlags)) + Ops.push_back(SEW); + Ops.push_back(PolicyOp); // Result node should have chain operand of True. if (HasChainOp) diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp --- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp +++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp @@ -48,10 +48,6 @@ return RISCVII::getVLOpNum(MI.getDesc()); } -static unsigned getSEWOpNum(const MachineInstr &MI) { - return RISCVII::getSEWOpNum(MI.getDesc()); -} - static bool isVectorConfigInstr(const MachineInstr &MI) { return MI.getOpcode() == RISCV::PseudoVSETVLI || MI.getOpcode() == RISCV::PseudoVSETVLIX0 || @@ -162,7 +158,7 @@ static bool isMaskRegOp(const MachineInstr &MI) { if (!RISCVII::hasSEWOp(MI.getDesc().TSFlags)) return false; - const unsigned Log2SEW = MI.getOperand(getSEWOpNum(MI)).getImm(); + const unsigned Log2SEW = RISCVII::getLog2SEW(MI); // A Log2SEW of 0 is an operation on mask registers only. return Log2SEW == 0; } @@ -343,7 +339,7 @@ Res.demandVTYPE(); // Start conservative on the unlowered form too uint64_t TSFlags = MI.getDesc().TSFlags; - if (RISCVII::hasSEWOp(TSFlags)) { + if (RISCVII::hasSEW(TSFlags)) { Res.demandVTYPE(); if (RISCVII::hasVLOp(TSFlags)) Res.demandVL(); @@ -365,7 +361,7 @@ } // Store instructions don't use the policy fields. - if (RISCVII::hasSEWOp(TSFlags) && MI.getNumExplicitDefs() == 0) { + if (RISCVII::hasSEW(TSFlags) && MI.getNumExplicitDefs() == 0) { Res.TailPolicy = false; Res.MaskPolicy = false; } @@ -788,7 +784,7 @@ RISCVII::VLMUL VLMul = RISCVII::getLMul(TSFlags); - unsigned Log2SEW = MI.getOperand(getSEWOpNum(MI)).getImm(); + unsigned Log2SEW = RISCVII::getLog2SEW(MI); // A Log2SEW of 0 is an operation on mask registers only. unsigned SEW = Log2SEW ? 
1 << Log2SEW : 8; assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW"); @@ -1006,7 +1002,7 @@ void RISCVInsertVSETVLI::transferBefore(VSETVLIInfo &Info, const MachineInstr &MI) const { uint64_t TSFlags = MI.getDesc().TSFlags; - if (!RISCVII::hasSEWOp(TSFlags)) + if (!RISCVII::hasSEW(TSFlags)) return; const VSETVLIInfo NewInfo = computeInfoForInstr(MI, TSFlags, MRI); @@ -1089,7 +1085,7 @@ for (const MachineInstr &MI : MBB) { transferBefore(Info, MI); - if (isVectorConfigInstr(MI) || RISCVII::hasSEWOp(MI.getDesc().TSFlags)) + if (isVectorConfigInstr(MI) || RISCVII::hasSEW(MI.getDesc().TSFlags)) HadVectorOp = true; transferAfter(Info, MI); @@ -1221,7 +1217,7 @@ } uint64_t TSFlags = MI.getDesc().TSFlags; - if (RISCVII::hasSEWOp(TSFlags)) { + if (RISCVII::hasSEW(TSFlags)) { if (PrevInfo != CurInfo) { // If this is the first implicit state change, and the state change // requested can be proven to produce the same register contents, we diff --git a/llvm/lib/Target/RISCV/RISCVInstrFormats.td b/llvm/lib/Target/RISCV/RISCVInstrFormats.td --- a/llvm/lib/Target/RISCV/RISCVInstrFormats.td +++ b/llvm/lib/Target/RISCV/RISCVInstrFormats.td @@ -212,6 +212,13 @@ // to the correct CSR. bit UsesVXRM = 0; let TSFlags{20} = UsesVXRM; + + bit HasImplictSEW = 0; + let TSFlags{21} = HasImplictSEW; + + // The actual SEW value is 8 * (2 ^ VSEW). + bits<2> VSEW = 0; + let TSFlags{23-22} = VSEW; } class RVInstgetDesc(); MIB.add(DefMBBI->getOperand(RISCVII::getVLOpNum(Desc))); // AVL - MIB.add(DefMBBI->getOperand(RISCVII::getSEWOpNum(Desc))); // SEW + MIB.add(RISCVII::getSEWOp(*DefMBBI)); // SEW MIB.addImm(0); // tu, mu MIB.addReg(RISCV::VL, RegState::Implicit); MIB.addReg(RISCV::VTYPE, RegState::Implicit); @@ -498,7 +498,7 @@ if (UseVMV_V_V) { const MCInstrDesc &Desc = DefMBBI->getDesc(); MIB.add(DefMBBI->getOperand(RISCVII::getVLOpNum(Desc))); // AVL - MIB.add(DefMBBI->getOperand(RISCVII::getSEWOpNum(Desc))); // SEW + MIB.add(RISCVII::getSEWOp(*DefMBBI)); // SEW MIB.addImm(0); // tu, mu MIB.addReg(RISCV::VL, RegState::Implicit); MIB.addReg(RISCV::VTYPE, RegState::Implicit); @@ -1841,10 +1841,6 @@ return false; } } - if (!RISCVII::hasSEWOp(TSFlags)) { - ErrInfo = "VL operand w/o SEW operand?"; - return false; - } } if (RISCVII::hasSEWOp(TSFlags)) { unsigned OpIdx = RISCVII::getSEWOpNum(Desc); diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -760,15 +760,20 @@ class VPseudoUSLoadNoMask : Pseudo<(outs RetClass:$rd), - (ins RetClass:$dest, GPRMem:$rs1, AVL:$vl, ixlenimm:$sew, - ixlenimm:$policy), []>, + !if(!eq(EEW, 1), + (ins RetClass:$dest, GPRMem:$rs1, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), + (ins RetClass:$dest, GPRMem:$rs1, AVL:$vl, ixlenimm:$policy)), []>, RISCVVPseudo, RISCVVLE { let mayLoad = 1; let mayStore = 0; let hasSideEffects = 0; let HasVLOp = 1; - let HasSEWOp = 1; + defvar hasSEWOp = !eq(EEW, 1); + let HasSEWOp = hasSEWOp; + // For mask load, EEW = 1. 
+ let HasImplictSEW = !not(hasSEWOp); + let VSEW = !if(hasSEWOp, 0, !logtwo(!div(EEW, 8))); let HasVecPolicyOp = 1; let Constraints = "$rd = $dest"; } @@ -778,7 +783,7 @@ Pseudo<(outs GetVRegNoV0.R:$rd), (ins GetVRegNoV0.R:$merge, GPRMem:$rs1, - VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, + VMaskOp:$vm, AVL:$vl, ixlenimm:$policy), []>, RISCVVPseudo, RISCVVLE { let mayLoad = 1; @@ -786,7 +791,8 @@ let hasSideEffects = 0; let Constraints = "$rd = $merge"; let HasVLOp = 1; - let HasSEWOp = 1; + let HasImplictSEW = 1; + let VSEW = !logtwo(!div(EEW, 8)); let HasVecPolicyOp = 1; let UsesMaskPolicy = 1; } @@ -794,15 +800,15 @@ class VPseudoUSLoadFFNoMask : Pseudo<(outs RetClass:$rd, GPR:$vl), - (ins RetClass:$dest, GPRMem:$rs1, AVL:$avl, - ixlenimm:$sew, ixlenimm:$policy), []>, + (ins RetClass:$dest, GPRMem:$rs1, AVL:$avl, ixlenimm:$policy), []>, RISCVVPseudo, RISCVVLE { let mayLoad = 1; let mayStore = 0; let hasSideEffects = 0; let HasVLOp = 1; - let HasSEWOp = 1; + let HasImplictSEW = 1; + let VSEW = !logtwo(!div(EEW, 8)); let HasVecPolicyOp = 1; let Constraints = "$rd = $dest"; } @@ -812,7 +818,7 @@ Pseudo<(outs GetVRegNoV0.R:$rd, GPR:$vl), (ins GetVRegNoV0.R:$merge, GPRMem:$rs1, - VMaskOp:$vm, AVL:$avl, ixlenimm:$sew, ixlenimm:$policy), []>, + VMaskOp:$vm, AVL:$avl, ixlenimm:$policy), []>, RISCVVPseudo, RISCVVLE { let mayLoad = 1; @@ -820,7 +826,8 @@ let hasSideEffects = 0; let Constraints = "$rd = $merge"; let HasVLOp = 1; - let HasSEWOp = 1; + let HasImplictSEW = 1; + let VSEW = !logtwo(!div(EEW, 8)); let HasVecPolicyOp = 1; let UsesMaskPolicy = 1; } @@ -828,15 +835,15 @@ class VPseudoSLoadNoMask : Pseudo<(outs RetClass:$rd), - (ins RetClass:$dest, GPRMem:$rs1, GPR:$rs2, AVL:$vl, - ixlenimm:$sew, ixlenimm:$policy), []>, + (ins RetClass:$dest, GPRMem:$rs1, GPR:$rs2, AVL:$vl, ixlenimm:$policy), []>, RISCVVPseudo, RISCVVLE { let mayLoad = 1; let mayStore = 0; let hasSideEffects = 0; let HasVLOp = 1; - let HasSEWOp = 1; + let HasImplictSEW = 1; + let VSEW = !logtwo(!div(EEW, 8)); let HasVecPolicyOp = 1; let Constraints = "$rd = $dest"; } @@ -846,7 +853,7 @@ Pseudo<(outs GetVRegNoV0.R:$rd), (ins GetVRegNoV0.R:$merge, GPRMem:$rs1, GPR:$rs2, - VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, + VMaskOp:$vm, AVL:$vl, ixlenimm:$policy), []>, RISCVVPseudo, RISCVVLE { let mayLoad = 1; @@ -854,7 +861,8 @@ let hasSideEffects = 0; let Constraints = "$rd = $merge"; let HasVLOp = 1; - let HasSEWOp = 1; + let HasImplictSEW = 1; + let VSEW = !logtwo(!div(EEW, 8)); let HasVecPolicyOp = 1; let UsesMaskPolicy = 1; } @@ -904,56 +912,62 @@ class VPseudoUSStoreNoMask : Pseudo<(outs), - (ins StClass:$rd, GPRMem:$rs1, AVL:$vl, ixlenimm:$sew), []>, + !if(!eq(EEW, 1), + (ins StClass:$rd, GPRMem:$rs1, AVL:$vl, ixlenimm:$sew), + (ins StClass:$rd, GPRMem:$rs1, AVL:$vl)), []>, RISCVVPseudo, RISCVVSE { let mayLoad = 0; let mayStore = 1; let hasSideEffects = 0; let HasVLOp = 1; - let HasSEWOp = 1; + // For mask store, EEW = 1. 
+ defvar hasSEWOp = !eq(EEW, 1); + let HasSEWOp = hasSEWOp; + let HasImplictSEW = !not(hasSEWOp); + let VSEW = !if(hasSEWOp, 0, !logtwo(!div(EEW, 8))); } class VPseudoUSStoreMask : Pseudo<(outs), - (ins StClass:$rd, GPRMem:$rs1, - VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>, + (ins StClass:$rd, GPRMem:$rs1, VMaskOp:$vm, AVL:$vl), []>, RISCVVPseudo, RISCVVSE { let mayLoad = 0; let mayStore = 1; let hasSideEffects = 0; let HasVLOp = 1; - let HasSEWOp = 1; + let HasImplictSEW = 1; + let VSEW = !logtwo(!div(EEW, 8)); } class VPseudoSStoreNoMask : Pseudo<(outs), - (ins StClass:$rd, GPRMem:$rs1, GPR:$rs2, - AVL:$vl, ixlenimm:$sew), []>, + (ins StClass:$rd, GPRMem:$rs1, GPR:$rs2, AVL:$vl), []>, RISCVVPseudo, RISCVVSE { let mayLoad = 0; let mayStore = 1; let hasSideEffects = 0; let HasVLOp = 1; - let HasSEWOp = 1; + let HasImplictSEW = 1; + let VSEW = !logtwo(!div(EEW, 8)); } class VPseudoSStoreMask : Pseudo<(outs), - (ins StClass:$rd, GPRMem:$rs1, GPR:$rs2, - VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>, + (ins StClass:$rd, GPRMem:$rs1, GPR:$rs2, VMaskOp:$vm, AVL:$vl), []>, RISCVVPseudo, RISCVVSE { let mayLoad = 0; let mayStore = 1; let hasSideEffects = 0; let HasVLOp = 1; - let HasSEWOp = 1; + let HasImplictSEW = 1; + let VSEW = !logtwo(!div(EEW, 8)); } class VPseudoNullaryNoMask : @@ -1018,20 +1032,27 @@ class VPseudoUnaryNoMaskRoundingMode : + string Constraint = "", + bit hasSEWOp = 1> : Pseudo<(outs RetClass:$rd), - (ins RetClass:$merge, OpClass:$rs2, ixlenimm:$rm, - AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, + !if(hasSEWOp, + (ins RetClass:$merge, OpClass:$rs2, ixlenimm:$rm, + AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), + (ins RetClass:$merge, OpClass:$rs2, ixlenimm:$rm, + AVL:$vl, ixlenimm:$policy)), []>, RISCVVPseudo { let mayLoad = 0; let mayStore = 0; let hasSideEffects = 0; let Constraints = !interleave([Constraint, "$rd = $merge"], ","); let HasVLOp = 1; - let HasSEWOp = 1; + let HasSEWOp = hasSEWOp; let HasVecPolicyOp = 1; let HasRoundModeOp = 1; let UsesVXRM = 0; + let HasImplictSEW = !not(HasSEWOp); + defvar sewDividedBy8 = !div(SEW, 8); + let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0); } class VPseudoUnaryMask : + string Constraint = "", + bit hasSEWOp = 1> : Pseudo<(outs GetVRegNoV0.R:$rd), - (ins GetVRegNoV0.R:$merge, OpClass:$rs2, - VMaskOp:$vm, ixlenimm:$rm, - AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, + !if(hasSEWOp, + (ins GetVRegNoV0.R:$merge, OpClass:$rs2, + VMaskOp:$vm, ixlenimm:$rm, + AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), + (ins GetVRegNoV0.R:$merge, OpClass:$rs2, + VMaskOp:$vm, ixlenimm:$rm, + AVL:$vl, ixlenimm:$policy)), []>, RISCVVPseudo { let mayLoad = 0; let mayStore = 0; let hasSideEffects = 0; let Constraints = !interleave([Constraint, "$rd = $merge"], ","); let HasVLOp = 1; - let HasSEWOp = 1; + let HasSEWOp = hasSEWOp; let HasVecPolicyOp = 1; let UsesMaskPolicy = 1; let HasRoundModeOp = 1; let UsesVXRM = 0; + let HasImplictSEW = !not(HasSEWOp); + defvar sewDividedBy8 = !div(SEW, 8); + let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0); } class VPseudoUnaryMask_NoExcept : Pseudo<(outs RetClass:$rd), (ins RetClass:$merge, Op1Class:$rs2, - VR:$vm, AVL:$vl, ixlenimm:$sew), []>, + VR:$vm, AVL:$vl), []>, RISCVVPseudo { let mayLoad = 0; let mayStore = 0; let hasSideEffects = 0; let Constraints = "@earlyclobber $rd, $rd = $merge"; let HasVLOp = 1; - let HasSEWOp = 1; + let HasImplictSEW = 1; + defvar sewDividedBy8 = !div(SEW, 8); + let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0); } class VPseudoBinaryNoMask : + 
string Constraint, + bit hasSEWOp = 1> : Pseudo<(outs RetClass:$rd), - (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, - ixlenimm:$sew, ixlenimm:$policy), []>, + !if(hasSEWOp, + (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, + ixlenimm:$sew, ixlenimm:$policy), + (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, + ixlenimm:$policy)), []>, RISCVVPseudo { let mayLoad = 0; let mayStore = 0; let hasSideEffects = 0; let Constraints = !interleave([Constraint, "$rd = $merge"], ","); let HasVLOp = 1; - let HasSEWOp = 1; + let HasSEWOp = hasSEWOp; let HasVecPolicyOp = 1; + let HasImplictSEW = !not(HasSEWOp); + defvar sewDividedBy8 = !div(SEW, 8); + let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0); } class VPseudoBinaryNoMaskRoundingMode : + int UsesVXRM_ = 1, + bit hasSEWOp = 1> : Pseudo<(outs RetClass:$rd), - (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, ixlenimm:$rm, - AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, + !if(hasSEWOp, + (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, ixlenimm:$rm, + AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), + (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, ixlenimm:$rm, + AVL:$vl, ixlenimm:$policy)), []>, RISCVVPseudo { let mayLoad = 0; let mayStore = 0; let Constraints = !interleave([Constraint, "$rd = $merge"], ","); let HasVLOp = 1; - let HasSEWOp = 1; + let HasSEWOp = hasSEWOp; let HasVecPolicyOp = 1; let HasRoundModeOp = 1; let UsesVXRM = UsesVXRM_; + let HasImplictSEW = !not(HasSEWOp); + defvar sewDividedBy8 = !div(SEW, 8); + let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0); } class VPseudoBinaryMaskPolicyRoundingMode : + int UsesVXRM_, + bit hasSEWOp = 1> : Pseudo<(outs GetVRegNoV0.R:$rd), - (ins GetVRegNoV0.R:$merge, - Op1Class:$rs2, Op2Class:$rs1, - VMaskOp:$vm, ixlenimm:$rm, AVL:$vl, - ixlenimm:$sew, ixlenimm:$policy), []>, + !if(hasSEWOp, + (ins GetVRegNoV0.R:$merge, + Op1Class:$rs2, Op2Class:$rs1, + VMaskOp:$vm, ixlenimm:$rm, AVL:$vl, + ixlenimm:$sew, ixlenimm:$policy), + (ins GetVRegNoV0.R:$merge, + Op1Class:$rs2, Op2Class:$rs1, + VMaskOp:$vm, ixlenimm:$rm, AVL:$vl, + ixlenimm:$policy)), []>, RISCVVPseudo { let mayLoad = 0; let mayStore = 0; let Constraints = !interleave([Constraint, "$rd = $merge"], ","); let HasVLOp = 1; - let HasSEWOp = 1; + let HasSEWOp = hasSEWOp; let HasVecPolicyOp = 1; let UsesMaskPolicy = 1; let HasRoundModeOp = 1; let UsesVXRM = UsesVXRM_; + let HasImplictSEW = !not(HasSEWOp); + defvar sewDividedBy8 = !div(SEW, 8); + let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0); } // Special version of VPseudoBinaryNoMask where we pretend the first source is @@ -1325,60 +1379,86 @@ class VPseudoBinaryMaskPolicy : + string Constraint, + bit hasSEWOp = 1> : Pseudo<(outs GetVRegNoV0.R:$rd), - (ins GetVRegNoV0.R:$merge, - Op1Class:$rs2, Op2Class:$rs1, - VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, + !if(hasSEWOp, + (ins GetVRegNoV0.R:$merge, + Op1Class:$rs2, Op2Class:$rs1, + VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), + (ins GetVRegNoV0.R:$merge, + Op1Class:$rs2, Op2Class:$rs1, + VMaskOp:$vm, AVL:$vl, ixlenimm:$policy)), []>, RISCVVPseudo { let mayLoad = 0; let mayStore = 0; let hasSideEffects = 0; let Constraints = !interleave([Constraint, "$rd = $merge"], ","); let HasVLOp = 1; - let HasSEWOp = 1; + let HasSEWOp = hasSEWOp; let HasVecPolicyOp = 1; let UsesMaskPolicy = 1; + let HasImplictSEW = !not(HasSEWOp); + defvar sewDividedBy8 = !div(SEW, 8); + let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0); } 
class VPseudoTernaryMaskPolicy : + string Constraint, + bit hasSEWOp = 1> : Pseudo<(outs GetVRegNoV0.R:$rd), - (ins GetVRegNoV0.R:$merge, - Op1Class:$rs2, Op2Class:$rs1, - VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, + !if(hasSEWOp, + (ins GetVRegNoV0.R:$merge, + Op1Class:$rs2, Op2Class:$rs1, + VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), + (ins GetVRegNoV0.R:$merge, + Op1Class:$rs2, Op2Class:$rs1, + VMaskOp:$vm, AVL:$vl, ixlenimm:$policy)), []>, RISCVVPseudo { let mayLoad = 0; let mayStore = 0; let hasSideEffects = 0; let Constraints = !interleave([Constraint, "$rd = $merge"], ","); let HasVLOp = 1; - let HasSEWOp = 1; + let HasSEWOp = hasSEWOp; let HasVecPolicyOp = 1; + let HasImplictSEW = !not(HasSEWOp); + defvar sewDividedBy8 = !div(SEW, 8); + let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0); } class VPseudoTernaryMaskPolicyRoundingMode : + string Constraint, + bit hasSEWOp = 1> : Pseudo<(outs GetVRegNoV0.R:$rd), - (ins GetVRegNoV0.R:$merge, - Op1Class:$rs2, Op2Class:$rs1, - VMaskOp:$vm, - ixlenimm:$rm, - AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, + !if(hasSEWOp, + (ins GetVRegNoV0.R:$merge, + Op1Class:$rs2, Op2Class:$rs1, + VMaskOp:$vm, + ixlenimm:$rm, + AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), + (ins GetVRegNoV0.R:$merge, + Op1Class:$rs2, Op2Class:$rs1, + VMaskOp:$vm, + ixlenimm:$rm, + AVL:$vl, ixlenimm:$policy)), []>, RISCVVPseudo { let mayLoad = 0; let mayStore = 0; let hasSideEffects = 0; let Constraints = !interleave([Constraint, "$rd = $merge"], ","); let HasVLOp = 1; - let HasSEWOp = 1; + let HasSEWOp = hasSEWOp; let HasVecPolicyOp = 1; let HasRoundModeOp = 1; let UsesVXRM = 0; + let HasImplictSEW = !not(HasSEWOp); + defvar sewDividedBy8 = !div(SEW, 8); + let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0); } // Like VPseudoBinaryNoMask, but output can be V0. 
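For illustration only (this block is not part of the diff): a self-contained C++ sketch that mirrors the implicit-SEW encoding introduced above — bit 21 for HasImplictSEW and bits 23-22 for VSEW, per the RISCVInstrFormats.td hunk — and the "3 + VSEW" decode used by RISCVII::getLog2SEW() when a pseudo no longer carries an explicit $sew operand. The RISCVIIModel namespace and the main() driver are invented for the sketch.

// Standalone sketch, assuming the bit layout shown in the patch.
#include <cassert>
#include <cstdint>
#include <cstdio>

namespace RISCVIIModel {
enum : uint64_t {
  HasImplictSEWShift = 21,                       // TSFlags{21}
  HasImplictSEWMask = 1ull << HasImplictSEWShift,
  VSEWShift = 22,                                // TSFlags{23-22}
  VSEWMask = 0b11ull << VSEWShift,
};

enum VSEW : uint8_t { SEW_8 = 0, SEW_16, SEW_32, SEW_64 };

constexpr bool hasImplictSEW(uint64_t TSFlags) {
  return TSFlags & HasImplictSEWMask;
}
constexpr VSEW getVSEW(uint64_t TSFlags) {
  return static_cast<VSEW>((TSFlags & VSEWMask) >> VSEWShift);
}
// The actual SEW value is 8 * (2 ^ VSEW), so Log2SEW = 3 + VSEW. This is the
// implicit-SEW path of RISCVII::getLog2SEW(); the explicit path reads $sew.
unsigned getLog2SEW(uint64_t TSFlags) {
  assert(hasImplictSEW(TSFlags) && "expected an implicit-SEW pseudo");
  return 3 + getVSEW(TSFlags);
}
} // namespace RISCVIIModel

int main() {
  using namespace RISCVIIModel;
  // e.g. a PseudoVLE64 flavour: VSEW = SEW_64 baked into TSFlags.
  uint64_t TSFlags = HasImplictSEWMask | (uint64_t(SEW_64) << VSEWShift);
  std::printf("Log2SEW=%u SEW=%u\n", getLog2SEW(TSFlags),
              1u << getLog2SEW(TSFlags)); // prints Log2SEW=6 SEW=64
  return 0;
}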
@@ -1525,10 +1605,14 @@ class VPseudoTernaryNoMaskWithPolicy : + string Constraint, + bit hasSEWOp = 1> : Pseudo<(outs RetClass:$rd), - (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2, - AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, + !if(hasSEWOp, + (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2, + AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), + (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2, + AVL:$vl, ixlenimm:$policy)), []>, RISCVVPseudo { let mayLoad = 0; let mayStore = 0; @@ -1536,16 +1620,23 @@ let Constraints = !interleave([Constraint, "$rd = $rs3"], ","); let HasVecPolicyOp = 1; let HasVLOp = 1; - let HasSEWOp = 1; + let HasSEWOp = hasSEWOp; + let HasImplictSEW = !not(HasSEWOp); + defvar sewDividedBy8 = !div(SEW, 8); + let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0); } class VPseudoTernaryNoMaskWithPolicyRoundingMode : + string Constraint, + bit hasSEWOp = 1> : Pseudo<(outs RetClass:$rd), - (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2, - ixlenimm:$rm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, + !if(hasSEWOp, + (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2, + ixlenimm:$rm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), + (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2, + ixlenimm:$rm, AVL:$vl, ixlenimm:$policy)), []>, RISCVVPseudo { let mayLoad = 0; let mayStore = 0; @@ -1553,24 +1644,27 @@ let Constraints = !interleave([Constraint, "$rd = $rs3"], ","); let HasVecPolicyOp = 1; let HasVLOp = 1; - let HasSEWOp = 1; + let HasSEWOp = hasSEWOp; let HasRoundModeOp = 1; let UsesVXRM = 0; + let HasImplictSEW = !not(HasSEWOp); + defvar sewDividedBy8 = !div(SEW, 8); + let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0); } class VPseudoUSSegLoadNoMask NF> : Pseudo<(outs RetClass:$rd), - (ins RetClass:$dest, GPRMem:$rs1, AVL:$vl, - ixlenimm:$sew, ixlenimm:$policy), []>, + (ins RetClass:$dest, GPRMem:$rs1, AVL:$vl, ixlenimm:$policy), []>, RISCVVPseudo, RISCVVLSEG { let mayLoad = 1; let mayStore = 0; let hasSideEffects = 0; let HasVLOp = 1; - let HasSEWOp = 1; + let HasImplictSEW = 1; + let VSEW = !logtwo(!div(EEW, 8)); let HasVecPolicyOp = 1; let Constraints = "$rd = $dest"; } @@ -1580,7 +1674,7 @@ bits<4> NF> : Pseudo<(outs GetVRegNoV0.R:$rd), (ins GetVRegNoV0.R:$merge, GPRMem:$rs1, - VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, + VMaskOp:$vm, AVL:$vl, ixlenimm:$policy), []>, RISCVVPseudo, RISCVVLSEG { let mayLoad = 1; @@ -1588,7 +1682,8 @@ let hasSideEffects = 0; let Constraints = "$rd = $merge"; let HasVLOp = 1; - let HasSEWOp = 1; + let HasImplictSEW = 1; + let VSEW = !logtwo(!div(EEW, 8)); let HasVecPolicyOp = 1; let UsesMaskPolicy = 1; } @@ -1597,15 +1692,15 @@ int EEW, bits<4> NF> : Pseudo<(outs RetClass:$rd, GPR:$vl), - (ins RetClass:$dest, GPRMem:$rs1, AVL:$avl, - ixlenimm:$sew, ixlenimm:$policy), []>, + (ins RetClass:$dest, GPRMem:$rs1, AVL:$avl, ixlenimm:$policy), []>, RISCVVPseudo, RISCVVLSEG { let mayLoad = 1; let mayStore = 0; let hasSideEffects = 0; let HasVLOp = 1; - let HasSEWOp = 1; + let HasImplictSEW = 1; + let VSEW = !logtwo(!div(EEW, 8)); let HasVecPolicyOp = 1; let Constraints = "$rd = $dest"; } @@ -1615,7 +1710,7 @@ bits<4> NF> : Pseudo<(outs GetVRegNoV0.R:$rd, GPR:$vl), (ins GetVRegNoV0.R:$merge, GPRMem:$rs1, - VMaskOp:$vm, AVL:$avl, ixlenimm:$sew, ixlenimm:$policy), []>, + VMaskOp:$vm, AVL:$avl, ixlenimm:$policy), []>, RISCVVPseudo, RISCVVLSEG { let mayLoad = 1; @@ -1623,7 +1718,8 @@ let hasSideEffects = 0; let Constraints = "$rd = $merge"; let HasVLOp = 1; - let HasSEWOp = 1; + let HasImplictSEW = 1; + let 
VSEW = !logtwo(!div(EEW, 8)); let HasVecPolicyOp = 1; let UsesMaskPolicy = 1; } @@ -1632,15 +1728,15 @@ int EEW, bits<4> NF> : Pseudo<(outs RetClass:$rd), - (ins RetClass:$merge, GPRMem:$rs1, GPR:$offset, AVL:$vl, - ixlenimm:$sew, ixlenimm:$policy), []>, + (ins RetClass:$merge, GPRMem:$rs1, GPR:$offset, AVL:$vl, ixlenimm:$policy), []>, RISCVVPseudo, RISCVVLSEG { let mayLoad = 1; let mayStore = 0; let hasSideEffects = 0; let HasVLOp = 1; - let HasSEWOp = 1; + let HasImplictSEW = 1; + let VSEW = !logtwo(!div(EEW, 8)); let HasVecPolicyOp = 1; let Constraints = "$rd = $merge"; } @@ -1650,8 +1746,7 @@ bits<4> NF> : Pseudo<(outs GetVRegNoV0.R:$rd), (ins GetVRegNoV0.R:$merge, GPRMem:$rs1, - GPR:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, - ixlenimm:$policy), []>, + GPR:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$policy), []>, RISCVVPseudo, RISCVVLSEG { let mayLoad = 1; @@ -1659,7 +1754,8 @@ let hasSideEffects = 0; let Constraints = "$rd = $merge"; let HasVLOp = 1; - let HasSEWOp = 1; + let HasImplictSEW = 1; + let VSEW = !logtwo(!div(EEW, 8)); let HasVecPolicyOp = 1; let UsesMaskPolicy = 1; } @@ -1714,59 +1810,60 @@ int EEW, bits<4> NF> : Pseudo<(outs), - (ins ValClass:$rd, GPRMem:$rs1, AVL:$vl, ixlenimm:$sew), []>, + (ins ValClass:$rd, GPRMem:$rs1, AVL:$vl), []>, RISCVVPseudo, RISCVVSSEG { let mayLoad = 0; let mayStore = 1; let hasSideEffects = 0; let HasVLOp = 1; - let HasSEWOp = 1; + let HasImplictSEW = 1; + let VSEW = !logtwo(!div(EEW, 8)); } class VPseudoUSSegStoreMask NF> : Pseudo<(outs), - (ins ValClass:$rd, GPRMem:$rs1, - VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>, + (ins ValClass:$rd, GPRMem:$rs1, VMaskOp:$vm, AVL:$vl), []>, RISCVVPseudo, RISCVVSSEG { let mayLoad = 0; let mayStore = 1; let hasSideEffects = 0; let HasVLOp = 1; - let HasSEWOp = 1; + let HasImplictSEW = 1; + let VSEW = !logtwo(!div(EEW, 8)); } class VPseudoSSegStoreNoMask NF> : Pseudo<(outs), - (ins ValClass:$rd, GPRMem:$rs1, GPR:$offset, - AVL:$vl, ixlenimm:$sew), []>, + (ins ValClass:$rd, GPRMem:$rs1, GPR:$offset, AVL:$vl), []>, RISCVVPseudo, RISCVVSSEG { let mayLoad = 0; let mayStore = 1; let hasSideEffects = 0; let HasVLOp = 1; - let HasSEWOp = 1; + let HasImplictSEW = 1; + let VSEW = !logtwo(!div(EEW, 8)); } class VPseudoSSegStoreMask NF> : Pseudo<(outs), - (ins ValClass:$rd, GPRMem:$rs1, GPR: $offset, - VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>, + (ins ValClass:$rd, GPRMem:$rs1, GPR: $offset, VMaskOp:$vm, AVL:$vl), []>, RISCVVPseudo, RISCVVSSEG { let mayLoad = 0; let mayStore = 1; let hasSideEffects = 0; let HasVLOp = 1; - let HasSEWOp = 1; + let HasImplictSEW = 1; + let VSEW = !logtwo(!div(EEW, 8)); } class VPseudoISegStoreNoMask { let VLMul = MInfo.value, SEW=sew in { defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX); + defvar hasSEWOp = !eq(sew, 0); def suffix : VPseudoBinaryNoMaskTU; + Constraint, hasSEWOp=hasSEWOp>; def suffix # "_MASK" : VPseudoBinaryMaskPolicy, + Constraint, hasSEWOp=hasSEWOp>, RISCVMaskedPseudo; } } @@ -2086,8 +2184,9 @@ int sew = 0> { let VLMul = MInfo.value, SEW=sew in { defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX); + defvar hasSEWOp = !eq(sew, 0); def suffix : VPseudoBinaryNoMaskTU; + Constraint, hasSEWOp=hasSEWOp>; } } @@ -2100,13 +2199,15 @@ int UsesVXRM = 1> { let VLMul = MInfo.value, SEW=sew in { defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX); + defvar hasSEWOp = !eq(sew, 0); def suffix : VPseudoBinaryNoMaskRoundingMode; + Constraint, UsesVXRM, hasSEWOp>; def suffix # "_MASK" : VPseudoBinaryMaskPolicyRoundingMode, + 
UsesVXRM, + hasSEWOp>, RISCVMaskedPseudo; } } @@ -2136,10 +2237,11 @@ int sew = 0> { let VLMul = lmul.value, SEW=sew in { defvar suffix = !if(sew, "_" # lmul.MX # "_E" # sew, "_" # lmul.MX); + defvar hasSEWOp = !eq(sew, 0); def suffix # "_" # emul.MX : VPseudoBinaryNoMaskTU; + Constraint, hasSEWOp>; def suffix # "_" # emul.MX # "_MASK" : VPseudoBinaryMaskPolicy, + Constraint, hasSEWOp>, RISCVMaskedPseudo; } } @@ -2494,11 +2596,11 @@ foreach e = sews in { defvar suffix = "_" # mx # "_E" # e; let SEW = e in { - def "_V" # suffix : VPseudoUnaryNoMaskRoundingMode, + def "_V" # suffix : VPseudoUnaryNoMaskRoundingMode, SchedUnary<"WriteVFSqrtV", "ReadVFSqrtV", mx, e, forceMergeOpRead=true>; def "_V" #suffix # "_MASK" - : VPseudoUnaryMaskRoundingMode, + : VPseudoUnaryMaskRoundingMode, RISCVMaskedPseudo, SchedUnary<"WriteVFSqrtV", "ReadVFSqrtV", mx, e, forceMergeOpRead=true>; @@ -3145,8 +3247,12 @@ let VLMul = MInfo.value, SEW=sew in { defvar mx = MInfo.MX; let isCommutable = Commutable in - def "_" # mx # "_E" # sew : VPseudoTernaryNoMaskWithPolicy; - def "_" # mx # "_E" # sew # "_MASK" : VPseudoTernaryMaskPolicy; + def "_" # mx # "_E" # sew : VPseudoTernaryNoMaskWithPolicy; + def "_" # mx # "_E" # sew # "_MASK" : VPseudoTernaryMaskPolicy; } } @@ -3162,10 +3268,12 @@ let isCommutable = Commutable in def "_" # mx # "_E" # sew : VPseudoTernaryNoMaskWithPolicyRoundingMode; + Op2Class, Constraint, + hasSEWOp=0>; def "_" # mx # "_E" # sew # "_MASK" : VPseudoTernaryMaskPolicyRoundingMode; + Op2Class, Constraint, + hasSEWOp=0>; } } @@ -3900,13 +4008,15 @@ (result_type result_reg_class:$merge), (op2_type op2_reg_class:$rs2), VLOpFrag)), - (!cast( - !if(isSEWAware, - inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew), - inst#"_"#kind#"_"#vlmul.MX)) - (result_type result_reg_class:$merge), - (op2_type op2_reg_class:$rs2), - GPR:$vl, log2sew, TU_MU)>; + !if(isSEWAware, + (!cast(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)) + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + GPR:$vl, TU_MU), + (!cast(inst#"_"#kind#"_"#vlmul.MX) + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + GPR:$vl, log2sew, TU_MU))>; class VPatUnaryNoMaskRoundingMode( - !if(isSEWAware, - inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew), - inst#"_"#kind#"_"#vlmul.MX)) - (result_type result_reg_class:$merge), - (op2_type op2_reg_class:$rs2), - (XLenVT timm:$round), - GPR:$vl, log2sew, TU_MU)>; + !if(isSEWAware, + (!cast(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)) + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + (XLenVT timm:$round), + GPR:$vl, TU_MU), + (!cast(inst#"_"#kind#"_"#vlmul.MX) + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + (XLenVT timm:$round), + GPR:$vl, log2sew, TU_MU))>; class VPatUnaryMask( - !if(isSEWAware, - inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", - inst#"_"#kind#"_"#vlmul.MX#"_MASK")) - (result_type result_reg_class:$merge), - (op2_type op2_reg_class:$rs2), - (mask_type V0), GPR:$vl, log2sew, (XLenVT timm:$policy))>; + !if(isSEWAware, + (!cast(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK") + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + (mask_type V0), GPR:$vl, (XLenVT timm:$policy)), + (!cast(inst#"_"#kind#"_"#vlmul.MX#"_MASK") + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + (mask_type V0), GPR:$vl, log2sew, (XLenVT timm:$policy)))>; class VPatUnaryMaskRoundingMode( - !if(isSEWAware, - inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, 
log2sew)#"_MASK", - inst#"_"#kind#"_"#vlmul.MX#"_MASK")) - (result_type result_reg_class:$merge), - (op2_type op2_reg_class:$rs2), - (mask_type V0), - (XLenVT timm:$round), - GPR:$vl, log2sew, (XLenVT timm:$policy))>; + !if(isSEWAware, + (!cast(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK") + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + (mask_type V0), + (XLenVT timm:$round), + GPR:$vl, (XLenVT timm:$policy)), + (!cast(inst#"_"#kind#"_"#vlmul.MX#"_MASK") + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + (mask_type V0), + (XLenVT timm:$round), + GPR:$vl, log2sew, (XLenVT timm:$policy)))>; class VPatMaskUnaryNoMask; + GPR:$vl)>; class VPatBinaryM : + DAGOperand op2_kind, + bit hasSEWOp = 1> : Pat<(result_type (!cast(intrinsic_name) (result_type result_reg_class:$merge), (op1_type op1_reg_class:$rs1), (op2_type op2_kind:$rs2), VLOpFrag)), - (!cast(inst) - (result_type result_reg_class:$merge), - (op1_type op1_reg_class:$rs1), - (op2_type op2_kind:$rs2), - GPR:$vl, sew, TU_MU)>; + !if(hasSEWOp, + (!cast(inst) + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + GPR:$vl, sew, TU_MU), + (!cast(inst) + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + GPR:$vl, TU_MU))>; class VPatBinaryNoMaskRoundingMode : + DAGOperand op2_kind, + bit hasSEWOp = 1> : Pat<(result_type (!cast(intrinsic_name) (result_type (undef)), (op1_type op1_reg_class:$rs1), (op2_type op2_kind:$rs2), (XLenVT timm:$round), VLOpFrag)), - (!cast(inst) - (result_type (IMPLICIT_DEF)), - (op1_type op1_reg_class:$rs1), - (op2_type op2_kind:$rs2), - (XLenVT timm:$round), - GPR:$vl, sew, TA_MA)>; + !if(hasSEWOp, + (!cast(inst) + (result_type (IMPLICIT_DEF)), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (XLenVT timm:$round), + GPR:$vl, sew, TA_MA), + (!cast(inst) + (result_type (IMPLICIT_DEF)), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (XLenVT timm:$round), + GPR:$vl, TA_MA))>; class VPatBinaryNoMaskTURoundingMode : + DAGOperand op2_kind, + bit hasSEWOp = 1> : Pat<(result_type (!cast(intrinsic_name) (result_type result_reg_class:$merge), (op1_type op1_reg_class:$rs1), (op2_type op2_kind:$rs2), (XLenVT timm:$round), VLOpFrag)), - (!cast(inst) - (result_type result_reg_class:$merge), - (op1_type op1_reg_class:$rs1), - (op2_type op2_kind:$rs2), - (XLenVT timm:$round), - GPR:$vl, sew, TU_MU)>; + !if(hasSEWOp, + (!cast(inst) + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (XLenVT timm:$round), + GPR:$vl, sew, TU_MU), + (!cast(inst) + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (XLenVT timm:$round), + GPR:$vl, TU_MU))>; // Same as above but source operands are swapped. 
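For illustration only (not part of the diff): once SEW is optionally dropped from the operand list, helpers such as RISCVII::getVLOpNum() (changed earlier in RISCVBaseInfo.h) and the TrueVLIndex computation in RISCVISelDAGToDAG.cpp locate VL by counting back from the end of the operand list, skipping a SEW slot only when HasSEWOp is set. The standalone sketch below models that arithmetic; getVLOpIdx and the operand counts in main() are hypothetical stand-ins, not LLVM API.

// Standalone sketch: tail layout is "... VL [, SEW] [, policy]".
#include <cassert>
#include <cstdio>

unsigned getVLOpIdx(unsigned NumOperands, bool HasVLOp, bool HasSEWOp,
                    bool HasVecPolicyOp) {
  assert(HasVLOp && "only called when a VL operand is expected");
  unsigned Offset = 1 + (HasSEWOp ? 1 : 0); // VL, then the optional SEW slot
  if (HasVecPolicyOp)
    Offset += 1;                            // trailing policy immediate
  return NumOperands - Offset;
}

int main() {
  // PseudoVLE64_V_M1 after the patch: (dest, rs1, vl, policy) -> VL at index 2.
  std::printf("implicit-SEW load: VL at %u\n",
              getVLOpIdx(4, true, false, true));
  // A pseudo that keeps an explicit SEW: (dest, rs1, vl, sew, policy) -> VL at 2.
  std::printf("explicit-SEW pseudo: VL at %u\n",
              getVLOpIdx(5, true, true, true));
  return 0;
}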
@@ -4160,18 +4302,25 @@ int sew, VReg result_reg_class, VReg op1_reg_class, - DAGOperand op2_kind> : + DAGOperand op2_kind, + bit hasSEWOp = 1> : Pat<(result_type (!cast(intrinsic_name#"_mask") (result_type result_reg_class:$merge), (op1_type op1_reg_class:$rs1), (op2_type op2_kind:$rs2), (mask_type V0), VLOpFrag, (XLenVT timm:$policy))), - (!cast(inst#"_MASK") - (result_type result_reg_class:$merge), - (op1_type op1_reg_class:$rs1), - (op2_type op2_kind:$rs2), - (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>; + !if(hasSEWOp, + (!cast(inst#"_MASK") + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy)), + (!cast(inst#"_MASK") + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (mask_type V0), GPR:$vl, (XLenVT timm:$policy)))>; class VPatBinaryMaskTARoundingMode : + DAGOperand op2_kind, + bit hasSEWOp = 1> : Pat<(result_type (!cast(intrinsic_name#"_mask") (result_type result_reg_class:$merge), (op1_type op1_reg_class:$rs1), @@ -4190,13 +4340,21 @@ (mask_type V0), (XLenVT timm:$round), VLOpFrag, (XLenVT timm:$policy))), - (!cast(inst#"_MASK") - (result_type result_reg_class:$merge), - (op1_type op1_reg_class:$rs1), - (op2_type op2_kind:$rs2), - (mask_type V0), - (XLenVT timm:$round), - GPR:$vl, sew, (XLenVT timm:$policy))>; + !if(hasSEWOp, + (!cast(inst#"_MASK") + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (mask_type V0), + (XLenVT timm:$round), + GPR:$vl, sew, (XLenVT timm:$policy)), + (!cast(inst#"_MASK") + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (mask_type V0), + (XLenVT timm:$round), + GPR:$vl, (XLenVT timm:$policy)))>; // Same as above but source operands are swapped. 
class VPatBinaryMaskSwapped; + GPR:$vl, TAIL_AGNOSTIC)>; class VPatTernaryNoMaskTARoundingMode; + GPR:$vl, TAIL_AGNOSTIC)>; class VPatTernaryNoMaskWithPolicy; + GPR:$vl, TAIL_AGNOSTIC)>; class VPatTernaryMaskTARoundingMode; + GPR:$vl, TAIL_AGNOSTIC)>; multiclass VPatUnaryS_M { @@ -4717,12 +4875,13 @@ int sew, VReg result_reg_class, VReg op1_reg_class, - DAGOperand op2_kind> { + DAGOperand op2_kind, + bit hasSEWOp = 1> { def : VPatBinaryNoMaskTU; + sew, result_reg_class, op1_reg_class, op2_kind, hasSEWOp>; def : VPatBinaryMaskTA; + op2_kind, hasSEWOp>; } multiclass VPatBinaryRoundingMode { + DAGOperand op2_kind, + bit hasSEWOp = 1> { def : VPatBinaryNoMaskRoundingMode; + sew, op1_reg_class, op2_kind, hasSEWOp>; def : VPatBinaryNoMaskTURoundingMode; + sew, result_reg_class, op1_reg_class, op2_kind, hasSEWOp>; def : VPatBinaryMaskTARoundingMode; + op2_kind, hasSEWOp>; } multiclass VPatBinarySwapped; + vti.RegClass, vti.RegClass, + hasSEWOp=!not(isSEWAware)>; } multiclass VPatBinaryV_VV_RM; + vti.RegClass, vti.RegClass, + hasSEWOp=!not(isSEWAware)>; } multiclass VPatBinaryV_VV_INT; + vti.RegClass, vti.RegClass, + hasSEWOp=0>; } } @@ -4915,7 +5078,8 @@ defm : VPatBinary; + vti.RegClass, ivti.RegClass, + hasSEWOp=0>; } } } @@ -4931,7 +5095,8 @@ instruction#"_"#kind#"_"#vti.LMul.MX), vti.Vector, vti.Vector, vti.Scalar, vti.Mask, vti.Log2SEW, vti.RegClass, - vti.RegClass, vti.ScalarRegClass>; + vti.RegClass, vti.ScalarRegClass, + hasSEWOp=!not(isSEWAware)>; } } @@ -4946,7 +5111,8 @@ instruction#"_"#kind#"_"#vti.LMul.MX), vti.Vector, vti.Vector, vti.Scalar, vti.Mask, vti.Log2SEW, vti.RegClass, - vti.RegClass, vti.ScalarRegClass>; + vti.RegClass, vti.ScalarRegClass, + hasSEWOp=!not(isSEWAware)>; } } diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td @@ -34,11 +34,10 @@ defvar store_instr = !cast("PseudoVSE"#sew#"_V_"#vlmul.MX); // Load def : Pat<(type (load GPR:$rs1)), - (load_instr (type (IMPLICIT_DEF)), GPR:$rs1, avl, - log2sew, TA_MA)>; + (load_instr (type (IMPLICIT_DEF)), GPR:$rs1, avl, TA_MA)>; // Store def : Pat<(store type:$rs2, GPR:$rs1), - (store_instr reg_class:$rs2, GPR:$rs1, avl, log2sew)>; + (store_instr reg_class:$rs2, GPR:$rs1, avl)>; } multiclass VPatUSLoadStoreWholeVRSDNode( - !if(isSEWAware, - instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew), - instruction_name#"_VV_"# vlmul.MX)) + !if(isSEWAware, + (!cast(instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew)) (result_type (IMPLICIT_DEF)), op_reg_class:$rs1, op_reg_class:$rs2, - avl, log2sew, TA_MA)>; + avl, TA_MA), + (!cast(instruction_name#"_VV_"# vlmul.MX) + (result_type (IMPLICIT_DEF)), + op_reg_class:$rs1, + op_reg_class:$rs2, + avl, log2sew, TA_MA))>; class VPatBinarySDNode_VV_RM( - !if(isSEWAware, - instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew), - instruction_name#"_VV_"# vlmul.MX)) - (result_type (IMPLICIT_DEF)), - op_reg_class:$rs1, - op_reg_class:$rs2, - // Value to indicate no rounding mode change in - // RISCVInsertReadWriteCSR - FRM_DYN, - avl, log2sew, TA_MA)>; + !if(isSEWAware, + (!cast(instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew)) + (result_type (IMPLICIT_DEF)), + op_reg_class:$rs1, + op_reg_class:$rs2, + // Value to indicate no rounding mode change in + // RISCVInsertReadWriteCSR + FRM_DYN, + avl, TA_MA), + (!cast(instruction_name#"_VV_"# vlmul.MX) + (result_type (IMPLICIT_DEF)), + op_reg_class:$rs1, + 
op_reg_class:$rs2, + FRM_DYN, + avl, log2sew, TA_MA))>; class VPatBinarySDNode_XI( - !if(isSEWAware, - instruction_name#_#suffix#_# vlmul.MX#"_E"#!shl(1, log2sew), - instruction_name#_#suffix#_# vlmul.MX)) - (result_type (IMPLICIT_DEF)), - vop_reg_class:$rs1, - xop_kind:$rs2, - avl, log2sew, TA_MA)>; + !if(isSEWAware, + (!cast(instruction_name#_#suffix#_# vlmul.MX#"_E"#!shl(1, log2sew)) + (result_type (IMPLICIT_DEF)), + vop_reg_class:$rs1, + xop_kind:$rs2, + avl, TA_MA), + (!cast(instruction_name#_#suffix#_# vlmul.MX) + (result_type (IMPLICIT_DEF)), + vop_reg_class:$rs1, + xop_kind:$rs2, + avl, log2sew, TA_MA))>; multiclass VPatBinarySDNode_VV_VX vtilist = AllIntegerVectors, @@ -182,14 +191,17 @@ bit isSEWAware = 0> : Pat<(result_type (vop (vop_type vop_reg_class:$rs1), (vop_type (SplatFPOp xop_kind:$rs2)))), - (!cast( - !if(isSEWAware, - instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew), - instruction_name#"_"#vlmul.MX)) - (result_type (IMPLICIT_DEF)), - vop_reg_class:$rs1, - (xop_type xop_kind:$rs2), - avl, log2sew, TA_MA)>; + !if(isSEWAware, + (!cast(instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)) + (result_type (IMPLICIT_DEF)), + vop_reg_class:$rs1, + (xop_type xop_kind:$rs2), + avl, TA_MA), + (!cast(instruction_name#"_"#vlmul.MX) + (result_type (IMPLICIT_DEF)), + vop_reg_class:$rs1, + (xop_type xop_kind:$rs2), + avl, log2sew, TA_MA))>; class VPatBinarySDNode_VF_RM : Pat<(result_type (vop (vop_type vop_reg_class:$rs1), (vop_type (SplatFPOp xop_kind:$rs2)))), - (!cast( - !if(isSEWAware, - instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew), - instruction_name#"_"#vlmul.MX)) - (result_type (IMPLICIT_DEF)), - vop_reg_class:$rs1, - (xop_type xop_kind:$rs2), - // Value to indicate no rounding mode change in - // RISCVInsertReadWriteCSR - FRM_DYN, - avl, log2sew, TA_MA)>; + !if(isSEWAware, + (!cast(instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)) + (result_type (IMPLICIT_DEF)), + vop_reg_class:$rs1, + (xop_type xop_kind:$rs2), + // Value to indicate no rounding mode change in + // RISCVInsertReadWriteCSR + FRM_DYN, + avl, TA_MA), + (!cast(instruction_name#"_"#vlmul.MX) + (result_type (IMPLICIT_DEF)), + vop_reg_class:$rs1, + (xop_type xop_kind:$rs2), + FRM_DYN, + avl, log2sew, TA_MA))>; multiclass VPatBinaryFPSDNode_VV_VF { @@ -252,14 +268,17 @@ let Predicates = GetVTypePredicates.Predicates in def : Pat<(fvti.Vector (vop (fvti.Vector (SplatFPOp fvti.Scalar:$rs2)), (fvti.Vector fvti.RegClass:$rs1))), - (!cast( - !if(isSEWAware, - instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW, - instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)) - (fvti.Vector (IMPLICIT_DEF)), - fvti.RegClass:$rs1, - (fvti.Scalar fvti.ScalarRegClass:$rs2), - fvti.AVL, fvti.Log2SEW, TA_MA)>; + !if(isSEWAware, + (!cast(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW) + (fvti.Vector (IMPLICIT_DEF)), + fvti.RegClass:$rs1, + (fvti.Scalar fvti.ScalarRegClass:$rs2), + fvti.AVL, TA_MA), + (!cast(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX) + (fvti.Vector (IMPLICIT_DEF)), + fvti.RegClass:$rs1, + (fvti.Scalar fvti.ScalarRegClass:$rs2), + fvti.AVL, fvti.Log2SEW, TA_MA))>; } multiclass VPatBinaryFPSDNode_R_VF_RM.Predicates in def : Pat<(fvti.Vector (vop (fvti.Vector (SplatFPOp fvti.Scalar:$rs2)), (fvti.Vector fvti.RegClass:$rs1))), - (!cast( - !if(isSEWAware, - instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW, - instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)) - (fvti.Vector (IMPLICIT_DEF)), - fvti.RegClass:$rs1, - (fvti.Scalar 
fvti.ScalarRegClass:$rs2), - // Value to indicate no rounding mode change in - // RISCVInsertReadWriteCSR - FRM_DYN, - fvti.AVL, fvti.Log2SEW, TA_MA)>; + !if(isSEWAware, + (!cast(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW) + (fvti.Vector (IMPLICIT_DEF)), + fvti.RegClass:$rs1, + (fvti.Scalar fvti.ScalarRegClass:$rs2), + // Value to indicate no rounding mode change in + // RISCVInsertReadWriteCSR + FRM_DYN, + fvti.AVL, TA_MA), + (!cast(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX) + (fvti.Vector (IMPLICIT_DEF)), + fvti.RegClass:$rs1, + (fvti.Scalar fvti.ScalarRegClass:$rs2), + FRM_DYN, + fvti.AVL, fvti.Log2SEW, TA_MA))>; } multiclass VPatIntegerSetCCSDNode_VV("PseudoVREM_VV_"#vti.LMul.MX#"_E"#!shl(1, vti.Log2SEW)) (vti.Vector (IMPLICIT_DEF)), - vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>; + vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, TA_MA)>; } } @@ -1300,7 +1323,7 @@ // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, - vti.AVL, vti.Log2SEW, TA_MA)>; + vti.AVL, TA_MA)>; // 13.12. Vector Floating-Point Sign-Injection Instructions def : Pat<(fabs (vti.Vector vti.RegClass:$rs)), diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td @@ -615,14 +615,17 @@ (result_type result_reg_class:$merge), (mask_type V0), VLOpFrag)), - (!cast( - !if(isSEWAware, - instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", - instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK")) - result_reg_class:$merge, - op1_reg_class:$rs1, - op2_reg_class:$rs2, - (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>; + !if(isSEWAware, + (!cast(instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK") + result_reg_class:$merge, + op1_reg_class:$rs1, + op2_reg_class:$rs2, + (mask_type V0), GPR:$vl, TAIL_AGNOSTIC), + (!cast(instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK") + result_reg_class:$merge, + op1_reg_class:$rs1, + op2_reg_class:$rs2, + (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC))>; class VPatBinaryVL_V_RM( - !if(isSEWAware, - instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", - instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK")) - result_reg_class:$merge, - op1_reg_class:$rs1, - op2_reg_class:$rs2, - (mask_type V0), - // Value to indicate no rounding mode change in - // RISCVInsertReadWriteCSR - FRM_DYN, - GPR:$vl, log2sew, TAIL_AGNOSTIC)>; + !if(isSEWAware, + (!cast(instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK") + result_reg_class:$merge, + op1_reg_class:$rs1, + op2_reg_class:$rs2, + (mask_type V0), + // Value to indicate no rounding mode change in + // RISCVInsertReadWriteCSR + FRM_DYN, + GPR:$vl, TAIL_AGNOSTIC), + (!cast(instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK") + result_reg_class:$merge, + op1_reg_class:$rs1, + op2_reg_class:$rs2, + (mask_type V0), + // Value to indicate no rounding mode change in + // RISCVInsertReadWriteCSR + FRM_DYN, + GPR:$vl, log2sew, TAIL_AGNOSTIC))>; multiclass VPatTiedBinaryNoMaskVL_V( - !if(isSEWAware, - instruction_name#_#suffix#_#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", - instruction_name#_#suffix#_#vlmul.MX#"_MASK")) - result_reg_class:$merge, - vop_reg_class:$rs1, - xop_kind:$rs2, - (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>; + !if(isSEWAware, + (!cast(instruction_name#_#suffix#_#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK") + 
result_reg_class:$merge, + vop_reg_class:$rs1, + xop_kind:$rs2, + (mask_type V0), GPR:$vl, TAIL_AGNOSTIC), + (!cast(instruction_name#_#suffix#_#vlmul.MX#"_MASK") + result_reg_class:$merge, + vop_reg_class:$rs1, + xop_kind:$rs2, + (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC))>; multiclass VPatBinaryVL_VV_VX vtilist = AllIntegerVectors, @@ -870,14 +883,17 @@ (result_type result_reg_class:$merge), (mask_type V0), VLOpFrag)), - (!cast( - !if(isSEWAware, - instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", - instruction_name#"_"#vlmul.MX#"_MASK")) - result_reg_class:$merge, - vop_reg_class:$rs1, - scalar_reg_class:$rs2, - (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>; + !if(isSEWAware, + (!cast(instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK") + result_reg_class:$merge, + vop_reg_class:$rs1, + scalar_reg_class:$rs2, + (mask_type V0), GPR:$vl, TAIL_AGNOSTIC), + (!cast(instruction_name#"_"#vlmul.MX#"_MASK") + result_reg_class:$merge, + vop_reg_class:$rs1, + scalar_reg_class:$rs2, + (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC))>; class VPatBinaryVL_VF_RM( - !if(isSEWAware, - instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", - instruction_name#"_"#vlmul.MX#"_MASK")) - result_reg_class:$merge, - vop_reg_class:$rs1, - scalar_reg_class:$rs2, - (mask_type V0), - // Value to indicate no rounding mode change in - // RISCVInsertReadWriteCSR - FRM_DYN, - GPR:$vl, log2sew, TAIL_AGNOSTIC)>; + !if(isSEWAware, + (!cast(instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK") + result_reg_class:$merge, + vop_reg_class:$rs1, + scalar_reg_class:$rs2, + (mask_type V0), + // Value to indicate no rounding mode change in + // RISCVInsertReadWriteCSR + FRM_DYN, + GPR:$vl, TAIL_AGNOSTIC), + (!cast(instruction_name#"_"#vlmul.MX#"_MASK") + result_reg_class:$merge, + vop_reg_class:$rs1, + scalar_reg_class:$rs2, + (mask_type V0), + // Value to indicate no rounding mode change in + // RISCVInsertReadWriteCSR + FRM_DYN, + GPR:$vl, log2sew, TAIL_AGNOSTIC))>; multiclass VPatBinaryFPVL_VV_VF { @@ -950,13 +973,15 @@ (fvti.Vector fvti.RegClass:$merge), (fvti.Mask V0), VLOpFrag)), - (!cast( - !if(isSEWAware, - instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK", - instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")) - fvti.RegClass:$merge, - fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, - (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>; + !if(isSEWAware, + (!cast(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK") + fvti.RegClass:$merge, + fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, + (fvti.Mask V0), GPR:$vl, TAIL_AGNOSTIC), + (!cast(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK") + fvti.RegClass:$merge, + fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, + (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC))>; } } @@ -969,17 +994,23 @@ (fvti.Vector fvti.RegClass:$merge), (fvti.Mask V0), VLOpFrag)), - (!cast( - !if(isSEWAware, - instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK", - instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")) - fvti.RegClass:$merge, - fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, - (fvti.Mask V0), - // Value to indicate no rounding mode change in - // RISCVInsertReadWriteCSR - FRM_DYN, - GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>; + !if(isSEWAware, + (!cast(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK") + fvti.RegClass:$merge, + fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, + (fvti.Mask V0), + // 
Value to indicate no rounding mode change in + // RISCVInsertReadWriteCSR + FRM_DYN, + GPR:$vl, TAIL_AGNOSTIC), + (!cast(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK") + fvti.RegClass:$merge, + fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, + (fvti.Mask V0), + // Value to indicate no rounding mode change in + // RISCVInsertReadWriteCSR + FRM_DYN, + GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC))>; } } @@ -1388,7 +1419,7 @@ (vti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), (vti_m1.Vector VR:$rs2), - GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; + GPR:$vl, (XLenVT timm:$policy))>; def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), VR:$rs2, @@ -1398,7 +1429,7 @@ (vti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), (vti_m1.Vector VR:$rs2), - (vti.Mask V0), GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; + (vti.Mask V0), GPR:$vl, (XLenVT timm:$policy))>; } } } @@ -1418,7 +1449,7 @@ // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, - GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; + GPR:$vl, (XLenVT timm:$policy))>; def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), VR:$rs2, @@ -1432,7 +1463,7 @@ // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, - GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; + GPR:$vl, (XLenVT timm:$policy))>; } } } @@ -1491,7 +1522,7 @@ (XLenVT timm:$policy))), (!cast(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW) (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), - (wti_m1.Vector VR:$rs2), GPR:$vl, vti.Log2SEW, + (wti_m1.Vector VR:$rs2), GPR:$vl, (XLenVT timm:$policy))>; def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge), (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))), @@ -1499,7 +1530,7 @@ (XLenVT timm:$policy))), (!cast(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), - (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW, + (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, (XLenVT timm:$policy))>; } } @@ -1522,7 +1553,7 @@ // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, - GPR:$vl, vti.Log2SEW, + GPR:$vl, (XLenVT timm:$policy))>; def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge), (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))), @@ -1534,7 +1565,7 @@ // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, - GPR:$vl, vti.Log2SEW, + GPR:$vl, (XLenVT timm:$policy))>; } } @@ -1553,7 +1584,7 @@ (XLenVT timm:$policy))), (!cast(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW) (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), - (wti_m1.Vector VR:$rs2), GPR:$vl, vti.Log2SEW, + (wti_m1.Vector VR:$rs2), GPR:$vl, (XLenVT timm:$policy))>; def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge), (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)), @@ -1561,7 +1592,7 @@ (XLenVT timm:$policy))), (!cast(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), - (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW, + (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, (XLenVT timm:$policy))>; } } @@ -1584,7 +1615,7 @@ // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, - GPR:$vl, vti.Log2SEW, + GPR:$vl, (XLenVT timm:$policy))>; def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge), (wti.Vector (extop (vti.Vector 
vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)), @@ -1596,7 +1627,7 @@ // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, - GPR:$vl, vti.Log2SEW, + GPR:$vl, (XLenVT timm:$policy))>; } } @@ -2410,7 +2441,7 @@ // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, - GPR:$vl, vti.Log2SEW, TA_MA)>; + GPR:$vl, TA_MA)>; // 13.12. Vector Floating-Point Sign-Injection Instructions def : Pat<(riscv_fabs_vl (vti.Vector vti.RegClass:$rs), (vti.Mask V0), @@ -2785,7 +2816,7 @@ VLOpFrag)), (!cast("PseudoVRGATHER_VV_"# vti.LMul.MX#"_E"# vti.SEW#"_MASK") vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1, - (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; + (vti.Mask V0), GPR:$vl, TAIL_AGNOSTIC)>; def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1, vti.RegClass:$merge, (vti.Mask V0), @@ -2820,7 +2851,7 @@ VLOpFrag)), (!cast(inst#"_MASK") vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1, - (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; + (vti.Mask V0), GPR:$vl, TAIL_AGNOSTIC)>; } } @@ -2855,7 +2886,7 @@ VLOpFrag)), (!cast("PseudoVRGATHER_VV_"# vti.LMul.MX#"_E"# vti.SEW#"_MASK") vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1, - (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; + (vti.Mask V0), GPR:$vl, TAIL_AGNOSTIC)>; def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1, vti.RegClass:$merge, (vti.Mask V0), @@ -2891,7 +2922,7 @@ VLOpFrag)), (!cast(inst#"_MASK") vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1, - (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; + (vti.Mask V0), GPR:$vl, TAIL_AGNOSTIC)>; } } diff --git a/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir b/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir --- a/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir +++ b/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir @@ -41,7 +41,7 @@ ; CHECK-NEXT: $x12 = frame-setup SLLI killed $x12, 1 ; CHECK-NEXT: $x2 = frame-setup SUB $x2, killed $x12 ; CHECK-NEXT: dead $x0 = PseudoVSETVLI killed renamable $x11, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: renamable $v8 = PseudoVLE64_V_M1 undef renamable $v8, killed renamable $x10, $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8) + ; CHECK-NEXT: renamable $v8 = PseudoVLE64_V_M1 undef renamable $v8, killed renamable $x10, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8) ; CHECK-NEXT: $x10 = PseudoReadVLENB ; CHECK-NEXT: $x10 = SLLI killed $x10, 1 ; CHECK-NEXT: $x10 = SUB $x8, killed $x10 @@ -58,7 +58,7 @@ %1:gprnox0 = COPY $x11 %0:gpr = COPY $x10 %pt:vr = IMPLICIT_DEF - %2:vr = PseudoVLE64_V_M1 %pt, %0, %1, 6, 0 :: (load unknown-size from %ir.pa, align 8) + %2:vr = PseudoVLE64_V_M1 %pt, %0, %1, 0 :: (load unknown-size from %ir.pa, align 8) %3:gpr = ADDI %stack.2, 0 VS1R_V killed %2:vr, %3:gpr PseudoRET diff --git a/llvm/test/CodeGen/RISCV/rvv/copyprop.mir b/llvm/test/CodeGen/RISCV/rvv/copyprop.mir --- a/llvm/test/CodeGen/RISCV/rvv/copyprop.mir +++ b/llvm/test/CodeGen/RISCV/rvv/copyprop.mir @@ -69,7 +69,7 @@ bb.4.entry: %33:vr = PHI %31, %bb.2, %25, %bb.3 - PseudoVSE64_V_M1 killed %33, %2, 1, 6 /* e64 */ + PseudoVSE64_V_M1 killed %33, %2, 1 PseudoRET ... 
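For illustration only (not part of the diff): the MIR test updates above drop the SEW immediate from pseudos such as PseudoVLE64_V_M1 and PseudoVSE64_V_M1, yet RISCVInsertVSETVLI still tracks them, because earlier in the patch it switched from RISCVII::hasSEWOp() to RISCVII::hasSEW() (explicit operand or implicit TSFlags field). A minimal sketch of that predicate and the mask-op SEW fallback, with simplified stand-in names rather than the real pass data structures:

// Standalone sketch, assuming the simplified PseudoInfo stand-in below.
#include <cstdio>

struct PseudoInfo {
  bool HasSEWOp;      // SEW still carried as an explicit operand
  bool HasImplictSEW; // SEW folded into TSFlags (new in this patch)
  unsigned Log2SEW;   // from the operand, or 3 + VSEW when implicit
};

// Mirrors the hasSEWOp -> hasSEW switch in RISCVInsertVSETVLI.cpp.
bool needsVSETVLITracking(const PseudoInfo &P) {
  return P.HasSEWOp || P.HasImplictSEW;
}

// A Log2SEW of 0 is an operation on mask registers only; use SEW = 8.
unsigned effectiveSEW(const PseudoInfo &P) {
  return P.Log2SEW ? 1u << P.Log2SEW : 8;
}

int main() {
  PseudoInfo VLE64 = {/*HasSEWOp=*/false, /*HasImplictSEW=*/true, /*Log2SEW=*/6};
  PseudoInfo VLM = {/*HasSEWOp=*/true, /*HasImplictSEW=*/false, /*Log2SEW=*/0};
  std::printf("VLE64: tracked=%d SEW=%u\n", needsVSETVLITracking(VLE64),
              effectiveSEW(VLE64)); // tracked=1 SEW=64
  std::printf("VLM:   tracked=%d SEW=%u\n", needsVSETVLITracking(VLM),
              effectiveSEW(VLM));   // tracked=1 SEW=8 (mask op)
  return 0;
}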
diff --git a/llvm/test/CodeGen/RISCV/rvv/debug-info-rvv-dbg-value.mir b/llvm/test/CodeGen/RISCV/rvv/debug-info-rvv-dbg-value.mir --- a/llvm/test/CodeGen/RISCV/rvv/debug-info-rvv-dbg-value.mir +++ b/llvm/test/CodeGen/RISCV/rvv/debug-info-rvv-dbg-value.mir @@ -128,9 +128,9 @@ SD killed renamable $x13, %stack.1, 0, debug-location !8 DBG_VALUE %stack.1, $noreg, !11, !DIExpression(DW_OP_deref), debug-location !8 - PseudoVSE32_V_M1 killed renamable $v8, %stack.2, 8, 5, debug-location !DILocation(line: 5, column: 1, scope: !5) + PseudoVSE32_V_M1 killed renamable $v8, %stack.2, 8, debug-location !DILocation(line: 5, column: 1, scope: !5) DBG_VALUE %stack.2, $noreg, !12, !DIExpression(DW_OP_deref), debug-location !DILocation(line: 5, column: 1, scope: !5) - PseudoVSE32_V_M1 killed renamable $v9, %stack.3, 8, 5, debug-location !DILocation(line: 6, column: 1, scope: !5) + PseudoVSE32_V_M1 killed renamable $v9, %stack.3, 8, debug-location !DILocation(line: 6, column: 1, scope: !5) DBG_VALUE %stack.3, $noreg, !13, !DIExpression(DW_OP_deref), debug-location !DILocation(line: 6, column: 1, scope: !5) PseudoVSM_V_B64 killed renamable $v0, %stack.4, 8, 0, debug-location !DILocation(line: 2, column: 1, scope: !5) diff --git a/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll b/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll --- a/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll +++ b/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll @@ -17,7 +17,7 @@ ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI %stack.0.a, 0 - ; CHECK-NEXT: PseudoVSE64_V_M1 [[COPY]], killed [[ADDI]], 1, 6 /* e64 */ + ; CHECK-NEXT: PseudoVSE64_V_M1 [[COPY]], killed [[ADDI]], 1 :: (store unknown-size into %ir.b, align 8) ; CHECK-NEXT: [[LD:%[0-9]+]]:gpr = LD %stack.0.a, 0 :: (dereferenceable load (s64) from %ir.a) ; CHECK-NEXT: $x10 = COPY [[LD]] ; CHECK-NEXT: PseudoRET implicit $x10 diff --git a/llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll b/llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll --- a/llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll +++ b/llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll @@ -12,7 +12,7 @@ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK $noreg, [[COPY2]], $v0, [[COPY]], 6 /* e64 */, 1 /* ta, mu */ :: (load unknown-size from %ir.ptr, align 64) + ; CHECK-NEXT: [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK $noreg, [[COPY2]], $v0, [[COPY]], 1 /* ta, mu */ :: (load unknown-size from %ir.ptr, align 64) ; CHECK-NEXT: $v8m8 = COPY [[PseudoVLE64_V_M8_MASK]] ; CHECK-NEXT: PseudoRET implicit $v8m8 %load = call @llvm.vp.load.nxv8i64.p0(* %ptr, %m, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/reg-coalescing.mir b/llvm/test/CodeGen/RISCV/rvv/reg-coalescing.mir --- a/llvm/test/CodeGen/RISCV/rvv/reg-coalescing.mir +++ b/llvm/test/CodeGen/RISCV/rvv/reg-coalescing.mir @@ -11,20 +11,20 @@ ; CHECK: liveins: $x10 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: %pt:vrm2 = IMPLICIT_DEF - ; CHECK-NEXT: undef %1.sub_vrm2_0:vrn2m2 = PseudoVLE32_V_M2 %pt, $x10, 1, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: undef %1.sub_vrm2_0:vrn2m2 = PseudoVLE32_V_M2 %pt, $x10, 1, 0 /* tu, mu */ ; CHECK-NEXT: %pt2:vrm2 = IMPLICIT_DEF - ; CHECK-NEXT: %1.sub_vrm2_1:vrn2m2 = PseudoVLE32_V_M2 %pt2, $x10, 1, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: %1.sub_vrm2_1:vrn2m2 = PseudoVLE32_V_M2 %pt2, $x10, 1, 0 /* tu, mu */ ; 
CHECK-NEXT: %pt3:vrm2 = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt3, $x10, 1, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt3, $x10, 1, 0 /* tu, mu */ ; CHECK-NEXT: undef early-clobber %5.sub_vrm2_0:vrn2m2 = PseudoVRGATHER_VI_M2 undef %5.sub_vrm2_0, %1.sub_vrm2_0, 0, 1, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: %5.sub_vrm2_1:vrn2m2 = COPY %1.sub_vrm2_1 ; CHECK-NEXT: PseudoVSUXSEG2EI32_V_M2_M2 %5, $x10, [[PseudoVLE32_V_M2_]], 1, 5 /* e32 */, implicit $vl, implicit $vtype %pt:vrm2 = IMPLICIT_DEF - undef %0.sub_vrm2_0:vrn2m2 = PseudoVLE32_V_M2 %pt, $x10, 1, 5, 0 + undef %0.sub_vrm2_0:vrn2m2 = PseudoVLE32_V_M2 %pt, $x10, 1, 0 %pt2:vrm2 = IMPLICIT_DEF - %0.sub_vrm2_1:vrn2m2 = PseudoVLE32_V_M2 %pt2, $x10, 1, 5, 0 + %0.sub_vrm2_1:vrn2m2 = PseudoVLE32_V_M2 %pt2, $x10, 1, 0 %pt3:vrm2 = IMPLICIT_DEF - %1:vrm2 = PseudoVLE32_V_M2 %pt3, $x10, 1, 5, 0 + %1:vrm2 = PseudoVLE32_V_M2 %pt3, $x10, 1, 0 undef early-clobber %2.sub_vrm2_0:vrn2m2 = PseudoVRGATHER_VI_M2 undef %2.sub_vrm2_0, %0.sub_vrm2_0:vrn2m2, 0, 1, 5, 0, implicit $vl, implicit $vtype %2.sub_vrm2_1:vrn2m2 = COPY %0.sub_vrm2_1:vrn2m2 PseudoVSUXSEG2EI32_V_M2_M2 %2:vrn2m2, $x10, %1:vrm2, 1, 5, implicit $vl, implicit $vtype diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll --- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll @@ -16,7 +16,7 @@ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8 ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 0 /* tu, mu */ :: (load unknown-size from %ir.p, align 8) ; CHECK-NEXT: VS1R_V killed [[PseudoVLE32_V_M1_MASK]], [[COPY2]] :: (store unknown-size into %ir.p, align 8) ; CHECK-NEXT: PseudoRET %splat = insertelement poison, i1 -1, i32 0 @@ -37,7 +37,7 @@ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8 ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 1 /* ta, mu */ + ; CHECK-NEXT: [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 1 /* ta, mu */ :: (load unknown-size from %ir.p, align 8) ; CHECK-NEXT: VS1R_V killed [[PseudoVLE32_V_M1_MASK]], [[COPY2]] :: (store unknown-size into %ir.p, align 8) ; CHECK-NEXT: PseudoRET %splat = insertelement poison, i1 -1, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/subregister-undef-early-clobber.mir b/llvm/test/CodeGen/RISCV/rvv/subregister-undef-early-clobber.mir --- a/llvm/test/CodeGen/RISCV/rvv/subregister-undef-early-clobber.mir +++ b/llvm/test/CodeGen/RISCV/rvv/subregister-undef-early-clobber.mir @@ -10,7 +10,7 @@ ; CHECK: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8 ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */ ; CHECK-NEXT: 
[[INSERT_SUBREG:%[0-9]+]]:vrm4 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_0 ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: %pt2:vrm4 = IMPLICIT_DEF @@ -20,20 +20,20 @@ ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm4 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_1 ; CHECK-NEXT: early-clobber %6:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed [[INSERT_SUBREG2]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0 - ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: $x10 = COPY [[COPY]] ; CHECK-NEXT: PseudoRET implicit $x10 %1:vrm4 = IMPLICIT_DEF %7:gpr = ADDI $x0, 8 %pt:vr = IMPLICIT_DEF - %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0 + %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0 %6:vrm4 = INSERT_SUBREG %1:vrm4, %5, %subreg.sub_vrm1_0 dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype %pt2:vrm4 = IMPLICIT_DEF early-clobber %0:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed %6, 0, 0, 5/* e32 */, 0, implicit $vl, implicit $vtype %2:gpr = ADDI $x0, 0 - PseudoVSE32_V_M4 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype + PseudoVSE32_V_M4 killed %0, killed %2, 0, implicit $vl, implicit $vtype %3:gpr = COPY $x0 $x10 = COPY %3 PseudoRET implicit $x10 @@ -48,7 +48,7 @@ ; CHECK: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8 ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm4 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_1 ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: %pt2:vrm4 = IMPLICIT_DEF @@ -58,20 +58,20 @@ ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm4 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_0 ; CHECK-NEXT: early-clobber %6:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed [[INSERT_SUBREG2]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0 - ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: $x10 = COPY [[COPY]] ; CHECK-NEXT: PseudoRET implicit $x10 %1:vrm4 = IMPLICIT_DEF %7:gpr = ADDI $x0, 8 %pt:vr = IMPLICIT_DEF - %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0 + %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0 %6:vrm4 = INSERT_SUBREG %1:vrm4, %5, %subreg.sub_vrm1_1 dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype %pt2:vrm4 = IMPLICIT_DEF early-clobber %0:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype %2:gpr = ADDI $x0, 0 - PseudoVSE32_V_M4 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype + PseudoVSE32_V_M4 killed %0, killed %2, 0, 
implicit $vl, implicit $vtype %3:gpr = COPY $x0 $x10 = COPY %3 PseudoRET implicit $x10 @@ -86,7 +86,7 @@ ; CHECK: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8 ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm4 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_2 ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: %pt2:vrm4 = IMPLICIT_DEF @@ -96,20 +96,20 @@ ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm4 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_3 ; CHECK-NEXT: early-clobber %6:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed [[INSERT_SUBREG2]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0 - ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: $x10 = COPY [[COPY]] ; CHECK-NEXT: PseudoRET implicit $x10 %1:vrm4 = IMPLICIT_DEF %7:gpr = ADDI $x0, 8 %pt:vr = IMPLICIT_DEF - %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0 + %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0 %6:vrm4 = INSERT_SUBREG %1:vrm4, %5, %subreg.sub_vrm1_2 dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype %pt2:vrm4 = IMPLICIT_DEF early-clobber %0:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype %2:gpr = ADDI $x0, 0 - PseudoVSE32_V_M4 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype + PseudoVSE32_V_M4 killed %0, killed %2, 0, implicit $vl, implicit $vtype %3:gpr = COPY $x0 $x10 = COPY %3 PseudoRET implicit $x10 @@ -124,7 +124,7 @@ ; CHECK: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8 ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm4 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_3 ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: %pt2:vrm4 = IMPLICIT_DEF @@ -134,20 +134,20 @@ ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm4 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_2 ; CHECK-NEXT: early-clobber %6:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed [[INSERT_SUBREG2]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0 - ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: $x10 = COPY [[COPY]] ; CHECK-NEXT: PseudoRET implicit $x10 %1:vrm4 = IMPLICIT_DEF %7:gpr = ADDI $x0, 8 %pt:vr = IMPLICIT_DEF - %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0 + %5:vr = 
PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0 %6:vrm4 = INSERT_SUBREG %1:vrm4, %5, %subreg.sub_vrm1_3 dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype %pt2:vrm4 = IMPLICIT_DEF early-clobber %0:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype %2:gpr = ADDI $x0, 0 - PseudoVSE32_V_M4 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype + PseudoVSE32_V_M4 killed %0, killed %2, 0, implicit $vl, implicit $vtype %3:gpr = COPY $x0 $x10 = COPY %3 PseudoRET implicit $x10 @@ -162,7 +162,7 @@ ; CHECK: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8 ; CHECK-NEXT: %pt:vrm2 = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 0 /* tu, mu */ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm4 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M2_]], %subreg.sub_vrm2_0 ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: %pt2:vrm4 = IMPLICIT_DEF @@ -170,20 +170,20 @@ ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vrm4 = INSERT_SUBREG [[INSERT_SUBREG]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_1 ; CHECK-NEXT: early-clobber %6:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed [[INSERT_SUBREG1]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0 - ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: $x10 = COPY [[COPY]] ; CHECK-NEXT: PseudoRET implicit $x10 %1:vrm4 = IMPLICIT_DEF %7:gpr = ADDI $x0, 8 %pt:vrm2 = IMPLICIT_DEF - %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 5, 0 + %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 0 %6:vrm4 = INSERT_SUBREG %1:vrm4, %5, %subreg.sub_vrm2_0 dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype %pt2:vrm4 = IMPLICIT_DEF early-clobber %0:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype %2:gpr = ADDI $x0, 0 - PseudoVSE32_V_M4 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype + PseudoVSE32_V_M4 killed %0, killed %2, 0, implicit $vl, implicit $vtype %3:gpr = COPY $x0 $x10 = COPY %3 PseudoRET implicit $x10 @@ -198,7 +198,7 @@ ; CHECK: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8 ; CHECK-NEXT: %pt:vrm2 = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 0 /* tu, mu */ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm4 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M2_]], %subreg.sub_vrm2_1 ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: %pt2:vrm4 = IMPLICIT_DEF @@ -206,20 +206,20 @@ ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vrm4 = INSERT_SUBREG [[INSERT_SUBREG]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_0 ; CHECK-NEXT: early-clobber %6:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed [[INSERT_SUBREG1]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; 
CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0 - ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: $x10 = COPY [[COPY]] ; CHECK-NEXT: PseudoRET implicit $x10 %1:vrm4 = IMPLICIT_DEF %7:gpr = ADDI $x0, 8 %pt:vrm2 = IMPLICIT_DEF - %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 5, 0 + %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 0 %6:vrm4 = INSERT_SUBREG %1:vrm4, %5, %subreg.sub_vrm2_1 dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype %pt2:vrm4 = IMPLICIT_DEF early-clobber %0:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype %2:gpr = ADDI $x0, 0 - PseudoVSE32_V_M4 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype + PseudoVSE32_V_M4 killed %0, killed %2, 0, implicit $vl, implicit $vtype %3:gpr = COPY $x0 $x10 = COPY %3 PseudoRET implicit $x10 @@ -235,7 +235,7 @@ ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8 ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_0 ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF @@ -247,20 +247,20 @@ ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG2]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_1 ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG3]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0 - ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: $x10 = COPY [[COPY]] ; CHECK-NEXT: PseudoRET implicit $x10 %1:vrm8 = IMPLICIT_DEF %7:gpr = ADDI $x0, 8 %pt:vr = IMPLICIT_DEF - %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0 + %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0 %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm1_0 dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype %pt2:vrm8 = IMPLICIT_DEF early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype %2:gpr = ADDI $x0, 0 - PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype + PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype %3:gpr = COPY $x0 $x10 = COPY %3 PseudoRET implicit $x10 @@ -275,7 +275,7 @@ ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8 ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], 
[[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_1 ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF @@ -287,20 +287,20 @@ ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG2]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_0 ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG3]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0 - ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: $x10 = COPY [[COPY]] ; CHECK-NEXT: PseudoRET implicit $x10 %1:vrm8 = IMPLICIT_DEF %7:gpr = ADDI $x0, 8 %pt:vr = IMPLICIT_DEF - %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0 + %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0 %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm1_1 dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype %pt2:vrm8 = IMPLICIT_DEF early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype %2:gpr = ADDI $x0, 0 - PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype + PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype %3:gpr = COPY $x0 $x10 = COPY %3 PseudoRET implicit $x10 @@ -315,7 +315,7 @@ ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8 ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_2 ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF @@ -327,20 +327,20 @@ ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG2]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_3 ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG3]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0 - ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: $x10 = COPY [[COPY]] ; CHECK-NEXT: PseudoRET implicit $x10 %1:vrm8 = IMPLICIT_DEF %7:gpr = ADDI $x0, 8 %pt:vr = IMPLICIT_DEF - %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0 + %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0 %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm1_2 dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype %pt2:vrm8 = IMPLICIT_DEF early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype %2:gpr = ADDI $x0, 0 - PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype + PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype %3:gpr = COPY $x0 $x10 = 
COPY %3 PseudoRET implicit $x10 @@ -355,7 +355,7 @@ ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8 ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_3 ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF @@ -367,20 +367,20 @@ ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG2]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_2 ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG3]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0 - ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: $x10 = COPY [[COPY]] ; CHECK-NEXT: PseudoRET implicit $x10 %1:vrm8 = IMPLICIT_DEF %7:gpr = ADDI $x0, 8 %pt:vr = IMPLICIT_DEF - %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0 + %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0 %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm1_3 dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype %pt2:vrm8 = IMPLICIT_DEF early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype %2:gpr = ADDI $x0, 0 - PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype + PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype %3:gpr = COPY $x0 $x10 = COPY %3 PseudoRET implicit $x10 @@ -395,7 +395,7 @@ ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8 ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_4 ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF @@ -407,20 +407,20 @@ ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG2]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_5 ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG3]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0 - ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: $x10 = COPY [[COPY]] ; CHECK-NEXT: PseudoRET implicit $x10 %1:vrm8 = IMPLICIT_DEF %7:gpr = ADDI $x0, 8 %pt:vr = IMPLICIT_DEF - %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0 + %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0 %6:vrm8 = 
INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm1_4 dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype %pt2:vrm8 = IMPLICIT_DEF early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype %2:gpr = ADDI $x0, 0 - PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype + PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype %3:gpr = COPY $x0 $x10 = COPY %3 PseudoRET implicit $x10 @@ -435,7 +435,7 @@ ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8 ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_5 ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF @@ -447,20 +447,20 @@ ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG2]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_4 ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG3]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0 - ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: $x10 = COPY [[COPY]] ; CHECK-NEXT: PseudoRET implicit $x10 %1:vrm8 = IMPLICIT_DEF %7:gpr = ADDI $x0, 8 %pt:vr = IMPLICIT_DEF - %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0 + %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0 %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm1_5 dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype %pt2:vrm8 = IMPLICIT_DEF early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype %2:gpr = ADDI $x0, 0 - PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype + PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype %3:gpr = COPY $x0 $x10 = COPY %3 PseudoRET implicit $x10 @@ -475,7 +475,7 @@ ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8 ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_6 ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF @@ -487,20 +487,20 @@ ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG2]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_7 ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG3]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0 - ; CHECK-NEXT: 
PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: $x10 = COPY [[COPY]] ; CHECK-NEXT: PseudoRET implicit $x10 %1:vrm8 = IMPLICIT_DEF %7:gpr = ADDI $x0, 8 %pt:vr = IMPLICIT_DEF - %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0 + %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0 %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm1_6 dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype %pt2:vrm8 = IMPLICIT_DEF early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype %2:gpr = ADDI $x0, 0 - PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype + PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype %3:gpr = COPY $x0 $x10 = COPY %3 PseudoRET implicit $x10 @@ -515,7 +515,7 @@ ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8 ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_7 ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF @@ -527,20 +527,20 @@ ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG2]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_6 ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG3]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0 - ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: $x10 = COPY [[COPY]] ; CHECK-NEXT: PseudoRET implicit $x10 %1:vrm8 = IMPLICIT_DEF %7:gpr = ADDI $x0, 8 %pt:vr = IMPLICIT_DEF - %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0 + %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0 %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm1_7 dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype %pt2:vrm8 = IMPLICIT_DEF early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype %2:gpr = ADDI $x0, 0 - PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype + PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype %3:gpr = COPY $x0 $x10 = COPY %3 PseudoRET implicit $x10 @@ -555,7 +555,7 @@ ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8 ; CHECK-NEXT: %pt:vrm2 = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 0 /* tu, mu */ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M2_]], %subreg.sub_vrm2_0 ; CHECK-NEXT: dead $x0 = 
PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF @@ -565,20 +565,20 @@ ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_1 ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG2]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0 - ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: $x10 = COPY [[COPY]] ; CHECK-NEXT: PseudoRET implicit $x10 %1:vrm8 = IMPLICIT_DEF %7:gpr = ADDI $x0, 8 %pt:vrm2 = IMPLICIT_DEF - %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 5, 0 + %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 0 %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm2_0 dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype %pt2:vrm8 = IMPLICIT_DEF early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype %2:gpr = ADDI $x0, 0 - PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype + PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype %3:gpr = COPY $x0 $x10 = COPY %3 PseudoRET implicit $x10 @@ -593,7 +593,7 @@ ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8 ; CHECK-NEXT: %pt:vrm2 = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 0 /* tu, mu */ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M2_]], %subreg.sub_vrm2_1 ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF @@ -603,20 +603,20 @@ ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_0 ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG2]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0 - ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: $x10 = COPY [[COPY]] ; CHECK-NEXT: PseudoRET implicit $x10 %1:vrm8 = IMPLICIT_DEF %7:gpr = ADDI $x0, 8 %pt:vrm2 = IMPLICIT_DEF - %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 5, 0 + %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 0 %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm2_1 dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype %pt2:vrm8 = IMPLICIT_DEF early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype %2:gpr = ADDI $x0, 0 - PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype + PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype %3:gpr = COPY $x0 $x10 = COPY %3 PseudoRET implicit $x10 @@ -631,7 +631,7 
@@ ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8 ; CHECK-NEXT: %pt:vrm2 = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 0 /* tu, mu */ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M2_]], %subreg.sub_vrm2_2 ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF @@ -641,20 +641,20 @@ ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_3 ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG2]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0 - ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: $x10 = COPY [[COPY]] ; CHECK-NEXT: PseudoRET implicit $x10 %1:vrm8 = IMPLICIT_DEF %7:gpr = ADDI $x0, 8 %pt:vrm2 = IMPLICIT_DEF - %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 5, 0 + %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 0 %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm2_2 dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype %pt2:vrm8 = IMPLICIT_DEF early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype %2:gpr = ADDI $x0, 0 - PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype + PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype %3:gpr = COPY $x0 $x10 = COPY %3 PseudoRET implicit $x10 @@ -669,7 +669,7 @@ ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8 ; CHECK-NEXT: %pt:vrm2 = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 0 /* tu, mu */ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M2_]], %subreg.sub_vrm2_3 ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF @@ -679,20 +679,20 @@ ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_2 ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG2]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0 - ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: $x10 = COPY [[COPY]] ; CHECK-NEXT: PseudoRET implicit $x10 %1:vrm8 = IMPLICIT_DEF %7:gpr = ADDI $x0, 8 %pt:vrm2 = IMPLICIT_DEF - %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 5, 0 + %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 0 %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, 
%subreg.sub_vrm2_3 dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype %pt2:vrm8 = IMPLICIT_DEF early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype %2:gpr = ADDI $x0, 0 - PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype + PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype %3:gpr = COPY $x0 $x10 = COPY %3 PseudoRET implicit $x10 @@ -707,7 +707,7 @@ ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8 ; CHECK-NEXT: %pt:vrm4 = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVLE32_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE32_V_M4 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: [[PseudoVLE32_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE32_V_M4 %pt, killed [[ADDI]], 0, 0 /* tu, mu */ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M4_]], %subreg.sub_vrm4_0 ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF @@ -715,20 +715,20 @@ ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG]], [[PseudoRVVInitUndefM4_]], %subreg.sub_vrm4_1 ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG1]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0 - ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: $x10 = COPY [[COPY]] ; CHECK-NEXT: PseudoRET implicit $x10 %1:vrm8 = IMPLICIT_DEF %7:gpr = ADDI $x0, 8 %pt:vrm4 = IMPLICIT_DEF - %5:vrm4 = PseudoVLE32_V_M4 %pt, killed %7:gpr, 0, 5, 0 + %5:vrm4 = PseudoVLE32_V_M4 %pt, killed %7:gpr, 0, 0 %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm4_0 dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype %pt2:vrm8 = IMPLICIT_DEF early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype %2:gpr = ADDI $x0, 0 - PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype + PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype %3:gpr = COPY $x0 $x10 = COPY %3 PseudoRET implicit $x10 @@ -743,7 +743,7 @@ ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8 ; CHECK-NEXT: %pt:vrm4 = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVLE32_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE32_V_M4 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: [[PseudoVLE32_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE32_V_M4 %pt, killed [[ADDI]], 0, 0 /* tu, mu */ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M4_]], %subreg.sub_vrm4_1 ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF @@ -751,20 +751,20 @@ ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG]], [[PseudoRVVInitUndefM4_]], %subreg.sub_vrm4_0 ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG1]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0 - ; CHECK-NEXT: PseudoVSE32_V_M8 
killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: $x10 = COPY [[COPY]] ; CHECK-NEXT: PseudoRET implicit $x10 %1:vrm8 = IMPLICIT_DEF %7:gpr = ADDI $x0, 8 %pt:vrm4 = IMPLICIT_DEF - %5:vrm4 = PseudoVLE32_V_M4 %pt, killed %7:gpr, 0, 5, 0 + %5:vrm4 = PseudoVLE32_V_M4 %pt, killed %7:gpr, 0, 0 %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm4_1 dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype %pt2:vrm8 = IMPLICIT_DEF early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype %2:gpr = ADDI $x0, 0 - PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype + PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype %3:gpr = COPY $x0 $x10 = COPY %3 PseudoRET implicit $x10 diff --git a/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir b/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir --- a/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir +++ b/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir @@ -7,18 +7,15 @@ # set. --- | - ; ModuleID = 'test.ll' source_filename = "test.ll" target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128" target triple = "riscv64" - ; Function Attrs: nounwind define @masked_load_nxv8i64(* %a, %mask) #0 { %load = call @llvm.masked.load.nxv8i64.p0nxv8i64(* %a, i32 8, %mask, undef) ret %load } - ; Function Attrs: argmemonly nofree nosync nounwind readonly willreturn declare @llvm.masked.load.nxv8i64.p0nxv8i64(*, i32 immarg, , ) #1 attributes #0 = { nounwind "target-features"="+v" } @@ -53,7 +50,7 @@ ; CHECK-NEXT: $v0 = COPY [[COPY]] ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8nov0 = COPY [[DEF]] - ; CHECK-NEXT: [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK [[COPY2]], [[COPY1]], $v0, -1, 6 /* e64 */, 1 /* ta, mu */ :: (load (s512) from %ir.a, align 8) + ; CHECK-NEXT: [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK [[COPY2]], [[COPY1]], $v0, -1, 1 /* ta, mu */ :: (load (s512) from %ir.a, align 8) ; CHECK-NEXT: $v8m8 = COPY [[PseudoVLE64_V_M8_MASK]] ; CHECK-NEXT: PseudoRET implicit $v8m8 %1:vr = COPY $v0 @@ -61,7 +58,7 @@ $v0 = COPY %1 %3:vrm8 = IMPLICIT_DEF %4:vrm8nov0 = COPY %3 - %2:vrm8nov0 = PseudoVLE64_V_M8_MASK %4, %0, $v0, -1, 6, 1 :: (load (s512) from %ir.a, align 8) + %2:vrm8nov0 = PseudoVLE64_V_M8_MASK %4, %0, $v0, -1, 1 :: (load (s512) from %ir.a, align 8) $v8m8 = COPY %2 PseudoRET implicit $v8m8 diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll --- a/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll @@ -14,7 +14,7 @@ ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr, [[PseudoVLE8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1 $noreg, [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.p, align 1) + ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr, [[PseudoVLE8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1 $noreg, [[COPY1]], [[COPY]], 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.p, align 1) ; CHECK-NEXT: $x10 = COPY 
[[PseudoVLE8FF_V_M1_1]] ; CHECK-NEXT: PseudoRET implicit $x10 entry: @@ -31,7 +31,7 @@ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8 - ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr, [[PseudoVLE8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1 [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.p, align 1) + ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr, [[PseudoVLE8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1 [[COPY2]], [[COPY1]], [[COPY]], 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.p, align 1) ; CHECK-NEXT: $x10 = COPY [[PseudoVLE8FF_V_M1_1]] ; CHECK-NEXT: PseudoRET implicit $x10 entry: @@ -50,7 +50,7 @@ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8 ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0, [[PseudoVLE8FF_V_M1_MASK1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 3 /* e8 */, 0 /* tu, mu */, implicit-def dead $vl :: (load unknown-size from %ir.p, align 1) + ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0, [[PseudoVLE8FF_V_M1_MASK1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 0 /* tu, mu */, implicit-def dead $vl :: (load unknown-size from %ir.p, align 1) ; CHECK-NEXT: $x10 = COPY [[PseudoVLE8FF_V_M1_MASK1]] ; CHECK-NEXT: PseudoRET implicit $x10 entry: @@ -71,7 +71,7 @@ ; CHECK-NEXT: [[DEF2:%[0-9]+]]:vr = IMPLICIT_DEF ; CHECK-NEXT: [[DEF3:%[0-9]+]]:vr = IMPLICIT_DEF ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m1 = REG_SEQUENCE [[DEF]], %subreg.sub_vrm1_0, [[DEF2]], %subreg.sub_vrm1_1 - ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1 [[REG_SEQUENCE]], [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 1) + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1 [[REG_SEQUENCE]], [[COPY1]], [[COPY]], 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 1) ; CHECK-NEXT: $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_1]] ; CHECK-NEXT: PseudoRET implicit $x10 entry: @@ -89,7 +89,7 @@ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8 ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m1 = REG_SEQUENCE [[COPY2]], %subreg.sub_vrm1_0, [[COPY2]], %subreg.sub_vrm1_1 - ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1 [[REG_SEQUENCE]], [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 1) + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1 [[REG_SEQUENCE]], [[COPY1]], [[COPY]], 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 1) ; CHECK-NEXT: $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_1]] ; CHECK-NEXT: PseudoRET implicit $x10 entry: @@ -109,7 +109,7 @@ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v8 ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY3]], %subreg.sub_vrm1_0, [[COPY3]], %subreg.sub_vrm1_1 ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0, [[PseudoVLSEG2E8FF_V_M1_MASK1:%[0-9]+]]:gpr = 
PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY2]], $v0, [[COPY]], 3 /* e8 */, 0 /* tu, mu */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 1) + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0, [[PseudoVLSEG2E8FF_V_M1_MASK1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY2]], $v0, [[COPY]], 0 /* tu, mu */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 1) ; CHECK-NEXT: $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_MASK1]] ; CHECK-NEXT: PseudoRET implicit $x10 entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir --- a/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir +++ b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir @@ -8,15 +8,14 @@ body: | bb.0: liveins: $x14, $x16 - ; 82 = e32,m4 ; CHECK-LABEL: name: copy_different_lmul ; CHECK: liveins: $x14, $x16 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v12m2 = VMV2R_V $v28m2 $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype - $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 5, 0, implicit $vl, implicit $vtype + $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 0, implicit $vl, implicit $vtype $v12m2 = COPY $v28m2 ... --- @@ -25,15 +24,14 @@ body: | bb.0: liveins: $x14, $x16 - ; 82 = e32,m4 ; CHECK-LABEL: name: copy_convert_to_vmv_v_v ; CHECK: liveins: $x14, $x16 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v12m4 = PseudoVMV_V_V_M4 undef $v12m4, $v28m4, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype - $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 5, 0, implicit $vl, implicit $vtype + $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 0, implicit $vl, implicit $vtype $v12m4 = COPY $v28m4 ... 
--- @@ -42,7 +40,6 @@ body: | bb.0: liveins: $x14 - ; 82 = e32,m4 ; CHECK-LABEL: name: copy_convert_to_vmv_v_i ; CHECK: liveins: $x14 ; CHECK-NEXT: {{ $}} @@ -59,7 +56,6 @@ body: | bb.0: liveins: $x14, $x16 - ; 82 = e32,m4 ; CHECK-LABEL: name: copy_from_whole_load_store ; CHECK: liveins: $x14, $x16 ; CHECK-NEXT: {{ $}} @@ -76,17 +72,16 @@ body: | bb.0: liveins: $x14, $x16 - ; 82 = e32,m4 ; CHECK-LABEL: name: copy_with_vleff ; CHECK: liveins: $x14, $x16 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: $v28m4 = PseudoVMV_V_I_M4 undef $v28m4, 0, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype - ; CHECK-NEXT: $v4m4, $x0 = PseudoVLE32FF_V_M4 undef $v4m4, $x16, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit-def $vl + ; CHECK-NEXT: $v4m4, $x0 = PseudoVLE32FF_V_M4 undef $v4m4, $x16, $noreg, 0 /* tu, mu */, implicit-def $vl ; CHECK-NEXT: $v12m4 = VMV4R_V $v28m4 $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype $v28m4 = PseudoVMV_V_I_M4 undef $v28m4, 0, $noreg, 5, 0, implicit $vl, implicit $vtype - $v4m4,$x0 = PseudoVLE32FF_V_M4 undef $v4m4, $x16, $noreg, 5, 0, implicit-def $vl + $v4m4,$x0 = PseudoVLE32FF_V_M4 undef $v4m4, $x16, $noreg, 0, implicit-def $vl $v12m4 = COPY $v28m4 ... --- @@ -95,24 +90,22 @@ body: | bb.0: liveins: $x14, $x16, $x17, $x18 - ; 82 = e32,m4 - ; 73 = e16,m2 ; CHECK-LABEL: name: copy_with_vsetvl_x0_x0_1 ; CHECK: liveins: $x14, $x16, $x17, $x18 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: $x15 = PseudoVSETVLI $x17, 73 /* e16, m2, ta, mu */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v0m2 = PseudoVLE32_V_M2 undef $v0m2, $x18, $noreg, 4 /* e16 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v0m2 = PseudoVLE32_V_M2 undef $v0m2, $x18, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: $x0 = PseudoVSETVLIX0 $x0, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v4m4 = PseudoVLE32_V_M4 undef $v4m4, killed $x18, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v4m4 = PseudoVLE32_V_M4 undef $v4m4, killed $x18, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v12m4 = VMV4R_V $v28m4 $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype - $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 5, 0, implicit $vl, implicit $vtype + $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 0, implicit $vl, implicit $vtype $x15 = PseudoVSETVLI $x17, 73, implicit-def $vl, implicit-def $vtype - $v0m2 = PseudoVLE32_V_M2 undef $v0m2, $x18, $noreg, 4, 0, implicit $vl, implicit $vtype + $v0m2 = PseudoVLE32_V_M2 undef $v0m2, $x18, $noreg, 0, implicit $vl, implicit $vtype $x0 = PseudoVSETVLIX0 $x0, 82, implicit-def $vl, implicit-def $vtype - $v4m4 = PseudoVLE32_V_M4 undef $v4m4, killed $x18, $noreg, 5, 0, implicit $vl, implicit $vtype + $v4m4 = PseudoVLE32_V_M4 undef $v4m4, killed $x18, $noreg, 0, implicit $vl, implicit $vtype $v12m4 = COPY $v28m4 ... 
--- @@ -121,24 +114,22 @@ body: | bb.0: liveins: $x14, $x16, $x17, $x18 - ; 82 = e32,m4 - ; 73 = e16,m2 ; CHECK-LABEL: name: copy_with_vsetvl_x0_x0_2 ; CHECK: liveins: $x14, $x16, $x17, $x18 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: $x0 = PseudoVSETVLIX0 $x0, 73 /* e16, m2, ta, mu */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v0m2 = PseudoVLE32_V_M2 undef $v0m2, $x18, $noreg, 4 /* e16 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v0m2 = PseudoVLE32_V_M2 undef $v0m2, $x18, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: $x0 = PseudoVSETVLIX0 $x0, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v4m4 = PseudoVLE32_V_M4 undef $v4m4, killed $x18, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v4m4 = PseudoVLE32_V_M4 undef $v4m4, killed $x18, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v12m4 = PseudoVMV_V_V_M4 undef $v12m4, $v28m4, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype - $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 5, 0, implicit $vl, implicit $vtype + $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 0, implicit $vl, implicit $vtype $x0 = PseudoVSETVLIX0 $x0, 73, implicit-def $vl, implicit-def $vtype - $v0m2 = PseudoVLE32_V_M2 undef $v0m2, $x18, $noreg, 4, 0, implicit $vl, implicit $vtype + $v0m2 = PseudoVLE32_V_M2 undef $v0m2, $x18, $noreg, 0, implicit $vl, implicit $vtype $x0 = PseudoVSETVLIX0 $x0, 82, implicit-def $vl, implicit-def $vtype - $v4m4 = PseudoVLE32_V_M4 undef $v4m4, killed $x18, $noreg, 5, 0, implicit $vl, implicit $vtype + $v4m4 = PseudoVLE32_V_M4 undef $v4m4, killed $x18, $noreg, 0, implicit $vl, implicit $vtype $v12m4 = COPY $v28m4 ... 
--- @@ -147,20 +138,18 @@ body: | bb.0: liveins: $x14, $x16, $x17, $x18 - ; 82 = e32,m4 - ; 73 = e16,m2 ; CHECK-LABEL: name: copy_with_vsetvl_x0_x0_3 ; CHECK: liveins: $x14, $x16, $x17, $x18 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: $x0 = PseudoVSETVLIX0 $x0, 73 /* e16, m2, ta, mu */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v0m2 = PseudoVLE32_V_M2 undef $v0m2, $x18, $noreg, 4 /* e16 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v0m2 = PseudoVLE32_V_M2 undef $v0m2, $x18, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v12m4 = VMV4R_V $v28m4 $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype - $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 5, 0, implicit $vl, implicit $vtype + $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 0, implicit $vl, implicit $vtype $x0 = PseudoVSETVLIX0 $x0, 73, implicit-def $vl, implicit-def $vtype - $v0m2 = PseudoVLE32_V_M2 undef $v0m2, $x18, $noreg, 4, 0, implicit $vl, implicit $vtype + $v0m2 = PseudoVLE32_V_M2 undef $v0m2, $x18, $noreg, 0, implicit $vl, implicit $vtype $v12m4 = COPY $v28m4 ... --- @@ -169,18 +158,17 @@ body: | bb.0: liveins: $x16, $x17 - ; 73 = e16,m2 ; CHECK-LABEL: name: copy_subregister ; CHECK: liveins: $x16, $x17 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: $x15 = PseudoVSETIVLI 4, 73 /* e16, m2, ta, mu */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v26m2 = PseudoVLE16_V_M2 undef $v26m2, killed $x16, $noreg, 4 /* e16 */, 0 /* tu, mu */, implicit $vl, implicit $vtype - ; CHECK-NEXT: $v8m2 = PseudoVLE16_V_M2 undef $v8m2, killed $x17, $noreg, 4 /* e16 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v26m2 = PseudoVLE16_V_M2 undef $v26m2, killed $x16, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v8m2 = PseudoVLE16_V_M2 undef $v8m2, killed $x17, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: early-clobber $v28m4 = PseudoVWADD_VV_M2 undef $v28m4, $v26m2, $v8m2, $noreg, 4 /* e16 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v12m2 = VMV2R_V $v28m2 $x15 = PseudoVSETIVLI 4, 73, implicit-def $vl, implicit-def $vtype - $v26m2 = PseudoVLE16_V_M2 undef $v26m2, killed $x16, $noreg, 4, 0, implicit $vl, implicit $vtype - $v8m2 = PseudoVLE16_V_M2 undef $v8m2, killed $x17, $noreg, 4, 0, implicit $vl, implicit $vtype + $v26m2 = PseudoVLE16_V_M2 undef $v26m2, killed $x16, $noreg, 0, implicit $vl, implicit $vtype + $v8m2 = PseudoVLE16_V_M2 undef $v8m2, killed $x17, $noreg, 0, implicit $vl, implicit $vtype $v28m4 = PseudoVWADD_VV_M2 undef $v28m4, $v26m2, $v8m2, $noreg, 4, 0, implicit $vl, implicit $vtype $v12m2 = COPY $v28m2 @@ -191,17 +179,15 @@ body: | bb.0: liveins: $x14, $x16 - ; 82 = e32,m4 - ; 74 = e16,m4 ; CHECK-LABEL: name: copy_with_different_vlmax ; CHECK: liveins: $x14, $x16 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 0 /* tu, mu 
*/, implicit $vl, implicit $vtype ; CHECK-NEXT: $x0 = PseudoVSETVLIX0 $x0, 74 /* e16, m4, ta, mu */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: $v12m4 = VMV4R_V $v28m4 $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype - $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 5, 0, implicit $vl, implicit $vtype + $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 0, implicit $vl, implicit $vtype $x0 = PseudoVSETVLIX0 $x0, 74, implicit-def $vl, implicit-def $vtype $v12m4 = COPY $v28m4 ... @@ -215,12 +201,12 @@ ; CHECK: liveins: $x10, $v8, $v26, $v27 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: $x11 = PseudoVSETIVLI 1, 64 /* e8, m1, ta, mu */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v8 = PseudoVWREDSUM_VS_M1_E8 killed renamable $v8, killed renamable $v26, killed renamable $v27, 1, 3 /* e8 */, 1 /* ta, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v8 = PseudoVWREDSUM_VS_M1_E8 killed renamable $v8, killed renamable $v26, killed renamable $v27, 1, 1 /* ta, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v26 = VMV1R_V killed $v8 ; CHECK-NEXT: $x10 = PseudoVSETVLI killed renamable $x10, 75 /* e16, m8, ta, mu */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: $v8m8 = VL8RE8_V killed $x10 $x11 = PseudoVSETIVLI 1, 64, implicit-def $vl, implicit-def $vtype - $v8 = PseudoVWREDSUM_VS_M1_E8 killed renamable $v8, killed renamable $v26, killed renamable $v27, 1, 3, 1, implicit $vl, implicit $vtype + $v8 = PseudoVWREDSUM_VS_M1_E8 killed renamable $v8, killed renamable $v26, killed renamable $v27, 1, 1, implicit $vl, implicit $vtype $v26 = COPY killed renamable $v8 $x10 = PseudoVSETVLI killed renamable $x10, 75, implicit-def $vl, implicit-def $vtype $v8m8 = VL8RE8_V killed $x10 @@ -231,15 +217,14 @@ body: | bb.0: liveins: $x14, $x16 - ; 80 = e32,m1 ; CHECK-LABEL: name: copy_zvlsseg_reg ; CHECK: liveins: $x14, $x16 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 80 /* e32, m1, ta, mu */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v8_v9 = PseudoVLSEG2E32_V_M1 undef $v8_v9, killed $x16, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v8_v9 = PseudoVLSEG2E32_V_M1 undef $v8_v9, killed $x16, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v10 = VMV1R_V $v8 $x15 = PseudoVSETVLI $x14, 80, implicit-def $vl, implicit-def $vtype - $v8_v9 = PseudoVLSEG2E32_V_M1 undef $v8_v9, killed $x16, $noreg, 5, 0, implicit $vl, implicit $vtype + $v8_v9 = PseudoVLSEG2E32_V_M1 undef $v8_v9, killed $x16, $noreg, 0, implicit $vl, implicit $vtype $v10 = COPY $v8 ... 
--- @@ -248,16 +233,15 @@ body: | bb.0: liveins: $x14, $x16 - ; 80 = e32,m1 ; CHECK-LABEL: name: copy_zvlsseg_reg_2 ; CHECK: liveins: $x14, $x16 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 80 /* e32, m1, ta, mu */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v8_v9 = PseudoVLSEG2E32_V_M1 undef $v8_v9, killed $x16, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v8_v9 = PseudoVLSEG2E32_V_M1 undef $v8_v9, killed $x16, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v10 = PseudoVMV_V_V_M1 undef $v10, $v8, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v11 = PseudoVMV_V_V_M1 undef $v11, $v9, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype $x15 = PseudoVSETVLI $x14, 80, implicit-def $vl, implicit-def $vtype - $v8_v9 = PseudoVLSEG2E32_V_M1 undef $v8_v9, killed $x16, $noreg, 5, 0, implicit $vl, implicit $vtype + $v8_v9 = PseudoVLSEG2E32_V_M1 undef $v8_v9, killed $x16, $noreg, 0, implicit $vl, implicit $vtype $v10_v11 = COPY $v8_v9 ... --- @@ -266,15 +250,14 @@ body: | bb.0: liveins: $x14, $x16 - ; 87 = e32,mf2 ; CHECK-LABEL: name: copy_fractional_lmul ; CHECK: liveins: $x14, $x16 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 87 /* e32, mf2, ta, mu */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v28 = PseudoVLE32_V_MF2 undef $v28, killed $x16, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v28 = PseudoVLE32_V_MF2 undef $v28, killed $x16, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v12 = VMV1R_V $v28 $x15 = PseudoVSETVLI $x14, 87, implicit-def $vl, implicit-def $vtype - $v28 = PseudoVLE32_V_MF2 undef $v28, killed $x16, $noreg, 5, 0, implicit $vl, implicit $vtype + $v28 = PseudoVLE32_V_MF2 undef $v28, killed $x16, $noreg, 0, implicit $vl, implicit $vtype $v12 = COPY $v28 ... 
--- @@ -283,14 +266,13 @@ body: | bb.0: liveins: $x12, $x14, $x16 - ; 80 = e32,m1 ; CHECK-LABEL: name: copy_implicit_def ; CHECK: liveins: $x12, $x14, $x16 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: $x0 = PseudoVSETVLI $x14, 80 /* e32, m1, ta, mu */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v8_v9_v10_v11_v12_v13_v14_v15 = PseudoVLSEG8E32_V_M1 undef $v8_v9_v10_v11_v12_v13_v14_v15, killed $x12, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v8_v9_v10_v11_v12_v13_v14_v15 = PseudoVLSEG8E32_V_M1 undef $v8_v9_v10_v11_v12_v13_v14_v15, killed $x12, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: $x0 = PseudoVSETIVLI 10, 80 /* e32, m1, ta, mu */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v15 = PseudoVLE32_V_M1 undef $v15, killed $x16, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype, implicit killed $v8_v9_v10_v11_v12_v13_v14_v15, implicit-def $v8_v9_v10_v11_v12_v13_v14_v15 + ; CHECK-NEXT: $v15 = PseudoVLE32_V_M1 undef $v15, killed $x16, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype, implicit killed $v8_v9_v10_v11_v12_v13_v14_v15, implicit-def $v8_v9_v10_v11_v12_v13_v14_v15 ; CHECK-NEXT: $v24 = VMV1R_V killed $v8 ; CHECK-NEXT: $v25 = VMV1R_V killed $v9 ; CHECK-NEXT: $v26 = VMV1R_V killed $v10 @@ -300,9 +282,9 @@ ; CHECK-NEXT: $v30 = VMV1R_V killed $v14 ; CHECK-NEXT: $v31 = VMV1R_V killed $v15 $x0 = PseudoVSETVLI $x14, 80, implicit-def $vl, implicit-def $vtype - $v8_v9_v10_v11_v12_v13_v14_v15 = PseudoVLSEG8E32_V_M1 undef $v8_v9_v10_v11_v12_v13_v14_v15, killed $x12, $noreg, 5, 0, implicit $vl, implicit $vtype + $v8_v9_v10_v11_v12_v13_v14_v15 = PseudoVLSEG8E32_V_M1 undef $v8_v9_v10_v11_v12_v13_v14_v15, killed $x12, $noreg, 0, implicit $vl, implicit $vtype $x0 = PseudoVSETIVLI 10, 80, implicit-def $vl, implicit-def $vtype - $v15 = PseudoVLE32_V_M1 undef $v15, killed $x16, $noreg, 5, 0, implicit $vl, implicit $vtype, implicit killed $v8_v9_v10_v11_v12_v13_v14_v15, implicit-def $v8_v9_v10_v11_v12_v13_v14_v15 + $v15 = PseudoVLE32_V_M1 undef $v15, killed $x16, $noreg, 0, implicit $vl, implicit $vtype, implicit killed $v8_v9_v10_v11_v12_v13_v14_v15, implicit-def $v8_v9_v10_v11_v12_v13_v14_v15 $v24_v25_v26_v27_v28_v29_v30_v31 = COPY killed $v8_v9_v10_v11_v12_v13_v14_v15 ... 
--- @@ -315,12 +297,12 @@ ; CHECK: liveins: $x10, $x11, $v8, $v9 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: $x0 = PseudoVSETVLI $x10, 201 /* e16, m2, ta, ma */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v10m2 = PseudoVLE16_V_M2 undef $v10m2, killed $x11, $noreg, 4 /* e16 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v10m2 = PseudoVLE16_V_M2 undef $v10m2, killed $x11, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v10 = VMV1R_V $v8 ; CHECK-NEXT: $v11 = VMV1R_V $v9 ; CHECK-NEXT: $v12m2 = VMV2R_V $v10m2 $x0 = PseudoVSETVLI $x10, 201, implicit-def $vl, implicit-def $vtype - $v10m2 = PseudoVLE16_V_M2 undef $v10m2, killed $x11, $noreg, 4, 0, implicit $vl, implicit $vtype + $v10m2 = PseudoVLE16_V_M2 undef $v10m2, killed $x11, $noreg, 0, implicit $vl, implicit $vtype $v10 = COPY $v8 $v11 = COPY $v9 $v12m2 = COPY $v10m2 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir @@ -3,7 +3,6 @@ # RUN: -run-pass=riscv-insert-vsetvli | FileCheck %s --- | - ; ModuleID = 'vsetvli-insert.ll' source_filename = "vsetvli-insert.ll" target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128" target triple = "riscv64" @@ -47,7 +46,6 @@ ret void } - ; Function Attrs: nounwind readnone declare i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64>) #1 define i64 @vmv_x_s(i8 zeroext %cond, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) #0 { @@ -69,7 +67,6 @@ ret i64 %d } - ; Function Attrs: nounwind declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg) #2 define <vscale x 1 x i64> @vsetvli_add_or_sub(i8 zeroext %cond, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %avl) #0 { @@ -133,28 +130,20 @@ ret void } - ; Function Attrs: nofree nosync nounwind readnone willreturn declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>) - ; Function Attrs: nounwind readnone declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64) #1 - ; Function Attrs: nounwind readnone declare <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64) #1 - ; Function Attrs: nounwind readonly declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>* nocapture, i64) #3 - ; Function Attrs: nounwind readonly declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32>, <vscale x 1 x i32>* nocapture, i64) #3 - ; Function Attrs: nounwind writeonly declare void @llvm.riscv.vse.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>* nocapture, i64) #4 - ; Function Attrs: nounwind readnone declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>, <vscale x 1 x i32>, i64) #1 - ; Function Attrs: nounwind readnone declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>, <vscale x 1 x i32>, i64) #1 attributes #0 = { "target-features"="+v" } @@ -198,7 +187,7 @@ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt, [[COPY2]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt, [[COPY2]], $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: BEQ [[COPY3]], [[COPY4]], %bb.2 ; CHECK-NEXT: PseudoBR %bb.1 @@ -229,7 +218,7 @@ %5:gpr = COPY $x11 %4:gpr = COPY $x10 %pt:vr = IMPLICIT_DEF - %0:vr = PseudoVLE64_V_M1 %pt, %5, %7, 6, 0 + %0:vr = PseudoVLE64_V_M1 %pt, %5, %7, 0 %8:gpr = COPY $x0 BEQ %4, %8, %bb.2 PseudoBR %bb.1 @@ -283,7 +272,7 @@ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF ;
CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 215 /* e32, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: [[PseudoVLE32_V_MF2_:%[0-9]+]]:vr = PseudoVLE32_V_MF2 %pt, [[COPY2]], $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: [[PseudoVLE32_V_MF2_:%[0-9]+]]:vr = PseudoVLE32_V_MF2 %pt, [[COPY2]], $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: BEQ [[COPY3]], [[COPY4]], %bb.2 ; CHECK-NEXT: PseudoBR %bb.1 @@ -305,7 +294,7 @@ ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.3.if.end: ; CHECK-NEXT: [[PHI:%[0-9]+]]:vr = PHI %1, %bb.1, %2, %bb.2 - ; CHECK-NEXT: PseudoVSE64_V_M1 [[PHI]], [[COPY1]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE64_V_M1 [[PHI]], [[COPY1]], $noreg, implicit $vl, implicit $vtype ; CHECK-NEXT: PseudoRET bb.0.entry: successors: %bb.2(0x30000000), %bb.1(0x50000000) @@ -316,7 +305,7 @@ %5:gpr = COPY $x11 %4:gpr = COPY $x10 %pt:vr = IMPLICIT_DEF - %0:vr = PseudoVLE32_V_MF2 %pt, %5, %7, 5, 0 + %0:vr = PseudoVLE32_V_MF2 %pt, %5, %7, 0 %8:gpr = COPY $x0 BEQ %4, %8, %bb.2 PseudoBR %bb.1 @@ -332,7 +321,7 @@ bb.3.if.end: %3:vr = PHI %1, %bb.1, %2, %bb.2 - PseudoVSE64_V_M1 %3, %6, %7, 6 + PseudoVSE64_V_M1 %3, %6, %7 PseudoRET ... @@ -540,7 +529,7 @@ ; CHECK-NEXT: [[PseudoVMSEQ_VI_MF2_:%[0-9]+]]:vmv0 = PseudoVMSEQ_VI_MF2 killed [[PseudoVID_V_MF2_]], 0, -1, 5 /* e32 */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v0 = COPY [[PseudoVMSEQ_VI_MF2_]] ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 23 /* e32, mf2, tu, mu */, implicit-def $vl, implicit-def $vtype, implicit $vl - ; CHECK-NEXT: [[PseudoVLE32_V_MF2_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_MF2_MASK [[PseudoVMV_V_I_MF2_]], killed [[COPY]], $v0, -1, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: [[PseudoVLE32_V_MF2_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_MF2_MASK [[PseudoVMV_V_I_MF2_]], killed [[COPY]], $v0, -1, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 197 /* e8, mf8, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl ; CHECK-NEXT: [[PseudoVCPOP_M_B1_:%[0-9]+]]:gpr = PseudoVCPOP_M_B1 [[PseudoVMSEQ_VI_MF2_]], -1, 0 /* e8 */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x0 @@ -576,7 +565,7 @@ %5:vmv0 = PseudoVMSEQ_VI_MF2 killed %3, 0, -1, 5 $v0 = COPY %5 - %6:vrnov0 = PseudoVLE32_V_MF2_MASK %4, killed %0, $v0, -1, 5, 0 + %6:vrnov0 = PseudoVLE32_V_MF2_MASK %4, killed %0, $v0, -1, 0 %7:gpr = PseudoVCPOP_M_B1 %5, -1, 0 %8:gpr = COPY $x0 BEQ killed %7, %8, %bb.3 @@ -632,7 +621,7 @@ ; CHECK-NEXT: [[PseudoVADD_VX_M1_:%[0-9]+]]:vr = PseudoVADD_VX_M1 %pt2, [[PseudoVID_V_M1_]], [[PHI]], -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[PHI]], [[SRLI]] ; CHECK-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], [[MUL]] - ; CHECK-NEXT: PseudoVSE32_V_MF2 killed [[PseudoVADD_VX_M1_]], killed [[ADD]], -1, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE32_V_MF2 killed [[PseudoVADD_VX_M1_]], killed [[ADD]], -1, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI [[PHI]], 1 ; CHECK-NEXT: BLTU [[ADDI]], [[COPY1]], %bb.1 ; CHECK-NEXT: PseudoBR %bb.2 @@ -657,7 +646,7 @@ %7:vr = PseudoVADD_VX_M1 %pt2, %4:vr, %6:gpr, -1, 6, 0 %8:gpr = MUL %6:gpr, %2:gpr %9:gpr = ADD %0:gpr, %8:gpr - PseudoVSE32_V_MF2 killed %7:vr, killed %9:gpr, -1, 5 + PseudoVSE32_V_MF2 killed %7:vr, killed %9:gpr, -1 %10:gpr = ADDI 
%6:gpr, 1 BLTU %10:gpr, %3:gpr, %bb.1 PseudoBR %bb.2 @@ -704,7 +693,7 @@ ; CHECK-NEXT: [[PseudoVADD_VX_M1_:%[0-9]+]]:vr = PseudoVADD_VX_M1 %pt2, [[PseudoVID_V_M1_]], [[PHI]], -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[PHI]], [[SRLI]] ; CHECK-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], [[MUL]] - ; CHECK-NEXT: PseudoVSE32_V_MF2 killed [[PseudoVADD_VX_M1_]], killed [[ADD]], -1, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE32_V_MF2 killed [[PseudoVADD_VX_M1_]], killed [[ADD]], -1, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI [[PHI]], 1 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.2: @@ -733,7 +722,7 @@ %7:vr = PseudoVADD_VX_M1 %pt2, %4:vr, %6:gpr, -1, 6, 0 %8:gpr = MUL %6:gpr, %2:gpr %9:gpr = ADD %0:gpr, %8:gpr - PseudoVSE32_V_MF2 killed %7:vr, killed %9:gpr, -1, 5 + PseudoVSE32_V_MF2 killed %7:vr, killed %9:gpr, -1 %10:gpr = ADDI %6:gpr, 1 bb.3: @@ -804,7 +793,7 @@ ; CHECK-NEXT: [[PHI1:%[0-9]+]]:gpr = PHI [[ADDIW]], %bb.0, %4, %bb.1 ; CHECK-NEXT: [[PHI2:%[0-9]+]]:vr = PHI [[COPY3]], %bb.0, %16, %bb.1 ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, [[PHI]], 4, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.lsr.iv12, align 4) + ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, [[PHI]], 4, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.lsr.iv12, align 4) ; CHECK-NEXT: %pt2:vr = IMPLICIT_DEF ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 %pt2, killed [[PseudoVLE32_V_M1_]], [[PHI2]], 4, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = nsw ADDI [[PHI1]], -4 @@ -818,9 +807,9 @@ ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF ; CHECK-NEXT: [[PseudoVMV_S_X_M1_:%[0-9]+]]:vr = PseudoVMV_S_X_M1 [[DEF]], [[COPY5]], 1, 5 /* e32 */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_E8_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1_E8 [[DEF1]], [[PseudoVADD_VV_M1_]], killed [[PseudoVMV_S_X_M1_]], 4, 5 /* e32 */, 1 /* ta, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_E32_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1_E32 [[DEF1]], [[PseudoVADD_VV_M1_]], killed [[PseudoVMV_S_X_M1_]], 4, 1 /* ta, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 1, 208 /* e32, m1, ta, ma */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: PseudoVSE32_V_M1 killed [[PseudoVREDSUM_VS_M1_E8_]], [[COPY]], 1, 5 /* e32 */, implicit $vl, implicit $vtype :: (store (s32) into %ir.res) + ; CHECK-NEXT: PseudoVSE32_V_M1 killed [[PseudoVREDSUM_VS_M1_E32_]], [[COPY]], 1, implicit $vl, implicit $vtype :: (store (s32) into %ir.res) ; CHECK-NEXT: PseudoRET bb.0.entry: liveins: $x10, $x12 @@ -841,7 +830,7 @@ %1:gpr = PHI %9, %bb.0, %4, %bb.1 %2:vr = PHI %10, %bb.0, %16, %bb.1 %pt:vr = IMPLICIT_DEF - %14:vr = PseudoVLE32_V_M1 %pt, %0, 4, 5, 0 :: (load (s128) from %ir.lsr.iv12, align 4) + %14:vr = PseudoVLE32_V_M1 %pt, %0, 4, 0 :: (load (s128) from %ir.lsr.iv12, align 4) %pt2:vr = IMPLICIT_DEF %16:vr = PseudoVADD_VV_M1 %pt2, killed %14, %2, 4, 5, 0 %4:gpr = nsw ADDI %1, -4 @@ -855,8 +844,8 @@ %21:vr = IMPLICIT_DEF %20:vr = PseudoVMV_S_X_M1 %21, %19, 1, 5 %24:vr = IMPLICIT_DEF - %23:vr = PseudoVREDSUM_VS_M1_E8 %24, %16, killed %20, 4, 5, 1 - PseudoVSE32_V_M1 killed %23, %8, 1, 5 :: (store (s32) into %ir.res) + %23:vr = 
PseudoVREDSUM_VS_M1_E32 %24, %16, killed %20, 4, 1 + PseudoVSE32_V_M1 killed %23, %8, 1 :: (store (s32) into %ir.res) PseudoRET ... @@ -984,12 +973,12 @@ ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[ADD1:%[0-9]+]]:gpr = ADD %src, [[PHI]] ; CHECK-NEXT: %pt2:vrnov0 = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVLE8_V_MF8_:%[0-9]+]]:vrnov0 = PseudoVLE8_V_MF8 %pt2, killed [[ADD1]], -1, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: [[PseudoVLE8_V_MF8_:%[0-9]+]]:vrnov0 = PseudoVLE8_V_MF8 %pt2, killed [[ADD1]], -1, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: %ptb:vr = IMPLICIT_DEF ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 197 /* e8, mf8, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl ; CHECK-NEXT: [[PseudoVADD_VI_MF8_:%[0-9]+]]:vrnov0 = PseudoVADD_VI_MF8 %ptb, [[PseudoVLE8_V_MF8_]], 4, -1, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADD2:%[0-9]+]]:gpr = ADD %dst, [[PHI]] - ; CHECK-NEXT: PseudoVSE8_V_MF8 killed [[PseudoVADD_VI_MF8_]], killed [[ADD2]], -1, 3 /* e8 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE8_V_MF8 killed [[PseudoVADD_VI_MF8_]], killed [[ADD2]], -1, implicit $vl, implicit $vtype ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.3: ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.4(0x04000000) @@ -1035,11 +1024,11 @@ %66:gpr = ADD %src, %26 %pt2:vrnov0 = IMPLICIT_DEF - %67:vrnov0 = PseudoVLE8_V_MF8 %pt2, killed %66, -1, 3, 0 + %67:vrnov0 = PseudoVLE8_V_MF8 %pt2, killed %66, -1, 0 %ptb:vr = IMPLICIT_DEF %76:vrnov0 = PseudoVADD_VI_MF8 %ptb, %67, 4, -1, 3, 0 %77:gpr = ADD %dst, %26 - PseudoVSE8_V_MF8 killed %76, killed %77, -1, 3 + PseudoVSE8_V_MF8 killed %76, killed %77, -1 bb.3: successors: %bb.1(0x7c000000), %bb.4(0x04000000) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir @@ -156,7 +156,7 @@ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt, [[COPY2]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt, [[COPY2]], $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: %pt2:vr = IMPLICIT_DEF ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 %pt2, killed [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]] @@ -165,7 +165,7 @@ %1:vr = COPY $v8 %0:gpr = COPY $x10 %pt:vr = IMPLICIT_DEF - %3:vr = PseudoVLE64_V_M1 %pt, %0, %2, 6, 0 + %3:vr = PseudoVLE64_V_M1 %pt, %0, %2, 0 %pt2:vr = IMPLICIT_DEF %4:vr = PseudoVADD_VV_M1 %pt2, killed %3, %1, %2, 6, 0 $v8 = COPY %4 @@ -198,7 +198,7 @@ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: [[PseudoVLE32_V_MF2_:%[0-9]+]]:vr = PseudoVLE32_V_MF2 %pt, [[COPY1]], $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: [[PseudoVLE32_V_MF2_:%[0-9]+]]:vr = PseudoVLE32_V_MF2 %pt, [[COPY1]], $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: %dead:vr = IMPLICIT_DEF ; 
CHECK-NEXT: early-clobber %3:vr = PseudoVZEXT_VF2_M1 %dead, killed [[PseudoVLE32_V_MF2_]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v8 = COPY %3 @@ -206,7 +206,7 @@ %1:gprnox0 = COPY $x11 %0:gpr = COPY $x10 %pt:vr = IMPLICIT_DEF - %2:vr = PseudoVLE32_V_MF2 %pt, %0, %1, 5, 0 + %2:vr = PseudoVLE32_V_MF2 %pt, %0, %1, 0 %dead:vr = IMPLICIT_DEF early-clobber %3:vr = PseudoVZEXT_VF2_M1 %dead, killed %2, %1, 6, 0 $v8 = COPY %3 @@ -271,21 +271,21 @@ ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF ; CHECK-NEXT: %pt2:vr = IMPLICIT_DEF ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt, [[COPY1]], 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.x) - ; CHECK-NEXT: [[PseudoVLE64_V_M1_1:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt2, [[COPY]], 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.y) + ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt, [[COPY1]], 2, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.x) + ; CHECK-NEXT: [[PseudoVLE64_V_M1_1:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt2, [[COPY]], 2, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.y) ; CHECK-NEXT: %pt3:vr = IMPLICIT_DEF ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 %pt3, killed [[PseudoVLE64_V_M1_]], killed [[PseudoVLE64_V_M1_1]], 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype - ; CHECK-NEXT: PseudoVSE64_V_M1 killed [[PseudoVADD_VV_M1_]], [[COPY1]], 2, 6 /* e64 */, implicit $vl, implicit $vtype :: (store (s128) into %ir.x) + ; CHECK-NEXT: PseudoVSE64_V_M1 killed [[PseudoVADD_VV_M1_]], [[COPY1]], 2, implicit $vl, implicit $vtype :: (store (s128) into %ir.x) ; CHECK-NEXT: PseudoRET %1:gpr = COPY $x11 %0:gpr = COPY $x10 %pt:vr = IMPLICIT_DEF %pt2:vr = IMPLICIT_DEF - %2:vr = PseudoVLE64_V_M1 %pt, %0, 2, 6, 0 :: (load (s128) from %ir.x) - %3:vr = PseudoVLE64_V_M1 %pt2, %1, 2, 6, 0 :: (load (s128) from %ir.y) + %2:vr = PseudoVLE64_V_M1 %pt, %0, 2, 0 :: (load (s128) from %ir.x) + %3:vr = PseudoVLE64_V_M1 %pt2, %1, 2, 0 :: (load (s128) from %ir.y) %pt3:vr = IMPLICIT_DEF %4:vr = PseudoVADD_VV_M1 %pt3, killed %2, killed %3, 2, 6, 0 - PseudoVSE64_V_M1 killed %4, %0, 2, 6 :: (store (s128) into %ir.x) + PseudoVSE64_V_M1 killed %4, %0, 2 :: (store (s128) into %ir.x) PseudoRET ... 
@@ -315,21 +315,21 @@ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10 ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt, [[COPY]], 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.x) + ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt, [[COPY]], 2, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.x) ; CHECK-NEXT: dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 $x0, 152 /* e64, m1, tu, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 undef $v2, 0, -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_E8_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1_E8 [[DEF]], killed [[PseudoVLE64_V_M1_]], killed [[PseudoVMV_V_I_M1_]], 2, 6 /* e64 */, 1 /* ta, mu */, implicit $vl, implicit $vtype - ; CHECK-NEXT: [[PseudoVMV_X_S_M1_:%[0-9]+]]:gpr = PseudoVMV_X_S_M1 killed [[PseudoVREDSUM_VS_M1_E8_]], 6 /* e64 */, implicit $vtype + ; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_E64_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1_E64 [[DEF]], killed [[PseudoVLE64_V_M1_]], killed [[PseudoVMV_V_I_M1_]], 2, 1 /* ta, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: [[PseudoVMV_X_S_M1_:%[0-9]+]]:gpr = PseudoVMV_X_S_M1 killed [[PseudoVREDSUM_VS_M1_E64_]], 6 /* e64 */, implicit $vtype ; CHECK-NEXT: $x10 = COPY [[PseudoVMV_X_S_M1_]] ; CHECK-NEXT: PseudoRET implicit $x10 %0:gpr = COPY $x10 %pt:vr = IMPLICIT_DEF - %1:vr = PseudoVLE64_V_M1 %pt, %0, 2, 6, 0 :: (load (s128) from %ir.x) + %1:vr = PseudoVLE64_V_M1 %pt, %0, 2, 0 :: (load (s128) from %ir.x) %2:vr = PseudoVMV_V_I_M1 undef $v2, 0, -1, 6, 0 %4:vr = IMPLICIT_DEF - %3:vr = PseudoVREDSUM_VS_M1_E8 %4, killed %1, killed %2, 2, 6, 1 + %3:vr = PseudoVREDSUM_VS_M1_E64 %4, killed %1, killed %2, 2, 1 %5:gpr = PseudoVMV_X_S_M1 killed %3, 6 $x10 = COPY %5 PseudoRET implicit $x10 @@ -406,7 +406,7 @@ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt, [[COPY2]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt, [[COPY2]], $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */ ; CHECK-NEXT: %pt2:vr = IMPLICIT_DEF ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype @@ -417,7 +417,7 @@ %1:vr = COPY $v8 %0:gpr = COPY $x10 %pt:vr = IMPLICIT_DEF - %3:vr = PseudoVLE64_V_M1 %pt, %0, %2, 6, 0 + %3:vr = PseudoVLE64_V_M1 %pt, %0, %2, 0 INLINEASM &"", 1 /* sideeffect attdialect */ %pt2:vr = IMPLICIT_DEF %4:vr = PseudoVADD_VV_M1 %pt2, killed %3, %1, %2, 6, 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir b/llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir --- a/llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir +++ b/llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir @@ -187,7 +187,7 @@ ; CHECK-NEXT: dead $x0 = 
PseudoVSETIVLI 2, 69 /* e8, mf8, ta, mu */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: $x10 = ADDI $x2, 32 ; CHECK-NEXT: renamable $v8 = VL1RE8_V killed $x10 :: (load unknown-size from %stack.1, align 8) - ; CHECK-NEXT: PseudoVSE8_V_MF8 killed renamable $v8, renamable $x8, 2, 3 /* e8 */, implicit $vl, implicit $vtype :: (store (s16) into %ir.0, align 1) + ; CHECK-NEXT: PseudoVSE8_V_MF8 killed renamable $v8, renamable $x8, 2, implicit $vl, implicit $vtype :: (store (s16) into %ir.0, align 1) ; CHECK-NEXT: $x10 = COPY renamable $x9 ; CHECK-NEXT: PseudoCALL target-flags(riscv-plt) @fprintf, csr_ilp32d_lp64d, implicit-def dead $x1, implicit killed $x10, implicit-def $x2, implicit-def dead $x10 ; CHECK-NEXT: PseudoBR %bb.1 @@ -216,7 +216,7 @@ dead $x0 = PseudoVSETIVLI 2, 69, implicit-def $vl, implicit-def $vtype renamable $v8 = VL1RE8_V %stack.1 :: (load unknown-size from %stack.1, align 8) - PseudoVSE8_V_MF8 killed renamable $v8, renamable $x8, 2, 3, implicit $vl, implicit $vtype :: (store (s16) into %ir.0, align 1) + PseudoVSE8_V_MF8 killed renamable $v8, renamable $x8, 2, implicit $vl, implicit $vtype :: (store (s16) into %ir.0, align 1) ADJCALLSTACKDOWN 0, 0, implicit-def dead $x2, implicit $x2 $x10 = COPY renamable $x9 PseudoCALL target-flags(riscv-plt) @fprintf, csr_ilp32d_lp64d, implicit-def dead $x1, implicit killed $x10, implicit-def $x2, implicit-def dead $x10 diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir --- a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir +++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir @@ -28,7 +28,7 @@ ; CHECK-NEXT: $x2 = frame-setup SUB $x2, killed $x12 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 ; CHECK-NEXT: dead $x0 = PseudoVSETVLI killed renamable $x11, 152 /* e64, m1, tu, ma */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v0_v1_v2_v3_v4_v5_v6 = PseudoVLSEG7E64_V_M1 undef $v0_v1_v2_v3_v4_v5_v6, renamable $x10, $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v0_v1_v2_v3_v4_v5_v6 = PseudoVLSEG7E64_V_M1 undef $v0_v1_v2_v3_v4_v5_v6, renamable $x10, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: $x11 = ADDI $x2, 16 ; CHECK-NEXT: $x12 = PseudoReadVLENB ; CHECK-NEXT: VS1R_V $v0, $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store unknown-size into %stack.0, align 8) @@ -67,7 +67,7 @@ ; CHECK-NEXT: PseudoRET %0:gpr = COPY $x10 %1:gprnox0 = COPY $x11 - $v0_v1_v2_v3_v4_v5_v6 = PseudoVLSEG7E64_V_M1 undef $v0_v1_v2_v3_v4_v5_v6, %0, %1, 6, 0 + $v0_v1_v2_v3_v4_v5_v6 = PseudoVLSEG7E64_V_M1 undef $v0_v1_v2_v3_v4_v5_v6, %0, %1, 0 PseudoVSPILL7_M1 killed renamable $v0_v1_v2_v3_v4_v5_v6, %stack.0 :: (store unknown-size into %stack.0, align 8) renamable $v7_v8_v9_v10_v11_v12_v13 = PseudoVRELOAD7_M1 %stack.0 :: (load unknown-size from %stack.0, align 8) VS1R_V killed $v8, %0:gpr