diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
@@ -16,6 +16,7 @@
 #include "MCTargetDesc/RISCVMCTargetDesc.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/ADT/StringSwitch.h"
+#include "llvm/CodeGen/MachineInstr.h"
 #include "llvm/MC/MCInstrDesc.h"
 #include "llvm/MC/SubtargetFeature.h"
 #include "llvm/Support/RISCVISAInfo.h"
@@ -432,6 +433,7 @@

 } // namespace RISCVVType

+bool isFaultFirstLoad(const MachineInstr &MI);
 } // namespace llvm

 #endif
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp
@@ -182,4 +182,9 @@
     OS << ", mu";
 }

+bool isFaultFirstLoad(const MachineInstr &MI) {
+  return MI.getNumExplicitDefs() == 2 && MI.modifiesRegister(RISCV::VL) &&
+         !MI.isInlineAsm();
+}
+
 } // namespace llvm
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -402,8 +402,7 @@
   unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
   MVT VT = Node->getSimpleValueType(0);
   MVT XLenVT = Subtarget->getXLenVT();
-  unsigned SEW = VT.getScalarSizeInBits();
-  unsigned Log2SEW = Log2_32(SEW);
+  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

   unsigned CurOp = 2;
@@ -426,19 +425,7 @@
       RISCV::getVLSEGPseudo(NF, IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
                             Log2SEW, static_cast<unsigned>(LMUL));
   MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped,
-                                               MVT::Other, MVT::Glue, Operands);
-  bool TailAgnostic = true;
-  bool MaskAgnostic = false;
-  if (IsMasked) {
-    uint64_t Policy = Node->getConstantOperandVal(Node->getNumOperands() - 1);
-    TailAgnostic = Policy & RISCVII::TAIL_AGNOSTIC;
-    MaskAgnostic = Policy & RISCVII::MASK_AGNOSTIC;
-  }
-  unsigned VType =
-      RISCVVType::encodeVTYPE(LMUL, SEW, TailAgnostic, MaskAgnostic);
-  SDValue VTypeOp = CurDAG->getTargetConstant(VType, DL, XLenVT);
-  SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
-                                          VTypeOp, /*Glue*/ SDValue(Load, 2));
+                                               XLenVT, MVT::Other, Operands);

   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
     CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
@@ -450,8 +437,8 @@
         CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
   }

-  ReplaceUses(SDValue(Node, NF), SDValue(ReadVL, 0));   // VL
-  ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 1)); // Chain
+  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));     // VL
+  ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 2)); // Chain
   CurDAG->RemoveDeadNode(Node);
 }

@@ -1459,8 +1446,7 @@
       bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;

       MVT VT = Node->getSimpleValueType(0);
-      unsigned SEW = VT.getScalarSizeInBits();
-      unsigned Log2SEW = Log2_32(SEW);
+      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

       unsigned CurOp = 2;
       // Masked intrinsic only have TU version pseduo instructions.
@@ -1480,31 +1466,12 @@
       const RISCV::VLEPseudo *P =
           RISCV::getVLEPseudo(IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
                               Log2SEW, static_cast<unsigned>(LMUL));
-      MachineSDNode *Load =
-          CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0),
-                                 MVT::Other, MVT::Glue, Operands);
-      bool TailAgnostic = !IsTU;
-      bool MaskAgnostic = false;
-      if (IsMasked) {
-        uint64_t Policy =
-            Node->getConstantOperandVal(Node->getNumOperands() - 1);
-        TailAgnostic = Policy & RISCVII::TAIL_AGNOSTIC;
-        MaskAgnostic = Policy & RISCVII::MASK_AGNOSTIC;
-      }
-      unsigned VType =
-          RISCVVType::encodeVTYPE(LMUL, SEW, TailAgnostic, MaskAgnostic);
-      SDValue VTypeOp = CurDAG->getTargetConstant(VType, DL, XLenVT);
-      SDNode *ReadVL =
-          CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT, VTypeOp,
-                                 /*Glue*/ SDValue(Load, 2));
-
+      MachineSDNode *Load = CurDAG->getMachineNode(
+          P->Pseudo, DL, Node->getValueType(0), XLenVT, MVT::Other, Operands);
       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

-      ReplaceUses(SDValue(Node, 0), SDValue(Load, 0));
-      ReplaceUses(SDValue(Node, 1), SDValue(ReadVL, 0)); // VL
-      ReplaceUses(SDValue(Node, 2), SDValue(Load, 1));   // Chain
-      CurDAG->RemoveDeadNode(Node);
+      ReplaceNode(Node, Load);
       return;
     }
     }
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -7,7 +7,8 @@
 //===----------------------------------------------------------------------===//
 //
 // This file implements a function pass that inserts VSETVLI instructions where
-// needed.
+// needed and expands the vl outputs of VLEFF/VLSEGFF to PseudoReadVL
+// instructions.
 //
 // This pass consists of 3 phases:
 //
@@ -497,6 +498,7 @@
   void doLocalPrepass(MachineBasicBlock &MBB);
   void doLocalPostpass(MachineBasicBlock &MBB);
   void doPRE(MachineBasicBlock &MBB);
+  void insertReadVL(MachineBasicBlock &MBB);
 };

 } // end anonymous namespace
@@ -1409,6 +1411,20 @@
     MI->eraseFromParent();
 }

+void RISCVInsertVSETVLI::insertReadVL(MachineBasicBlock &MBB) {
+  for (auto I = MBB.begin(), E = MBB.end(); I != E;) {
+    MachineInstr &MI = *I++;
+    if (isFaultFirstLoad(MI)) {
+      Register VLOutput = MI.getOperand(1).getReg();
+      if (!MRI->use_nodbg_empty(VLOutput))
+        BuildMI(MBB, I, MI.getDebugLoc(), TII->get(RISCV::PseudoReadVL),
+                VLOutput);
+      // We don't use the vl output of the VLEFF/VLSEGFF anymore.
+      MI.getOperand(1).setReg(RISCV::X0);
+    }
+  }
+}
+
 bool RISCVInsertVSETVLI::runOnMachineFunction(MachineFunction &MF) {
   // Skip if the vector extension is not enabled.
   const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
@@ -1499,6 +1515,11 @@
     }
   }

+  // Insert PseudoReadVL after VLEFF/VLSEGFF and replace the vl output of the
+  // VLEFF/VLSEGFF with it.
+  for (MachineBasicBlock &MBB : MF)
+    insertReadVL(MBB);
+
   BlockInfo.clear();

   return HaveVectorOp;
 }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -1367,14 +1367,13 @@

   uint64_t TSFlags = MI.getDesc().TSFlags;

-  // Print the full VType operand of vsetvli/vsetivli and PseudoReadVL
-  // instructions, and the SEW operand of vector codegen pseudos.
-  if (((MI.getOpcode() == RISCV::VSETVLI || MI.getOpcode() == RISCV::VSETIVLI ||
-        MI.getOpcode() == RISCV::PseudoVSETVLI ||
-        MI.getOpcode() == RISCV::PseudoVSETIVLI ||
-        MI.getOpcode() == RISCV::PseudoVSETVLIX0) &&
-       OpIdx == 2) ||
-      (MI.getOpcode() == RISCV::PseudoReadVL && OpIdx == 1)) {
+  // Print the full VType operand of vsetvli/vsetivli instructions, and the SEW
+  // operand of vector codegen pseudos.
+  if ((MI.getOpcode() == RISCV::VSETVLI || MI.getOpcode() == RISCV::VSETIVLI ||
+       MI.getOpcode() == RISCV::PseudoVSETVLI ||
+       MI.getOpcode() == RISCV::PseudoVSETIVLI ||
+       MI.getOpcode() == RISCV::PseudoVSETVLIX0) &&
+      OpIdx == 2) {
     unsigned Imm = MI.getOperand(OpIdx).getImm();
     RISCVVType::printVType(Imm, OS);
   } else if (RISCVII::hasSEWOp(TSFlags)) {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -643,11 +643,11 @@
   let VLMul = m.value;
 }

-class VPseudoUSLoadNoMask<VReg RetClass, int EEW, bit isFF, bit DummyMask = 1> :
+class VPseudoUSLoadNoMask<VReg RetClass, int EEW, bit DummyMask = 1> :
       Pseudo<(outs RetClass:$rd),
              (ins GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
       RISCVVPseudo,
-      RISCVVLE</*Masked*/0, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
+      RISCVVLE</*Masked*/0, /*Strided*/0, /*FF*/0, log2<EEW>.val, VLMul> {
   let mayLoad = 1;
   let mayStore = 0;
   let hasSideEffects = 0;
@@ -657,11 +657,11 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }

-class VPseudoUSLoadNoMaskTU<VReg RetClass, int EEW, bit isFF> :
+class VPseudoUSLoadNoMaskTU<VReg RetClass, int EEW> :
       Pseudo<(outs RetClass:$rd),
              (ins RetClass:$dest, GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
       RISCVVPseudo,
-      RISCVVLE</*Masked*/0, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
+      RISCVVLE</*Masked*/0, /*Strided*/0, /*FF*/0, log2<EEW>.val, VLMul> {
   let mayLoad = 1;
   let mayStore = 0;
   let hasSideEffects = 0;
@@ -673,13 +673,62 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }

-class VPseudoUSLoadMask<VReg RetClass, int EEW, bit isFF> :
+class VPseudoUSLoadMask<VReg RetClass, int EEW> :
       Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
              (ins GetVRegNoV0<RetClass>.R:$merge,
                   GPR:$rs1,
                   VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>,
       RISCVVPseudo,
-      RISCVVLE</*Masked*/1, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
+      RISCVVLE</*Masked*/1, /*Strided*/0, /*FF*/0, log2<EEW>.val, VLMul> {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let Constraints = "$rd = $merge";
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasMergeOp = 1;
+  let HasVecPolicyOp = 1;
+  let UsesMaskPolicy = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoUSLoadFFNoMask<VReg RetClass, int EEW, bit DummyMask = 1> :
+      Pseudo<(outs RetClass:$rd, GPR:$vl),
+             (ins GPR:$rs1, AVL:$avl, ixlenimm:$sew),[]>,
+      RISCVVPseudo,
+      RISCVVLE</*Masked*/0, /*Strided*/0, /*FF*/1, log2<EEW>.val, VLMul> {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasDummyMask = DummyMask;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoUSLoadFFNoMaskTU<VReg RetClass, int EEW> :
+      Pseudo<(outs RetClass:$rd, GPR:$vl),
+             (ins RetClass:$dest, GPR:$rs1, AVL:$avl, ixlenimm:$sew),[]>,
+      RISCVVPseudo,
+      RISCVVLE</*Masked*/0, /*Strided*/0, /*FF*/1, log2<EEW>.val, VLMul> {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasDummyMask = 1;
+  let HasMergeOp = 1;
+  let Constraints = "$rd = $dest";
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoUSLoadFFMask<VReg RetClass, int EEW> :
+      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd, GPR:$vl),
+             (ins GetVRegNoV0<RetClass>.R:$merge,
+                  GPR:$rs1,
+                  VMaskOp:$vm, AVL:$avl, ixlenimm:$sew, ixlenimm:$policy),[]>,
+      RISCVVPseudo,
+      RISCVVLE</*Masked*/1, /*Strided*/0, /*FF*/1, log2<EEW>.val, VLMul> {
   let mayLoad = 1;
   let mayStore = 0;
   let hasSideEffects = 0;
@@ -1311,11 +1360,11 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }

-class VPseudoUSSegLoadNoMask<VReg RetClass, int EEW, bits<4> NF, bit isFF>:
+class VPseudoUSSegLoadNoMask<VReg RetClass, int EEW, bits<4> NF>:
       Pseudo<(outs RetClass:$rd),
              (ins GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
       RISCVVPseudo,
-      RISCVVLSEG<NF, /*Masked*/0, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
+      RISCVVLSEG<NF, /*Masked*/0, /*Strided*/0, /*FF*/0, log2<EEW>.val, VLMul> {
   let mayLoad = 1;
   let mayStore = 0;
   let hasSideEffects = 0;
@@ -1325,11 +1374,11 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }

-class VPseudoUSSegLoadNoMaskTU<VReg RetClass, int EEW, bits<4> NF, bit isFF>:
+class VPseudoUSSegLoadNoMaskTU<VReg RetClass, int EEW, bits<4> NF>:
       Pseudo<(outs RetClass:$rd),
              (ins RetClass:$dest, GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
       RISCVVPseudo,
-      RISCVVLSEG<NF, /*Masked*/0, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
+      RISCVVLSEG<NF, /*Masked*/0, /*Strided*/0, /*FF*/0, log2<EEW>.val, VLMul> {
   let mayLoad = 1;
   let mayStore = 0;
   let hasSideEffects = 0;
@@ -1341,12 +1390,60 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }

-class VPseudoUSSegLoadMask<VReg RetClass, int EEW, bits<4> NF, bit isFF>:
+class VPseudoUSSegLoadMask<VReg RetClass, int EEW, bits<4> NF>:
       Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
              (ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
                   VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>,
       RISCVVPseudo,
-      RISCVVLSEG<NF, /*Masked*/1, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
+      RISCVVLSEG<NF, /*Masked*/1, /*Strided*/0, /*FF*/0, log2<EEW>.val, VLMul> {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let Constraints = "$rd = $merge";
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasMergeOp = 1;
+  let HasVecPolicyOp = 1;
+  let UsesMaskPolicy = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoUSSegLoadFFNoMask<VReg RetClass, int EEW, bits<4> NF>:
+      Pseudo<(outs RetClass:$rd, GPR:$vl),
+             (ins GPR:$rs1, AVL:$avl, ixlenimm:$sew),[]>,
+      RISCVVPseudo,
+      RISCVVLSEG<NF, /*Masked*/0, /*Strided*/0, /*FF*/1, log2<EEW>.val, VLMul> {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoUSSegLoadFFNoMaskTU<VReg RetClass, int EEW, bits<4> NF>:
+      Pseudo<(outs RetClass:$rd, GPR:$vl),
+             (ins RetClass:$dest, GPR:$rs1, AVL:$avl, ixlenimm:$sew),[]>,
+      RISCVVPseudo,
+      RISCVVLSEG<NF, /*Masked*/0, /*Strided*/0, /*FF*/1, log2<EEW>.val, VLMul> {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasDummyMask = 1;
+  let HasMergeOp = 1;
+  let Constraints = "$rd = $dest";
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoUSSegLoadFFMask<VReg RetClass, int EEW, bits<4> NF>:
+      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd, GPR:$vl),
+             (ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
+                  VMaskOp:$vm, AVL:$avl, ixlenimm:$sew, ixlenimm:$policy),[]>,
+      RISCVVPseudo,
+      RISCVVLSEG<NF, /*Masked*/1, /*Strided*/0, /*FF*/1, log2<EEW>.val, VLMul> {
   let mayLoad = 1;
   let mayStore = 0;
   let hasSideEffects = 0;
@@ -1563,13 +1660,13 @@
       defvar vreg = lmul.vrclass;
       let VLMul = lmul.value in {
         def "E" # eew # "_V_" # LInfo :
-          VPseudoUSLoadNoMask<vreg, eew, false>,
+          VPseudoUSLoadNoMask<vreg, eew>,
           VLESched<LInfo>;
         def "E" # eew # "_V_" # LInfo # "_TU":
-          VPseudoUSLoadNoMaskTU<vreg, eew, false>,
+          VPseudoUSLoadNoMaskTU<vreg, eew>,
           VLESched<LInfo>;
         def "E" # eew # "_V_" # LInfo # "_MASK" :
-          VPseudoUSLoadMask<vreg, eew, false>,
+          VPseudoUSLoadMask<vreg, eew>,
           VLESched<LInfo>;
       }
     }
@@ -1582,14 +1679,14 @@
       defvar LInfo = lmul.MX;
       defvar vreg = lmul.vrclass;
       let VLMul = lmul.value in {
-        def "E" # eew # "FF_V_" # LInfo :
-          VPseudoUSLoadNoMask<vreg, eew, true>,
+        def "E" # eew # "FF_V_" # LInfo:
+          VPseudoUSLoadFFNoMask<vreg, eew>,
           VLFSched<LInfo>;
         def "E" # eew # "FF_V_" # LInfo # "_TU":
-          VPseudoUSLoadNoMaskTU<vreg, eew, true>,
+          VPseudoUSLoadFFNoMaskTU<vreg, eew>,
           VLFSched<LInfo>;
-        def "E" # eew # "FF_V_" # LInfo # "_MASK" :
-          VPseudoUSLoadMask<vreg, eew, true>,
+        def "E" # eew # "FF_V_" # LInfo # "_MASK":
+          VPseudoUSLoadFFMask<vreg, eew>,
           VLFSched<LInfo>;
       }
     }
@@ -1599,8 +1696,7 @@
 multiclass VPseudoLoadMask {
   foreach mti = AllMasks in {
     let VLMul = mti.LMul.value in {
-      def "_V_" # mti.BX : VPseudoUSLoadNoMask<VR, /*EEW*/1, /*isFF*/0,
-                                               /*DummyMask*/0>;
+      def "_V_" # mti.BX : VPseudoUSLoadNoMask<VR, /*EEW*/1, /*DummyMask*/0>;
     }
   }
 }
@@ -2795,20 +2891,38 @@
            Sched<[WriteVFNCvtFToFV, ReadVFNCvtFToFV, ReadVMask]>;
 }

-multiclass VPseudoUSSegLoad<bit isFF> {
+multiclass VPseudoUSSegLoad {
   foreach eew = EEWList in {
     foreach lmul = MxSet<eew>.m in {
       defvar LInfo = lmul.MX;
       let VLMul = lmul.value in {
         foreach nf = NFSet<lmul>.L in {
           defvar vreg = SegRegClass<lmul, nf>.RC;
-          defvar FFStr = !if(isFF, "FF", "");
-          def nf # "E" # eew # FFStr # "_V_" # LInfo :
-            VPseudoUSSegLoadNoMask<vreg, eew, nf, isFF>;
-          def nf # "E" # eew # FFStr # "_V_" # LInfo # "_TU" :
-            VPseudoUSSegLoadNoMaskTU<vreg, eew, nf, isFF>;
-          def nf # "E" # eew # FFStr # "_V_" # LInfo # "_MASK" :
-            VPseudoUSSegLoadMask<vreg, eew, nf, isFF>;
+          def nf # "E" # eew # "_V_" # LInfo :
+            VPseudoUSSegLoadNoMask<vreg, eew, nf>;
+          def nf # "E" # eew # "_V_" # LInfo # "_TU" :
+            VPseudoUSSegLoadNoMaskTU<vreg, eew, nf>;
+          def nf # "E" # eew # "_V_" # LInfo # "_MASK" :
+            VPseudoUSSegLoadMask<vreg, eew, nf>;
+        }
+      }
+    }
+  }
+}
+
+multiclass VPseudoUSSegLoadFF {
+  foreach eew = EEWList in {
+    foreach lmul = MxSet<eew>.m in {
+      defvar LInfo = lmul.MX;
+      let VLMul = lmul.value in {
+        foreach nf = NFSet<lmul>.L in {
+          defvar vreg = SegRegClass<lmul, nf>.RC;
+          def nf # "E" # eew # "FF_V_" # LInfo :
+            VPseudoUSSegLoadFFNoMask<vreg, eew, nf>;
+          def nf # "E" # eew # "FF_V_" # LInfo # "_TU" :
+            VPseudoUSSegLoadFFNoMaskTU<vreg, eew, nf>;
+          def nf # "E" # eew # "FF_V_" # LInfo # "_MASK" :
+            VPseudoUSSegLoadFFMask<vreg, eew, nf>;
         }
       }
     }
@@ -4300,7 +4414,7 @@

 let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1,
     Uses = [VL] in
-def PseudoReadVL : Pseudo<(outs GPR:$rd), (ins ixlenimm:$vtype), []>;
+def PseudoReadVL : Pseudo<(outs GPR:$rd), (ins), []>;

 let hasSideEffects = 0, mayLoad = 0, mayStore = 1, isCodeGenOnly = 1 in {
 def PseudoVSPILL_M1 : VPseudo;
@@ -4393,7 +4507,7 @@
 //===----------------------------------------------------------------------===//
 // 7.8. Vector Load/Store Segment Instructions
 //===----------------------------------------------------------------------===//
-defm PseudoVLSEG : VPseudoUSSegLoad</*isFF*/ false>;
+defm PseudoVLSEG : VPseudoUSSegLoad;
 defm PseudoVLSSEG : VPseudoSSegLoad;
 defm PseudoVLOXSEG : VPseudoISegLoad;
 defm PseudoVLUXSEG : VPseudoISegLoad;
@@ -4403,8 +4517,9 @@
 defm PseudoVSUXSEG : VPseudoISegStore;

 // vlseg<nf>e<eew>ff.v may update VL register
-let hasSideEffects = 1, Defs = [VL] in
-defm PseudoVLSEG : VPseudoUSSegLoad</*isFF*/ true>;
+let hasSideEffects = 1, Defs = [VL] in {
+defm PseudoVLSEG : VPseudoUSSegLoadFF;
+}

 //===----------------------------------------------------------------------===//
 // 12. Vector Integer Arithmetic Instructions
diff --git a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
--- a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
+++ b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
@@ -158,12 +158,16 @@
   if (RISCVII::hasSEWOp(TSFlags))
     --NumOps;

+  bool hasVLOutput = isFaultFirstLoad(*MI);
   for (unsigned OpNo = 0; OpNo != NumOps; ++OpNo) {
     const MachineOperand &MO = MI->getOperand(OpNo);
+    // Skip the vl output. It should be the second output.
+    if (hasVLOutput && OpNo == 1)
+      continue;
     // Skip merge op. It should be the first operand after the result.
-    if (RISCVII::hasMergeOp(TSFlags) && OpNo == 1) {
-      assert(MI->getNumExplicitDefs() == 1);
+    if (RISCVII::hasMergeOp(TSFlags) && OpNo == 1U + hasVLOutput) {
+      assert(MI->getNumExplicitDefs() == 1U + hasVLOutput);
       continue;
     }

@@ -210,16 +214,6 @@
   if (lowerRISCVVMachineInstrToMCInst(MI, OutMI))
     return false;

-  // Only need the output operand when lower PseudoReadVL from MI to MCInst.
-  if (MI->getOpcode() == RISCV::PseudoReadVL) {
-    OutMI.setOpcode(RISCV::CSRRS);
-    OutMI.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
-    OutMI.addOperand(
-        MCOperand::createImm(RISCVSysReg::lookupSysRegByName("VL")->Encoding));
-    OutMI.addOperand(MCOperand::createReg(RISCV::X0));
-    return false;
-  }
-
   OutMI.setOpcode(MI->getOpcode());

   for (const MachineOperand &MO : MI->operands()) {
@@ -248,6 +242,12 @@
         RISCVSysReg::lookupSysRegByName("VLENB")->Encoding));
     OutMI.addOperand(MCOperand::createReg(RISCV::X0));
     break;
+  case RISCV::PseudoReadVL:
+    OutMI.setOpcode(RISCV::CSRRS);
+    OutMI.addOperand(
+        MCOperand::createImm(RISCVSysReg::lookupSysRegByName("VL")->Encoding));
+    OutMI.addOperand(MCOperand::createReg(RISCV::X0));
+    break;
   }
   return false;
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-rv32-readvl.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-rv32-readvl.ll
deleted file mode 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vleff-rv32-readvl.ll
+++ /dev/null
@@ -1,1891 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -stop-after=finalize-isel < %s \
-; RUN:   -target-abi=ilp32 | FileCheck %s
-declare { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>*, i32);
-declare { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>*, i32);
-declare { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>*, i32);
-declare { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>*, i32);
-declare { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>*, i32);
-declare { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>*, i32);
-declare { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>*, i32);
-declare { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>*, i32);
-declare { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>*, i32);
-declare { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>*, i32);
-declare { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>*, i32);
-declare { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>*, i32);
-declare { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>*, i32);
-declare { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>*, i32);
-declare { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>*, i32);
-declare { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>*, i32);
-declare { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.mask.nxv8i8.i32(<vscale x 8 x i8>, <vscale x 8 x i8>*, <vscale x 8 x i1>, i32, i32 immarg)
-declare { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.mask.nxv16i8.i32(<vscale x 16 x i8>, <vscale x 16 x i8>*, <vscale x 16 x i1>, i32, i32 immarg)
-declare { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.mask.nxv32i8.i32(<vscale x 32 x i8>, <vscale x 32 x i8>*, <vscale x 32 x i1>, i32, i32 immarg)
-declare { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.mask.nxv64i8.i32(<vscale x 64 x i8>, <vscale x 64 x i8>*, <vscale x 64 x i1>, i32, i32 immarg)
-declare { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.mask.nxv4i16.i32(<vscale x 4 x i16>, <vscale x 4 x i16>*, <vscale x 4 x i1>, i32, i32 immarg)
-declare { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.mask.nxv8i16.i32(<vscale x 8 x i16>, <vscale x 8 x i16>*, <vscale x 8 x i1>, i32, i32 immarg)
-declare { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.mask.nxv16i16.i32(<vscale x 16 x i16>, <vscale x 16 x i16>*, <vscale x 16 x i1>, i32, i32 immarg)
-declare { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.mask.nxv32i16.i32(<vscale x 32 x i16>, <vscale x 32 x i16>*, <vscale x 32 x i1>, i32, i32 immarg)
-declare { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.mask.nxv2i32.i32(<vscale x 2 x i32>, <vscale x 2 x i32>*, <vscale x 2 x i1>, i32, i32 immarg)
-declare { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.mask.nxv4i32.i32(<vscale x 4 x i32>, <vscale x 4 x i32>*, <vscale x 4 x i1>, i32, i32 immarg)
-declare { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.mask.nxv8i32.i32(<vscale x 8 x i32>, <vscale x 8 x i32>*, <vscale x 8 x i1>, i32, i32 immarg)
-declare { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.mask.nxv16i32.i32(<vscale x 16 x i32>, <vscale x 16 x i32>*, <vscale x 16 x i1>, i32, i32 immarg)
-declare { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.mask.nxv1i64.i32(<vscale x 1 x i64>, <vscale x 1 x i64>*, <vscale x 1 x i1>, i32, i32 immarg)
-declare { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.mask.nxv2i64.i32(<vscale x 2 x i64>, <vscale x 2 x i64>*, <vscale x 2 x i1>, i32, i32 immarg)
-declare { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.mask.nxv4i64.i32(<vscale x 4 x i64>, <vscale x 4 x i64>*, <vscale x 4 x i1>, i32, i32 immarg)
-declare { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64.i32(<vscale x 8 x i64>, <vscale x 8 x i64>*, <vscale x 8 x i1>, i32, i32 immarg)
-
-define i32 @vleffe8m1( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m1 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ;
CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr = PseudoVLE8FF_V_M1 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv8i8( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m2( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m2 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE8FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE8FF_V_M2 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv16i8( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m4( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m4 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE8FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE8FF_V_M4 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv32i8( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m8( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m8 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE8FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE8FF_V_M8 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 67 /* e8, m8, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv64i8( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m1_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m1_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8 - ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE8FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 0 /* e8, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv8i8( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m2_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m2_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: 
[[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2 - ; CHECK-NEXT: [[PseudoVLE8FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE8FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 1 /* e8, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv16i8( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m4_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m4_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4 - ; CHECK-NEXT: [[PseudoVLE8FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE8FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 2 /* e8, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv32i8( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m8_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m8_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[PseudoVLE8FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE8FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 3 /* e8, m8, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv64i8( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m1_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m1_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 0 /* e8, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m2_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m2_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: 
[[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 1 /* e8, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m4_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m4_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 2 /* e8, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m8_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m8_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 3 /* e8, m8, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv64i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m1_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m1_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m2_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m2_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ 
$}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m4_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m4_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m8_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m8_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 67 /* e8, m8, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv64i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m1_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m1_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 128 /* e8, m1, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i8.i32( %maskedoff, * %p, 
%mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m2_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m2_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 129 /* e8, m2, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m4_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m4_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 130 /* e8, m4, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m8_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m8_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 131 /* e8, m8, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv64i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m1_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m1_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl - ; 
CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 192 /* e8, m1, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m2_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m2_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 193 /* e8, m2, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m4_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m4_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 194 /* e8, m4, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m8_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m8_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 195 /* e8, m8, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv64i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m1( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m1 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_:%[0-9]+]]:vr = 
PseudoVLE16FF_V_M1 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv4i16( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m2( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m2 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE16FF_V_M2 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv8i16( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m4( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m4 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE16FF_V_M4 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv16i16( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m8( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m8 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE16FF_V_M8 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 75 /* e16, m8, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv32i16( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m1_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m1_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8 - ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE16FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 8 /* e16, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv4i16( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m2_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m2_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY 
$x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2 - ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE16FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 9 /* e16, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv8i16( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m4_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m4_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4 - ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE16FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 10 /* e16, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv16i16( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m8_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m8_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE16FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 11 /* e16, m8, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv32i16( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m1_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m1_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 8 /* e16, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m2_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m2_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = 
PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 9 /* e16, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m4_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m4_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 10 /* e16, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m8_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m8_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 11 /* e16, m8, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m1_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m1_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m2_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m2_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: 
[[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m4_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m4_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m8_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m8_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 75 /* e16, m8, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m1_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m1_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 136 /* e16, m1, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i16.i32( %maskedoff, * 
%p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m2_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m2_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 137 /* e16, m2, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m4_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m4_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 138 /* e16, m4, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m8_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m8_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 139 /* e16, m8, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m1_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m1_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* 
e16 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 200 /* e16, m1, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m2_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m2_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 201 /* e16, m2, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m4_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m4_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 202 /* e16, m4, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m8_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m8_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 203 /* e16, m8, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m1( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m1 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; 
CHECK-NEXT: [[PseudoVLE32FF_V_M1_:%[0-9]+]]:vr = PseudoVLE32FF_V_M1 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv2i32( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m2( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m2 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32FF_V_M2 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv4i32( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m4( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m4 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE32FF_V_M4 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 82 /* e32, m4, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv8i32( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m8( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m8 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE32FF_V_M8 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 83 /* e32, m8, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv16i32( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m1_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m1_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8 - ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE32FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 16 /* e32, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv2i32( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m2_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m2_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY 
$x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2 - ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE32FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 17 /* e32, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv4i32( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m4_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m4_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4 - ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE32FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 18 /* e32, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv8i32( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m8_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m8_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE32FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 19 /* e32, m8, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv16i32( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m1_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m1_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 16 /* e32, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m2_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m2_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: 
[[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 17 /* e32, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m4_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m4_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 18 /* e32, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m8_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m8_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 19 /* e32, m8, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m1_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m1_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m2_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m2_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, 
$x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m4_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m4_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 82 /* e32, m4, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m8_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m8_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 83 /* e32, m8, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m1_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m1_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 144 /* e32, m1, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } 
@llvm.riscv.vleff.mask.nxv2i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m2_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m2_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 145 /* e32, m2, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m4_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m4_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 146 /* e32, m4, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m8_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m8_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 147 /* e32, m8, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m1_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m1_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = 
PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 208 /* e32, m1, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m2_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m2_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 209 /* e32, m2, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m4_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m4_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 210 /* e32, m4, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m8_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m8_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 211 /* e32, m8, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m1( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m1 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = 
COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_:%[0-9]+]]:vr = PseudoVLE64FF_V_M1 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv1i64( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m2( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m2 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE64FF_V_M2 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv2i64( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m4( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m4 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE64FF_V_M4 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv4i64( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m8( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m8 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE64FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE64FF_V_M8 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 91 /* e64, m8, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv8i64( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m1_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m1_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8 - ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE64FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 24 /* e64, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv1i64( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m2_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m2_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m2, $x10, $x11 - ; 
CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2 - ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE64FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 25 /* e64, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv2i64( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m4_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m4_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4 - ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE64FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 26 /* e64, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv4i64( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m8_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m8_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[PseudoVLE64FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE64FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 27 /* e64, m8, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv8i64( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m1_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m1_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 24 /* e64, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv1i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m2_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m2_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = 
COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 25 /* e64, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m4_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m4_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 26 /* e64, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m8_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m8_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 27 /* e64, m8, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m1_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m1_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv1i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m2_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m2_tamu - 
; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m4_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m4_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m8_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m8_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 91 /* e64, m8, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m1_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m1_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 152 /* e64, m1, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: 
PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv1i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m2_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m2_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 153 /* e64, m2, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m4_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m4_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 154 /* e64, m4, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m8_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m8_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 155 /* e64, m8, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m1_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m1_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: 
[[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 216 /* e64, m1, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.mask.nxv1i64.i32(<vscale x 1 x i64> %maskedoff, <vscale x 1 x i64>* %p, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 1 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m2_tama(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %maskedoff, <vscale x 2 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m2_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 217 /* e64, m2, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.mask.nxv2i64.i32(<vscale x 2 x i64> %maskedoff, <vscale x 2 x i64>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 2 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m4_tama(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %maskedoff, <vscale x 4 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m4_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 218 /* e64, m4, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.mask.nxv4i64.i32(<vscale x 4 x i64> %maskedoff, <vscale x 4 x i64>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 4 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m8_tama(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %maskedoff, <vscale x 8 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m8_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 219 /* e64, m8, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64.i32(<vscale x 8 x i64> %maskedoff, <vscale x 8 x i64>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 8 x i64>, i32 } %0, 1
-  ret i32 %1
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-rv64-readvl.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-rv64-readvl.ll
deleted file mode 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vleff-rv64-readvl.ll
+++ /dev/null
@@ -1,1891 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -stop-after=finalize-isel < %s \
-; RUN:   -target-abi=ilp32 | FileCheck %s
-declare { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>*, i32);
-declare { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>*, i32);
-declare { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>*, i32);
-declare { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>*, i32);
-declare { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>*, i32);
-declare { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>*, i32);
-declare { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>*, i32);
-declare { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>*, i32);
-declare { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>*, i32);
-declare { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>*, i32);
-declare { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>*, i32);
-declare { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>*, i32);
-declare { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>*, i32);
-declare { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>*, i32);
-declare { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>*, i32);
-declare { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>*, i32);
-declare { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.mask.nxv8i8.i32(<vscale x 8 x i8>, <vscale x 8 x i8>*, <vscale x 8 x i1>, i32, i32 immarg)
-declare { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.mask.nxv16i8.i32(<vscale x 16 x i8>, <vscale x 16 x i8>*, <vscale x 16 x i1>, i32, i32 immarg)
-declare { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.mask.nxv32i8.i32(<vscale x 32 x i8>, <vscale x 32 x i8>*, <vscale x 32 x i1>, i32, i32 immarg)
-declare { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.mask.nxv64i8.i32(<vscale x 64 x i8>, <vscale x 64 x i8>*, <vscale x 64 x i1>, i32, i32 immarg)
-declare { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.mask.nxv4i16.i32(<vscale x 4 x i16>, <vscale x 4 x i16>*, <vscale x 4 x i1>, i32, i32 immarg)
-declare { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.mask.nxv8i16.i32(<vscale x 8 x i16>, <vscale x 8 x i16>*, <vscale x 8 x i1>, i32, i32 immarg)
-declare { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.mask.nxv16i16.i32(<vscale x 16 x i16>, <vscale x 16 x i16>*, <vscale x 16 x i1>, i32, i32 immarg)
-declare { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.mask.nxv32i16.i32(<vscale x 32 x i16>, <vscale x 32 x i16>*, <vscale x 32 x i1>, i32, i32 immarg)
-declare { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.mask.nxv2i32.i32(<vscale x 2 x i32>, <vscale x 2 x i32>*, <vscale x 2 x i1>, i32, i32 immarg)
-declare { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.mask.nxv4i32.i32(<vscale x 4 x i32>, <vscale x 4 x i32>*, <vscale x 4 x i1>, i32, i32 immarg)
-declare { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.mask.nxv8i32.i32(<vscale x 8 x i32>, <vscale x 8 x i32>*, <vscale x 8 x i1>, i32, i32 immarg)
-declare { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.mask.nxv16i32.i32(<vscale x 16 x i32>, <vscale x 16 x i32>*, <vscale x 16 x i1>, i32, i32 immarg)
-declare { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.mask.nxv1i64.i32(<vscale x 1 x i64>, <vscale x 1 x i64>*, <vscale x 1 x i1>, i32, i32 immarg)
-declare { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.mask.nxv2i64.i32(<vscale x 2 x i64>, <vscale x 2 x i64>*, <vscale x 2 x i1>, i32, i32 immarg)
-declare { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.mask.nxv4i64.i32(<vscale x 4 x i64>, <vscale x 4 x i64>*, <vscale x 4 x i1>, i32, i32 immarg)
-declare { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64.i32(<vscale x 8 x i64>, <vscale x 8 x i64>*, <vscale x 8 x i1>, i32, i32 immarg)
-
-define i32 @vleffe8m1(<vscale x 8 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m1
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr = PseudoVLE8FF_V_M1 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 8 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m2(<vscale x 16 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m2
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE8FF_V_M2 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr =
PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv16i8( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m4( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m4 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE8FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE8FF_V_M4 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv32i8( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m8( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m8 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE8FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE8FF_V_M8 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 67 /* e8, m8, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv64i8( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m1_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m1_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8 - ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE8FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 0 /* e8, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv8i8( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m2_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m2_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2 - ; CHECK-NEXT: [[PseudoVLE8FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE8FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 1 /* e8, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv16i8( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m4_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m4_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4 - 
; CHECK-NEXT: [[PseudoVLE8FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE8FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 2 /* e8, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv32i8( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m8_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m8_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[PseudoVLE8FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE8FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 3 /* e8, m8, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv64i8( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m1_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m1_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 0 /* e8, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m2_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m2_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 1 /* e8, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m4_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m4_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - 
; CHECK-NEXT: [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 2 /* e8, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m8_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m8_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 3 /* e8, m8, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv64i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m1_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m1_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m2_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m2_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m4_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m4_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; 
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.mask.nxv32i8.i32(<vscale x 32 x i8> %maskedoff, <vscale x 32 x i8>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 32 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m8_tamu(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %maskedoff, <vscale x 64 x i8>*%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m8_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 67 /* e8, m8, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.mask.nxv64i8.i32(<vscale x 64 x i8> %maskedoff, <vscale x 64 x i8>* %p, <vscale x 64 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 64 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m1_tuma(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %maskedoff, <vscale x 8 x i8>*%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m1_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 128 /* e8, m1, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.mask.nxv8i8.i32(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i8>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 8 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m2_tuma(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %maskedoff, <vscale x 16 x i8>*%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m2_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 129 /* e8, m2, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.mask.nxv16i8.i32(<vscale x 16 x i8> %maskedoff, <vscale x 16 x i8>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 16 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m4_tuma(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %maskedoff, <vscale x 32 x i8>*%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m4_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 130 /* e8, m4, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.mask.nxv32i8.i32(<vscale x 32 x i8> %maskedoff, <vscale x 32 x i8>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 32 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m8_tuma(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %maskedoff, <vscale x 64 x i8>*%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m8_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 131 /* e8, m8, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.mask.nxv64i8.i32(<vscale x 64 x i8> %maskedoff, <vscale x 64 x i8>* %p, <vscale x 64 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 64 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m1_tama(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %maskedoff, <vscale x 8 x i8>*%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m1_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 192 /* e8, m1, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.mask.nxv8i8.i32(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i8>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 8 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m2_tama(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %maskedoff, <vscale x 16 x i8>*%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m2_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 193 /* e8, m2, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.mask.nxv16i8.i32(<vscale x 16 x i8> %maskedoff, <vscale x 16 x i8>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 16 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m4_tama(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %maskedoff, <vscale x 32 x i8>*%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m4_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 194 /* e8, m4, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.mask.nxv32i8.i32(<vscale x 32 x i8> %maskedoff, <vscale x 32 x i8>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 32 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m8_tama(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %maskedoff, <vscale x 64 x i8>*%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m8_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 195 /* e8, m8, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.mask.nxv64i8.i32(<vscale x 64 x i8> %maskedoff, <vscale x 64 x i8>* %p, <vscale x 64 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 64 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m1(<vscale x 4 x i16>*%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m1
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M1_:%[0-9]+]]:vr = PseudoVLE16FF_V_M1 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 4 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m2(<vscale x 8 x i16>*%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m2
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE16FF_V_M2 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 8 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m4(<vscale x 16 x i16>*%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m4
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE16FF_V_M4 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 16 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m8(<vscale x 32 x i16>*%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m8
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE16FF_V_M8 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 75 /* e16, m8, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.nxv32i16(<vscale x 32 x i16> undef, <vscale x 32 x i16>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 32 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m1_tu(<vscale x 4 x i16> %merge, <vscale x 4 x i16>*%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m1_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE16FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 8 /* e16, m1, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.nxv4i16(<vscale x 4 x i16> %merge, <vscale x 4 x i16>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 4 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m2_tu(<vscale x 8 x i16> %merge, <vscale x 8 x i16>*%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m2_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE16FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 9 /* e16, m2, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.nxv8i16(<vscale x 8 x i16> %merge, <vscale x 8 x i16>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 8 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m4_tu(<vscale x 16 x i16> %merge, <vscale x 16 x i16>*%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m4_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M4_TU:%[0-9]+]]:vrm4 =
PseudoVLE16FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 10 /* e16, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv16i16( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m8_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m8_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE16FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 11 /* e16, m8, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv32i16( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m1_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m1_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 8 /* e16, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m2_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m2_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 9 /* e16, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m4_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m4_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: 
[[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 10 /* e16, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m8_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m8_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 11 /* e16, m8, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m1_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m1_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m2_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m2_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m4_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m4_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, 
$x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m8_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m8_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 75 /* e16, m8, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m1_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m1_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 136 /* e16, m1, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m2_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m2_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 137 /* e16, m2, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } 
@llvm.riscv.vleff.mask.nxv8i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m4_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m4_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 138 /* e16, m4, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m8_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m8_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 139 /* e16, m8, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m1_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m1_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 200 /* e16, m1, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m2_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m2_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = 
PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 201 /* e16, m2, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m4_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m4_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 202 /* e16, m4, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m8_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m8_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 203 /* e16, m8, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m1( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m1 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_:%[0-9]+]]:vr = PseudoVLE32FF_V_M1 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv2i32( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m2( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m2 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32FF_V_M2 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 81 /* 
e32, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv4i32( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m4( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m4 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE32FF_V_M4 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 82 /* e32, m4, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv8i32( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m8( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m8 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE32FF_V_M8 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 83 /* e32, m8, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv16i32( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m1_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m1_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8 - ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE32FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 16 /* e32, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv2i32( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m2_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m2_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2 - ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE32FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 17 /* e32, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv4i32( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m4_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m4_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4 = 
COPY $v8m4 - ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE32FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 18 /* e32, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv8i32( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m8_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m8_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE32FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 19 /* e32, m8, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv16i32( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m1_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m1_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 16 /* e32, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m2_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m2_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 17 /* e32, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m4_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m4_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY 
$v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 18 /* e32, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m8_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m8_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 19 /* e32, m8, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m1_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m1_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m2_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m2_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m4_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m4_tamu - ; 
CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 82 /* e32, m4, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m8_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m8_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 83 /* e32, m8, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m1_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m1_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 144 /* e32, m1, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m2_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m2_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 145 /* e32, m2, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: 
PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m4_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m4_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 146 /* e32, m4, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m8_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m8_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 147 /* e32, m8, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m1_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m1_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 208 /* e32, m1, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m2_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m2_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: 
[[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 209 /* e32, m2, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m4_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m4_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 210 /* e32, m4, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m8_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m8_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 211 /* e32, m8, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m1( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m1 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_:%[0-9]+]]:vr = PseudoVLE64FF_V_M1 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv1i64( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m2( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m2 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE64FF_V_M2 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl - ; CHECK-NEXT: 
[[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv2i64( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m4( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m4 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE64FF_V_M4 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv4i64( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m8( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m8 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE64FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE64FF_V_M8 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 91 /* e64, m8, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv8i64( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m1_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m1_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8 - ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE64FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 24 /* e64, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv1i64( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m2_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m2_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2 - ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE64FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 25 /* e64, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv2i64( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m4_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m4_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = 
COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4 - ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE64FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 26 /* e64, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv4i64( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m8_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m8_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[PseudoVLE64FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE64FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 27 /* e64, m8, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv8i64( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m1_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m1_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 24 /* e64, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv1i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m2_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m2_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 25 /* e64, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m4_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m4_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY 
$v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 26 /* e64, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m8_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m8_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 27 /* e64, m8, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m1_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m1_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv1i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m2_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m2_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m4_tamu( %mask, %maskedoff, *%p, i32 
%vl) { - ; CHECK-LABEL: name: vleffe64m4_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m8_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m8_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 91 /* e64, m8, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m1_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m1_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 152 /* e64, m1, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv1i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m2_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m2_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 153 /* e64, m2, tu, ma */, implicit $vl - ; CHECK-NEXT: 
$x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m4_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m4_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 154 /* e64, m4, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m8_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m8_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 155 /* e64, m8, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m1_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m1_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 216 /* e64, m1, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv1i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m2_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m2_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: 
$v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 217 /* e64, m2, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.mask.nxv2i64.i32(<vscale x 2 x i64> %maskedoff, <vscale x 2 x i64>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 2 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m4_tama(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %maskedoff, <vscale x 4 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m4_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 218 /* e64, m4, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.mask.nxv4i64.i32(<vscale x 4 x i64> %maskedoff, <vscale x 4 x i64>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 4 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m8_tama(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %maskedoff, <vscale x 8 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m8_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 219 /* e64, m8, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64.i32(<vscale x 8 x i64> %maskedoff, <vscale x 8 x i64>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 8 x i64>, i32 } %0, 1
-  ret i32 %1
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll
@@ -0,0 +1,114 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+v -stop-after=finalize-isel -target-abi=lp64 < %s | FileCheck %s
+
+declare { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>*, i64)
+declare { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.mask.nxv8i8.i64(<vscale x 8 x i8>, <vscale x 8 x i8>*, <vscale x 8 x i1>, i64, i64 immarg)
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, i8* , i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i1>, i64, i64)
+
+define i64 @test_vleff_nxv8i8(<vscale x 8 x i8> *%p, i64 %vl) {
+  ; CHECK-LABEL: name: test_vleff_nxv8i8
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr, [[PseudoVLE8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def dead $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoVLE8FF_V_M1_1]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8>* %p, i64 %vl)
+  %1 = extractvalue { <vscale x 8 x i8>, i64 } %0, 1
+  ret i64 %1
+}
+
+define i64 @test_vleff_nxv8i8_tu(<vscale x 8 x i8> %merge, <vscale x 8 x i8> *%p, i64 %vl) {
+  ; CHECK-LABEL: name: test_vleff_nxv8i8_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_TU:%[0-9]+]]:vr, [[PseudoVLE8FF_V_M1_TU1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def dead $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoVLE8FF_V_M1_TU1]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8> %merge, <vscale x 8 x i8>* %p, i64 %vl)
+  %1 = extractvalue { <vscale x 8 x i8>, i64 } %0, 1
+  ret i64 %1
+}
+
+define i64 @test_vleff_nxv8i8_mask(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i8> *%p, <vscale x 8 x i1> %m, i64 %vl) {
+  ; CHECK-LABEL: name: test_vleff_nxv8i8_mask
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8, $x10, $v0, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0, [[PseudoVLE8FF_V_M1_MASK1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def dead $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoVLE8FF_V_M1_MASK1]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.mask.nxv8i8.i64(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i8>* %p, <vscale x 8 x i1> %m, i64 %vl, i64 0)
+  %1 = extractvalue { <vscale x 8 x i8>, i64 } %0, 1
+  ret i64 %1
+}
+
+define i64 @test_vlseg2ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_nxv8i8
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def dead $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_1]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, i8* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} %0, 2
+  ret i64 %1
+}
+
+define i64 @test_vlseg2ff_nxv8i8_tu(<vscale x 8 x i8> %val, i8* %base, i64 %vl, i64* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_nxv8i8_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m1 = REG_SEQUENCE [[COPY2]], %subreg.sub_vrm1_0, [[COPY2]], %subreg.sub_vrm1_1
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_TU:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_TU1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1_TU [[REG_SEQUENCE]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def dead $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_TU1]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.nxv8i8(<vscale x 8 x i8> %val, <vscale x 8 x i8> %val, i8* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} %0, 2
+  ret i64 %1
+}
+
+define i64 @test_vlseg2ff_nxv8i8_mask(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl, i64* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_nxv8i8_mask
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8, $x10, $v0, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v8
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY3]], %subreg.sub_vrm1_0, [[COPY3]], %subreg.sub_vrm1_1
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0, [[PseudoVLSEG2E8FF_V_M1_MASK1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY2]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def dead $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_MASK1]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8> %val, <vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 0)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} %0, 2
+  ret i64 %1
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv32-readvl.ll b/llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv32-readvl.ll
deleted file mode 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv32-readvl.ll
+++ /dev/null
@@ -1,732 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -stop-after=finalize-isel < %s \
-; RUN:    -target-abi=ilp32d | FileCheck %s
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16* , i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i1>, i32, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32* , i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i1>, i32, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg2ff.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8* , i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i1>, i32, i32)
-declare {<vscale x 1 x i64>,<vscale x 1 x i64>, i32} @llvm.riscv.vlseg2ff.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>, i64* , i32)
-declare {<vscale x 1 x i64>,<vscale x 1 x i64>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i1>, i32, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg2ff.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32* , i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i32, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg2ff.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16* , i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i1>, i32, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg2ff.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8* , i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i32, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg2ff.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16* , i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i32, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg2ff.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32* , i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i1>, i32, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg2ff.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8* , i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i1>, i32, i32)
-declare {<vscale x 4 x i64>,<vscale x 4 x i64>, i32} @llvm.riscv.vlseg2ff.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i64>, i64* , i32)
-declare {<vscale x 4 x i64>,<vscale x 4 x i64>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 4 x i1>, i32, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg2ff.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16* , i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i1>, i32, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg2ff.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8* , i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i1>, i32, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg2ff.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8* , i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i1>, i32, i32)
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>, i32} @llvm.riscv.vlseg2ff.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32* , i32)
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 8 x i1>, i32, i32)
-declare
{,, i32} @llvm.riscv.vlseg2ff.nxv32i8(,, i8* , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8(,, i8*, , i32, i32) -declare {,, i32} @llvm.riscv.vlseg2ff.nxv2i16(,, i16* , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i16(,, i16*, , i32, i32) -declare {,, i32} @llvm.riscv.vlseg2ff.nxv2i64(,, i64* , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i64(,, i64*, , i32, i32) - -define void @test_vlseg2ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) { - ; CHECK-LABEL: name: test_vlseg2ff_nxv8i8 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11, $x12 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E8FF_V_M1 [[COPY2]], [[COPY1]], 3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: PseudoRET -entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8i8( undef, undef, i8* %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 2 - store volatile i32 %1, i32* %outvl - ret void -} - -define void @test_vlseg2ff_mask_nxv8i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { - ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv8i8 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8, $x10, $x11, $v0, $x12 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vr = COPY $v8 - ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm1_0, [[COPY4]], %subreg.sub_vrm1_1 - ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 0 /* e8, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_MASK1:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_MASK2:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 128 /* e8, m1, tu, ma */, implicit $vl - ; CHECK-NEXT: SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_MASK3:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 192 /* e8, m1, ta, ma */, implicit $vl - ; CHECK-NEXT: SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: PseudoRET 
-entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8( %val, %val, i8* %base, %mask, i32 %vl, i32 0) - %1 = extractvalue {,, i32} %0, 2 - store volatile i32 %1, i32* %outvl - %2 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8( %val, %val, i8* %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,, i32} %2, 2 - store volatile i32 %3, i32* %outvl - %4 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8( %val, %val, i8* %base, %mask, i32 %vl, i32 2) - %5 = extractvalue {,, i32} %4, 2 - store volatile i32 %5, i32* %outvl - %6 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8( %val, %val, i8* %base, %mask, i32 %vl, i32 3) - %7 = extractvalue {,, i32} %6, 2 - store volatile i32 %7, i32* %outvl - ret void -} - -define void @test_vlseg2ff_nxv16i8(i8* %base, i32 %vl, i32* %outvl) { - ; CHECK-LABEL: name: test_vlseg2ff_nxv16i8 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11, $x12 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M2_:%[0-9]+]]:vrn2m2 = PseudoVLSEG2E8FF_V_M2 [[COPY2]], [[COPY1]], 3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: PseudoRET -entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16i8( undef, undef, i8* %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 2 - store volatile i32 %1, i32* %outvl - ret void -} - -define void @test_vlseg2ff_mask_nxv16i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { - ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv16i8 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m2, $x10, $x11, $v0, $x12 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm2 = COPY $v8m2 - ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m2nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm2_0, [[COPY4]], %subreg.sub_vrm2_1 - ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M2_MASK:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E8FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 1 /* e8, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: SW [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M2_MASK1:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E8FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: SW [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M2_MASK2:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E8FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 129 /* e8, m2, tu, ma */, implicit $vl - ; CHECK-NEXT: SW [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M2_MASK3:%[0-9]+]]:vrn2m2nov0 = 
PseudoVLSEG2E8FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 193 /* e8, m2, ta, ma */, implicit $vl - ; CHECK-NEXT: SW [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: PseudoRET -entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8( %val, %val, i8* %base, %mask, i32 %vl, i32 0) - %1 = extractvalue {,, i32} %0, 2 - store volatile i32 %1, i32* %outvl - %2 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8( %val, %val, i8* %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,, i32} %0, 2 - store volatile i32 %3, i32* %outvl - %4 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8( %val, %val, i8* %base, %mask, i32 %vl, i32 2) - %5 = extractvalue {,, i32} %0, 2 - store volatile i32 %5, i32* %outvl - %6 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8( %val, %val, i8* %base, %mask, i32 %vl, i32 3) - %7 = extractvalue {,, i32} %0, 2 - store volatile i32 %7, i32* %outvl - ret void -} - -define void @test_vlseg2ff_nxv32i8(i8* %base, i32 %vl, i32* %outvl) { - ; CHECK-LABEL: name: test_vlseg2ff_nxv32i8 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11, $x12 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M4_:%[0-9]+]]:vrn2m4 = PseudoVLSEG2E8FF_V_M4 [[COPY2]], [[COPY1]], 3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl - ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: PseudoRET -entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv32i8( undef, undef, i8* %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 2 - store volatile i32 %1, i32* %outvl - ret void -} - -define void @test_vlseg2ff_mask_nxv32i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { - ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv32i8 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m4, $x10, $x11, $v0, $x12 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm4 = COPY $v8m4 - ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m4nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm4_0, [[COPY4]], %subreg.sub_vrm4_1 - ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M4_MASK:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E8FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 2 /* e8, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M4_MASK1:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E8FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl - ; CHECK-NEXT: SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M4_MASK2:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E8FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, 
[[COPY2]], 3 /* e8 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 130 /* e8, m4, tu, ma */, implicit $vl - ; CHECK-NEXT: SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M4_MASK3:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E8FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 194 /* e8, m4, ta, ma */, implicit $vl - ; CHECK-NEXT: SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: PseudoRET -entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8( %val, %val, i8* %base, %mask, i32 %vl, i32 0) - %1 = extractvalue {,, i32} %0, 2 - store volatile i32 %1, i32* %outvl - %2 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8( %val, %val, i8* %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,, i32} %2, 2 - store volatile i32 %3, i32* %outvl - %4 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8( %val, %val, i8* %base, %mask, i32 %vl, i32 2) - %5 = extractvalue {,, i32} %4, 2 - store volatile i32 %5, i32* %outvl - %6 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8( %val, %val, i8* %base, %mask, i32 %vl, i32 3) - %7 = extractvalue {,, i32} %6, 2 - store volatile i32 %7, i32* %outvl - ret void -} - -define void @test_vlseg2ff_nxv4i16(i16* %base, i32 %vl, i32* %outvl) { - ; CHECK-LABEL: name: test_vlseg2ff_nxv4i16 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11, $x12 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E16FF_V_M1 [[COPY2]], [[COPY1]], 4 /* e16 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: PseudoRET -entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4i16( undef, undef, i16* %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 2 - store volatile i32 %1, i32* %outvl - ret void -} - -define void @test_vlseg2ff_mask_nxv4i16( %val, i16* %base, i32 %vl, %mask, i32* %outvl) { - ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv4i16 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8, $x10, $x11, $v0, $x12 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vr = COPY $v8 - ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm1_0, [[COPY4]], %subreg.sub_vrm1_1 - ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E16FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 8 /* e16, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M1_MASK1:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E16FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 1, implicit-def $vl 
- ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M1_MASK2:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E16FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 136 /* e16, m1, tu, ma */, implicit $vl - ; CHECK-NEXT: SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M1_MASK3:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E16FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 200 /* e16, m1, ta, ma */, implicit $vl - ; CHECK-NEXT: SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: PseudoRET -entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16( %val, %val, i16* %base, %mask, i32 %vl, i32 0) - %1 = extractvalue {,, i32} %0, 2 - store volatile i32 %1, i32* %outvl - %2 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16( %val, %val, i16* %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,, i32} %2, 2 - store volatile i32 %3, i32* %outvl - %4 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16( %val, %val, i16* %base, %mask, i32 %vl, i32 2) - %5 = extractvalue {,, i32} %4, 2 - store volatile i32 %5, i32* %outvl - %6 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16( %val, %val, i16* %base, %mask, i32 %vl, i32 3) - %7 = extractvalue {,, i32} %6, 2 - store volatile i32 %7, i32* %outvl - ret void -} - -define void @test_vlseg2ff_nxv8i16(i16* %base, i32 %vl, i32* %outvl) { - ; CHECK-LABEL: name: test_vlseg2ff_nxv8i16 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11, $x12 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M2_:%[0-9]+]]:vrn2m2 = PseudoVLSEG2E16FF_V_M2 [[COPY2]], [[COPY1]], 4 /* e16 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: PseudoRET -entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8i16( undef, undef, i16* %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 2 - store volatile i32 %1, i32* %outvl - ret void -} - -define void @test_vlseg2ff_mask_nxv8i16( %val, i16* %base, i32 %vl, %mask, i32* %outvl) { - ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv8i16 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m2, $x10, $x11, $v0, $x12 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm2 = COPY $v8m2 - ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m2nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm2_0, [[COPY4]], %subreg.sub_vrm2_1 - ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M2_MASK:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E16FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 0, implicit-def $vl - ; CHECK-NEXT: 
[[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 9 /* e16, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M2_MASK1:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E16FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M2_MASK2:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E16FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 137 /* e16, m2, tu, ma */, implicit $vl - ; CHECK-NEXT: SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M2_MASK3:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E16FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 201 /* e16, m2, ta, ma */, implicit $vl - ; CHECK-NEXT: SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: PseudoRET -entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16( %val, %val, i16* %base, %mask, i32 %vl, i32 0) - %1 = extractvalue {,, i32} %0, 2 - store volatile i32 %1, i32* %outvl - %2 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16( %val, %val, i16* %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,, i32} %2, 2 - store volatile i32 %3, i32* %outvl - %4 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16( %val, %val, i16* %base, %mask, i32 %vl, i32 2) - %5 = extractvalue {,, i32} %4, 2 - store volatile i32 %5, i32* %outvl - %6 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16( %val, %val, i16* %base, %mask, i32 %vl, i32 3) - %7 = extractvalue {,, i32} %6, 2 - store volatile i32 %7, i32* %outvl - ret void -} - -define void @test_vlseg2ff_nxv16i16(i16* %base, i32 %vl, i32* %outvl) { - ; CHECK-LABEL: name: test_vlseg2ff_nxv16i16 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11, $x12 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M4_:%[0-9]+]]:vrn2m4 = PseudoVLSEG2E16FF_V_M4 [[COPY2]], [[COPY1]], 4 /* e16 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl - ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: PseudoRET -entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16i16( undef, undef, i16* %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 2 - store volatile i32 %1, i32* %outvl - ret void -} - -define void @test_vlseg2ff_mask_nxv16i16( %val, i16* %base, i32 %vl, %mask, i32* %outvl) { - ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv16i16 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m4, $x10, $x11, $v0, $x12 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: 
[[COPY4:%[0-9]+]]:vrm4 = COPY $v8m4 - ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m4nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm4_0, [[COPY4]], %subreg.sub_vrm4_1 - ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M4_MASK:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E16FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 10 /* e16, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M4_MASK1:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E16FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl - ; CHECK-NEXT: SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M4_MASK2:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E16FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 138 /* e16, m4, tu, ma */, implicit $vl - ; CHECK-NEXT: SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M4_MASK3:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E16FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 202 /* e16, m4, ta, ma */, implicit $vl - ; CHECK-NEXT: SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: PseudoRET -entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i32 %vl, i32 0) - %1 = extractvalue {,, i32} %0, 2 - store volatile i32 %1, i32* %outvl - %2 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,, i32} %2, 2 - store volatile i32 %3, i32* %outvl - %4 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i32 %vl, i32 2) - %5 = extractvalue {,, i32} %4, 2 - store volatile i32 %5, i32* %outvl - %6 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i32 %vl, i32 3) - %7 = extractvalue {,, i32} %6, 2 - store volatile i32 %7, i32* %outvl - ret void -} - -define void @test_vlseg2ff_nxv2i32(i32* %base, i32 %vl, i32* %outvl) { - ; CHECK-LABEL: name: test_vlseg2ff_nxv2i32 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11, $x12 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E32FF_V_M1 [[COPY2]], [[COPY1]], 5 /* e32 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: PseudoRET -entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2i32( undef, undef, i32* %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 2 - store volatile i32 %1, i32* %outvl - ret void -} - -define void @test_vlseg2ff_mask_nxv2i32( %val, i32* %base, i32 %vl, 
%mask, i32* %outvl) { - ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv2i32 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8, $x10, $x11, $v0, $x12 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vr = COPY $v8 - ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm1_0, [[COPY4]], %subreg.sub_vrm1_1 - ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E32FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 16 /* e32, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M1_MASK1:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E32FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M1_MASK2:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E32FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 144 /* e32, m1, tu, ma */, implicit $vl - ; CHECK-NEXT: SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M1_MASK3:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E32FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 208 /* e32, m1, ta, ma */, implicit $vl - ; CHECK-NEXT: SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: PseudoRET -entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32( %val, %val, i32* %base, %mask, i32 %vl, i32 0) - %1 = extractvalue {,, i32} %0, 2 - store volatile i32 %1, i32* %outvl - %2 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32( %val, %val, i32* %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,, i32} %2, 2 - store volatile i32 %3, i32* %outvl - %4 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32( %val, %val, i32* %base, %mask, i32 %vl, i32 2) - %5 = extractvalue {,, i32} %4, 2 - store volatile i32 %5, i32* %outvl - %6 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32( %val, %val, i32* %base, %mask, i32 %vl, i32 3) - %7 = extractvalue {,, i32} %6, 2 - store volatile i32 %7, i32* %outvl - ret void -} - -define void @test_vlseg2ff_nxv4i32(i32* %base, i32 %vl, i32* %outvl) { - ; CHECK-LABEL: name: test_vlseg2ff_nxv4i32 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11, $x12 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M2_:%[0-9]+]]:vrn2m2 = PseudoVLSEG2E32FF_V_M2 [[COPY2]], [[COPY1]], 5 /* e32 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl - 
; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: PseudoRET -entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4i32( undef, undef, i32* %base, i32 %vl) - %1 = extractvalue {,, i32} %0, 2 - store volatile i32 %1, i32* %outvl - ret void -} - -define void @test_vlseg2ff_mask_nxv4i32( %val, i32* %base, i32 %vl, %mask, i32* %outvl) { - ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv4i32 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m2, $x10, $x11, $v0, $x12 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm2 = COPY $v8m2 - ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m2nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm2_0, [[COPY4]], %subreg.sub_vrm2_1 - ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M2_MASK:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E32FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 17 /* e32, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: SW [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M2_MASK1:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E32FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: SW [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M2_MASK2:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E32FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 145 /* e32, m2, tu, ma */, implicit $vl - ; CHECK-NEXT: SW [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M2_MASK3:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E32FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 209 /* e32, m2, ta, ma */, implicit $vl - ; CHECK-NEXT: SW [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) - ; CHECK-NEXT: PseudoRET -entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32( %val, %val, i32* %base, %mask, i32 %vl, i32 0) - %1 = extractvalue {,, i32} %0, 2 - store volatile i32 %1, i32* %outvl - %2 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32( %val, %val, i32* %base, %mask, i32 %vl, i32 1) - %3 = extractvalue {,, i32} %2, 2 - store volatile i32 %1, i32* %outvl - %4 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32( %val, %val, i32* %base, %mask, i32 %vl, i32 2) - %5 = extractvalue {,, i32} %4, 2 - store volatile i32 %1, i32* %outvl - %6 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32( %val, %val, i32* %base, %mask, i32 %vl, i32 3) - %7 = extractvalue {,, i32} %6, 2 - store volatile i32 %1, i32* %outvl - ret void -} - -define void @test_vlseg2ff_nxv1i64(i64* %base, i32 %vl, i32* %outvl) { - ; CHECK-LABEL: name: test_vlseg2ff_nxv1i64 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11, $x12 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr 
= COPY $x12
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
- ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E64FF_V_M1 [[COPY2]], [[COPY1]], 6 /* e64 */, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl
- ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
- ; CHECK-NEXT: PseudoRET
-entry:
- %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1i64( undef, undef, i64* %base, i32 %vl)
- %1 = extractvalue {,, i32} %0, 2
- store volatile i32 %1, i32* %outvl
- ret void
-}
-
-define void @test_vlseg2ff_mask_nxv1i64( %val, i64* %base, i32 %vl, %mask, i32* %outvl) {
- ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv1i64
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: liveins: $v8, $x10, $x11, $v0, $x12
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
- ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10
- ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vr = COPY $v8
- ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm1_0, [[COPY4]], %subreg.sub_vrm1_1
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E64FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 0, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 24 /* e64, m1, tu, mu */, implicit $vl
- ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M1_MASK1:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E64FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 1, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl
- ; CHECK-NEXT: SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M1_MASK2:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E64FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 2, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 152 /* e64, m1, tu, ma */, implicit $vl
- ; CHECK-NEXT: SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M1_MASK3:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E64FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 3, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 216 /* e64, m1, ta, ma */, implicit $vl
- ; CHECK-NEXT: SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
- ; CHECK-NEXT: PseudoRET
-entry:
- %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i64( %val, %val, i64* %base, %mask, i32 %vl, i32 0)
- %1 = extractvalue {,, i32} %0, 2
- store volatile i32 %1, i32* %outvl
- %2 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i64( %val, %val, i64* %base, %mask, i32 %vl, i32 1)
- %3 = extractvalue {,, i32} %2, 2
- store volatile i32 %3, i32* %outvl
- %4 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i64( %val, %val, i64* %base, %mask, i32 %vl, i32 2)
- %5 = extractvalue {,, i32} %4, 2
- store volatile i32 %5, i32* %outvl
- %6 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i64( %val, %val, i64* %base, %mask, i32 %vl, i32 3)
- %7 = extractvalue {,, i32} %6, 2
- store volatile i32 %7, i32* %outvl
- ret void
-}
-
-define void @test_vlseg2ff_nxv2i64(i64* %base, i32 %vl, i32* %outvl) {
- ; CHECK-LABEL: name: test_vlseg2ff_nxv2i64
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: liveins: $x10, $x11, $x12
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
- ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M2_:%[0-9]+]]:vrn2m2 = PseudoVLSEG2E64FF_V_M2 [[COPY2]], [[COPY1]], 6 /* e64 */, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl
- ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
- ; CHECK-NEXT: PseudoRET
-entry:
- %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2i64( undef, undef, i64* %base, i32 %vl)
- %1 = extractvalue {,, i32} %0, 2
- store volatile i32 %1, i32* %outvl
- ret void
-}
-
-define void @test_vlseg2ff_mask_nxv2i64( %val, i64* %base, i32 %vl, %mask, i32* %outvl) {
- ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv2i64
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: liveins: $v8m2, $x10, $x11, $v0, $x12
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
- ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10
- ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm2 = COPY $v8m2
- ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m2nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm2_0, [[COPY4]], %subreg.sub_vrm2_1
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M2_MASK:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E64FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 0, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 25 /* e64, m2, tu, mu */, implicit $vl
- ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M2_MASK1:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E64FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 1, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl
- ; CHECK-NEXT: SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M2_MASK2:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E64FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 2, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 153 /* e64, m2, tu, ma */, implicit $vl
- ; CHECK-NEXT: SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M2_MASK3:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E64FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 3, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 217 /* e64, m2, ta, ma */, implicit $vl
- ; CHECK-NEXT: SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
- ; CHECK-NEXT: PseudoRET
-entry:
- %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i64( %val, %val, i64* %base, %mask, i32 %vl, i32 0)
- %1 = extractvalue {,, i32} %0, 2
- store volatile i32 %1, i32* %outvl
- %2 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i64( %val, %val, i64* %base, %mask, i32 %vl, i32 1)
- %3 = extractvalue {,, i32} %2, 2
- store volatile i32 %3, i32* %outvl
- %4 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i64( %val, %val, i64* %base, %mask, i32 %vl, i32 2)
- %5 = extractvalue {,, i32} %4, 2
- store volatile i32 %5, i32* %outvl
- %6 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i64( %val, %val, i64* %base, %mask, i32 %vl, i32 3)
- %7 = extractvalue {,, i32} %6, 2
- store volatile i32 %7, i32* %outvl
- ret void
-}
-
-define void @test_vlseg2ff_nxv4i64(i64* %base, i32 %vl, i32* %outvl) {
- ; CHECK-LABEL: name: test_vlseg2ff_nxv4i64
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: liveins: $x10, $x11, $x12
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
- ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M4_:%[0-9]+]]:vrn2m4 = PseudoVLSEG2E64FF_V_M4 [[COPY2]], [[COPY1]], 6 /* e64 */, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl
- ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
- ; CHECK-NEXT: PseudoRET
-entry:
- %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4i64( undef, undef, i64* %base, i32 %vl)
- %1 = extractvalue {,, i32} %0, 2
- store volatile i32 %1, i32* %outvl
- ret void
-}
-
-define void @test_vlseg2ff_mask_nxv4i64( %val, i64* %base, i32 %vl, %mask, i32* %outvl) {
- ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv4i64
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: liveins: $v8m4, $x10, $x11, $v0, $x12
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
- ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10
- ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm4 = COPY $v8m4
- ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m4nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm4_0, [[COPY4]], %subreg.sub_vrm4_1
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M4_MASK:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E64FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 0, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 26 /* e64, m4, tu, mu */, implicit $vl
- ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M4_MASK1:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E64FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 1, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl
- ; CHECK-NEXT: SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M4_MASK2:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E64FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 2, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 154 /* e64, m4, tu, ma */, implicit $vl
- ; CHECK-NEXT: SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M4_MASK3:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E64FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 3, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 218 /* e64, m4, ta, ma */, implicit $vl
- ; CHECK-NEXT: SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
- ; CHECK-NEXT: PseudoRET
-entry:
- %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i64( %val, %val, i64* %base, %mask, i32 %vl, i32 0)
- %1 = extractvalue {,, i32} %0, 2
- store volatile i32 %1, i32* %outvl
- %2 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i64( %val, %val, i64* %base, %mask, i32 %vl, i32 1)
- %3 = extractvalue {,, i32} %2, 2
- store volatile i32 %3, i32* %outvl
- %4 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i64( %val, %val, i64* %base, %mask, i32 %vl, i32 2)
- %5 = extractvalue {,, i32} %4, 2
- store volatile i32 %5, i32* %outvl
- %6 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i64( %val, %val, i64* %base, %mask, i32 %vl, i32 3)
- %7 = extractvalue {,, i32} %6, 2
- store volatile i32 %7, i32* %outvl
- ret void
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv64-readvl.ll b/llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv64-readvl.ll
deleted file mode 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv64-readvl.ll
+++ /dev/null
@@ -1,732 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -stop-after=finalize-isel < %s \
-; RUN: -target-abi=lp64d | FileCheck %s
-declare {,, i64} @llvm.riscv.vlseg2ff.nxv16i16(,, i16* , i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(,, i16*, , i64, i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i32(,, i32* , i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32(,, i32*, , i64, i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.nxv16i8(,, i8* , i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8(,, i8*, , i64, i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i64(,, i64* , i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64(,, i64*, , i64, i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i32(,, i32* , i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i32(,, i32*, , i64, i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.nxv8i16(,, i16* , i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16(,, i16*, , i64, i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i8(,, i8* , i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i8(,, i8*, , i64, i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i16(,, i16* , i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i16(,, i16*, , i64, i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i32(,, i32* , i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32(,, i32*, , i64, i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.nxv8i8(,, i8* , i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(,, i8*, , i64, i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i64(,, i64* , i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64(,, i64*, , i64, i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i16(,, i16* , i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16(,, i16*, , i64, i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i8(,, i8* , i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i8(,, i8*, , i64, i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i8(,, i8* , i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i8(,, i8*, , i64, i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.nxv8i32(,, i32* , i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i32(,, i32*, , i64, i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.nxv32i8(,, i8* , i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8(,, i8*, , i64, i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i16(,, i16* , i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i16(,, i16*, , i64, i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i64(,, i64* , i64)
-declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64(,, i64*, , i64, i64)
-
-define void @test_vlseg2ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) {
- ; CHECK-LABEL: name: test_vlseg2ff_nxv8i8
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: liveins: $x10, $x11, $x12
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
- ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E8FF_V_M1 [[COPY2]], [[COPY1]], 3 /* e8 */, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: PseudoRET
-entry:
- %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8i8( undef, undef, i8* %base, i64 %vl)
- %1 = extractvalue {,, i64} %0, 2
- store volatile i64 %1, i64* %outvl
- ret void
-}
-
-define void @test_vlseg2ff_mask_nxv8i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) {
- ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv8i8
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: liveins: $v8, $x10, $x11, $v0, $x12
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
- ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10
- ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vr = COPY $v8
- ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm1_0, [[COPY4]], %subreg.sub_vrm1_1
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 0, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 0 /* e8, m1, tu, mu */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_MASK1:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 1, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_MASK2:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 2, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 128 /* e8, m1, tu, ma */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_MASK3:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 3, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 192 /* e8, m1, ta, ma */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: PseudoRET
-entry:
- %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8( %val, %val, i8* %base, %mask, i64 %vl, i64 0)
- %1 = extractvalue {,, i64} %0, 2
- store volatile i64 %1, i64* %outvl
- %2 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8( %val, %val, i8* %base, %mask, i64 %vl, i64 1)
- %3 = extractvalue {,, i64} %2, 2
- store volatile i64 %3, i64* %outvl
- %4 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8( %val, %val, i8* %base, %mask, i64 %vl, i64 2)
- %5 = extractvalue {,, i64} %4, 2
- store volatile i64 %5, i64* %outvl
- %6 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8( %val, %val, i8* %base, %mask, i64 %vl, i64 3)
- %7 = extractvalue {,, i64} %6, 2
- store volatile i64 %7, i64* %outvl
- ret void
-}
-
-define void @test_vlseg2ff_nxv16i8(i8* %base, i64 %vl, i64* %outvl) {
- ; CHECK-LABEL: name: test_vlseg2ff_nxv16i8
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: liveins: $x10, $x11, $x12
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
- ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M2_:%[0-9]+]]:vrn2m2 = PseudoVLSEG2E8FF_V_M2 [[COPY2]], [[COPY1]], 3 /* e8 */, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: PseudoRET
-entry:
- %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16i8( undef, undef, i8* %base, i64 %vl)
- %1 = extractvalue {,, i64} %0, 2
- store volatile i64 %1, i64* %outvl
- ret void
-}
-
-define void @test_vlseg2ff_mask_nxv16i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) {
- ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv16i8
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: liveins: $v8m2, $x10, $x11, $v0, $x12
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
- ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10
- ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm2 = COPY $v8m2
- ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m2nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm2_0, [[COPY4]], %subreg.sub_vrm2_1
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M2_MASK:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E8FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 0, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 1 /* e8, m2, tu, mu */, implicit $vl
- ; CHECK-NEXT: SD [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M2_MASK1:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E8FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 1, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl
- ; CHECK-NEXT: SD [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M2_MASK2:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E8FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 2, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 129 /* e8, m2, tu, ma */, implicit $vl
- ; CHECK-NEXT: SD [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M2_MASK3:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E8FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 3, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 193 /* e8, m2, ta, ma */, implicit $vl
- ; CHECK-NEXT: SD [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: PseudoRET
-entry:
- %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8( %val, %val, i8* %base, %mask, i64 %vl, i64 0)
- %1 = extractvalue {,, i64} %0, 2
- store volatile i64 %1, i64* %outvl
- %2 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8( %val, %val, i8* %base, %mask, i64 %vl, i64 1)
- %3 = extractvalue {,, i64} %0, 2
- store volatile i64 %3, i64* %outvl
- %4 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8( %val, %val, i8* %base, %mask, i64 %vl, i64 2)
- %5 = extractvalue {,, i64} %0, 2
- store volatile i64 %5, i64* %outvl
- %6 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8( %val, %val, i8* %base, %mask, i64 %vl, i64 3)
- %7 = extractvalue {,, i64} %0, 2
- store volatile i64 %7, i64* %outvl
- ret void
-}
-
-define void @test_vlseg2ff_nxv32i8(i8* %base, i64 %vl, i64* %outvl) {
- ; CHECK-LABEL: name: test_vlseg2ff_nxv32i8
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: liveins: $x10, $x11, $x12
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
- ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M4_:%[0-9]+]]:vrn2m4 = PseudoVLSEG2E8FF_V_M4 [[COPY2]], [[COPY1]], 3 /* e8 */, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: PseudoRET
-entry:
- %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv32i8( undef, undef, i8* %base, i64 %vl)
- %1 = extractvalue {,, i64} %0, 2
- store volatile i64 %1, i64* %outvl
- ret void
-}
-
-define void @test_vlseg2ff_mask_nxv32i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) {
- ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv32i8
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: liveins: $v8m4, $x10, $x11, $v0, $x12
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
- ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10
- ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm4 = COPY $v8m4
- ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m4nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm4_0, [[COPY4]], %subreg.sub_vrm4_1
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M4_MASK:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E8FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 0, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 2 /* e8, m4, tu, mu */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M4_MASK1:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E8FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 1, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M4_MASK2:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E8FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 2, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 130 /* e8, m4, tu, ma */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M4_MASK3:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E8FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 3, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 194 /* e8, m4, ta, ma */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: PseudoRET
-entry:
- %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8( %val, %val, i8* %base, %mask, i64 %vl, i64 0)
- %1 = extractvalue {,, i64} %0, 2
- store volatile i64 %1, i64* %outvl
- %2 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8( %val, %val, i8* %base, %mask, i64 %vl, i64 1)
- %3 = extractvalue {,, i64} %2, 2
- store volatile i64 %3, i64* %outvl
- %4 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8( %val, %val, i8* %base, %mask, i64 %vl, i64 2)
- %5 = extractvalue {,, i64} %4, 2
- store volatile i64 %5, i64* %outvl
- %6 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8( %val, %val, i8* %base, %mask, i64 %vl, i64 3)
- %7 = extractvalue {,, i64} %6, 2
- store volatile i64 %7, i64* %outvl
- ret void
-}
-
-define void @test_vlseg2ff_nxv4i16(i16* %base, i64 %vl, i64* %outvl) {
- ; CHECK-LABEL: name: test_vlseg2ff_nxv4i16
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: liveins: $x10, $x11, $x12
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
- ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E16FF_V_M1 [[COPY2]], [[COPY1]], 4 /* e16 */, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: PseudoRET
-entry:
- %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4i16( undef, undef, i16* %base, i64 %vl)
- %1 = extractvalue {,, i64} %0, 2
- store volatile i64 %1, i64* %outvl
- ret void
-}
-
-define void @test_vlseg2ff_mask_nxv4i16( %val, i16* %base, i64 %vl, %mask, i64* %outvl) {
- ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv4i16
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: liveins: $v8, $x10, $x11, $v0, $x12
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
- ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10
- ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vr = COPY $v8
- ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm1_0, [[COPY4]], %subreg.sub_vrm1_1
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E16FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 0, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 8 /* e16, m1, tu, mu */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M1_MASK1:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E16FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 1, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M1_MASK2:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E16FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 2, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 136 /* e16, m1, tu, ma */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M1_MASK3:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E16FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 3, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 200 /* e16, m1, ta, ma */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: PseudoRET
-entry:
- %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16( %val, %val, i16* %base, %mask, i64 %vl, i64 0)
- %1 = extractvalue {,, i64} %0, 2
- store volatile i64 %1, i64* %outvl
- %2 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16( %val, %val, i16* %base, %mask, i64 %vl, i64 1)
- %3 = extractvalue {,, i64} %2, 2
- store volatile i64 %3, i64* %outvl
- %4 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16( %val, %val, i16* %base, %mask, i64 %vl, i64 2)
- %5 = extractvalue {,, i64} %4, 2
- store volatile i64 %5, i64* %outvl
- %6 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16( %val, %val, i16* %base, %mask, i64 %vl, i64 3)
- %7 = extractvalue {,, i64} %6, 2
- store volatile i64 %7, i64* %outvl
- ret void
-}
-
-define void @test_vlseg2ff_nxv8i16(i16* %base, i64 %vl, i64* %outvl) {
- ; CHECK-LABEL: name: test_vlseg2ff_nxv8i16
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: liveins: $x10, $x11, $x12
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
- ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M2_:%[0-9]+]]:vrn2m2 = PseudoVLSEG2E16FF_V_M2 [[COPY2]], [[COPY1]], 4 /* e16 */, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: PseudoRET
-entry:
- %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8i16( undef, undef, i16* %base, i64 %vl)
- %1 = extractvalue {,, i64} %0, 2
- store volatile i64 %1, i64* %outvl
- ret void
-}
-
-define void @test_vlseg2ff_mask_nxv8i16( %val, i16* %base, i64 %vl, %mask, i64* %outvl) {
- ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv8i16
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: liveins: $v8m2, $x10, $x11, $v0, $x12
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
- ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10
- ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm2 = COPY $v8m2
- ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m2nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm2_0, [[COPY4]], %subreg.sub_vrm2_1
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M2_MASK:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E16FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 0, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 9 /* e16, m2, tu, mu */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M2_MASK1:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E16FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 1, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M2_MASK2:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E16FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 2, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 137 /* e16, m2, tu, ma */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M2_MASK3:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E16FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 3, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 201 /* e16, m2, ta, ma */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: PseudoRET
-entry:
- %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16( %val, %val, i16* %base, %mask, i64 %vl, i64 0)
- %1 = extractvalue {,, i64} %0, 2
- store volatile i64 %1, i64* %outvl
- %2 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16( %val, %val, i16* %base, %mask, i64 %vl, i64 1)
- %3 = extractvalue {,, i64} %2, 2
- store volatile i64 %3, i64* %outvl
- %4 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16( %val, %val, i16* %base, %mask, i64 %vl, i64 2)
- %5 = extractvalue {,, i64} %4, 2
- store volatile i64 %5, i64* %outvl
- %6 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16( %val, %val, i16* %base, %mask, i64 %vl, i64 3)
- %7 = extractvalue {,, i64} %6, 2
- store volatile i64 %7, i64* %outvl
- ret void
-}
-
-define void @test_vlseg2ff_nxv16i16(i16* %base, i64 %vl, i64* %outvl) {
- ; CHECK-LABEL: name: test_vlseg2ff_nxv16i16
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: liveins: $x10, $x11, $x12
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
- ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M4_:%[0-9]+]]:vrn2m4 = PseudoVLSEG2E16FF_V_M4 [[COPY2]], [[COPY1]], 4 /* e16 */, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: PseudoRET
-entry:
- %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16i16( undef, undef, i16* %base, i64 %vl)
- %1 = extractvalue {,, i64} %0, 2
- store volatile i64 %1, i64* %outvl
- ret void
-}
-
-define void @test_vlseg2ff_mask_nxv16i16( %val, i16* %base, i64 %vl, %mask, i64* %outvl) {
- ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv16i16
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: liveins: $v8m4, $x10, $x11, $v0, $x12
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
- ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10
- ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm4 = COPY $v8m4
- ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m4nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm4_0, [[COPY4]], %subreg.sub_vrm4_1
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M4_MASK:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E16FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 0, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 10 /* e16, m4, tu, mu */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M4_MASK1:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E16FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 1, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M4_MASK2:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E16FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 2, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 138 /* e16, m4, tu, ma */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M4_MASK3:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E16FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 3, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 202 /* e16, m4, ta, ma */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: PseudoRET
-entry:
- %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i64 %vl, i64 0)
- %1 = extractvalue {,, i64} %0, 2
- store volatile i64 %1, i64* %outvl
- %2 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i64 %vl, i64 1)
- %3 = extractvalue {,, i64} %2, 2
- store volatile i64 %3, i64* %outvl
- %4 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i64 %vl, i64 2)
- %5 = extractvalue {,, i64} %4, 2
- store volatile i64 %5, i64* %outvl
- %6 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i64 %vl, i64 3)
- %7 = extractvalue {,, i64} %6, 2
- store volatile i64 %7, i64* %outvl
- ret void
-}
-
-define void @test_vlseg2ff_nxv2i32(i32* %base, i64 %vl, i64* %outvl) {
- ; CHECK-LABEL: name: test_vlseg2ff_nxv2i32
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: liveins: $x10, $x11, $x12
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
- ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E32FF_V_M1 [[COPY2]], [[COPY1]], 5 /* e32 */, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: PseudoRET
-entry:
- %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2i32( undef, undef, i32* %base, i64 %vl)
- %1 = extractvalue {,, i64} %0, 2
- store volatile i64 %1, i64* %outvl
- ret void
-}
-
-define void @test_vlseg2ff_mask_nxv2i32( %val, i32* %base, i64 %vl, %mask, i64* %outvl) {
- ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv2i32
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: liveins: $v8, $x10, $x11, $v0, $x12
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
- ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10
- ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vr = COPY $v8
- ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm1_0, [[COPY4]], %subreg.sub_vrm1_1
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E32FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 0, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 16 /* e32, m1, tu, mu */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M1_MASK1:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E32FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 1, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M1_MASK2:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E32FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 2, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 144 /* e32, m1, tu, ma */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M1_MASK3:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E32FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 3, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 208 /* e32, m1, ta, ma */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: PseudoRET
-entry:
- %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32( %val, %val, i32* %base, %mask, i64 %vl, i64 0)
- %1 = extractvalue {,, i64} %0, 2
- store volatile i64 %1, i64* %outvl
- %2 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32( %val, %val, i32* %base, %mask, i64 %vl, i64 1)
- %3 = extractvalue {,, i64} %2, 2
- store volatile i64 %3, i64* %outvl
- %4 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32( %val, %val, i32* %base, %mask, i64 %vl, i64 2)
- %5 = extractvalue {,, i64} %4, 2
- store volatile i64 %5, i64* %outvl
- %6 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32( %val, %val, i32* %base, %mask, i64 %vl, i64 3)
- %7 = extractvalue {,, i64} %6, 2
- store volatile i64 %7, i64* %outvl
- ret void
-}
-
-define void @test_vlseg2ff_nxv4i32(i32* %base, i64 %vl, i64* %outvl) {
- ; CHECK-LABEL: name: test_vlseg2ff_nxv4i32
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: liveins: $x10, $x11, $x12
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
- ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M2_:%[0-9]+]]:vrn2m2 = PseudoVLSEG2E32FF_V_M2 [[COPY2]], [[COPY1]], 5 /* e32 */, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: PseudoRET
-entry:
- %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4i32( undef, undef, i32* %base, i64 %vl)
- %1 = extractvalue {,, i64} %0, 2
- store volatile i64 %1, i64* %outvl
- ret void
-}
-
-define void @test_vlseg2ff_mask_nxv4i32( %val, i32* %base, i64 %vl, %mask, i64* %outvl) {
- ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv4i32
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: liveins: $v8m2, $x10, $x11, $v0, $x12
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
- ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10
- ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm2 = COPY $v8m2
- ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m2nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm2_0, [[COPY4]], %subreg.sub_vrm2_1
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M2_MASK:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E32FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 0, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 17 /* e32, m2, tu, mu */, implicit $vl
- ; CHECK-NEXT: SD [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M2_MASK1:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E32FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 1, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl
- ; CHECK-NEXT: SD [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M2_MASK2:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E32FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 2, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 145 /* e32, m2, tu, ma */, implicit $vl
- ; CHECK-NEXT: SD [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M2_MASK3:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E32FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 3, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 209 /* e32, m2, ta, ma */, implicit $vl
- ; CHECK-NEXT: SD [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: PseudoRET
-entry:
- %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32( %val, %val, i32* %base, %mask, i64 %vl, i64 0)
- %1 = extractvalue {,, i64} %0, 2
- store volatile i64 %1, i64* %outvl
- %2 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32( %val, %val, i32* %base, %mask, i64 %vl, i64 1)
- %3 = extractvalue {,, i64} %2, 2
- store volatile i64 %1, i64* %outvl
- %4 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32( %val, %val, i32* %base, %mask, i64 %vl, i64 2)
- %5 = extractvalue {,, i64} %4, 2
- store volatile i64 %1, i64* %outvl
- %6 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32( %val, %val, i32* %base, %mask, i64 %vl, i64 3)
- %7 = extractvalue {,, i64} %6, 2
- store volatile i64 %1, i64* %outvl
- ret void
-}
-
-define void @test_vlseg2ff_nxv1i64(i64* %base, i64 %vl, i64* %outvl) {
- ; CHECK-LABEL: name: test_vlseg2ff_nxv1i64
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: liveins: $x10, $x11, $x12
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
- ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E64FF_V_M1 [[COPY2]], [[COPY1]], 6 /* e64 */, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: PseudoRET
-entry:
- %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1i64( undef, undef, i64* %base, i64 %vl)
- %1 = extractvalue {,, i64} %0, 2
- store volatile i64 %1, i64* %outvl
- ret void
-}
-
-define void @test_vlseg2ff_mask_nxv1i64( %val, i64* %base, i64 %vl, %mask, i64* %outvl) {
- ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv1i64
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: liveins: $v8, $x10, $x11, $v0, $x12
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
- ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10
- ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vr = COPY $v8
- ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm1_0, [[COPY4]], %subreg.sub_vrm1_1
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E64FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 0, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 24 /* e64, m1, tu, mu */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M1_MASK1:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E64FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 1, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M1_MASK2:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E64FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 2, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 152 /* e64, m1, tu, ma */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M1_MASK3:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E64FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 3, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 216 /* e64, m1, ta, ma */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: PseudoRET
-entry:
- %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64( %val, %val, i64* %base, %mask, i64 %vl, i64 0)
- %1 = extractvalue {,, i64} %0, 2
- store volatile i64 %1, i64* %outvl
- %2 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64( %val, %val, i64* %base, %mask, i64 %vl, i64 1)
- %3 = extractvalue {,, i64} %2, 2
- store volatile i64 %3, i64* %outvl
- %4 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64( %val, %val, i64* %base, %mask, i64 %vl, i64 2)
- %5 = extractvalue {,, i64} %4, 2
- store volatile i64 %5, i64* %outvl
- %6 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64( %val, %val, i64* %base, %mask, i64 %vl, i64 3)
- %7 = extractvalue {,, i64} %6, 2
- store volatile i64 %7, i64* %outvl
- ret void
-}
-
-define void @test_vlseg2ff_nxv2i64(i64* %base, i64 %vl, i64* %outvl) {
- ; CHECK-LABEL: name: test_vlseg2ff_nxv2i64
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: liveins: $x10, $x11, $x12
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
- ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M2_:%[0-9]+]]:vrn2m2 = PseudoVLSEG2E64FF_V_M2 [[COPY2]], [[COPY1]], 6 /* e64 */, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: PseudoRET
-entry:
- %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2i64( undef, undef, i64* %base, i64 %vl)
- %1 = extractvalue {,, i64} %0, 2
- store volatile i64 %1, i64* %outvl
- ret void
-}
-
-define void @test_vlseg2ff_mask_nxv2i64( %val, i64* %base, i64 %vl, %mask, i64* %outvl) {
- ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv2i64
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: liveins: $v8m2, $x10, $x11, $v0, $x12
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
- ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10
- ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm2 = COPY $v8m2
- ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m2nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm2_0, [[COPY4]], %subreg.sub_vrm2_1
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M2_MASK:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E64FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 0, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 25 /* e64, m2, tu, mu */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M2_MASK1:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E64FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 1, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M2_MASK2:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E64FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 2, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 153 /* e64, m2, tu, ma */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M2_MASK3:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E64FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 3, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 217 /* e64, m2, ta, ma */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: PseudoRET
-entry:
- %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64( %val, %val, i64* %base, %mask, i64 %vl, i64 0)
- %1 = extractvalue {,, i64} %0, 2
- store volatile i64 %1, i64* %outvl
- %2 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64( %val, %val, i64* %base, %mask, i64 %vl, i64 1)
- %3 = extractvalue {,, i64} %2, 2
- store volatile i64 %3, i64* %outvl
- %4 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64( %val, %val, i64* %base, %mask, i64 %vl, i64 2)
- %5 = extractvalue {,, i64} %4, 2
- store volatile i64 %5, i64* %outvl
- %6 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64( %val, %val, i64* %base, %mask, i64 %vl, i64 3)
- %7 = extractvalue {,, i64} %6, 2
- store volatile i64 %7, i64* %outvl
- ret void
-}
-
-define void @test_vlseg2ff_nxv4i64(i64* %base, i64 %vl, i64* %outvl) {
- ; CHECK-LABEL: name: test_vlseg2ff_nxv4i64
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: liveins: $x10, $x11, $x12
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
- ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M4_:%[0-9]+]]:vrn2m4 = PseudoVLSEG2E64FF_V_M4 [[COPY2]], [[COPY1]], 6 /* e64 */, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: PseudoRET
-entry:
- %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4i64( undef, undef, i64* %base, i64 %vl)
- %1 = extractvalue {,, i64} %0, 2
- store volatile i64 %1, i64* %outvl
- ret void
-}
-
-define void @test_vlseg2ff_mask_nxv4i64( %val, i64* %base, i64 %vl, %mask, i64* %outvl) {
- ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv4i64
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: liveins: $v8m4, $x10, $x11, $v0, $x12
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
- ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10
- ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm4 = COPY $v8m4
- ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m4nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm4_0, [[COPY4]], %subreg.sub_vrm4_1
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M4_MASK:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E64FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 0, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 26 /* e64, m4, tu, mu */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M4_MASK1:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E64FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 1, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M4_MASK2:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E64FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 2, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 154 /* e64, m4, tu, ma */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M4_MASK3:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E64FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 3, implicit-def $vl
- ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 218 /* e64, m4, ta, ma */, implicit $vl
- ; CHECK-NEXT: SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
- ; CHECK-NEXT: PseudoRET
-entry:
- %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64( %val, %val, i64* %base, %mask, i64 %vl, i64 0)
- %1 = extractvalue {,, i64} %0, 2
- store volatile i64 %1, i64* %outvl
- %2 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64( %val, %val, i64* %base, %mask, i64 %vl, i64 1)
- %3 = extractvalue {,, i64} %2, 2
- store volatile i64 %3, i64* %outvl
- %4 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64( %val, %val, i64* %base, %mask, i64 %vl, i64 2)
- %5 = extractvalue {,, i64} %4, 2
- store volatile i64 %5, i64* %outvl
- %6 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64( %val, %val, i64* %base, %mask, i64 %vl, i64 3)
- %7 = extractvalue {,, i64} %6, 2
- store volatile i64 %7, i64* %outvl
- ret void
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
--- a/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
@@ -82,11 +82,11 @@
 ; CHECK-NEXT: {{ $}}
 ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype
 ; CHECK-NEXT: $v28m4 = PseudoVMV_V_I_M4 0, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype
- ; CHECK-NEXT: $v4m4 = PseudoVLE32FF_V_M4 $x16, $noreg, 5 /* e32 */, implicit-def $vl
+ ; CHECK-NEXT: $v4m4, $x0 = PseudoVLE32FF_V_M4 $x16, $noreg, 5 /* e32 */, implicit-def $vl
 ; CHECK-NEXT: $v12m4 = PseudoVMV4R_V $v28m4
 $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype
 $v28m4 = PseudoVMV_V_I_M4 0, $noreg, 5, implicit $vl, implicit $vtype
- $v4m4 = PseudoVLE32FF_V_M4 $x16, $noreg, 5, implicit-def $vl
+ $v4m4,$x0 = PseudoVLE32FF_V_M4 $x16, $noreg, 5, implicit-def $vl
 $v12m4 = COPY $v28m4
 ...
 ---
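
Note on the immediates the deleted tests were checking: the operand printed on each old-style PseudoReadVL (e.g. 88 /* e64, m1, ta, mu */ or 152 /* e64, m1, tu, ma */) is the RVV vtype encoding, with vlmul in bits [2:0], vsew in bits [5:3], vta in bit 6, and vma in bit 7, and the trailing 0-3 policy immediate on the masked FF pseudos supplies the vta/vma bits (bit 0 = tail agnostic, bit 1 = mask agnostic), which is why policies 0/1/2/3 pair with the tu,mu / ta,mu / tu,ma / ta,ma comments above. The following is a minimal standalone sketch of that encoding for cross-checking the values in the tests; encodeVType here is a hypothetical helper (not LLVM's RISCVVType::encodeVTYPE), though for the non-fractional LMULs used in these tests it computes the same numbers:

  #include <cassert>

  // vtype layout for LMUL >= 1: vlmul[2:0], vsew[5:3], vta[6], vma[7].
  static unsigned encodeVType(unsigned VSEW /* e8=0 .. e64=3 */,
                              unsigned VLMUL /* m1=0, m2=1, m4=2, m8=3 */,
                              bool TailAgnostic, bool MaskAgnostic) {
    return VLMUL | (VSEW << 3) | (TailAgnostic ? 1u << 6 : 0u) |
           (MaskAgnostic ? 1u << 7 : 0u);
  }

  int main() {
    assert(encodeVType(3, 0, true, false) == 88);   // e64, m1, ta, mu
    assert(encodeVType(3, 0, false, true) == 152);  // e64, m1, tu, ma
    assert(encodeVType(0, 1, true, false) == 65);   // e8,  m2, ta, mu
    assert(encodeVType(2, 0, false, false) == 16);  // e32, m1, tu, mu
    return 0;
  }

With this patch the vl result of a fault-only-first load is an explicit X0-or-GPR output of the pseudo itself (see the vmv-copy.mir hunk above, where the unused output is tied to $x0), so per-policy vtype immediates on PseudoReadVL no longer exist to test, which is why the two readvl test files are deleted rather than updated.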