diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
@@ -95,6 +95,10 @@
   // compiler has free to select either one.
   UsesMaskPolicyShift = IsRVVWideningReductionShift + 1,
   UsesMaskPolicyMask = 1 << UsesMaskPolicyShift,
+
+  // Does this instruction have a VL output. It will be the second output.
+  HasVLOutputShift = UsesMaskPolicyShift + 1,
+  HasVLOutputMask = 1 << HasVLOutputShift,
 };

 // Match with the definitions in RISCVInstrFormats.td
@@ -168,6 +172,11 @@
   return TSFlags & UsesMaskPolicyMask;
 }

+/// \returns true if there is a VL output for the instruction.
+static inline bool hasVLOutput(uint64_t TSFlags) {
+  return TSFlags & HasVLOutputMask;
+}
+
 static inline unsigned getVLOpNum(const MCInstrDesc &Desc) {
   const uint64_t TSFlags = Desc.TSFlags;
   // This method is only called if we expect to have a VL operand, and all
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -191,6 +191,23 @@
   uint16_t Pseudo;
 };

+struct VLEFFPseudo {
+  uint16_t Masked : 1;
+  uint16_t IsTU : 1;
+  uint16_t Log2SEW : 3;
+  uint16_t LMUL : 3;
+  uint16_t Pseudo;
+};
+
+struct VLSEGFFPseudo {
+  uint16_t NF : 4;
+  uint16_t Masked : 1;
+  uint16_t IsTU : 1;
+  uint16_t Log2SEW : 3;
+  uint16_t LMUL : 3;
+  uint16_t Pseudo;
+};
+
 struct RISCVMaskedPseudoInfo {
   uint16_t MaskedPseudo;
   uint16_t UnmaskedPseudo;
@@ -206,6 +223,8 @@
 #define GET_RISCVVSETable_DECL
 #define GET_RISCVVLXTable_DECL
 #define GET_RISCVVSXTable_DECL
+#define GET_RISCVVLEFFTable_DECL
+#define GET_RISCVVLSEGFFTable_DECL
 #define GET_RISCVMaskedPseudosTable_DECL
 #include "RISCVGenSearchableTables.inc"
 } // namespace RISCV
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -38,6 +38,8 @@
 #define GET_RISCVVLXTable_IMPL
 #define GET_RISCVVSXTable_IMPL
 #define GET_RISCVMaskedPseudosTable_IMPL
+#define GET_RISCVVLEFFTable_IMPL
+#define GET_RISCVVLSEGFFTable_IMPL
 #include "RISCVGenSearchableTables.inc"
 } // namespace RISCV
 } // namespace llvm
@@ -368,8 +370,7 @@
   unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
   MVT VT = Node->getSimpleValueType(0);
   MVT XLenVT = Subtarget->getXLenVT();
-  unsigned SEW = VT.getScalarSizeInBits();
-  unsigned Log2SEW = Log2_32(SEW);
+  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

   unsigned CurOp = 2;
@@ -388,23 +389,10 @@
                              /*IsStridedOrIndexed*/ false, Operands,
                              /*IsLoad=*/true);

-  const RISCV::VLSEGPseudo *P =
-      RISCV::getVLSEGPseudo(NF, IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
-                            Log2SEW, static_cast<unsigned>(LMUL));
+  const RISCV::VLSEGFFPseudo *P = RISCV::getVLSEGFFPseudo(
+      NF, IsMasked, IsTU, Log2SEW, static_cast<unsigned>(LMUL));
   MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped,
-                                               MVT::Other, MVT::Glue, Operands);
-  bool TailAgnostic = true;
-  bool MaskAgnostic = false;
-  if (IsMasked) {
-    uint64_t Policy = Node->getConstantOperandVal(Node->getNumOperands() - 1);
-    TailAgnostic = Policy & RISCVII::TAIL_AGNOSTIC;
-    MaskAgnostic = Policy & RISCVII::MASK_AGNOSTIC;
-  }
-  unsigned VType =
-      RISCVVType::encodeVTYPE(LMUL, SEW, TailAgnostic, MaskAgnostic);
-  SDValue VTypeOp = CurDAG->getTargetConstant(VType, DL, XLenVT);
-  SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
-                                          VTypeOp, /*Glue*/ SDValue(Load, 2));
+                                               XLenVT, MVT::Other, Operands);

   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
     CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
@@ -416,8 +404,8 @@
         CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
   }

-  ReplaceUses(SDValue(Node, NF), SDValue(ReadVL, 0));   // VL
-  ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 1)); // Chain
+  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));     // VL
+  ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 2)); // Chain
   CurDAG->RemoveDeadNode(Node);
 }

@@ -1368,8 +1356,7 @@
     bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;

     MVT VT = Node->getSimpleValueType(0);
-    unsigned SEW = VT.getScalarSizeInBits();
-    unsigned Log2SEW = Log2_32(SEW);
+    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

     unsigned CurOp = 2;
     // Masked intrinsic only have TU version pseduo instructions.
@@ -1386,34 +1373,14 @@
                                /*IsLoad=*/true);

     RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
-    const RISCV::VLEPseudo *P =
-        RISCV::getVLEPseudo(IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
-                            Log2SEW, static_cast<unsigned>(LMUL));
-    MachineSDNode *Load =
-        CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0),
-                               MVT::Other, MVT::Glue, Operands);
-    bool TailAgnostic = !IsTU;
-    bool MaskAgnostic = false;
-    if (IsMasked) {
-      uint64_t Policy =
-          Node->getConstantOperandVal(Node->getNumOperands() - 1);
-      TailAgnostic = Policy & RISCVII::TAIL_AGNOSTIC;
-      MaskAgnostic = Policy & RISCVII::MASK_AGNOSTIC;
-    }
-    unsigned VType =
-        RISCVVType::encodeVTYPE(LMUL, SEW, TailAgnostic, MaskAgnostic);
-    SDValue VTypeOp = CurDAG->getTargetConstant(VType, DL, XLenVT);
-    SDNode *ReadVL =
-        CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT, VTypeOp,
-                               /*Glue*/ SDValue(Load, 2));
-
+    const RISCV::VLEFFPseudo *P = RISCV::getVLEFFPseudo(
+        IsMasked, IsTU, Log2SEW, static_cast<unsigned>(LMUL));
+    MachineSDNode *Load = CurDAG->getMachineNode(
+        P->Pseudo, DL, Node->getValueType(0), XLenVT, MVT::Other, Operands);
     if (auto *MemOp = dyn_cast<MemSDNode>(Node))
       CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

-    ReplaceUses(SDValue(Node, 0), SDValue(Load, 0));
-    ReplaceUses(SDValue(Node, 1), SDValue(ReadVL, 0)); // VL
-    ReplaceUses(SDValue(Node, 2), SDValue(Load, 1));   // Chain
-    CurDAG->RemoveDeadNode(Node);
+    ReplaceNode(Node, Load);
     return;
   }
   }
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -471,6 +471,7 @@
   void doLocalPrepass(MachineBasicBlock &MBB);
   void doLocalPostpass(MachineBasicBlock &MBB);
   void doPRE(MachineBasicBlock &MBB);
+  void expandLoadFF(MachineBasicBlock &MBB);
 };

 } // end anonymous namespace
@@ -1424,6 +1425,56 @@
   MI->eraseFromParent();
 }

+void RISCVInsertVSETVLI::expandLoadFF(MachineBasicBlock &MBB) {
+  for (auto I = MBB.begin(), E = MBB.end(); I != E;) {
+    MachineInstr &MI = *I++;
+    uint64_t TSFlags = MI.getDesc().TSFlags;
+    if (RISCVII::hasVLOutput(TSFlags)) {
+      const auto *PseudoInfo =
+          RISCVVPseudosTable::getPseudoInfo(MI.getOpcode());
+      unsigned NewOpc = PseudoInfo->BaseInstr;
+      const DebugLoc &DL = MI.getDebugLoc();
+      MachineInstrBuilder MIB =
+          BuildMI(MBB, I, DL, TII->get(NewOpc), MI.getOperand(0).getReg());
+      unsigned Cur = 2;
+
+      // Merge Operand
+      if (RISCVII::hasMergeOp(TSFlags))
+        MIB = MIB.addReg(MI.getOperand(Cur++).getReg());
+
+      // Address Operand
+      MIB = MIB.addReg(MI.getOperand(Cur++).getReg());
+
+      // Mask Operand
+      if (RISCVII::hasVecPolicyOp(TSFlags))
+        MIB = MIB.addReg(MI.getOperand(Cur++).getReg());
+
+      // AVL Operand
+      const MachineOperand &AVLOp = MI.getOperand(Cur++);
+      if (AVLOp.isReg())
+        MIB = MIB.addReg(AVLOp.getReg());
+      else
+        MIB = MIB.addImm(AVLOp.getImm());
+
+      // SEW Operand
+      MIB = MIB.addImm(MI.getOperand(Cur++).getImm());
+
+      // Policy Operand
+      if (RISCVII::hasVecPolicyOp(TSFlags))
+        MIB = MIB.addImm(MI.getOperand(Cur).getImm());
+
+      // Implicit use vl, vtype.
+      MIB.addReg(RISCV::VL, RegState::Implicit)
+          .addReg(RISCV::VTYPE, RegState::Implicit);
+
+      Register VLOutput = MI.getOperand(1).getReg();
+      if (!MRI->use_nodbg_empty(VLOutput))
+        BuildMI(MBB, I, DL, TII->get(RISCV::PseudoReadVL), VLOutput);
+      MI.eraseFromParent();
+    }
+  }
+}
+
 bool RISCVInsertVSETVLI::runOnMachineFunction(MachineFunction &MF) {
   // Skip if the vector extension is not enabled.
   const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
@@ -1514,6 +1565,11 @@
     }
   }

+  // Expand two-outputs version of VLEFF/VLSEGFF to single output of
+  // VLEFF/VLSEGFF and PseudoReadVL.
+  for (MachineBasicBlock &MBB : MF)
+    expandLoadFF(MBB);
+
   BlockInfo.clear();

   return HaveVectorOp;
 }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrFormats.td b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
--- a/llvm/lib/Target/RISCV/RISCVInstrFormats.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
@@ -200,6 +200,9 @@

   bit UsesMaskPolicy = 0;
   let TSFlags{18} = UsesMaskPolicy;
+
+  bit HasVLOutput = 0;
+  let TSFlags{19} = HasVLOutput;
 }

 // Pseudo instructions
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -1367,14 +1367,13 @@

   uint64_t TSFlags = MI.getDesc().TSFlags;

-  // Print the full VType operand of vsetvli/vsetivli and PseudoReadVL
-  // instructions, and the SEW operand of vector codegen pseudos.
-  if (((MI.getOpcode() == RISCV::VSETVLI || MI.getOpcode() == RISCV::VSETIVLI ||
-        MI.getOpcode() == RISCV::PseudoVSETVLI ||
-        MI.getOpcode() == RISCV::PseudoVSETIVLI ||
-        MI.getOpcode() == RISCV::PseudoVSETVLIX0) &&
-       OpIdx == 2) ||
-      (MI.getOpcode() == RISCV::PseudoReadVL && OpIdx == 1)) {
+  // Print the full VType operand of vsetvli/vsetivli instructions, and the SEW
+  // operand of vector codegen pseudos.
+  if ((MI.getOpcode() == RISCV::VSETVLI || MI.getOpcode() == RISCV::VSETIVLI ||
+       MI.getOpcode() == RISCV::PseudoVSETVLI ||
+       MI.getOpcode() == RISCV::PseudoVSETIVLI ||
+       MI.getOpcode() == RISCV::PseudoVSETVLIX0) &&
+      OpIdx == 2) {
     unsigned Imm = MI.getOperand(OpIdx).getImm();
     RISCVVType::printVType(Imm, OS);
   } else if (RISCVII::hasSEWOp(TSFlags)) {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -456,6 +456,22 @@
   let PrimaryKeyName = "getVLEPseudo";
 }

+class RISCVVLEFF<bit M, bit TU, bits<3> S, bits<3> L> {
+  bits<1> Masked = M;
+  bits<1> IsTU = TU;
+  bits<3> Log2SEW = S;
+  bits<3> LMUL = L;
+  Pseudo Pseudo = !cast<Pseudo>(NAME);
+}
+
+def RISCVVLEFFTable : GenericTable {
+  let FilterClass = "RISCVVLEFF";
+  let CppTypeName = "VLEFFPseudo";
+  let Fields = ["Masked", "IsTU", "Log2SEW", "LMUL", "Pseudo"];
+  let PrimaryKey = ["Masked", "IsTU", "Log2SEW", "LMUL"];
+  let PrimaryKeyName = "getVLEFFPseudo";
+}
+
 class RISCVVSE<bit M, bit Str, bits<3> S, bits<3> L> {
   bits<1> Masked = M;
   bits<1> Strided = Str;
@@ -576,6 +592,23 @@
   let PrimaryKeyName = "getVSXSEGPseudo";
 }

+class RISCVVLSEGFF<bits<4> N, bit M, bit TU, bits<3> S, bits<3> L> {
+  bits<4> NF = N;
+  bits<1> Masked = M;
+  bits<1> IsTU = TU;
+  bits<3> Log2SEW = S;
+  bits<3> LMUL = L;
+  Pseudo Pseudo = !cast<Pseudo>(NAME);
+}
+
+def RISCVVLSEGFFTable : GenericTable {
+  let FilterClass = "RISCVVLSEGFF";
+  let CppTypeName = "VLSEGFFPseudo";
+  let Fields = ["NF", "Masked", "IsTU", "Log2SEW", "LMUL", "Pseudo"];
+  let PrimaryKey = ["NF", "Masked", "IsTU", "Log2SEW", "LMUL"];
+  let PrimaryKeyName = "getVLSEGFFPseudo";
+}
+
 //===----------------------------------------------------------------------===//
 // Helpers to define the different pseudo instructions.
 //===----------------------------------------------------------------------===//
@@ -1556,6 +1589,109 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }

+class VPseudoVLEFFNoMask<VReg RetClass, int EEW, bit DummyMask = 1> :
+      Pseudo<(outs RetClass:$rd, GPR:$vl),
+             (ins GPR:$rs1, AVL:$avl, ixlenimm:$sew),[]>,
+      RISCVVPseudo,
+      RISCVVLEFF</*Masked*/0, /*IsTU*/0, log2<EEW>.val, VLMul> {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasDummyMask = DummyMask;
+  let HasVLOutput = 1;
+  let BaseInstr = !cast<Instruction>(!subst("_VL", "", NAME));
+}
+
+class VPseudoVLEFFNoMaskTU<VReg RetClass, int EEW> :
+      Pseudo<(outs RetClass:$rd, GPR:$vl),
+             (ins RetClass:$dest, GPR:$rs1, AVL:$avl, ixlenimm:$sew),[]>,
+      RISCVVPseudo,
+      RISCVVLEFF</*Masked*/0, /*IsTU*/1, log2<EEW>.val, VLMul> {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasDummyMask = 1;
+  let HasMergeOp = 1;
+  let HasVLOutput = 1;
+  let Constraints = "$rd = $dest";
+  let BaseInstr = !cast<Instruction>(!subst("_VL", "", NAME));
+}
+
+class VPseudoVLEFFMask<VReg RetClass, int EEW> :
+      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd, GPR:$vl),
+             (ins GetVRegNoV0<RetClass>.R:$merge,
+                  GPR:$rs1,
+                  VMaskOp:$vm, AVL:$avl, ixlenimm:$sew, ixlenimm:$policy),[]>,
+      RISCVVPseudo,
+      RISCVVLEFF</*Masked*/1, /*IsTU*/1, log2<EEW>.val, VLMul> {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let Constraints = "$rd = $merge";
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasMergeOp = 1;
+  let HasVecPolicyOp = 1;
+  let UsesMaskPolicy = 1;
+  let HasVLOutput = 1;
+  let BaseInstr = !cast<Instruction>(!subst("_VL", "", NAME));
+}
+
+class VPseudoVLSEGFFNoMask<VReg RetClass, int EEW, bits<4> NF>:
+      Pseudo<(outs RetClass:$rd, GPR:$vl),
+             (ins GPR:$rs1, AVL:$avl, ixlenimm:$sew),[]>,
+      RISCVVPseudo,
+      RISCVVLSEGFF<NF, /*Masked*/0, /*IsTU*/0, log2<EEW>.val, VLMul> {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasDummyMask = 1;
+  let HasVLOutput = 1;
+  let BaseInstr = !cast<Instruction>(!subst("_VL", "", NAME));
+}
+
+class VPseudoVLSEGFFNoMaskTU<VReg RetClass, int EEW, bits<4> NF>:
+      Pseudo<(outs RetClass:$rd, GPR:$vl),
+             (ins RetClass:$dest, GPR:$rs1, AVL:$avl, ixlenimm:$sew),[]>,
+      RISCVVPseudo,
+      RISCVVLSEGFF<NF, /*Masked*/0, /*IsTU*/1, log2<EEW>.val, VLMul> {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasDummyMask = 1;
+  let HasMergeOp = 1;
+  let HasVLOutput = 1;
+  let Constraints = "$rd = $dest";
+  let BaseInstr = !cast<Instruction>(!subst("_VL", "", NAME));
+}
+
+class VPseudoVLSEGFFMask<VReg RetClass, int EEW, bits<4> NF>:
+      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd, GPR:$vl),
+             (ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
+                  VMaskOp:$vm, AVL:$avl, ixlenimm:$sew, ixlenimm:$policy),[]>,
+      RISCVVPseudo,
+      RISCVVLSEGFF<NF, /*Masked*/1, /*IsTU*/1, log2<EEW>.val, VLMul> {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let Constraints = "$rd = $merge";
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasMergeOp = 1;
+  let HasVecPolicyOp = 1;
+  let UsesMaskPolicy = 1;
+  let HasVLOutput = 1;
+  let BaseInstr = !cast<Instruction>(!subst("_VL", "", NAME));
+}
+
 multiclass VPseudoUSLoad {
   foreach eew = EEWList in {
     foreach lmul = MxSet<eew>.m in {
@@ -1591,6 +1727,15 @@
         def "E" # eew # "FF_V_" # LInfo # "_MASK" :
           VPseudoUSLoadMask<vreg, eew>,
           VLFSched<eew>;
+        def "E" # eew # "FF_V_" # LInfo # "_VL":
+          VPseudoVLEFFNoMask<vreg, eew>,
+          VLFSched<eew>;
+        def "E" # eew # "FF_V_" # LInfo # "_TU" # "_VL":
+          VPseudoVLEFFNoMaskTU<vreg, eew>,
+          VLFSched<eew>;
+        def "E" # eew # "FF_V_" # LInfo # "_MASK" # "_VL":
+          VPseudoVLEFFMask<vreg, eew>,
+          VLFSched<eew>;
       }
     }
   }
@@ -2815,6 +2960,25 @@
   }
 }

+multiclass VPseudoSEGLoadFF {
+  foreach eew = EEWList in {
+    foreach lmul = MxSet<eew>.m in {
+      defvar LInfo = lmul.MX;
+      let VLMul = lmul.value in {
+        foreach nf = NFSet<lmul>.L in {
+          defvar vreg = SegRegClass<lmul, nf>.RC;
+          def nf # "E" # eew # "FF_V_" # LInfo # "_VL":
+            VPseudoVLSEGFFNoMask<vreg, eew, nf>;
+          def nf # "E" #
eew # "FF_V_" # LInfo # "_TU" # "_VL": + VPseudoVLSEGFFNoMaskTU; + def nf # "E" # eew # "FF_V_" # LInfo # "_MASK" # "_VL": + VPseudoVLSEGFFMask; + } + } + } + } +} + multiclass VPseudoSSegLoad { foreach eew = EEWList in { foreach lmul = MxSet.m in { @@ -4300,7 +4464,7 @@ let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1, Uses = [VL] in -def PseudoReadVL : Pseudo<(outs GPR:$rd), (ins ixlenimm:$vtype), []>; +def PseudoReadVL : Pseudo<(outs GPR:$rd), (ins), []>; let hasSideEffects = 0, mayLoad = 0, mayStore = 1, isCodeGenOnly = 1 in { def PseudoVSPILL_M1 : VPseudo; @@ -4403,8 +4567,10 @@ defm PseudoVSUXSEG : VPseudoISegStore; // vlsegeff.v may update VL register -let hasSideEffects = 1, Defs = [VL] in +let hasSideEffects = 1, Defs = [VL] in { defm PseudoVLSEG : VPseudoUSSegLoad; +defm PseudoVLSEG : VPseudoSEGLoadFF; +} //===----------------------------------------------------------------------===// // 12. Vector Integer Arithmetic Instructions diff --git a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp --- a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp +++ b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp @@ -210,16 +210,6 @@ if (lowerRISCVVMachineInstrToMCInst(MI, OutMI)) return false; - // Only need the output operand when lower PseudoReadVL from MI to MCInst. - if (MI->getOpcode() == RISCV::PseudoReadVL) { - OutMI.setOpcode(RISCV::CSRRS); - OutMI.addOperand(MCOperand::createReg(MI->getOperand(0).getReg())); - OutMI.addOperand( - MCOperand::createImm(RISCVSysReg::lookupSysRegByName("VL")->Encoding)); - OutMI.addOperand(MCOperand::createReg(RISCV::X0)); - return false; - } - OutMI.setOpcode(MI->getOpcode()); for (const MachineOperand &MO : MI->operands()) { @@ -248,6 +238,12 @@ RISCVSysReg::lookupSysRegByName("VLENB")->Encoding)); OutMI.addOperand(MCOperand::createReg(RISCV::X0)); break; + case RISCV::PseudoReadVL: + OutMI.setOpcode(RISCV::CSRRS); + OutMI.addOperand( + MCOperand::createImm(RISCVSysReg::lookupSysRegByName("VL")->Encoding)); + OutMI.addOperand(MCOperand::createReg(RISCV::X0)); + break; } return false; } diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-rv32-readvl.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-rv32-readvl.ll deleted file mode 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vleff-rv32-readvl.ll +++ /dev/null @@ -1,1891 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v -stop-after=finalize-isel < %s \ -; RUN: -target-abi=ilp32 | FileCheck %s -declare { , i32 } @llvm.riscv.vleff.nxv8i8(, *, i32); -declare { , i32 } @llvm.riscv.vleff.nxv16i8(, *, i32); -declare { , i32 } @llvm.riscv.vleff.nxv32i8(, *, i32); -declare { , i32 } @llvm.riscv.vleff.nxv64i8(, *, i32); -declare { , i32 } @llvm.riscv.vleff.nxv4i16(, *, i32); -declare { , i32 } @llvm.riscv.vleff.nxv8i16(, *, i32); -declare { , i32 } @llvm.riscv.vleff.nxv16i16(, *, i32); -declare { , i32 } @llvm.riscv.vleff.nxv32i16(, *, i32); -declare { , i32 } @llvm.riscv.vleff.nxv2i32(, *, i32); -declare { , i32 } @llvm.riscv.vleff.nxv4i32(, *, i32); -declare { , i32 } @llvm.riscv.vleff.nxv8i32(, *, i32); -declare { , i32 } @llvm.riscv.vleff.nxv16i32(, *, i32); -declare { , i32 } @llvm.riscv.vleff.nxv1i64(, *, i32); -declare { , i32 } @llvm.riscv.vleff.nxv2i64(, *, i32); -declare { , i32 } @llvm.riscv.vleff.nxv4i64(, *, i32); -declare { , i32 } @llvm.riscv.vleff.nxv8i64(, *, i32); -declare { , i32 } @llvm.riscv.vleff.mask.nxv8i8.i32(, *, , i32, i32 immarg) -declare { , i32 } @llvm.riscv.vleff.mask.nxv16i8.i32(, 
*, , i32, i32 immarg) -declare { , i32 } @llvm.riscv.vleff.mask.nxv32i8.i32(, *, , i32, i32 immarg) -declare { , i32 } @llvm.riscv.vleff.mask.nxv64i8.i32(, *, , i32, i32 immarg) -declare { , i32 } @llvm.riscv.vleff.mask.nxv4i16.i32(, *, , i32, i32 immarg) -declare { , i32 } @llvm.riscv.vleff.mask.nxv8i16.i32(, *, , i32, i32 immarg) -declare { , i32 } @llvm.riscv.vleff.mask.nxv16i16.i32(, *, , i32, i32 immarg) -declare { , i32 } @llvm.riscv.vleff.mask.nxv32i16.i32(, *, , i32, i32 immarg) -declare { , i32 } @llvm.riscv.vleff.mask.nxv2i32.i32(, *, , i32, i32 immarg) -declare { , i32 } @llvm.riscv.vleff.mask.nxv4i32.i32(, *, , i32, i32 immarg) -declare { , i32 } @llvm.riscv.vleff.mask.nxv8i32.i32(, *, , i32, i32 immarg) -declare { , i32 } @llvm.riscv.vleff.mask.nxv16i32.i32(, *, , i32, i32 immarg) -declare { , i32 } @llvm.riscv.vleff.mask.nxv1i64.i32(, *, , i32, i32 immarg) -declare { , i32 } @llvm.riscv.vleff.mask.nxv2i64.i32(, *, , i32, i32 immarg) -declare { , i32 } @llvm.riscv.vleff.mask.nxv4i64.i32(, *, , i32, i32 immarg) -declare { , i32 } @llvm.riscv.vleff.mask.nxv8i64.i32(, *, , i32, i32 immarg) - -define i32 @vleffe8m1( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m1 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr = PseudoVLE8FF_V_M1 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv8i8( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m2( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m2 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE8FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE8FF_V_M2 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv16i8( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m4( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m4 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE8FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE8FF_V_M4 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv32i8( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m8( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m8 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE8FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE8FF_V_M8 [[COPY1]], [[COPY]], 
3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 67 /* e8, m8, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv64i8( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m1_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m1_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8 - ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE8FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 0 /* e8, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv8i8( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m2_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m2_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2 - ; CHECK-NEXT: [[PseudoVLE8FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE8FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 1 /* e8, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv16i8( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m4_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m4_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4 - ; CHECK-NEXT: [[PseudoVLE8FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE8FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 2 /* e8, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv32i8( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m8_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m8_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[PseudoVLE8FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE8FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 3 /* e8, m8, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv64i8( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m1_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; 
CHECK-LABEL: name: vleffe8m1_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 0 /* e8, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m2_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m2_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 1 /* e8, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m4_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m4_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 2 /* e8, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m8_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m8_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 3 /* e8, m8, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; 
CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv64i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m1_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m1_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m2_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m2_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m4_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m4_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m8_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m8_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: 
[[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 67 /* e8, m8, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv64i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m1_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m1_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 128 /* e8, m1, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m2_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m2_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 129 /* e8, m2, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m4_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m4_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 130 /* e8, m4, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m8_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m8_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY 
$x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 131 /* e8, m8, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv64i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m1_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m1_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 192 /* e8, m1, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m2_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m2_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 193 /* e8, m2, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m4_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m4_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 194 /* e8, m4, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } 
%0, 1 - ret i32 %1 -} - -define i32 @vleffe8m8_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m8_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 195 /* e8, m8, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv64i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m1( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m1 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_:%[0-9]+]]:vr = PseudoVLE16FF_V_M1 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv4i16( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m2( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m2 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE16FF_V_M2 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv8i16( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m4( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m4 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE16FF_V_M4 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv16i16( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m8( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m8 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE16FF_V_M8 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl - ; CHECK-NEXT: 
[[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 75 /* e16, m8, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv32i16( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m1_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m1_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8 - ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE16FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 8 /* e16, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv4i16( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m2_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m2_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2 - ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE16FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 9 /* e16, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv8i16( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m4_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m4_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4 - ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE16FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 10 /* e16, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv16i16( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m8_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m8_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE16FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 11 /* e16, m8, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv32i16( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m1_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: 
vleffe16m1_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 8 /* e16, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m2_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m2_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 9 /* e16, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m4_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m4_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 10 /* e16, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m8_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m8_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 11 /* e16, m8, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; 
CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m1_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m1_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m2_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m2_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m4_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m4_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m8_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m8_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; 
CHECK-NEXT: [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 75 /* e16, m8, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m1_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m1_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 136 /* e16, m1, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m2_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m2_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 137 /* e16, m2, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m4_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m4_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 138 /* e16, m4, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m8_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m8_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; 
CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 139 /* e16, m8, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m1_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m1_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 200 /* e16, m1, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m2_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m2_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 201 /* e16, m2, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m4_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m4_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 202 /* e16, m4, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i16.i32( 
%maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m8_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m8_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 203 /* e16, m8, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m1( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m1 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_:%[0-9]+]]:vr = PseudoVLE32FF_V_M1 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv2i32( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m2( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m2 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32FF_V_M2 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv4i32( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m4( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m4 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE32FF_V_M4 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 82 /* e32, m4, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv8i32( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m8( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m8 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE32FF_V_M8 [[COPY1]], 
[[COPY]], 5 /* e32 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 83 /* e32, m8, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv16i32( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m1_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m1_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8 - ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE32FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 16 /* e32, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv2i32( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m2_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m2_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2 - ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE32FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 17 /* e32, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv4i32( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m4_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m4_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4 - ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE32FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 18 /* e32, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv8i32( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m8_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m8_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE32FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 19 /* e32, m8, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv16i32( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m1_tumu( 
%mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m1_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 16 /* e32, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m2_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m2_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 17 /* e32, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m4_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m4_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 18 /* e32, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m8_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m8_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 19 /* e32, m8, tu, mu */, 
implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m1_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m1_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m2_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m2_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m4_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m4_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 82 /* e32, m4, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m8_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m8_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; 
CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 83 /* e32, m8, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m1_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m1_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 144 /* e32, m1, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m2_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m2_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 145 /* e32, m2, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m4_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m4_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 146 /* e32, m4, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m8_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m8_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: 
liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 147 /* e32, m8, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m1_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m1_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 208 /* e32, m1, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m2_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m2_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 209 /* e32, m2, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m4_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m4_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 210 /* e32, m4, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = 
tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m8_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m8_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 211 /* e32, m8, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m1( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m1 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_:%[0-9]+]]:vr = PseudoVLE64FF_V_M1 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv1i64( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m2( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m2 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE64FF_V_M2 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv2i64( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m4( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m4 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE64FF_V_M4 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv4i64( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m8( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m8 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: 
[[PseudoVLE64FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE64FF_V_M8 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 91 /* e64, m8, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv8i64( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m1_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m1_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8 - ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE64FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 24 /* e64, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv1i64( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m2_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m2_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2 - ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE64FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 25 /* e64, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv2i64( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m4_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m4_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4 - ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE64FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 26 /* e64, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv4i64( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m8_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m8_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[PseudoVLE64FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE64FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 27 /* e64, m8, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv8i64( %merge, * %p, i32 %vl) - %1 = extractvalue { 
, i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m1_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m1_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 24 /* e64, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv1i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m2_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m2_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 25 /* e64, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m4_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m4_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 26 /* e64, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m8_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m8_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl - ; CHECK-NEXT: 
[[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 27 /* e64, m8, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m1_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m1_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv1i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m2_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m2_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m4_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m4_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m8_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m8_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: 
[[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 91 /* e64, m8, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m1_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m1_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 152 /* e64, m1, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv1i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m2_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m2_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 153 /* e64, m2, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m4_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m4_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 154 /* e64, m4, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m8_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; 
CHECK-LABEL: name: vleffe64m8_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 155 /* e64, m8, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m1_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m1_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 216 /* e64, m1, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv1i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m2_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m2_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 217 /* e64, m2, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m4_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m4_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 218 /* e64, m4, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = 
COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m8_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m8_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 219 /* e64, m8, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-rv64-readvl.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-rv64-readvl.ll deleted file mode 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vleff-rv64-readvl.ll +++ /dev/null @@ -1,1891 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v -stop-after=finalize-isel < %s \ -; RUN: -target-abi=ilp32 | FileCheck %s -declare { , i32 } @llvm.riscv.vleff.nxv8i8(, *, i32); -declare { , i32 } @llvm.riscv.vleff.nxv16i8(, *, i32); -declare { , i32 } @llvm.riscv.vleff.nxv32i8(, *, i32); -declare { , i32 } @llvm.riscv.vleff.nxv64i8(, *, i32); -declare { , i32 } @llvm.riscv.vleff.nxv4i16(, *, i32); -declare { , i32 } @llvm.riscv.vleff.nxv8i16(, *, i32); -declare { , i32 } @llvm.riscv.vleff.nxv16i16(, *, i32); -declare { , i32 } @llvm.riscv.vleff.nxv32i16(, *, i32); -declare { , i32 } @llvm.riscv.vleff.nxv2i32(, *, i32); -declare { , i32 } @llvm.riscv.vleff.nxv4i32(, *, i32); -declare { , i32 } @llvm.riscv.vleff.nxv8i32(, *, i32); -declare { , i32 } @llvm.riscv.vleff.nxv16i32(, *, i32); -declare { , i32 } @llvm.riscv.vleff.nxv1i64(, *, i32); -declare { , i32 } @llvm.riscv.vleff.nxv2i64(, *, i32); -declare { , i32 } @llvm.riscv.vleff.nxv4i64(, *, i32); -declare { , i32 } @llvm.riscv.vleff.nxv8i64(, *, i32); -declare { , i32 } @llvm.riscv.vleff.mask.nxv8i8.i32(, *, , i32, i32 immarg) -declare { , i32 } @llvm.riscv.vleff.mask.nxv16i8.i32(, *, , i32, i32 immarg) -declare { , i32 } @llvm.riscv.vleff.mask.nxv32i8.i32(, *, , i32, i32 immarg) -declare { , i32 } @llvm.riscv.vleff.mask.nxv64i8.i32(, *, , i32, i32 immarg) -declare { , i32 } @llvm.riscv.vleff.mask.nxv4i16.i32(, *, , i32, i32 immarg) -declare { , i32 } @llvm.riscv.vleff.mask.nxv8i16.i32(, *, , i32, i32 immarg) -declare { , i32 } @llvm.riscv.vleff.mask.nxv16i16.i32(, *, , i32, i32 immarg) -declare { , i32 } @llvm.riscv.vleff.mask.nxv32i16.i32(, *, , i32, i32 immarg) -declare { , i32 } @llvm.riscv.vleff.mask.nxv2i32.i32(, *, , i32, i32 immarg) -declare { , i32 } @llvm.riscv.vleff.mask.nxv4i32.i32(, *, , i32, i32 immarg) -declare { , i32 } @llvm.riscv.vleff.mask.nxv8i32.i32(, *, , i32, i32 immarg) -declare { , i32 } @llvm.riscv.vleff.mask.nxv16i32.i32(, *, , i32, i32 immarg) -declare { , i32 } @llvm.riscv.vleff.mask.nxv1i64.i32(, *, , i32, 
i32 immarg) -declare { , i32 } @llvm.riscv.vleff.mask.nxv2i64.i32(, *, , i32, i32 immarg) -declare { , i32 } @llvm.riscv.vleff.mask.nxv4i64.i32(, *, , i32, i32 immarg) -declare { , i32 } @llvm.riscv.vleff.mask.nxv8i64.i32(, *, , i32, i32 immarg) - -define i32 @vleffe8m1( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m1 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr = PseudoVLE8FF_V_M1 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv8i8( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m2( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m2 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE8FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE8FF_V_M2 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv16i8( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m4( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m4 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE8FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE8FF_V_M4 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv32i8( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m8( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m8 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE8FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE8FF_V_M8 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 67 /* e8, m8, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv64i8( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m1_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m1_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8 - ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE8FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 0 /* 
e8, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv8i8( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m2_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m2_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2 - ; CHECK-NEXT: [[PseudoVLE8FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE8FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 1 /* e8, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv16i8( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m4_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m4_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4 - ; CHECK-NEXT: [[PseudoVLE8FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE8FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 2 /* e8, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv32i8( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m8_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m8_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[PseudoVLE8FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE8FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 3 /* e8, m8, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv64i8( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m1_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m1_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 0 /* e8, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 
@vleffe8m2_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m2_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 1 /* e8, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m4_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m4_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 2 /* e8, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m8_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m8_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 3 /* e8, m8, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv64i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m1_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m1_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl 
-  ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT: PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.mask.nxv8i8.i32(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i8>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 8 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m2_tamu(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %maskedoff, <vscale x 16 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m2_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{ $}}
-  ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT: $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT: [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl
-  ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT: PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.mask.nxv16i8.i32(<vscale x 16 x i8> %maskedoff, <vscale x 16 x i8>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 16 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m4_tamu(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %maskedoff, <vscale x 32 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m4_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{ $}}
-  ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT: $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT: [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl
-  ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl
-  ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT: PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.mask.nxv32i8.i32(<vscale x 32 x i8> %maskedoff, <vscale x 32 x i8>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 32 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m8_tamu(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %maskedoff, <vscale x 64 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m8_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{ $}}
-  ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT: $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT: [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl
-  ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 67 /* e8, m8, ta, mu */, implicit $vl
-  ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT: PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.mask.nxv64i8.i32(<vscale x 64 x i8> %maskedoff, <vscale x 64 x i8>* %p, <vscale x 64 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 64 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m1_tuma(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %maskedoff, <vscale x 8 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m1_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{ $}}
-  ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT: $v0 =
COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 128 /* e8, m1, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m2_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m2_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 129 /* e8, m2, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m4_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m4_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 130 /* e8, m4, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m8_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m8_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 131 /* e8, m8, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv64i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m1_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m1_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: 
$v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 192 /* e8, m1, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m2_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m2_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 193 /* e8, m2, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m4_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m4_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 194 /* e8, m4, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe8m8_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe8m8_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 195 /* e8, m8, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } 
@llvm.riscv.vleff.mask.nxv64i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m1( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m1 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_:%[0-9]+]]:vr = PseudoVLE16FF_V_M1 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv4i16( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m2( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m2 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE16FF_V_M2 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv8i16( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m4( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m4 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE16FF_V_M4 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv16i16( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m8( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m8 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE16FF_V_M8 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 75 /* e16, m8, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv32i16( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m1_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m1_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8 - ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE16FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 8 /* e16, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; 
CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv4i16( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m2_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m2_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2 - ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE16FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 9 /* e16, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv8i16( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m4_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m4_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4 - ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE16FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 10 /* e16, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv16i16( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m8_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m8_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE16FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 11 /* e16, m8, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv32i16( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m1_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m1_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 8 /* e16, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m2_tumu( %mask, %maskedoff, *%p, i32 %vl) { - 
; CHECK-LABEL: name: vleffe16m2_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 9 /* e16, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m4_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m4_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 10 /* e16, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m8_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m8_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 11 /* e16, m8, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m1_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m1_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY 
[[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m2_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m2_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m4_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m4_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m8_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m8_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 75 /* e16, m8, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m1_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m1_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY 
[[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 136 /* e16, m1, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m2_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m2_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 137 /* e16, m2, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m4_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m4_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 138 /* e16, m4, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m8_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m8_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 139 /* e16, m8, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m1_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m1_tama - ; CHECK: bb.0.entry: - ; 
CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 200 /* e16, m1, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m2_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m2_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 201 /* e16, m2, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m4_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m4_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 202 /* e16, m4, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe16m8_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe16m8_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 203 /* e16, m8, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 
-entry:
-  %0 = tail call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.mask.nxv32i16.i32(<vscale x 32 x i16> %maskedoff, <vscale x 32 x i16>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 32 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m1(<vscale x 2 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m1
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT: liveins: $x10, $x11
-  ; CHECK-NEXT: {{ $}}
-  ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_:%[0-9]+]]:vr = PseudoVLE32FF_V_M1 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
-  ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT: PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 2 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m2(<vscale x 4 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m2
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT: liveins: $x10, $x11
-  ; CHECK-NEXT: {{ $}}
-  ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32FF_V_M2 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
-  ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT: PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 4 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m4(<vscale x 8 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m4
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT: liveins: $x10, $x11
-  ; CHECK-NEXT: {{ $}}
-  ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE32FF_V_M4 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
-  ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 82 /* e32, m4, ta, mu */, implicit $vl
-  ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT: PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 8 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m8(<vscale x 16 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m8
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT: liveins: $x10, $x11
-  ; CHECK-NEXT: {{ $}}
-  ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE32FF_V_M8 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
-  ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 83 /* e32, m8, ta, mu */, implicit $vl
-  ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT: PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 16 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m1_tu(<vscale x 2 x i32> %merge, <vscale x 2 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m1_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT: liveins: $v8, $x10, $x11
-  ; CHECK-NEXT: {{ $}}
-  ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8
-  ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE32FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
-  ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 16 /* e32, m1, tu, mu */, implicit $vl
-  ; CHECK-NEXT:
$x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv2i32( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m2_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m2_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2 - ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE32FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 17 /* e32, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv4i32( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m4_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m4_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4 - ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE32FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 18 /* e32, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv8i32( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m8_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m8_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE32FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 19 /* e32, m8, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv16i32( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m1_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m1_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 16 /* e32, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m2_tumu( 
%mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m2_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 17 /* e32, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m4_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m4_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 18 /* e32, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m8_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m8_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 19 /* e32, m8, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m1_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m1_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, 
implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m2_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m2_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m4_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m4_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 82 /* e32, m4, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m8_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m8_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 83 /* e32, m8, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m1_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m1_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: 
[[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 144 /* e32, m1, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m2_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m2_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 145 /* e32, m2, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m4_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m4_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 146 /* e32, m4, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m8_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m8_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 147 /* e32, m8, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m1_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; 
CHECK-LABEL: name: vleffe32m1_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 208 /* e32, m1, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m2_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m2_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 209 /* e32, m2, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m4_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m4_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 210 /* e32, m4, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe32m8_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe32m8_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 211 /* e32, m8, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = 
COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m1( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m1 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_:%[0-9]+]]:vr = PseudoVLE64FF_V_M1 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv1i64( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m2( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m2 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE64FF_V_M2 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv2i64( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m4( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m4 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE64FF_V_M4 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv4i64( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m8( *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m8 - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVLE64FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE64FF_V_M8 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 91 /* e64, m8, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv8i64( undef, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m1_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m1_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8 - ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE64FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = 
PseudoReadVL 24 /* e64, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv1i64( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m2_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m2_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2 - ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE64FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 25 /* e64, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv2i64( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m4_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m4_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4 - ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE64FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 26 /* e64, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv4i64( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m8_tu( %merge, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m8_tu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[PseudoVLE64FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE64FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 27 /* e64, m8, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = call { , i32 } @llvm.riscv.vleff.nxv8i64( %merge, * %p, i32 %vl) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m1_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m1_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 24 /* e64, m1, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv1i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue 
{ , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m2_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m2_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 25 /* e64, m2, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m4_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m4_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 26 /* e64, m4, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m8_tumu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m8_tumu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 27 /* e64, m8, tu, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m1_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m1_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl - ; CHECK-NEXT: 
[[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv1i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m2_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m2_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m4_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m4_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m8_tamu( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m8_tamu - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 91 /* e64, m8, ta, mu */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m1_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m1_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: 
[[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 152 /* e64, m1, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv1i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m2_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m2_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 153 /* e64, m2, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m4_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m4_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 154 /* e64, m4, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m8_tuma( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m8_tuma - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 155 /* e64, m8, tu, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 
@vleffe64m1_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m1_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 216 /* e64, m1, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv1i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m2_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m2_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 217 /* e64, m2, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m4_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m4_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 218 /* e64, m4, ta, ma */, implicit $vl - ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] - ; CHECK-NEXT: PseudoRET implicit $x10 -entry: - %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) - %1 = extractvalue { , i32 } %0, 1 - ret i32 %1 -} - -define i32 @vleffe64m8_tama( %mask, %maskedoff, *%p, i32 %vl) { - ; CHECK-LABEL: name: vleffe64m8_tama - ; CHECK: bb.0.entry: - ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 - ; CHECK-NEXT: $v0 = COPY [[COPY3]] - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] - ; CHECK-NEXT: [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl - ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 219 /* 
e64, m8, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64.i32(<vscale x 8 x i64> %maskedoff, <vscale x 8 x i64>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 8 x i64>, i32 } %0, 1
-  ret i32 %1
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll
new file
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll
@@ -0,0 +1,114 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+v -stop-after=finalize-isel -target-abi=lp64 < %s | FileCheck %s
+
+declare { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>*, i64)
+declare { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.mask.nxv8i8.i64(<vscale x 8 x i8>, <vscale x 8 x i8>*, <vscale x 8 x i1>, i64, i64 immarg)
+
+declare {<vscale x 8 x i8>, <vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, i8* , i64)
+declare {<vscale x 8 x i8>, <vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, i8*, <vscale x 8 x i1>, i64, i64)
+
+define i64 @test_vleff_nxv8i8(<vscale x 8 x i8> *%p, i64 %vl) {
+  ; CHECK-LABEL: name: test_vleff_nxv8i8
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_VL:%[0-9]+]]:vr, [[PseudoVLE8FF_V_M1_VL1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1_VL [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def dead $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoVLE8FF_V_M1_VL1]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8>* %p, i64 %vl)
+  %1 = extractvalue { <vscale x 8 x i8>, i64 } %0, 1
+  ret i64 %1
+}
+
+define i64 @test_vleff_nxv8i8_tu(<vscale x 8 x i8> %merge, <vscale x 8 x i8> *%p, i64 %vl) {
+  ; CHECK-LABEL: name: test_vleff_nxv8i8_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_TU_VL:%[0-9]+]]:vr, [[PseudoVLE8FF_V_M1_TU_VL1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1_TU_VL [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def dead $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoVLE8FF_V_M1_TU_VL1]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8> %merge, <vscale x 8 x i8>* %p, i64 %vl)
+  %1 = extractvalue { <vscale x 8 x i8>, i64 } %0, 1
+  ret i64 %1
+}
+
+define i64 @test_vleff_nxv8i8_mask(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i8> *%p, <vscale x 8 x i1> %m, i64 %vl) {
+  ; CHECK-LABEL: name: test_vleff_nxv8i8_mask
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8, $x10, $v0, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_MASK_VL:%[0-9]+]]:vrnov0, [[PseudoVLE8FF_V_M1_MASK_VL1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1_MASK_VL [[COPY3]], [[COPY2]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def dead $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoVLE8FF_V_M1_MASK_VL1]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.mask.nxv8i8.i64(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i8> *%p, <vscale x 8 x i1> %m, i64 %vl, i64 0)
+  %1 = extractvalue { <vscale x 8 x i8>, i64 } %0, 1
+  ret i64 %1
+}
+
+define i64 @test_vlseg2ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_nxv8i8
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_VL:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_VL1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1_VL [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def dead $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_VL1]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call {<vscale x 8 x i8>, <vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, i8* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>, <vscale x 8 x i8>, i64} %0, 2
+  ret i64 %1
+}
+
+define i64 @test_vlseg2ff_nxv8i8_tu(<vscale x 8 x i8> %val, i8* %base, i64 %vl, i64* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_nxv8i8_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m1 = REG_SEQUENCE [[COPY2]], %subreg.sub_vrm1_0, [[COPY2]], %subreg.sub_vrm1_1
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_TU_VL:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_TU_VL1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1_TU_VL [[REG_SEQUENCE]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def dead $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_TU_VL1]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call {<vscale x 8 x i8>, <vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.nxv8i8(<vscale x 8 x i8> %val, <vscale x 8 x i8> %val, i8* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>, <vscale x 8 x i8>, i64} %0, 2
+  ret i64 %1
+}
+
+define i64 @test_vlseg2ff_nxv8i8_mask(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl, i64* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_nxv8i8_mask
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8, $x10, $v0, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v8
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY3]], %subreg.sub_vrm1_0, [[COPY3]], %subreg.sub_vrm1_1
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_MASK_VL:%[0-9]+]]:vrn2m1nov0, [[PseudoVLSEG2E8FF_V_M1_MASK_VL1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1_MASK_VL [[REG_SEQUENCE]], [[COPY2]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def dead $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_MASK_VL1]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call {<vscale x 8 x i8>, <vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8> %val, <vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 0)
+  %1 = extractvalue {<vscale x 8 x i8>, <vscale x 8 x i8>, i64} %0, 2
+  ret i64 %1
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv32-readvl.ll b/llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv32-readvl.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv32-readvl.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv32-readvl.ll
@@ -1,41 +1,41 @@
 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+v -stop-after=finalize-isel < %s \
 ; RUN:     -target-abi=ilp32d | FileCheck %s
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16* , i32)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.nxv16i16(i16* , i32)
 declare {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i1>, i32, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32* , i32)
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.nxv4i32(i32* , i32)
 declare {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i1>, i32, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg2ff.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8* , i32)
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg2ff.nxv16i8(i8* , i32)
 declare {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i1>, i32, i32)
-declare {<vscale x 1 x i64>,<vscale x 1 x i64>,
i32} @llvm.riscv.vlseg2ff.nxv1i64(,, i64* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv1i64(i64* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i64(,, i64*, , i32, i32) -declare {,, i32} @llvm.riscv.vlseg2ff.nxv1i32(,, i32* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv1i32(i32* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i32(,, i32*, , i32, i32) -declare {,, i32} @llvm.riscv.vlseg2ff.nxv8i16(,, i16* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv8i16(i16* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16(,, i16*, , i32, i32) -declare {,, i32} @llvm.riscv.vlseg2ff.nxv4i8(,, i8* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv4i8(i8* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i8(,, i8*, , i32, i32) -declare {,, i32} @llvm.riscv.vlseg2ff.nxv1i16(,, i16* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv1i16(i16* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i16(,, i16*, , i32, i32) -declare {,, i32} @llvm.riscv.vlseg2ff.nxv2i32(,, i32* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv2i32(i32* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32(,, i32*, , i32, i32) -declare {,, i32} @llvm.riscv.vlseg2ff.nxv8i8(,, i8* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv8i8(i8* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8(,, i8*, , i32, i32) -declare {,, i32} @llvm.riscv.vlseg2ff.nxv4i64(,, i64* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv4i64(i64* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i64(,, i64*, , i32, i32) -declare {,, i32} @llvm.riscv.vlseg2ff.nxv4i16(,, i16* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv4i16(i16* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16(,, i16*, , i32, i32) -declare {,, i32} @llvm.riscv.vlseg2ff.nxv1i8(,, i8* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv1i8(i8* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i8(,, i8*, , i32, i32) -declare {,, i32} @llvm.riscv.vlseg2ff.nxv2i8(,, i8* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv2i8(i8* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i8(,, i8*, , i32, i32) -declare {,, i32} @llvm.riscv.vlseg2ff.nxv8i32(,, i32* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv8i32(i32* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i32(,, i32*, , i32, i32) -declare {,, i32} @llvm.riscv.vlseg2ff.nxv32i8(,, i8* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv32i8(i8* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8(,, i8*, , i32, i32) -declare {,, i32} @llvm.riscv.vlseg2ff.nxv2i16(,, i16* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv2i16(i16* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i16(,, i16*, , i32, i32) -declare {,, i32} @llvm.riscv.vlseg2ff.nxv2i64(,, i64* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv2i64(i64* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i64(,, i64*, , i32, i32) define void @test_vlseg2ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) { @@ -51,7 +51,7 @@ ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) ; CHECK-NEXT: PseudoRET entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8i8( undef, undef, i8* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8i8(i8* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 2 store volatile i32 %1, i32* %outvl ret void @@ -114,7 +114,7 @@ ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) ; CHECK-NEXT: PseudoRET entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16i8( undef, undef, i8* %base, i32 %vl) + %0 = 
tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16i8(i8* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 2 store volatile i32 %1, i32* %outvl ret void @@ -177,7 +177,7 @@ ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) ; CHECK-NEXT: PseudoRET entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv32i8( undef, undef, i8* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv32i8(i8* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 2 store volatile i32 %1, i32* %outvl ret void @@ -240,7 +240,7 @@ ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) ; CHECK-NEXT: PseudoRET entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4i16( undef, undef, i16* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4i16(i16* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 2 store volatile i32 %1, i32* %outvl ret void @@ -303,7 +303,7 @@ ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) ; CHECK-NEXT: PseudoRET entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8i16( undef, undef, i16* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8i16(i16* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 2 store volatile i32 %1, i32* %outvl ret void @@ -366,7 +366,7 @@ ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) ; CHECK-NEXT: PseudoRET entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16i16( undef, undef, i16* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 2 store volatile i32 %1, i32* %outvl ret void @@ -429,7 +429,7 @@ ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) ; CHECK-NEXT: PseudoRET entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2i32( undef, undef, i32* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2i32(i32* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 2 store volatile i32 %1, i32* %outvl ret void @@ -492,7 +492,7 @@ ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) ; CHECK-NEXT: PseudoRET entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4i32( undef, undef, i32* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4i32(i32* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 2 store volatile i32 %1, i32* %outvl ret void @@ -555,7 +555,7 @@ ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) ; CHECK-NEXT: PseudoRET entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1i64( undef, undef, i64* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1i64(i64* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 2 store volatile i32 %1, i32* %outvl ret void @@ -618,7 +618,7 @@ ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) ; CHECK-NEXT: PseudoRET entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2i64( undef, undef, i64* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2i64(i64* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 2 store volatile i32 %1, i32* %outvl ret void @@ -681,7 +681,7 @@ ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) ; CHECK-NEXT: PseudoRET entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4i64( undef, undef, i64* %base, i32 %vl) + %0 = tail call {,, i32} 
@llvm.riscv.vlseg2ff.nxv4i64(i64* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 2 store volatile i32 %1, i32* %outvl ret void diff --git a/llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv64-readvl.ll b/llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv64-readvl.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv64-readvl.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv64-readvl.ll @@ -1,41 +1,41 @@ ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+v -stop-after=finalize-isel < %s \ ; RUN: -target-abi=lp64d | FileCheck %s -declare {,, i64} @llvm.riscv.vlseg2ff.nxv16i16(,, i16* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(,, i16*, , i64, i64) -declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i32(,, i32* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i32(i32* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32(,, i32*, , i64, i64) -declare {,, i64} @llvm.riscv.vlseg2ff.nxv16i8(,, i8* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv16i8(i8* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8(,, i8*, , i64, i64) -declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i64(,, i64* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i64(i64* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64(,, i64*, , i64, i64) -declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i32(,, i32* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i32(i32* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i32(,, i32*, , i64, i64) -declare {,, i64} @llvm.riscv.vlseg2ff.nxv8i16(,, i16* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv8i16(i16* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16(,, i16*, , i64, i64) -declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i8(,, i8* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i8(i8* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i8(,, i8*, , i64, i64) -declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i16(,, i16* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i16(i16* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i16(,, i16*, , i64, i64) -declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i32(,, i32* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i32(i32* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32(,, i32*, , i64, i64) -declare {,, i64} @llvm.riscv.vlseg2ff.nxv8i8(,, i8* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv8i8(i8* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(,, i8*, , i64, i64) -declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i64(,, i64* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i64(i64* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64(,, i64*, , i64, i64) -declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i16(,, i16* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i16(i16* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16(,, i16*, , i64, i64) -declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i8(,, i8* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i8(i8* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i8(,, i8*, , i64, i64) -declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i8(,, i8* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i8(i8* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i8(,, i8*, , i64, i64) -declare {,, i64} @llvm.riscv.vlseg2ff.nxv8i32(,, i32* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv8i32(i32* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i32(,, i32*, , i64, i64) -declare {,, i64} @llvm.riscv.vlseg2ff.nxv32i8(,, i8* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv32i8(i8* , 
i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8(,, i8*, , i64, i64) -declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i16(,, i16* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i16(i16* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i16(,, i16*, , i64, i64) -declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i64(,, i64* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i64(i64* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64(,, i64*, , i64, i64) define void @test_vlseg2ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) { @@ -51,7 +51,7 @@ ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) ; CHECK-NEXT: PseudoRET entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8i8( undef, undef, i8* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8i8(i8* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 2 store volatile i64 %1, i64* %outvl ret void @@ -114,7 +114,7 @@ ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) ; CHECK-NEXT: PseudoRET entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16i8( undef, undef, i8* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16i8(i8* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 2 store volatile i64 %1, i64* %outvl ret void @@ -177,7 +177,7 @@ ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) ; CHECK-NEXT: PseudoRET entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv32i8( undef, undef, i8* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv32i8(i8* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 2 store volatile i64 %1, i64* %outvl ret void @@ -240,7 +240,7 @@ ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) ; CHECK-NEXT: PseudoRET entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4i16( undef, undef, i16* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4i16(i16* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 2 store volatile i64 %1, i64* %outvl ret void @@ -303,7 +303,7 @@ ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) ; CHECK-NEXT: PseudoRET entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8i16( undef, undef, i16* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8i16(i16* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 2 store volatile i64 %1, i64* %outvl ret void @@ -366,7 +366,7 @@ ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) ; CHECK-NEXT: PseudoRET entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16i16( undef, undef, i16* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 2 store volatile i64 %1, i64* %outvl ret void @@ -429,7 +429,7 @@ ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) ; CHECK-NEXT: PseudoRET entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2i32( undef, undef, i32* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2i32(i32* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 2 store volatile i64 %1, i64* %outvl ret void @@ -492,7 +492,7 @@ ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) ; CHECK-NEXT: PseudoRET entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4i32( undef, undef, i32* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4i32(i32* 
%base, i64 %vl) %1 = extractvalue {,, i64} %0, 2 store volatile i64 %1, i64* %outvl ret void @@ -555,7 +555,7 @@ ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) ; CHECK-NEXT: PseudoRET entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1i64( undef, undef, i64* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1i64(i64* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 2 store volatile i64 %1, i64* %outvl ret void @@ -618,7 +618,7 @@ ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) ; CHECK-NEXT: PseudoRET entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2i64( undef, undef, i64* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2i64(i64* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 2 store volatile i64 %1, i64* %outvl ret void @@ -681,7 +681,7 @@ ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) ; CHECK-NEXT: PseudoRET entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4i64( undef, undef, i64* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4i64(i64* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 2 store volatile i64 %1, i64* %outvl ret void