diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -357,7 +357,8 @@
   unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
   MVT VT = Node->getSimpleValueType(0);
   MVT XLenVT = Subtarget->getXLenVT();
-  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
+  unsigned SEW = VT.getScalarSizeInBits();
+  unsigned Log2SEW = Log2_32(SEW);
   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
 
   unsigned CurOp = 2;
@@ -379,8 +380,18 @@
                             Log2SEW, static_cast<unsigned>(LMUL));
   MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped,
                                                MVT::Other, MVT::Glue, Operands);
+  bool TailAgnostic = true;
+  bool MaskAgnostic = false;
+  if (IsMasked) {
+    uint64_t Policy = Node->getConstantOperandVal(Node->getNumOperands() - 1);
+    TailAgnostic = Policy & RISCVII::TAIL_AGNOSTIC;
+    MaskAgnostic = Policy & RISCVII::MASK_AGNOSTIC;
+  }
+  unsigned VType =
+      RISCVVType::encodeVTYPE(LMUL, SEW, TailAgnostic, MaskAgnostic);
+  SDValue VTypeOp = CurDAG->getTargetConstant(VType, DL, XLenVT);
   SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
-                                          /*Glue*/ SDValue(Load, 2));
+                                          VTypeOp, /*Glue*/ SDValue(Load, 2));
 
   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
     CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
@@ -1342,7 +1353,8 @@
       bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
 
       MVT VT = Node->getSimpleValueType(0);
-      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
+      unsigned SEW = VT.getScalarSizeInBits();
+      unsigned Log2SEW = Log2_32(SEW);
 
       unsigned CurOp = 2;
       // Masked intrinsics only have TU version pseudo instructions.
@@ -1365,8 +1377,20 @@
       MachineSDNode *Load =
          CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0),
                                 MVT::Other, MVT::Glue, Operands);
-      SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
-                                              /*Glue*/ SDValue(Load, 2));
+      bool TailAgnostic = !IsTU;
+      bool MaskAgnostic = false;
+      if (IsMasked) {
+        uint64_t Policy =
+            Node->getConstantOperandVal(Node->getNumOperands() - 1);
+        TailAgnostic = Policy & RISCVII::TAIL_AGNOSTIC;
+        MaskAgnostic = Policy & RISCVII::MASK_AGNOSTIC;
+      }
+      unsigned VType =
+          RISCVVType::encodeVTYPE(LMUL, SEW, TailAgnostic, MaskAgnostic);
+      SDValue VTypeOp = CurDAG->getTargetConstant(VType, DL, XLenVT);
+      SDNode *ReadVL =
+          CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT, VTypeOp,
+                                 /*Glue*/ SDValue(Load, 2));
 
       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -1352,13 +1352,14 @@
   uint64_t TSFlags = MI.getDesc().TSFlags;
 
-  // Print the full VType operand of vsetvli/vsetivli instructions, and the SEW
-  // operand of vector codegen pseudos.
-  if ((MI.getOpcode() == RISCV::VSETVLI || MI.getOpcode() == RISCV::VSETIVLI ||
-       MI.getOpcode() == RISCV::PseudoVSETVLI ||
-       MI.getOpcode() == RISCV::PseudoVSETIVLI ||
-       MI.getOpcode() == RISCV::PseudoVSETVLIX0) &&
-      OpIdx == 2) {
+  // Print the full VType operand of vsetvli/vsetivli and PseudoReadVL
+  // instructions, and the SEW operand of vector codegen pseudos.
+ if (((MI.getOpcode() == RISCV::VSETVLI || MI.getOpcode() == RISCV::VSETIVLI || + MI.getOpcode() == RISCV::PseudoVSETVLI || + MI.getOpcode() == RISCV::PseudoVSETIVLI || + MI.getOpcode() == RISCV::PseudoVSETVLIX0) && + OpIdx == 2) || + (MI.getOpcode() == RISCV::PseudoReadVL && OpIdx == 1)) { unsigned Imm = MI.getOperand(OpIdx).getImm(); RISCVVType::printVType(Imm, OS); } else if (RISCVII::hasSEWOp(TSFlags)) { diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -4240,7 +4240,7 @@ let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1, Uses = [VL] in -def PseudoReadVL : Pseudo<(outs GPR:$rd), (ins), []>; +def PseudoReadVL : Pseudo<(outs GPR:$rd), (ins ixlenimm:$vtype), []>; let hasSideEffects = 0, mayLoad = 0, mayStore = 1, isCodeGenOnly = 1 in { def PseudoVSPILL_M1 : VPseudo; diff --git a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp --- a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp +++ b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp @@ -210,6 +210,16 @@ if (lowerRISCVVMachineInstrToMCInst(MI, OutMI)) return false; + // Only need the output operand when lower PseudoReadVL from MI to MCInst. + if (MI->getOpcode() == RISCV::PseudoReadVL) { + OutMI.setOpcode(RISCV::CSRRS); + OutMI.addOperand(MCOperand::createReg(MI->getOperand(0).getReg())); + OutMI.addOperand( + MCOperand::createImm(RISCVSysReg::lookupSysRegByName("VL")->Encoding)); + OutMI.addOperand(MCOperand::createReg(RISCV::X0)); + return false; + } + OutMI.setOpcode(MI->getOpcode()); for (const MachineOperand &MO : MI->operands()) { @@ -238,12 +248,6 @@ RISCVSysReg::lookupSysRegByName("VLENB")->Encoding)); OutMI.addOperand(MCOperand::createReg(RISCV::X0)); break; - case RISCV::PseudoReadVL: - OutMI.setOpcode(RISCV::CSRRS); - OutMI.addOperand( - MCOperand::createImm(RISCVSysReg::lookupSysRegByName("VL")->Encoding)); - OutMI.addOperand(MCOperand::createReg(RISCV::X0)); - break; } return false; } diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-rv32-readvl.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-rv32-readvl.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vleff-rv32-readvl.ll @@ -0,0 +1,1891 @@ +; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+v -stop-after=finalize-isel < %s \ +; RUN: -target-abi=ilp32 | FileCheck %s +declare { , i32 } @llvm.riscv.vleff.nxv8i8(, *, i32); +declare { , i32 } @llvm.riscv.vleff.nxv16i8(, *, i32); +declare { , i32 } @llvm.riscv.vleff.nxv32i8(, *, i32); +declare { , i32 } @llvm.riscv.vleff.nxv64i8(, *, i32); +declare { , i32 } @llvm.riscv.vleff.nxv4i16(, *, i32); +declare { , i32 } @llvm.riscv.vleff.nxv8i16(, *, i32); +declare { , i32 } @llvm.riscv.vleff.nxv16i16(, *, i32); +declare { , i32 } @llvm.riscv.vleff.nxv32i16(, *, i32); +declare { , i32 } @llvm.riscv.vleff.nxv2i32(, *, i32); +declare { , i32 } @llvm.riscv.vleff.nxv4i32(, *, i32); +declare { , i32 } @llvm.riscv.vleff.nxv8i32(, *, i32); +declare { , i32 } @llvm.riscv.vleff.nxv16i32(, *, i32); +declare { , i32 } @llvm.riscv.vleff.nxv1i64(, *, i32); +declare { , i32 } @llvm.riscv.vleff.nxv2i64(, *, i32); +declare { , i32 } @llvm.riscv.vleff.nxv4i64(, *, i32); +declare { , i32 } @llvm.riscv.vleff.nxv8i64(, *, i32); +declare { , i32 } @llvm.riscv.vleff.mask.nxv8i8.i32(, *, , i32, i32 immarg) +declare { , i32 } @llvm.riscv.vleff.mask.nxv16i8.i32(, *, , 
i32, i32 immarg) +declare { , i32 } @llvm.riscv.vleff.mask.nxv32i8.i32(, *, , i32, i32 immarg) +declare { , i32 } @llvm.riscv.vleff.mask.nxv64i8.i32(, *, , i32, i32 immarg) +declare { , i32 } @llvm.riscv.vleff.mask.nxv4i16.i32(, *, , i32, i32 immarg) +declare { , i32 } @llvm.riscv.vleff.mask.nxv8i16.i32(, *, , i32, i32 immarg) +declare { , i32 } @llvm.riscv.vleff.mask.nxv16i16.i32(, *, , i32, i32 immarg) +declare { , i32 } @llvm.riscv.vleff.mask.nxv32i16.i32(, *, , i32, i32 immarg) +declare { , i32 } @llvm.riscv.vleff.mask.nxv2i32.i32(, *, , i32, i32 immarg) +declare { , i32 } @llvm.riscv.vleff.mask.nxv4i32.i32(, *, , i32, i32 immarg) +declare { , i32 } @llvm.riscv.vleff.mask.nxv8i32.i32(, *, , i32, i32 immarg) +declare { , i32 } @llvm.riscv.vleff.mask.nxv16i32.i32(, *, , i32, i32 immarg) +declare { , i32 } @llvm.riscv.vleff.mask.nxv1i64.i32(, *, , i32, i32 immarg) +declare { , i32 } @llvm.riscv.vleff.mask.nxv2i64.i32(, *, , i32, i32 immarg) +declare { , i32 } @llvm.riscv.vleff.mask.nxv4i64.i32(, *, , i32, i32 immarg) +declare { , i32 } @llvm.riscv.vleff.mask.nxv8i64.i32(, *, , i32, i32 immarg) + +define i32 @vleffe8m1( *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m1 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr = PseudoVLE8FF_V_M1 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv8i8( undef, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m2( *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m2 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLE8FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE8FF_V_M2 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv16i8( undef, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m4( *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m4 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLE8FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE8FF_V_M4 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv32i8( undef, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m8( *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m8 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLE8FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE8FF_V_M8 [[COPY1]], [[COPY]], 3 /* 
e8 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 67 /* e8, m8, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv64i8( undef, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m1_tu( %merge, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m1_tu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8 + ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE8FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 0 /* e8, m1, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv8i8( %merge, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m2_tu( %merge, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m2_tu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2 + ; CHECK-NEXT: [[PseudoVLE8FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE8FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 1 /* e8, m2, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv16i8( %merge, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m4_tu( %merge, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m4_tu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4 + ; CHECK-NEXT: [[PseudoVLE8FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE8FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 2 /* e8, m4, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv32i8( %merge, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m8_tu( %merge, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m8_tu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[PseudoVLE8FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE8FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 3 /* e8, m8, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv64i8( %merge, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m1_tumu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: 
name: vleffe8m1_tumu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 0 /* e8, m1, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m2_tumu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m2_tumu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 1 /* e8, m2, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m4_tumu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m4_tumu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 2 /* e8, m4, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m8_tumu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m8_tumu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] + ; CHECK-NEXT: [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 3 /* e8, m8, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET 
implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv64i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m1_tamu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m1_tamu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m2_tamu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m2_tamu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m4_tamu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m4_tamu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m8_tamu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m8_tamu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] + ; CHECK-NEXT: 
[[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 67 /* e8, m8, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv64i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m1_tuma( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m1_tuma + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 128 /* e8, m1, tu, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m2_tuma( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m2_tuma + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 129 /* e8, m2, tu, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m4_tuma( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m4_tuma + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 130 /* e8, m4, tu, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m8_tuma( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m8_tuma + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY 
$x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] + ; CHECK-NEXT: [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 131 /* e8, m8, tu, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv64i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m1_tama( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m1_tama + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 192 /* e8, m1, ta, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m2_tama( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m2_tama + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 193 /* e8, m2, ta, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m4_tama( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m4_tama + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 194 /* e8, m4, ta, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) + %1 = extractvalue { , i32 } 
%0, 1 + ret i32 %1 +} + +define i32 @vleffe8m8_tama( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m8_tama + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] + ; CHECK-NEXT: [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 195 /* e8, m8, ta, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv64i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m1( *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m1 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_:%[0-9]+]]:vr = PseudoVLE16FF_V_M1 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv4i16( undef, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m2( *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m2 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE16FF_V_M2 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv8i16( undef, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m4( *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m4 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE16FF_V_M4 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv16i16( undef, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m8( *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m8 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE16FF_V_M8 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl + ; CHECK-NEXT: 
[[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 75 /* e16, m8, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv32i16( undef, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m1_tu( %merge, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m1_tu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8 + ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE16FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 8 /* e16, m1, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv4i16( %merge, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m2_tu( %merge, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m2_tu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2 + ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE16FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 9 /* e16, m2, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv8i16( %merge, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m4_tu( %merge, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m4_tu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4 + ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE16FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 10 /* e16, m4, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv16i16( %merge, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m8_tu( %merge, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m8_tu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE16FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 11 /* e16, m8, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv32i16( %merge, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m1_tumu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: 
vleffe16m1_tumu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 8 /* e16, m1, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m2_tumu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m2_tumu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 9 /* e16, m2, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m4_tumu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m4_tumu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 10 /* e16, m4, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m8_tumu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m8_tumu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] + ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 11 /* e16, m8, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; 
CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m1_tamu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m1_tamu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m2_tamu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m2_tamu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m4_tamu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m4_tamu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m8_tamu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m8_tamu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] + ; 
CHECK-NEXT: [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 75 /* e16, m8, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m1_tuma( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m1_tuma + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 136 /* e16, m1, tu, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m2_tuma( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m2_tuma + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 137 /* e16, m2, tu, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m4_tuma( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m4_tuma + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 138 /* e16, m4, tu, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m8_tuma( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m8_tuma + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; 
CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] + ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 139 /* e16, m8, tu, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m1_tama( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m1_tama + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 200 /* e16, m1, ta, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m2_tama( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m2_tama + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 201 /* e16, m2, ta, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m4_tama( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m4_tama + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 202 /* e16, m4, ta, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i16.i32( 
%maskedoff, * %p, %mask, i32 %vl, i32 3) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m8_tama( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m8_tama + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] + ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 203 /* e16, m8, ta, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m1( *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m1 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_:%[0-9]+]]:vr = PseudoVLE32FF_V_M1 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv2i32( undef, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m2( *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m2 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32FF_V_M2 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv4i32( undef, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m4( *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m4 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE32FF_V_M4 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 82 /* e32, m4, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv8i32( undef, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m8( *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m8 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE32FF_V_M8 [[COPY1]], 
[[COPY]], 5 /* e32 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 83 /* e32, m8, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv16i32( undef, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m1_tu( %merge, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m1_tu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8 + ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE32FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 16 /* e32, m1, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv2i32( %merge, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m2_tu( %merge, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m2_tu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2 + ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE32FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 17 /* e32, m2, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv4i32( %merge, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m4_tu( %merge, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m4_tu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4 + ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE32FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 18 /* e32, m4, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv8i32( %merge, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m8_tu( %merge, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m8_tu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE32FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 19 /* e32, m8, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv16i32( %merge, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m1_tumu( 
%mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m1_tumu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 16 /* e32, m1, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m2_tumu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m2_tumu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 17 /* e32, m2, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m4_tumu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m4_tumu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 18 /* e32, m4, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m8_tumu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m8_tumu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] + ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 19 /* e32, m8, tu, mu */, 
implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m1_tamu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m1_tamu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m2_tamu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m2_tamu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m4_tamu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m4_tamu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 82 /* e32, m4, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m8_tamu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m8_tamu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; 
CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] + ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 83 /* e32, m8, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m1_tuma( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m1_tuma + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 144 /* e32, m1, tu, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m2_tuma( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m2_tuma + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 145 /* e32, m2, tu, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m4_tuma( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m4_tuma + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 146 /* e32, m4, tu, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m8_tuma( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m8_tuma + ; CHECK: bb.0.entry: + ; CHECK-NEXT: 
liveins: $v0, $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] + ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 147 /* e32, m8, tu, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m1_tama( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m1_tama + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 208 /* e32, m1, ta, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m2_tama( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m2_tama + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 209 /* e32, m2, ta, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m4_tama( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m4_tama + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 210 /* e32, m4, ta, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = 
tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m8_tama( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m8_tama + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] + ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 211 /* e32, m8, ta, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m1( *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m1 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_:%[0-9]+]]:vr = PseudoVLE64FF_V_M1 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv1i64( undef, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m2( *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m2 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE64FF_V_M2 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv2i64( undef, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m4( *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m4 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE64FF_V_M4 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv4i64( undef, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m8( *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m8 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: 
[[PseudoVLE64FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE64FF_V_M8 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 91 /* e64, m8, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv8i64( undef, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m1_tu( %merge, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m1_tu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8 + ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE64FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 24 /* e64, m1, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv1i64( %merge, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m2_tu( %merge, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m2_tu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2 + ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE64FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 25 /* e64, m2, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv2i64( %merge, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m4_tu( %merge, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m4_tu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4 + ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE64FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 26 /* e64, m4, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv4i64( %merge, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m8_tu( %merge, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m8_tu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[PseudoVLE64FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE64FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 27 /* e64, m8, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv8i64( %merge, * %p, i32 %vl) + %1 = extractvalue { 
, i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m1_tumu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m1_tumu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 24 /* e64, m1, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv1i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m2_tumu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m2_tumu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 25 /* e64, m2, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m4_tumu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m4_tumu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 26 /* e64, m4, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m8_tumu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m8_tumu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] + ; CHECK-NEXT: [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl + ; CHECK-NEXT: 
[[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 27 /* e64, m8, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m1_tamu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m1_tamu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv1i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m2_tamu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m2_tamu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m4_tamu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m4_tamu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m8_tamu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m8_tamu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: 
[[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] + ; CHECK-NEXT: [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 91 /* e64, m8, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m1_tuma( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m1_tuma + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 152 /* e64, m1, tu, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv1i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m2_tuma( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m2_tuma + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 153 /* e64, m2, tu, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m4_tuma( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m4_tuma + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 154 /* e64, m4, tu, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m8_tuma( %mask, %maskedoff, *%p, i32 %vl) { + ; 
CHECK-LABEL: name: vleffe64m8_tuma + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] + ; CHECK-NEXT: [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 155 /* e64, m8, tu, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m1_tama( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m1_tama + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 216 /* e64, m1, ta, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv1i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m2_tama( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m2_tama + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 217 /* e64, m2, ta, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m4_tama( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m4_tama + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 218 /* e64, m4, ta, ma */, implicit $vl + ; CHECK-NEXT: $x10 = 
COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m8_tama( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m8_tama + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] + ; CHECK-NEXT: [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 219 /* e64, m8, ta, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-rv64-readvl.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-rv64-readvl.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vleff-rv64-readvl.ll @@ -0,0 +1,1891 @@ +; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+v -stop-after=finalize-isel < %s \ +; RUN: -target-abi=lp64 | FileCheck %s +declare { , i32 } @llvm.riscv.vleff.nxv8i8(, *, i32); +declare { , i32 } @llvm.riscv.vleff.nxv16i8(, *, i32); +declare { , i32 } @llvm.riscv.vleff.nxv32i8(, *, i32); +declare { , i32 } @llvm.riscv.vleff.nxv64i8(, *, i32); +declare { , i32 } @llvm.riscv.vleff.nxv4i16(, *, i32); +declare { , i32 } @llvm.riscv.vleff.nxv8i16(, *, i32); +declare { , i32 } @llvm.riscv.vleff.nxv16i16(, *, i32); +declare { , i32 } @llvm.riscv.vleff.nxv32i16(, *, i32); +declare { , i32 } @llvm.riscv.vleff.nxv2i32(, *, i32); +declare { , i32 } @llvm.riscv.vleff.nxv4i32(, *, i32); +declare { , i32 } @llvm.riscv.vleff.nxv8i32(, *, i32); +declare { , i32 } @llvm.riscv.vleff.nxv16i32(, *, i32); +declare { , i32 } @llvm.riscv.vleff.nxv1i64(, *, i32); +declare { , i32 } @llvm.riscv.vleff.nxv2i64(, *, i32); +declare { , i32 } @llvm.riscv.vleff.nxv4i64(, *, i32); +declare { , i32 } @llvm.riscv.vleff.nxv8i64(, *, i32); +declare { , i32 } @llvm.riscv.vleff.mask.nxv8i8.i32(, *, , i32, i32 immarg) +declare { , i32 } @llvm.riscv.vleff.mask.nxv16i8.i32(, *, , i32, i32 immarg) +declare { , i32 } @llvm.riscv.vleff.mask.nxv32i8.i32(, *, , i32, i32 immarg) +declare { , i32 } @llvm.riscv.vleff.mask.nxv64i8.i32(, *, , i32, i32 immarg) +declare { , i32 } @llvm.riscv.vleff.mask.nxv4i16.i32(, *, , i32, i32 immarg) +declare { , i32 } @llvm.riscv.vleff.mask.nxv8i16.i32(, *, , i32, i32 immarg) +declare { , i32 } @llvm.riscv.vleff.mask.nxv16i16.i32(, *, , i32, i32 immarg) +declare { , i32 } @llvm.riscv.vleff.mask.nxv32i16.i32(, *, , i32, i32 immarg) +declare { , i32 } @llvm.riscv.vleff.mask.nxv2i32.i32(, *, , i32, i32 immarg) +declare { , i32 } @llvm.riscv.vleff.mask.nxv4i32.i32(, *, , i32, i32 immarg) +declare { , i32 } @llvm.riscv.vleff.mask.nxv8i32.i32(, *, , i32, i32 immarg) +declare { , i32 } @llvm.riscv.vleff.mask.nxv16i32.i32(, *, , i32, i32 immarg) +declare { , i32 } @llvm.riscv.vleff.mask.nxv1i64.i32(, *, , i32, i32
immarg) +declare { , i32 } @llvm.riscv.vleff.mask.nxv2i64.i32(, *, , i32, i32 immarg) +declare { , i32 } @llvm.riscv.vleff.mask.nxv4i64.i32(, *, , i32, i32 immarg) +declare { , i32 } @llvm.riscv.vleff.mask.nxv8i64.i32(, *, , i32, i32 immarg) + +define i32 @vleffe8m1( *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m1 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr = PseudoVLE8FF_V_M1 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv8i8( undef, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m2( *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m2 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLE8FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE8FF_V_M2 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv16i8( undef, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m4( *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m4 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLE8FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE8FF_V_M4 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv32i8( undef, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m8( *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m8 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLE8FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE8FF_V_M8 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 67 /* e8, m8, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv64i8( undef, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m1_tu( %merge, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m1_tu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8 + ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE8FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 0 /* e8, 
m1, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv8i8( %merge, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m2_tu( %merge, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m2_tu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2 + ; CHECK-NEXT: [[PseudoVLE8FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE8FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 1 /* e8, m2, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv16i8( %merge, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m4_tu( %merge, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m4_tu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4 + ; CHECK-NEXT: [[PseudoVLE8FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE8FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 2 /* e8, m4, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv32i8( %merge, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m8_tu( %merge, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m8_tu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[PseudoVLE8FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE8FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 3 /* e8, m8, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv64i8( %merge, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m1_tumu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m1_tumu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 0 /* e8, m1, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 
@vleffe8m2_tumu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m2_tumu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 1 /* e8, m2, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m4_tumu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m4_tumu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 2 /* e8, m4, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m8_tumu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m8_tumu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] + ; CHECK-NEXT: [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 3 /* e8, m8, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv64i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m1_tamu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m1_tamu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl 
+ ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m2_tamu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m2_tamu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m4_tamu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m4_tamu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m8_tamu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m8_tamu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] + ; CHECK-NEXT: [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 67 /* e8, m8, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv64i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m1_tuma( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m1_tuma + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = 
COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 128 /* e8, m1, tu, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m2_tuma( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m2_tuma + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 129 /* e8, m2, tu, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m4_tuma( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m4_tuma + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 130 /* e8, m4, tu, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m8_tuma( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m8_tuma + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] + ; CHECK-NEXT: [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 131 /* e8, m8, tu, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv64i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m1_tama( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m1_tama + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: 
$v0, $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 192 /* e8, m1, ta, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m2_tama( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m2_tama + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 193 /* e8, m2, ta, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m4_tama( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m4_tama + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 194 /* e8, m4, ta, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe8m8_tama( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe8m8_tama + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] + ; CHECK-NEXT: [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 195 /* e8, m8, ta, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } 
@llvm.riscv.vleff.mask.nxv64i8.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m1( *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m1 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_:%[0-9]+]]:vr = PseudoVLE16FF_V_M1 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv4i16( undef, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m2( *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m2 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE16FF_V_M2 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv8i16( undef, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m4( *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m4 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE16FF_V_M4 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv16i16( undef, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m8( *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m8 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE16FF_V_M8 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 75 /* e16, m8, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv32i16( undef, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m1_tu( %merge, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m1_tu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8 + ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE16FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 8 /* e16, m1, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; 
CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv4i16( %merge, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m2_tu( %merge, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m2_tu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2 + ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE16FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 9 /* e16, m2, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv8i16( %merge, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m4_tu( %merge, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m4_tu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4 + ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE16FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 10 /* e16, m4, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv16i16( %merge, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m8_tu( %merge, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m8_tu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE16FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 11 /* e16, m8, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv32i16( %merge, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m1_tumu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m1_tumu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 8 /* e16, m1, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m2_tumu( %mask, %maskedoff, *%p, i32 %vl) { + 
; CHECK-LABEL: name: vleffe16m2_tumu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 9 /* e16, m2, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m4_tumu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m4_tumu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 10 /* e16, m4, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m8_tumu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m8_tumu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] + ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 11 /* e16, m8, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m1_tamu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m1_tamu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY 
[[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m2_tamu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m2_tamu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m4_tamu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m4_tamu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m8_tamu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m8_tamu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] + ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 75 /* e16, m8, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m1_tuma( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m1_tuma + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY 
[[COPY3]] + ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 136 /* e16, m1, tu, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m2_tuma( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m2_tuma + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 137 /* e16, m2, tu, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m4_tuma( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m4_tuma + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 138 /* e16, m4, tu, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m8_tuma( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m8_tuma + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] + ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 139 /* e16, m8, tu, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m1_tama( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m1_tama + ; CHECK: bb.0.entry: + ; 
CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 200 /* e16, m1, ta, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m2_tama( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m2_tama + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 201 /* e16, m2, ta, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m4_tama( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m4_tama + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 202 /* e16, m4, ta, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe16m8_tama( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe16m8_tama + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] + ; CHECK-NEXT: [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 203 /* e16, m8, ta, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 
+entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv32i16.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m1( *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m1 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_:%[0-9]+]]:vr = PseudoVLE32FF_V_M1 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv2i32( undef, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m2( *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m2 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32FF_V_M2 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv4i32( undef, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m4( *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m4 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE32FF_V_M4 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 82 /* e32, m4, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv8i32( undef, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m8( *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m8 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE32FF_V_M8 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 83 /* e32, m8, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv16i32( undef, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m1_tu( %merge, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m1_tu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8 + ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE32FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 16 /* e32, m1, tu, mu */, implicit $vl + ; CHECK-NEXT: 
$x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv2i32( %merge, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m2_tu( %merge, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m2_tu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2 + ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE32FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 17 /* e32, m2, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv4i32( %merge, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m4_tu( %merge, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m4_tu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4 + ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE32FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 18 /* e32, m4, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv8i32( %merge, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m8_tu( %merge, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m8_tu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE32FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 19 /* e32, m8, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv16i32( %merge, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m1_tumu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m1_tumu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 16 /* e32, m1, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m2_tumu( 
%mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m2_tumu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 17 /* e32, m2, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m4_tumu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m4_tumu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 18 /* e32, m4, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m8_tumu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m8_tumu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] + ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 19 /* e32, m8, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m1_tamu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m1_tamu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, 
implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m2_tamu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m2_tamu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m4_tamu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m4_tamu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 82 /* e32, m4, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m8_tamu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m8_tamu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] + ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 83 /* e32, m8, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m1_tuma( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m1_tuma + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 + ; CHECK-NEXT: 
[[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 144 /* e32, m1, tu, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m2_tuma( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m2_tuma + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 145 /* e32, m2, tu, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m4_tuma( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m4_tuma + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 146 /* e32, m4, tu, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m8_tuma( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m8_tuma + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] + ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 147 /* e32, m8, tu, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m1_tama( %mask, %maskedoff, *%p, i32 %vl) { + ; 
CHECK-LABEL: name: vleffe32m1_tama + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 208 /* e32, m1, ta, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m2_tama( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m2_tama + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 209 /* e32, m2, ta, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m4_tama( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m4_tama + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 210 /* e32, m4, ta, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe32m8_tama( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe32m8_tama + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] + ; CHECK-NEXT: [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 211 /* e32, m8, ta, ma */, implicit $vl + ; CHECK-NEXT: $x10 = 
COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv16i32.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m1( *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m1 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_:%[0-9]+]]:vr = PseudoVLE64FF_V_M1 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv1i64( undef, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m2( *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m2 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE64FF_V_M2 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv2i64( undef, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m4( *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m4 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE64FF_V_M4 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv4i64( undef, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m8( *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m8 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLE64FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE64FF_V_M8 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 91 /* e64, m8, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv8i64( undef, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m1_tu( %merge, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m1_tu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8 + ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE64FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = 
PseudoReadVL 24 /* e64, m1, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv1i64( %merge, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m2_tu( %merge, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m2_tu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2 + ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE64FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 25 /* e64, m2, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv2i64( %merge, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m4_tu( %merge, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m4_tu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4 + ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE64FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 26 /* e64, m4, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv4i64( %merge, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m8_tu( %merge, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m8_tu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[PseudoVLE64FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE64FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 27 /* e64, m8, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = call { , i32 } @llvm.riscv.vleff.nxv8i64( %merge, * %p, i32 %vl) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m1_tumu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m1_tumu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 24 /* e64, m1, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv1i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) + %1 = extractvalue 
{ , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m2_tumu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m2_tumu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 25 /* e64, m2, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m4_tumu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m4_tumu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 26 /* e64, m4, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m8_tumu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m8_tumu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] + ; CHECK-NEXT: [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 27 /* e64, m8, tu, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 0) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m1_tamu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m1_tamu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl + ; CHECK-NEXT: 
[[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv1i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m2_tamu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m2_tamu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m4_tamu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m4_tamu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m8_tamu( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m8_tamu + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] + ; CHECK-NEXT: [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 91 /* e64, m8, ta, mu */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 1) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m1_tuma( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m1_tuma + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: 
[[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 152 /* e64, m1, tu, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv1i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m2_tuma( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m2_tuma + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 153 /* e64, m2, tu, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m4_tuma( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m4_tuma + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 154 /* e64, m4, tu, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m8_tuma( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m8_tuma + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] + ; CHECK-NEXT: [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 155 /* e64, m8, tu, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 2) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 
@vleffe64m1_tama( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m1_tama + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 216 /* e64, m1, ta, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv1i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m2_tama( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m2_tama + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m2, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 217 /* e64, m2, ta, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv2i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m4_tama( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m4_tama + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m4, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 218 /* e64, m4, ta, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv4i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} + +define i32 @vleffe64m8_tama( %mask, %maskedoff, *%p, i32 %vl) { + ; CHECK-LABEL: name: vleffe64m8_tama + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v0, $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: $v0 = COPY [[COPY3]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]] + ; CHECK-NEXT: [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 219 /* 
e64, m8, ta, ma */, implicit $vl + ; CHECK-NEXT: $x10 = COPY [[PseudoReadVL]] + ; CHECK-NEXT: PseudoRET implicit $x10 +entry: + %0 = tail call { , i32 } @llvm.riscv.vleff.mask.nxv8i64.i32( %maskedoff, * %p, %mask, i32 %vl, i32 3) + %1 = extractvalue { , i32 } %0, 1 + ret i32 %1 +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv32-readvl.ll b/llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv32-readvl.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv32-readvl.ll @@ -0,0 +1,732 @@ +; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+v -stop-after=finalize-isel < %s \ +; RUN: -target-abi=ilp32d | FileCheck %s +declare {,, i32} @llvm.riscv.vlseg2ff.nxv16i16(i16* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(,, i16*, , i32, i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv4i32(i32* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32(,, i32*, , i32, i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv16i8(i8* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8(,, i8*, , i32, i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv1i64(i64* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i64(,, i64*, , i32, i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv1i32(i32* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i32(,, i32*, , i32, i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv8i16(i16* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16(,, i16*, , i32, i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv4i8(i8* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i8(,, i8*, , i32, i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv1i16(i16* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i16(,, i16*, , i32, i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv2i32(i32* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32(,, i32*, , i32, i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv8i8(i8* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8(,, i8*, , i32, i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv4i64(i64* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i64(,, i64*, , i32, i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv4i16(i16* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16(,, i16*, , i32, i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv1i8(i8* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i8(,, i8*, , i32, i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv2i8(i8* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i8(,, i8*, , i32, i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv8i32(i32* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i32(,, i32*, , i32, i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv32i8(i8* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8(,, i8*, , i32, i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv2i16(i16* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i16(,, i16*, , i32, i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv2i64(i64* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i64(,, i64*, , i32, i32) + +define void @test_vlseg2ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_nxv8i8 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E8FF_V_M1 [[COPY2]], [[COPY1]], 3 /* e8 */, implicit-def $vl + 
; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8i8(i8* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 2 + store volatile i32 %1, i32* %outvl + ret void +} + +define void @test_vlseg2ff_mask_nxv8i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv8i8 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8, $x10, $x11, $v0, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vr = COPY $v8 + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm1_0, [[COPY4]], %subreg.sub_vrm1_1 + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 0 /* e8, m1, tu, mu */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_MASK1:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_MASK2:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 128 /* e8, m1, tu, ma */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_MASK3:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 192 /* e8, m1, ta, ma */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8( %val, %val, i8* %base, %mask, i32 %vl, i32 0) + %1 = extractvalue {,, i32} %0, 2 + store volatile i32 %1, i32* %outvl + %2 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8( %val, %val, i8* %base, %mask, i32 %vl, i32 1) + %3 = extractvalue {,, i32} %2, 2 + store volatile i32 %3, i32* %outvl + %4 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8( %val, %val, i8* %base, %mask, i32 %vl, i32 2) + %5 = extractvalue {,, i32} %4, 2 + store volatile i32 %5, i32* %outvl + %6 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8( %val, %val, i8* %base, %mask, i32 %vl, i32 3) + %7 = extractvalue {,, i32} %6, 2 + store volatile i32 %7, i32* %outvl + ret void +} + +define void @test_vlseg2ff_nxv16i8(i8* %base, i32 %vl, i32* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_nxv16i8 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: 
$x10, $x11, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M2_:%[0-9]+]]:vrn2m2 = PseudoVLSEG2E8FF_V_M2 [[COPY2]], [[COPY1]], 3 /* e8 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16i8(i8* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 2 + store volatile i32 %1, i32* %outvl + ret void +} + +define void @test_vlseg2ff_mask_nxv16i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv16i8 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m2, $x10, $x11, $v0, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm2 = COPY $v8m2 + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m2nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm2_0, [[COPY4]], %subreg.sub_vrm2_1 + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M2_MASK:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E8FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 1 /* e8, m2, tu, mu */, implicit $vl + ; CHECK-NEXT: SW [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M2_MASK1:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E8FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl + ; CHECK-NEXT: SW [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M2_MASK2:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E8FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 129 /* e8, m2, tu, ma */, implicit $vl + ; CHECK-NEXT: SW [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M2_MASK3:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E8FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 193 /* e8, m2, ta, ma */, implicit $vl + ; CHECK-NEXT: SW [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8( %val, %val, i8* %base, %mask, i32 %vl, i32 0) + %1 = extractvalue {,, i32} %0, 2 + store volatile i32 %1, i32* %outvl + %2 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8( %val, %val, i8* %base, %mask, i32 %vl, i32 1) + %3 = extractvalue {,, i32} %0, 2 + store volatile i32 %3, i32* %outvl + %4 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8( %val, %val, i8* %base, %mask, i32 %vl, i32 2) + %5 = extractvalue {,, i32} %0, 2 + store volatile i32 %5, i32* %outvl + %6 = tail call {,, i32} 
@llvm.riscv.vlseg2ff.mask.nxv16i8( %val, %val, i8* %base, %mask, i32 %vl, i32 3) + %7 = extractvalue {,, i32} %0, 2 + store volatile i32 %7, i32* %outvl + ret void +} + +define void @test_vlseg2ff_nxv32i8(i8* %base, i32 %vl, i32* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_nxv32i8 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M4_:%[0-9]+]]:vrn2m4 = PseudoVLSEG2E8FF_V_M4 [[COPY2]], [[COPY1]], 3 /* e8 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv32i8(i8* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 2 + store volatile i32 %1, i32* %outvl + ret void +} + +define void @test_vlseg2ff_mask_nxv32i8( %val, i8* %base, i32 %vl, %mask, i32* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv32i8 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m4, $x10, $x11, $v0, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm4 = COPY $v8m4 + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m4nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm4_0, [[COPY4]], %subreg.sub_vrm4_1 + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M4_MASK:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E8FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 2 /* e8, m4, tu, mu */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M4_MASK1:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E8FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M4_MASK2:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E8FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 130 /* e8, m4, tu, ma */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M4_MASK3:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E8FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 194 /* e8, m4, ta, ma */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8( %val, %val, i8* %base, %mask, i32 %vl, i32 0) + %1 = extractvalue {,, i32} %0, 2 + store volatile i32 %1, i32* %outvl + %2 = tail call {,, i32} 
@llvm.riscv.vlseg2ff.mask.nxv32i8( %val, %val, i8* %base, %mask, i32 %vl, i32 1) + %3 = extractvalue {,, i32} %2, 2 + store volatile i32 %3, i32* %outvl + %4 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8( %val, %val, i8* %base, %mask, i32 %vl, i32 2) + %5 = extractvalue {,, i32} %4, 2 + store volatile i32 %5, i32* %outvl + %6 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8( %val, %val, i8* %base, %mask, i32 %vl, i32 3) + %7 = extractvalue {,, i32} %6, 2 + store volatile i32 %7, i32* %outvl + ret void +} + +define void @test_vlseg2ff_nxv4i16(i16* %base, i32 %vl, i32* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_nxv4i16 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E16FF_V_M1 [[COPY2]], [[COPY1]], 4 /* e16 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4i16(i16* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 2 + store volatile i32 %1, i32* %outvl + ret void +} + +define void @test_vlseg2ff_mask_nxv4i16( %val, i16* %base, i32 %vl, %mask, i32* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv4i16 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8, $x10, $x11, $v0, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vr = COPY $v8 + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm1_0, [[COPY4]], %subreg.sub_vrm1_1 + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E16FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 8 /* e16, m1, tu, mu */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M1_MASK1:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E16FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M1_MASK2:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E16FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 136 /* e16, m1, tu, ma */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M1_MASK3:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E16FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 200 /* e16, m1, ta, 
ma */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16( %val, %val, i16* %base, %mask, i32 %vl, i32 0) + %1 = extractvalue {,, i32} %0, 2 + store volatile i32 %1, i32* %outvl + %2 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16( %val, %val, i16* %base, %mask, i32 %vl, i32 1) + %3 = extractvalue {,, i32} %2, 2 + store volatile i32 %3, i32* %outvl + %4 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16( %val, %val, i16* %base, %mask, i32 %vl, i32 2) + %5 = extractvalue {,, i32} %4, 2 + store volatile i32 %5, i32* %outvl + %6 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16( %val, %val, i16* %base, %mask, i32 %vl, i32 3) + %7 = extractvalue {,, i32} %6, 2 + store volatile i32 %7, i32* %outvl + ret void +} + +define void @test_vlseg2ff_nxv8i16(i16* %base, i32 %vl, i32* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_nxv8i16 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M2_:%[0-9]+]]:vrn2m2 = PseudoVLSEG2E16FF_V_M2 [[COPY2]], [[COPY1]], 4 /* e16 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8i16(i16* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 2 + store volatile i32 %1, i32* %outvl + ret void +} + +define void @test_vlseg2ff_mask_nxv8i16( %val, i16* %base, i32 %vl, %mask, i32* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv8i16 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m2, $x10, $x11, $v0, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm2 = COPY $v8m2 + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m2nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm2_0, [[COPY4]], %subreg.sub_vrm2_1 + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M2_MASK:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E16FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 9 /* e16, m2, tu, mu */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M2_MASK1:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E16FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M2_MASK2:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E16FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 137 /* e16, m2, tu, ma */, implicit $vl + ; CHECK-NEXT: SW 
killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M2_MASK3:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E16FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 201 /* e16, m2, ta, ma */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16( %val, %val, i16* %base, %mask, i32 %vl, i32 0) + %1 = extractvalue {,, i32} %0, 2 + store volatile i32 %1, i32* %outvl + %2 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16( %val, %val, i16* %base, %mask, i32 %vl, i32 1) + %3 = extractvalue {,, i32} %2, 2 + store volatile i32 %3, i32* %outvl + %4 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16( %val, %val, i16* %base, %mask, i32 %vl, i32 2) + %5 = extractvalue {,, i32} %4, 2 + store volatile i32 %5, i32* %outvl + %6 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16( %val, %val, i16* %base, %mask, i32 %vl, i32 3) + %7 = extractvalue {,, i32} %6, 2 + store volatile i32 %7, i32* %outvl + ret void +} + +define void @test_vlseg2ff_nxv16i16(i16* %base, i32 %vl, i32* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_nxv16i16 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M4_:%[0-9]+]]:vrn2m4 = PseudoVLSEG2E16FF_V_M4 [[COPY2]], [[COPY1]], 4 /* e16 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 2 + store volatile i32 %1, i32* %outvl + ret void +} + +define void @test_vlseg2ff_mask_nxv16i16( %val, i16* %base, i32 %vl, %mask, i32* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv16i16 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m4, $x10, $x11, $v0, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm4 = COPY $v8m4 + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m4nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm4_0, [[COPY4]], %subreg.sub_vrm4_1 + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M4_MASK:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E16FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 10 /* e16, m4, tu, mu */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M4_MASK1:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E16FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL1]], [[COPY]], 0 
:: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M4_MASK2:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E16FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 138 /* e16, m4, tu, ma */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M4_MASK3:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E16FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 202 /* e16, m4, ta, ma */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i32 %vl, i32 0) + %1 = extractvalue {,, i32} %0, 2 + store volatile i32 %1, i32* %outvl + %2 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i32 %vl, i32 1) + %3 = extractvalue {,, i32} %2, 2 + store volatile i32 %3, i32* %outvl + %4 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i32 %vl, i32 2) + %5 = extractvalue {,, i32} %4, 2 + store volatile i32 %5, i32* %outvl + %6 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i32 %vl, i32 3) + %7 = extractvalue {,, i32} %6, 2 + store volatile i32 %7, i32* %outvl + ret void +} + +define void @test_vlseg2ff_nxv2i32(i32* %base, i32 %vl, i32* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_nxv2i32 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E32FF_V_M1 [[COPY2]], [[COPY1]], 5 /* e32 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2i32(i32* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 2 + store volatile i32 %1, i32* %outvl + ret void +} + +define void @test_vlseg2ff_mask_nxv2i32( %val, i32* %base, i32 %vl, %mask, i32* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv2i32 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8, $x10, $x11, $v0, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vr = COPY $v8 + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm1_0, [[COPY4]], %subreg.sub_vrm1_1 + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E32FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 16 /* e32, m1, tu, mu */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; 
CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M1_MASK1:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E32FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M1_MASK2:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E32FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 144 /* e32, m1, tu, ma */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M1_MASK3:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E32FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 208 /* e32, m1, ta, ma */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32( %val, %val, i32* %base, %mask, i32 %vl, i32 0) + %1 = extractvalue {,, i32} %0, 2 + store volatile i32 %1, i32* %outvl + %2 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32( %val, %val, i32* %base, %mask, i32 %vl, i32 1) + %3 = extractvalue {,, i32} %2, 2 + store volatile i32 %3, i32* %outvl + %4 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32( %val, %val, i32* %base, %mask, i32 %vl, i32 2) + %5 = extractvalue {,, i32} %4, 2 + store volatile i32 %5, i32* %outvl + %6 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32( %val, %val, i32* %base, %mask, i32 %vl, i32 3) + %7 = extractvalue {,, i32} %6, 2 + store volatile i32 %7, i32* %outvl + ret void +} + +define void @test_vlseg2ff_nxv4i32(i32* %base, i32 %vl, i32* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_nxv4i32 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M2_:%[0-9]+]]:vrn2m2 = PseudoVLSEG2E32FF_V_M2 [[COPY2]], [[COPY1]], 5 /* e32 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4i32(i32* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 2 + store volatile i32 %1, i32* %outvl + ret void +} + +define void @test_vlseg2ff_mask_nxv4i32( %val, i32* %base, i32 %vl, %mask, i32* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv4i32 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m2, $x10, $x11, $v0, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm2 = COPY $v8m2 + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m2nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm2_0, [[COPY4]], %subreg.sub_vrm2_1 + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; 
CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M2_MASK:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E32FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 17 /* e32, m2, tu, mu */, implicit $vl + ; CHECK-NEXT: SW [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M2_MASK1:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E32FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl + ; CHECK-NEXT: SW [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M2_MASK2:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E32FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 145 /* e32, m2, tu, ma */, implicit $vl + ; CHECK-NEXT: SW [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M2_MASK3:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E32FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 209 /* e32, m2, ta, ma */, implicit $vl + ; CHECK-NEXT: SW [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32( %val, %val, i32* %base, %mask, i32 %vl, i32 0) + %1 = extractvalue {,, i32} %0, 2 + store volatile i32 %1, i32* %outvl + %2 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32( %val, %val, i32* %base, %mask, i32 %vl, i32 1) + %3 = extractvalue {,, i32} %2, 2 + store volatile i32 %1, i32* %outvl + %4 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32( %val, %val, i32* %base, %mask, i32 %vl, i32 2) + %5 = extractvalue {,, i32} %4, 2 + store volatile i32 %1, i32* %outvl + %6 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32( %val, %val, i32* %base, %mask, i32 %vl, i32 3) + %7 = extractvalue {,, i32} %6, 2 + store volatile i32 %1, i32* %outvl + ret void +} + +define void @test_vlseg2ff_nxv1i64(i64* %base, i32 %vl, i32* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_nxv1i64 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E64FF_V_M1 [[COPY2]], [[COPY1]], 6 /* e64 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1i64(i64* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 2 + store volatile i32 %1, i32* %outvl + ret void +} + +define void @test_vlseg2ff_mask_nxv1i64( %val, i64* %base, i32 %vl, %mask, i32* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv1i64 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8, $x10, $x11, $v0, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr 
= COPY $v0 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vr = COPY $v8 + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm1_0, [[COPY4]], %subreg.sub_vrm1_1 + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E64FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 24 /* e64, m1, tu, mu */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M1_MASK1:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E64FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M1_MASK2:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E64FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 152 /* e64, m1, tu, ma */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M1_MASK3:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E64FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 216 /* e64, m1, ta, ma */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i64( %val, %val, i64* %base, %mask, i32 %vl, i32 0) + %1 = extractvalue {,, i32} %0, 2 + store volatile i32 %1, i32* %outvl + %2 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i64( %val, %val, i64* %base, %mask, i32 %vl, i32 1) + %3 = extractvalue {,, i32} %2, 2 + store volatile i32 %3, i32* %outvl + %4 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i64( %val, %val, i64* %base, %mask, i32 %vl, i32 2) + %5 = extractvalue {,, i32} %4, 2 + store volatile i32 %5, i32* %outvl + %6 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i64( %val, %val, i64* %base, %mask, i32 %vl, i32 3) + %7 = extractvalue {,, i32} %6, 2 + store volatile i32 %7, i32* %outvl + ret void +} + +define void @test_vlseg2ff_nxv2i64(i64* %base, i32 %vl, i32* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_nxv2i64 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M2_:%[0-9]+]]:vrn2m2 = PseudoVLSEG2E64FF_V_M2 [[COPY2]], [[COPY1]], 6 /* e64 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2i64(i64* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 2 + store 
volatile i32 %1, i32* %outvl + ret void +} + +define void @test_vlseg2ff_mask_nxv2i64( %val, i64* %base, i32 %vl, %mask, i32* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv2i64 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m2, $x10, $x11, $v0, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm2 = COPY $v8m2 + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m2nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm2_0, [[COPY4]], %subreg.sub_vrm2_1 + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M2_MASK:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E64FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 25 /* e64, m2, tu, mu */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M2_MASK1:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E64FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M2_MASK2:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E64FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 153 /* e64, m2, tu, ma */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M2_MASK3:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E64FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 217 /* e64, m2, ta, ma */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i64( %val, %val, i64* %base, %mask, i32 %vl, i32 0) + %1 = extractvalue {,, i32} %0, 2 + store volatile i32 %1, i32* %outvl + %2 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i64( %val, %val, i64* %base, %mask, i32 %vl, i32 1) + %3 = extractvalue {,, i32} %2, 2 + store volatile i32 %3, i32* %outvl + %4 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i64( %val, %val, i64* %base, %mask, i32 %vl, i32 2) + %5 = extractvalue {,, i32} %4, 2 + store volatile i32 %5, i32* %outvl + %6 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i64( %val, %val, i64* %base, %mask, i32 %vl, i32 3) + %7 = extractvalue {,, i32} %6, 2 + store volatile i32 %7, i32* %outvl + ret void +} + +define void @test_vlseg2ff_nxv4i64(i64* %base, i32 %vl, i32* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_nxv4i64 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M4_:%[0-9]+]]:vrn2m4 = PseudoVLSEG2E64FF_V_M4 [[COPY2]], [[COPY1]], 6 /* e64 
*/, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4i64(i64* %base, i32 %vl) + %1 = extractvalue {,, i32} %0, 2 + store volatile i32 %1, i32* %outvl + ret void +} + +define void @test_vlseg2ff_mask_nxv4i64( %val, i64* %base, i32 %vl, %mask, i32* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv4i64 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m4, $x10, $x11, $v0, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm4 = COPY $v8m4 + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m4nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm4_0, [[COPY4]], %subreg.sub_vrm4_1 + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M4_MASK:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E64FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 26 /* e64, m4, tu, mu */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M4_MASK1:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E64FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M4_MASK2:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E64FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 154 /* e64, m4, tu, ma */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M4_MASK3:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E64FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 218 /* e64, m4, ta, ma */, implicit $vl + ; CHECK-NEXT: SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i64( %val, %val, i64* %base, %mask, i32 %vl, i32 0) + %1 = extractvalue {,, i32} %0, 2 + store volatile i32 %1, i32* %outvl + %2 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i64( %val, %val, i64* %base, %mask, i32 %vl, i32 1) + %3 = extractvalue {,, i32} %2, 2 + store volatile i32 %3, i32* %outvl + %4 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i64( %val, %val, i64* %base, %mask, i32 %vl, i32 2) + %5 = extractvalue {,, i32} %4, 2 + store volatile i32 %5, i32* %outvl + %6 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i64( %val, %val, i64* %base, %mask, i32 %vl, i32 3) + %7 = extractvalue {,, i32} %6, 2 + store volatile i32 %7, i32* %outvl + ret void +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv64-readvl.ll 
b/llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv64-readvl.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv64-readvl.ll @@ -0,0 +1,732 @@ +; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+v -stop-after=finalize-isel < %s \ +; RUN: -target-abi=lp64d | FileCheck %s +declare {,, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(,, i16*, , i64, i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i32(i32* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32(,, i32*, , i64, i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv16i8(i8* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8(,, i8*, , i64, i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i64(i64* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64(,, i64*, , i64, i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i32(i32* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i32(,, i32*, , i64, i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv8i16(i16* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16(,, i16*, , i64, i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i8(i8* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i8(,, i8*, , i64, i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i16(i16* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i16(,, i16*, , i64, i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i32(i32* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32(,, i32*, , i64, i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv8i8(i8* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(,, i8*, , i64, i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i64(i64* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64(,, i64*, , i64, i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i16(i16* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16(,, i16*, , i64, i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i8(i8* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i8(,, i8*, , i64, i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i8(i8* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i8(,, i8*, , i64, i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv8i32(i32* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i32(,, i32*, , i64, i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv32i8(i8* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8(,, i8*, , i64, i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i16(i16* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i16(,, i16*, , i64, i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i64(i64* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64(,, i64*, , i64, i64) + +define void @test_vlseg2ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_nxv8i8 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E8FF_V_M1 [[COPY2]], [[COPY1]], 3 /* e8 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8i8(i8* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 2 + store 
volatile i64 %1, i64* %outvl + ret void +} + +define void @test_vlseg2ff_mask_nxv8i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv8i8 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8, $x10, $x11, $v0, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vr = COPY $v8 + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm1_0, [[COPY4]], %subreg.sub_vrm1_1 + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 0 /* e8, m1, tu, mu */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_MASK1:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_MASK2:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 128 /* e8, m1, tu, ma */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_MASK3:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 192 /* e8, m1, ta, ma */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8( %val, %val, i8* %base, %mask, i64 %vl, i64 0) + %1 = extractvalue {,, i64} %0, 2 + store volatile i64 %1, i64* %outvl + %2 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8( %val, %val, i8* %base, %mask, i64 %vl, i64 1) + %3 = extractvalue {,, i64} %2, 2 + store volatile i64 %3, i64* %outvl + %4 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8( %val, %val, i8* %base, %mask, i64 %vl, i64 2) + %5 = extractvalue {,, i64} %4, 2 + store volatile i64 %5, i64* %outvl + %6 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8( %val, %val, i8* %base, %mask, i64 %vl, i64 3) + %7 = extractvalue {,, i64} %6, 2 + store volatile i64 %7, i64* %outvl + ret void +} + +define void @test_vlseg2ff_nxv16i8(i8* %base, i64 %vl, i64* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_nxv16i8 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M2_:%[0-9]+]]:vrn2m2 = PseudoVLSEG2E8FF_V_M2 [[COPY2]], [[COPY1]], 3 /* e8 */, implicit-def $vl + ; CHECK-NEXT: 
[[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16i8(i8* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 2 + store volatile i64 %1, i64* %outvl + ret void +} + +define void @test_vlseg2ff_mask_nxv16i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv16i8 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m2, $x10, $x11, $v0, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm2 = COPY $v8m2 + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m2nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm2_0, [[COPY4]], %subreg.sub_vrm2_1 + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M2_MASK:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E8FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 1 /* e8, m2, tu, mu */, implicit $vl + ; CHECK-NEXT: SD [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M2_MASK1:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E8FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl + ; CHECK-NEXT: SD [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M2_MASK2:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E8FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 129 /* e8, m2, tu, ma */, implicit $vl + ; CHECK-NEXT: SD [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M2_MASK3:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E8FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 193 /* e8, m2, ta, ma */, implicit $vl + ; CHECK-NEXT: SD [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8( %val, %val, i8* %base, %mask, i64 %vl, i64 0) + %1 = extractvalue {,, i64} %0, 2 + store volatile i64 %1, i64* %outvl + %2 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8( %val, %val, i8* %base, %mask, i64 %vl, i64 1) + %3 = extractvalue {,, i64} %0, 2 + store volatile i64 %3, i64* %outvl + %4 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8( %val, %val, i8* %base, %mask, i64 %vl, i64 2) + %5 = extractvalue {,, i64} %0, 2 + store volatile i64 %5, i64* %outvl + %6 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8( %val, %val, i8* %base, %mask, i64 %vl, i64 3) + %7 = extractvalue {,, i64} %0, 2 + store volatile i64 %7, i64* %outvl + ret void +} + +define void @test_vlseg2ff_nxv32i8(i8* %base, i64 %vl, i64* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_nxv32i8 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11, $x12 + ; CHECK-NEXT: 
{{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M4_:%[0-9]+]]:vrn2m4 = PseudoVLSEG2E8FF_V_M4 [[COPY2]], [[COPY1]], 3 /* e8 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv32i8(i8* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 2 + store volatile i64 %1, i64* %outvl + ret void +} + +define void @test_vlseg2ff_mask_nxv32i8( %val, i8* %base, i64 %vl, %mask, i64* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv32i8 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m4, $x10, $x11, $v0, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm4 = COPY $v8m4 + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m4nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm4_0, [[COPY4]], %subreg.sub_vrm4_1 + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M4_MASK:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E8FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 2 /* e8, m4, tu, mu */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M4_MASK1:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E8FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M4_MASK2:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E8FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 130 /* e8, m4, tu, ma */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M4_MASK3:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E8FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 194 /* e8, m4, ta, ma */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8( %val, %val, i8* %base, %mask, i64 %vl, i64 0) + %1 = extractvalue {,, i64} %0, 2 + store volatile i64 %1, i64* %outvl + %2 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8( %val, %val, i8* %base, %mask, i64 %vl, i64 1) + %3 = extractvalue {,, i64} %2, 2 + store volatile i64 %3, i64* %outvl + %4 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8( %val, %val, i8* %base, %mask, i64 %vl, i64 2) + %5 = extractvalue {,, i64} %4, 2 + store volatile i64 %5, i64* %outvl + %6 = tail call {,, i64} 
@llvm.riscv.vlseg2ff.mask.nxv32i8( %val, %val, i8* %base, %mask, i64 %vl, i64 3) + %7 = extractvalue {,, i64} %6, 2 + store volatile i64 %7, i64* %outvl + ret void +} + +define void @test_vlseg2ff_nxv4i16(i16* %base, i64 %vl, i64* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_nxv4i16 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E16FF_V_M1 [[COPY2]], [[COPY1]], 4 /* e16 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4i16(i16* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 2 + store volatile i64 %1, i64* %outvl + ret void +} + +define void @test_vlseg2ff_mask_nxv4i16( %val, i16* %base, i64 %vl, %mask, i64* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv4i16 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8, $x10, $x11, $v0, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vr = COPY $v8 + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm1_0, [[COPY4]], %subreg.sub_vrm1_1 + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E16FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 8 /* e16, m1, tu, mu */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M1_MASK1:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E16FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M1_MASK2:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E16FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 136 /* e16, m1, tu, ma */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M1_MASK3:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E16FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 200 /* e16, m1, ta, ma */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16( %val, %val, i16* %base, %mask, i64 %vl, i64 0) + %1 = extractvalue {,, i64} %0, 2 + store volatile i64 %1, i64* %outvl + %2 = tail call {,, i64} 
@llvm.riscv.vlseg2ff.mask.nxv4i16( %val, %val, i16* %base, %mask, i64 %vl, i64 1) + %3 = extractvalue {,, i64} %2, 2 + store volatile i64 %3, i64* %outvl + %4 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16( %val, %val, i16* %base, %mask, i64 %vl, i64 2) + %5 = extractvalue {,, i64} %4, 2 + store volatile i64 %5, i64* %outvl + %6 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16( %val, %val, i16* %base, %mask, i64 %vl, i64 3) + %7 = extractvalue {,, i64} %6, 2 + store volatile i64 %7, i64* %outvl + ret void +} + +define void @test_vlseg2ff_nxv8i16(i16* %base, i64 %vl, i64* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_nxv8i16 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M2_:%[0-9]+]]:vrn2m2 = PseudoVLSEG2E16FF_V_M2 [[COPY2]], [[COPY1]], 4 /* e16 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8i16(i16* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 2 + store volatile i64 %1, i64* %outvl + ret void +} + +define void @test_vlseg2ff_mask_nxv8i16( %val, i16* %base, i64 %vl, %mask, i64* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv8i16 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m2, $x10, $x11, $v0, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm2 = COPY $v8m2 + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m2nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm2_0, [[COPY4]], %subreg.sub_vrm2_1 + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M2_MASK:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E16FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 9 /* e16, m2, tu, mu */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M2_MASK1:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E16FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M2_MASK2:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E16FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 137 /* e16, m2, tu, ma */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M2_MASK3:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E16FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 201 /* 
e16, m2, ta, ma */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16( %val, %val, i16* %base, %mask, i64 %vl, i64 0) + %1 = extractvalue {,, i64} %0, 2 + store volatile i64 %1, i64* %outvl + %2 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16( %val, %val, i16* %base, %mask, i64 %vl, i64 1) + %3 = extractvalue {,, i64} %2, 2 + store volatile i64 %3, i64* %outvl + %4 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16( %val, %val, i16* %base, %mask, i64 %vl, i64 2) + %5 = extractvalue {,, i64} %4, 2 + store volatile i64 %5, i64* %outvl + %6 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16( %val, %val, i16* %base, %mask, i64 %vl, i64 3) + %7 = extractvalue {,, i64} %6, 2 + store volatile i64 %7, i64* %outvl + ret void +} + +define void @test_vlseg2ff_nxv16i16(i16* %base, i64 %vl, i64* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_nxv16i16 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M4_:%[0-9]+]]:vrn2m4 = PseudoVLSEG2E16FF_V_M4 [[COPY2]], [[COPY1]], 4 /* e16 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 2 + store volatile i64 %1, i64* %outvl + ret void +} + +define void @test_vlseg2ff_mask_nxv16i16( %val, i16* %base, i64 %vl, %mask, i64* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv16i16 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m4, $x10, $x11, $v0, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm4 = COPY $v8m4 + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m4nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm4_0, [[COPY4]], %subreg.sub_vrm4_1 + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M4_MASK:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E16FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 10 /* e16, m4, tu, mu */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M4_MASK1:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E16FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M4_MASK2:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E16FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 138 /* e16, m4, tu, ma */, implicit $vl + ; 
CHECK-NEXT: SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E16FF_V_M4_MASK3:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E16FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 202 /* e16, m4, ta, ma */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i64 %vl, i64 0) + %1 = extractvalue {,, i64} %0, 2 + store volatile i64 %1, i64* %outvl + %2 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i64 %vl, i64 1) + %3 = extractvalue {,, i64} %2, 2 + store volatile i64 %3, i64* %outvl + %4 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i64 %vl, i64 2) + %5 = extractvalue {,, i64} %4, 2 + store volatile i64 %5, i64* %outvl + %6 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i64 %vl, i64 3) + %7 = extractvalue {,, i64} %6, 2 + store volatile i64 %7, i64* %outvl + ret void +} + +define void @test_vlseg2ff_nxv2i32(i32* %base, i64 %vl, i64* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_nxv2i32 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E32FF_V_M1 [[COPY2]], [[COPY1]], 5 /* e32 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2i32(i32* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 2 + store volatile i64 %1, i64* %outvl + ret void +} + +define void @test_vlseg2ff_mask_nxv2i32( %val, i32* %base, i64 %vl, %mask, i64* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv2i32 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8, $x10, $x11, $v0, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vr = COPY $v8 + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm1_0, [[COPY4]], %subreg.sub_vrm1_1 + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E32FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 16 /* e32, m1, tu, mu */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M1_MASK1:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E32FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL1]], 
[[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M1_MASK2:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E32FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 144 /* e32, m1, tu, ma */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M1_MASK3:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E32FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 208 /* e32, m1, ta, ma */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32( %val, %val, i32* %base, %mask, i64 %vl, i64 0) + %1 = extractvalue {,, i64} %0, 2 + store volatile i64 %1, i64* %outvl + %2 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32( %val, %val, i32* %base, %mask, i64 %vl, i64 1) + %3 = extractvalue {,, i64} %2, 2 + store volatile i64 %3, i64* %outvl + %4 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32( %val, %val, i32* %base, %mask, i64 %vl, i64 2) + %5 = extractvalue {,, i64} %4, 2 + store volatile i64 %5, i64* %outvl + %6 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32( %val, %val, i32* %base, %mask, i64 %vl, i64 3) + %7 = extractvalue {,, i64} %6, 2 + store volatile i64 %7, i64* %outvl + ret void +} + +define void @test_vlseg2ff_nxv4i32(i32* %base, i64 %vl, i64* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_nxv4i32 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M2_:%[0-9]+]]:vrn2m2 = PseudoVLSEG2E32FF_V_M2 [[COPY2]], [[COPY1]], 5 /* e32 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4i32(i32* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 2 + store volatile i64 %1, i64* %outvl + ret void +} + +define void @test_vlseg2ff_mask_nxv4i32( %val, i32* %base, i64 %vl, %mask, i64* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv4i32 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m2, $x10, $x11, $v0, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm2 = COPY $v8m2 + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m2nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm2_0, [[COPY4]], %subreg.sub_vrm2_1 + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M2_MASK:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E32FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 17 /* e32, m2, tu, mu */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M2_MASK1:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E32FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M2_MASK2:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E32FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 145 /* e32, m2, tu, ma */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E32FF_V_M2_MASK3:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E32FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 209 /* e32, m2, ta, ma */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32( %val, %val, i32* %base, %mask, i64 %vl, i64 0) + %1 = extractvalue {,, i64} %0, 2 + store volatile i64 %1, i64* %outvl + %2 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32( %val, %val, i32* %base, %mask, i64 %vl, i64 1) + %3 = extractvalue {,, i64} %2, 2 + store volatile i64 %3, i64* %outvl + %4 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32( %val, %val, i32* %base, %mask, i64 %vl, i64 2) + %5 = extractvalue {,, i64} %4, 2 + store volatile i64 %5, i64* %outvl + %6 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32( %val, %val, i32* %base, %mask, i64 %vl, i64 3) + %7 = extractvalue {,, i64} %6, 2 + store volatile i64 %7, i64* %outvl + ret void +} + +define void @test_vlseg2ff_nxv1i64(i64* %base, i64 %vl, i64* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_nxv1i64 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E64FF_V_M1 [[COPY2]], [[COPY1]], 6 /* e64 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1i64(i64* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 2 + store volatile i64 %1, i64* %outvl + ret void +} + +define void @test_vlseg2ff_mask_nxv1i64( %val, i64* %base, i64 %vl, %mask, i64* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv1i64 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8, $x10, $x11, $v0, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vr = COPY $v8 + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm1_0, [[COPY4]], %subreg.sub_vrm1_1 + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT:
[[PseudoVLSEG2E64FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E64FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 24 /* e64, m1, tu, mu */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M1_MASK1:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E64FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M1_MASK2:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E64FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 152 /* e64, m1, tu, ma */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M1_MASK3:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E64FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 216 /* e64, m1, ta, ma */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64( %val, %val, i64* %base, %mask, i64 %vl, i64 0) + %1 = extractvalue {,, i64} %0, 2 + store volatile i64 %1, i64* %outvl + %2 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64( %val, %val, i64* %base, %mask, i64 %vl, i64 1) + %3 = extractvalue {,, i64} %2, 2 + store volatile i64 %3, i64* %outvl + %4 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64( %val, %val, i64* %base, %mask, i64 %vl, i64 2) + %5 = extractvalue {,, i64} %4, 2 + store volatile i64 %5, i64* %outvl + %6 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64( %val, %val, i64* %base, %mask, i64 %vl, i64 3) + %7 = extractvalue {,, i64} %6, 2 + store volatile i64 %7, i64* %outvl + ret void +} + +define void @test_vlseg2ff_nxv2i64(i64* %base, i64 %vl, i64* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_nxv2i64 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M2_:%[0-9]+]]:vrn2m2 = PseudoVLSEG2E64FF_V_M2 [[COPY2]], [[COPY1]], 6 /* e64 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2i64(i64* %base, i64 %vl) + %1 = extractvalue {,, i64} %0, 2 + store volatile i64 %1, i64* %outvl + ret void +} + +define void @test_vlseg2ff_mask_nxv2i64( %val, i64* %base, i64 %vl, %mask, i64* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv2i64 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m2, $x10, $x11, $v0, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: 
[[COPY1:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm2 = COPY $v8m2 + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m2nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm2_0, [[COPY4]], %subreg.sub_vrm2_1 + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M2_MASK:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E64FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 25 /* e64, m2, tu, mu */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M2_MASK1:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E64FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M2_MASK2:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E64FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 153 /* e64, m2, tu, ma */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M2_MASK3:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E64FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 217 /* e64, m2, ta, ma */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64( %val, %val, i64* %base, %mask, i64 %vl, i64 0) + %1 = extractvalue {,, i64} %0, 2 + store volatile i64 %1, i64* %outvl + %2 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64( %val, %val, i64* %base, %mask, i64 %vl, i64 1) + %3 = extractvalue {,, i64} %2, 2 + store volatile i64 %3, i64* %outvl + %4 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64( %val, %val, i64* %base, %mask, i64 %vl, i64 2) + %5 = extractvalue {,, i64} %4, 2 + store volatile i64 %5, i64* %outvl + %6 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64( %val, %val, i64* %base, %mask, i64 %vl, i64 3) + %7 = extractvalue {,, i64} %6, 2 + store volatile i64 %7, i64* %outvl + ret void +} + +define void @test_vlseg2ff_nxv4i64(i64* %base, i64 %vl, i64* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_nxv4i64 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $x10, $x11, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M4_:%[0-9]+]]:vrn2m4 = PseudoVLSEG2E64FF_V_M4 [[COPY2]], [[COPY1]], 6 /* e64 */, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4i64(i64* %base, i64 %vl) + %1 = extractvalue {,, 
i64} %0, 2 + store volatile i64 %1, i64* %outvl + ret void +} + +define void @test_vlseg2ff_mask_nxv4i64( %val, i64* %base, i64 %vl, %mask, i64* %outvl) { + ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv4i64 + ; CHECK: bb.0.entry: + ; CHECK-NEXT: liveins: $v8m4, $x10, $x11, $v0, $x12 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm4 = COPY $v8m4 + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m4nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm4_0, [[COPY4]], %subreg.sub_vrm4_1 + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M4_MASK:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E64FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 0, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 26 /* e64, m4, tu, mu */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M4_MASK1:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E64FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 1, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M4_MASK2:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E64FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 2, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 154 /* e64, m4, tu, ma */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: $v0 = COPY [[COPY1]] + ; CHECK-NEXT: [[PseudoVLSEG2E64FF_V_M4_MASK3:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E64FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 3, implicit-def $vl + ; CHECK-NEXT: [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 218 /* e64, m4, ta, ma */, implicit $vl + ; CHECK-NEXT: SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl) + ; CHECK-NEXT: PseudoRET +entry: + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64( %val, %val, i64* %base, %mask, i64 %vl, i64 0) + %1 = extractvalue {,, i64} %0, 2 + store volatile i64 %1, i64* %outvl + %2 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64( %val, %val, i64* %base, %mask, i64 %vl, i64 1) + %3 = extractvalue {,, i64} %2, 2 + store volatile i64 %3, i64* %outvl + %4 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64( %val, %val, i64* %base, %mask, i64 %vl, i64 2) + %5 = extractvalue {,, i64} %4, 2 + store volatile i64 %5, i64* %outvl + %6 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64( %val, %val, i64* %base, %mask, i64 %vl, i64 3) + %7 = extractvalue {,, i64} %6, 2 + store volatile i64 %7, i64* %outvl + ret void +}
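Note on the vtype immediates checked above (illustration only, not part of the patch): the integer operand now carried by PseudoReadVL is the vtype value produced by RISCVVType::encodeVTYPE, and the /* e8, m2, ta, mu */ style comments are its decoded form printed by RISCVInstrInfo. As a rough sketch, assuming the standard vtype bit layout (vlmul in bits 2:0, vsew in bits 5:3, vta in bit 6, vma in bit 7), a small standalone decoder reproduces the comments seen in the checks; this is not LLVM code, just a worked example of the encoding.

#include <cstdio>

// Sketch: decode a vtype immediate as it appears on PseudoReadVL above,
// e.g. 65 -> "e8, m2, ta, mu". Assumes vlmul[2:0], vsew[5:3], vta = bit 6,
// vma = bit 7 (the layout used by RISCVVType::encodeVTYPE).
int main() {
  const unsigned VTypes[] = {1, 65, 129, 193, 81, 209};
  const char *LMul[8] = {"m1", "m2", "m4", "m8", "?", "mf8", "mf4", "mf2"};
  for (unsigned VType : VTypes) {
    unsigned SEW = 8u << ((VType >> 3) & 7); // vsew field selects e8..e64
    std::printf("%u /* e%u, %s, %s, %s */\n", VType, SEW, LMul[VType & 7],
                (VType & 0x40) ? "ta" : "tu", (VType & 0x80) ? "ma" : "mu");
  }
  return 0;
}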