diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -101,6 +101,14 @@
                     [LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty],
                     [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
+  // For unit stride fault-only-first load
+  // Input: (pointer, vl)
+  // Output: (data, vl)
+  class RISCVUSLoadFF
+        : Intrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
+                    [LLVMPointerType<LLVMMatchType<0>>, LLVMMatchType<1>],
+                    [NoCapture<ArgIndex<0>>, IntrReadMem, IntrHasSideEffects]>,
+                    RISCVVIntrinsic;
   // For unit stride load with mask
   // Input: (maskedoff, pointer, mask, vl)
   class RISCVUSLoadMask
@@ -110,6 +118,16 @@
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                      llvm_anyint_ty],
                     [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic;
+  // For unit stride fault-only-first load with mask
+  // Input: (maskedoff, pointer, mask, vl)
+  // Output: (data, vl)
+  class RISCVUSLoadFFMask
+        : Intrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
+                    [LLVMMatchType<0>,
+                     LLVMPointerType<LLVMMatchType<0>>,
+                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                     LLVMMatchType<1>],
+                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic;
   // For strided load
   // Input: (pointer, stride, vl)
   class RISCVSLoad
@@ -449,6 +467,10 @@
     def "int_riscv_" # NAME : RISCVUSLoad;
     def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMask;
   }
+  multiclass RISCVUSLoadFF {
+    def "int_riscv_" # NAME : RISCVUSLoadFF;
+    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadFFMask;
+  }
   multiclass RISCVSLoad {
     def "int_riscv_" # NAME : RISCVSLoad;
     def "int_riscv_" # NAME # "_mask" : RISCVSLoadMask;
@@ -538,7 +560,7 @@
   }
 
   defm vle : RISCVUSLoad;
-  defm vleff : RISCVUSLoad;
+  defm vleff : RISCVUSLoadFF;
   defm vse : RISCVUSStore;
   defm vlse: RISCVSLoad;
   defm vsse: RISCVSStore;
diff --git a/llvm/lib/CodeGen/Analysis.cpp b/llvm/lib/CodeGen/Analysis.cpp
--- a/llvm/lib/CodeGen/Analysis.cpp
+++ b/llvm/lib/CodeGen/Analysis.cpp
@@ -88,13 +88,18 @@
                            uint64_t StartingOffset) {
   // Given a struct type, recursively traverse the elements.
   if (StructType *STy = dyn_cast<StructType>(Ty)) {
-    const StructLayout *SL = DL.getStructLayout(STy);
+    // If the Offsets aren't needed, don't query the struct layout. This allows
+    // us to support structs with scalable vectors for operations that don't
+    // need offsets.
+    const StructLayout *SL = Offsets ? DL.getStructLayout(STy) : nullptr;
     for (StructType::element_iterator EB = STy->element_begin(),
                                       EI = EB,
                                       EE = STy->element_end();
-         EI != EE; ++EI)
+         EI != EE; ++EI) {
+      uint64_t EltOffset = SL ? SL->getElementOffset(EI - EB) : 0;
       ComputeValueVTs(TLI, DL, *EI, ValueVTs, MemVTs, Offsets,
-                      StartingOffset + SL->getElementOffset(EI - EB));
+                      StartingOffset + EltOffset);
+    }
     return;
   }
   // Given an array type, recursively traverse the elements.
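At the IR level, each vleff intrinsic now returns the loaded data and the resulting vl together as a two-element struct, which is why ComputeValueVTs above must tolerate struct types that contain scalable vectors when no offsets are requested. A minimal RV32 usage sketch, mirroring the nxv1i32 tests further down (the wrapper function name @vleff_and_vl is illustrative only):

declare { <vscale x 1 x i32>, i32 } @llvm.riscv.vleff.nxv1i32(<vscale x 1 x i32>*, i32)

define <vscale x 1 x i32> @vleff_and_vl(<vscale x 1 x i32>* %ptr, i32 %avl, i32* %vl_out) {
entry:
  ; First struct member is the loaded data, second is the vl actually produced
  ; by the fault-only-first load.
  %r = call { <vscale x 1 x i32>, i32 } @llvm.riscv.vleff.nxv1i32(<vscale x 1 x i32>* %ptr, i32 %avl)
  %data = extractvalue { <vscale x 1 x i32>, i32 } %r, 0
  %vl = extractvalue { <vscale x 1 x i32>, i32 } %r, 1
  store i32 %vl, i32* %vl_out
  ret <vscale x 1 x i32> %data
}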
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -85,6 +85,11 @@
   // Splats an i64 scalar to a vector type (with element type i64) where the
   // scalar is a sign-extended i32.
   SPLAT_VECTOR_I64,
+  // Unit-stride fault-only-first load
+  VLEFF,
+  VLEFF_MASK,
+  // read vl CSR
+  READ_VL,
 };
 } // namespace RISCVISD
 
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -357,6 +357,8 @@
   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
 
+  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
+
   if (Subtarget.is64Bit()) {
     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
@@ -1184,7 +1186,31 @@
     }
   }
 
-  return SDValue();
+  switch (IntNo) {
+  default:
+    return SDValue(); // Don't custom lower most intrinsics.
+  case Intrinsic::riscv_vleff: {
+    SDLoc DL(Op);
+    SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Other, MVT::Glue);
+    SDValue Load = DAG.getNode(RISCVISD::VLEFF, DL, VTs, Op.getOperand(0),
+                               Op.getOperand(2), Op.getOperand(3));
+    VTs = DAG.getVTList(Op->getValueType(1), MVT::Other);
+    SDValue ReadVL = DAG.getNode(RISCVISD::READ_VL, DL, VTs, Load.getValue(1),
+                                 Load.getValue(2));
+    return DAG.getMergeValues({Load, ReadVL, ReadVL.getValue(1)}, DL);
+  }
+  case Intrinsic::riscv_vleff_mask: {
+    SDLoc DL(Op);
+    SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Other, MVT::Glue);
+    SDValue Load = DAG.getNode(RISCVISD::VLEFF_MASK, DL, VTs, Op.getOperand(0),
+                               Op.getOperand(2), Op.getOperand(3),
+                               Op.getOperand(4), Op.getOperand(5));
+    VTs = DAG.getVTList(Op->getValueType(1), MVT::Other);
+    SDValue ReadVL = DAG.getNode(RISCVISD::READ_VL, DL, VTs, Load.getValue(1),
+                                 Load.getValue(2));
+    return DAG.getMergeValues({Load, ReadVL, ReadVL.getValue(1)}, DL);
+  }
+  }
 }
 
 // Returns the opcode of the target-specific SDNode that implements the 32-bit
@@ -3497,6 +3523,9 @@
   NODE_NAME_CASE(GORCIW)
   NODE_NAME_CASE(VMV_X_S)
   NODE_NAME_CASE(SPLAT_VECTOR_I64)
+  NODE_NAME_CASE(VLEFF)
+  NODE_NAME_CASE(VLEFF_MASK)
+  NODE_NAME_CASE(READ_VL)
   }
   // clang-format on
   return nullptr;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -18,6 +18,23 @@
                            SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisVec<1>,
                                                 SDTCisInt<1>]>>;
 
+def riscv_vleff : SDNode<"RISCVISD::VLEFF",
+                         SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisPtrTy<1>,
+                                              SDTCisVT<2, XLenVT>]>,
+                         [SDNPHasChain, SDNPOutGlue, SDNPMayLoad,
+                          SDNPSideEffect]>;
+def riscv_vleff_mask : SDNode<"RISCVISD::VLEFF_MASK",
+                              SDTypeProfile<1, 4, [SDTCisVec<0>,
+                                                   SDTCisSameAs<0, 1>,
+                                                   SDTCisPtrTy<2>,
+                                                   SDTCVecEltisVT<3, i1>,
+                                                   SDTCisVT<4, XLenVT>]>,
+                              [SDNPHasChain, SDNPOutGlue, SDNPMayLoad,
+                               SDNPSideEffect]>;
+def riscv_read_vl : SDNode<"RISCVISD::READ_VL",
+                           SDTypeProfile<1, 0, [SDTCisVT<0, XLenVT>]>,
+                           [SDNPHasChain, SDNPInGlue, SDNPSideEffect]>;
+
 // X0 has special meaning for vsetvl/vsetvli.
// rd | rs1 | AVL value | Effect on vl //-------------------------------------------------------------- @@ -1524,6 +1541,23 @@ $rs1, (mask_type V0), (NoX0 GPR:$vl), sew)>; } +multiclass VPatUSLoadFF +{ + defvar Pseudo = !cast(inst#"_V_"#vlmul.MX); + def : Pat<(type (riscv_vleff GPR:$rs1, GPR:$vl)), + (Pseudo $rs1, (NoX0 GPR:$vl), sew)>; + defvar PseudoMask = !cast(inst#"_V_"#vlmul.MX#"_MASK"); + def : Pat<(type (riscv_vleff_mask (type GetVRegNoV0.R:$merge), + GPR:$rs1, (mask_type V0), GPR:$vl)), + (PseudoMask $merge, + $rs1, (mask_type V0), (NoX0 GPR:$vl), sew)>; +} + multiclass VPatSLoad; } +let hasSideEffects = 1, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1, + Uses = [VL] in +def PseudoReadVL : Pseudo<(outs GPR:$rd), (ins), + [(set GPR:$rd, (riscv_read_vl))], + "csrr", "$rd, vl">; + //===----------------------------------------------------------------------===// // 6. Configuration-Setting Instructions //===----------------------------------------------------------------------===// @@ -2928,9 +2968,8 @@ defm : VPatUSLoad<"int_riscv_vle", "PseudoVLE" # vti.SEW, vti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass>; - defm : VPatUSLoad<"int_riscv_vleff", - "PseudoVLE" # vti.SEW # "FF", - vti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass>; + defm : VPatUSLoadFF<"PseudoVLE" # vti.SEW # "FF", + vti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass>; defm : VPatUSStore<"int_riscv_vse", "PseudoVSE" # vti.SEW, vti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass>; diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll @@ -1,1045 +1,1619 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh,+f,+d -verify-machineinstrs \ -; RUN: --riscv-no-aliases < %s | FileCheck %s -declare @llvm.riscv.vleff.nxv1i32( +; RUN: < %s | FileCheck %s +declare { , i32 } @llvm.riscv.vleff.nxv1f64( + *, + i32); + +define @intrinsic_vleff_v_nxv1f64_nxv1f64(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vle64ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %a = call { , i32 } @llvm.riscv.vleff.nxv1f64( + * %0, + i32 %1) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %2 + ret %b +} + +declare { , i32 } @llvm.riscv.vleff.mask.nxv1f64( + , + *, + , + i32); + +define @intrinsic_vleff_mask_v_nxv1f64_nxv1f64( %0, * %1, %2, i32 %3, i32* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vle64ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv1f64( + %0, + * %1, + %2, + i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 + + ret %b +} + +declare { , i32 } @llvm.riscv.vleff.nxv2f64( + *, + i32); + +define @intrinsic_vleff_v_nxv2f64_nxv2f64(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vle64ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %a = 
call { , i32 } @llvm.riscv.vleff.nxv2f64( + * %0, + i32 %1) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %2 + ret %b +} + +declare { , i32 } @llvm.riscv.vleff.mask.nxv2f64( + , + *, + , + i32); + +define @intrinsic_vleff_mask_v_nxv2f64_nxv2f64( %0, * %1, %2, i32 %3, i32* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vle64ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv2f64( + %0, + * %1, + %2, + i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 + + ret %b +} + +declare { , i32 } @llvm.riscv.vleff.nxv4f64( + *, + i32); + +define @intrinsic_vleff_v_nxv4f64_nxv4f64(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vle64ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %a = call { , i32 } @llvm.riscv.vleff.nxv4f64( + * %0, + i32 %1) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %2 + ret %b +} + +declare { , i32 } @llvm.riscv.vleff.mask.nxv4f64( + , + *, + , + i32); + +define @intrinsic_vleff_mask_v_nxv4f64_nxv4f64( %0, * %1, %2, i32 %3, i32* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vle64ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv4f64( + %0, + * %1, + %2, + i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 + + ret %b +} + +declare { , i32 } @llvm.riscv.vleff.nxv8f64( + *, + i32); + +define @intrinsic_vleff_v_nxv8f64_nxv8f64(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv8f64_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu +; CHECK-NEXT: vle64ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %a = call { , i32 } @llvm.riscv.vleff.nxv8f64( + * %0, + i32 %1) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %2 + ret %b +} + +declare { , i32 } @llvm.riscv.vleff.mask.nxv8f64( + , + *, + , + i32); + +define @intrinsic_vleff_mask_v_nxv8f64_nxv8f64( %0, * %1, %2, i32 %3, i32* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f64_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu +; CHECK-NEXT: vle64ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret +entry: + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv8f64( + %0, + * %1, + %2, + i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 + + ret %b +} + +declare { , i32 } @llvm.riscv.vleff.nxv1i32( *, i32); -define @intrinsic_vleff_v_nxv1i32_nxv1i32(* %0, i32 %1) nounwind { +define @intrinsic_vleff_v_nxv1i32_nxv1i32(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vle32ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret 
entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv1i32( + %a = call { , i32 } @llvm.riscv.vleff.nxv1i32( * %0, i32 %1) - - ret %a + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv1i32( +declare { , i32 } @llvm.riscv.vleff.mask.nxv1i32( , *, , i32); -define @intrinsic_vleff_mask_v_nxv1i32_nxv1i32( %0, * %1, %2, i32 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv1i32_nxv1i32( %0, * %1, %2, i32 %3, i32* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vle32ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv1i32( + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv1i32( %0, * %1, %2, i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv2i32( +declare { , i32 } @llvm.riscv.vleff.nxv2i32( *, i32); -define @intrinsic_vleff_v_nxv2i32_nxv2i32(* %0, i32 %1) nounwind { +define @intrinsic_vleff_v_nxv2i32_nxv2i32(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vle32ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv2i32( + %a = call { , i32 } @llvm.riscv.vleff.nxv2i32( * %0, i32 %1) - - ret %a + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv2i32( +declare { , i32 } @llvm.riscv.vleff.mask.nxv2i32( , *, , i32); -define @intrinsic_vleff_mask_v_nxv2i32_nxv2i32( %0, * %1, %2, i32 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv2i32_nxv2i32( %0, * %1, %2, i32 %3, i32* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vle32ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv2i32( + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv2i32( %0, * %1, %2, i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv4i32( +declare { , i32 } @llvm.riscv.vleff.nxv4i32( *, i32); -define @intrinsic_vleff_v_nxv4i32_nxv4i32(* %0, i32 %1) nounwind { +define @intrinsic_vleff_v_nxv4i32_nxv4i32(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vle32ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: 
intrinsic_vleff_v_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv4i32( + %a = call { , i32 } @llvm.riscv.vleff.nxv4i32( * %0, i32 %1) - - ret %a + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv4i32( +declare { , i32 } @llvm.riscv.vleff.mask.nxv4i32( , *, , i32); -define @intrinsic_vleff_mask_v_nxv4i32_nxv4i32( %0, * %1, %2, i32 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv4i32_nxv4i32( %0, * %1, %2, i32 %3, i32* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vle32ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv4i32( + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv4i32( %0, * %1, %2, i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv8i32( +declare { , i32 } @llvm.riscv.vleff.nxv8i32( *, i32); -define @intrinsic_vleff_v_nxv8i32_nxv8i32(* %0, i32 %1) nounwind { +define @intrinsic_vleff_v_nxv8i32_nxv8i32(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vle32ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv8i32( + %a = call { , i32 } @llvm.riscv.vleff.nxv8i32( * %0, i32 %1) - - ret %a + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv8i32( +declare { , i32 } @llvm.riscv.vleff.mask.nxv8i32( , *, , i32); -define @intrinsic_vleff_mask_v_nxv8i32_nxv8i32( %0, * %1, %2, i32 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv8i32_nxv8i32( %0, * %1, %2, i32 %3, i32* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vle32ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv8i32( + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv8i32( %0, * %1, %2, i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv16i32( +declare { , i32 } @llvm.riscv.vleff.nxv16i32( *, i32); -define @intrinsic_vleff_v_nxv16i32_nxv16i32(* %0, i32 %1) nounwind { +define @intrinsic_vleff_v_nxv16i32_nxv16i32(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; CHECK-NEXT: vle32ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv16i32_nxv16i32 -; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv16i32( + %a = call { , i32 } @llvm.riscv.vleff.nxv16i32( * %0, i32 %1) - - ret %a + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv16i32( +declare { , i32 } @llvm.riscv.vleff.mask.nxv16i32( , *, , i32); -define @intrinsic_vleff_mask_v_nxv16i32_nxv16i32( %0, * %1, %2, i32 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv16i32_nxv16i32( %0, * %1, %2, i32 %3, i32* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu +; CHECK-NEXT: vle32ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv16i32( + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv16i32( %0, * %1, %2, i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv1f32( +declare { , i32 } @llvm.riscv.vleff.nxv1f32( *, i32); -define @intrinsic_vleff_v_nxv1f32_nxv1f32(* %0, i32 %1) nounwind { +define @intrinsic_vleff_v_nxv1f32_nxv1f32(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vle32ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv1f32( + %a = call { , i32 } @llvm.riscv.vleff.nxv1f32( * %0, i32 %1) - - ret %a + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv1f32( +declare { , i32 } @llvm.riscv.vleff.mask.nxv1f32( , *, , i32); -define @intrinsic_vleff_mask_v_nxv1f32_nxv1f32( %0, * %1, %2, i32 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv1f32_nxv1f32( %0, * %1, %2, i32 %3, i32* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vle32ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv1f32( + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv1f32( %0, * %1, %2, i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv2f32( +declare { , i32 } @llvm.riscv.vleff.nxv2f32( *, i32); -define @intrinsic_vleff_v_nxv2f32_nxv2f32(* %0, i32 %1) nounwind { +define @intrinsic_vleff_v_nxv2f32_nxv2f32(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vle32ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e32,m1,ta,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv2f32( + %a = call { , i32 } @llvm.riscv.vleff.nxv2f32( * %0, i32 %1) - - ret %a + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv2f32( +declare { , i32 } @llvm.riscv.vleff.mask.nxv2f32( , *, , i32); -define @intrinsic_vleff_mask_v_nxv2f32_nxv2f32( %0, * %1, %2, i32 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv2f32_nxv2f32( %0, * %1, %2, i32 %3, i32* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vle32ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv2f32( + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv2f32( %0, * %1, %2, i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv4f32( +declare { , i32 } @llvm.riscv.vleff.nxv4f32( *, i32); -define @intrinsic_vleff_v_nxv4f32_nxv4f32(* %0, i32 %1) nounwind { +define @intrinsic_vleff_v_nxv4f32_nxv4f32(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vle32ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv4f32( + %a = call { , i32 } @llvm.riscv.vleff.nxv4f32( * %0, i32 %1) - - ret %a + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv4f32( +declare { , i32 } @llvm.riscv.vleff.mask.nxv4f32( , *, , i32); -define @intrinsic_vleff_mask_v_nxv4f32_nxv4f32( %0, * %1, %2, i32 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv4f32_nxv4f32( %0, * %1, %2, i32 %3, i32* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vle32ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv4f32( + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv4f32( %0, * %1, %2, i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv8f32( +declare { , i32 } @llvm.riscv.vleff.nxv8f32( *, i32); -define @intrinsic_vleff_v_nxv8f32_nxv8f32(* %0, i32 %1) nounwind { +define @intrinsic_vleff_v_nxv8f32_nxv8f32(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vle32ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0) - 
%a = call @llvm.riscv.vleff.nxv8f32( + %a = call { , i32 } @llvm.riscv.vleff.nxv8f32( * %0, i32 %1) - - ret %a + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv8f32( +declare { , i32 } @llvm.riscv.vleff.mask.nxv8f32( , *, , i32); -define @intrinsic_vleff_mask_v_nxv8f32_nxv8f32( %0, * %1, %2, i32 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv8f32_nxv8f32( %0, * %1, %2, i32 %3, i32* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vle32ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv8f32( + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv8f32( %0, * %1, %2, i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv16f32( +declare { , i32 } @llvm.riscv.vleff.nxv16f32( *, i32); -define @intrinsic_vleff_v_nxv16f32_nxv16f32(* %0, i32 %1) nounwind { +define @intrinsic_vleff_v_nxv16f32_nxv16f32(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv16f32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; CHECK-NEXT: vle32ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv16f32_nxv16f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv16f32( + %a = call { , i32 } @llvm.riscv.vleff.nxv16f32( * %0, i32 %1) - - ret %a + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv16f32( +declare { , i32 } @llvm.riscv.vleff.mask.nxv16f32( , *, , i32); -define @intrinsic_vleff_mask_v_nxv16f32_nxv16f32( %0, * %1, %2, i32 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv16f32_nxv16f32( %0, * %1, %2, i32 %3, i32* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu +; CHECK-NEXT: vle32ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv16f32( + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv16f32( %0, * %1, %2, i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv1i16( +declare { , i32 } @llvm.riscv.vleff.nxv1i16( *, i32); -define @intrinsic_vleff_v_nxv1i16_nxv1i16(* %0, i32 %1) nounwind { +define @intrinsic_vleff_v_nxv1i16_nxv1i16(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vle16ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0) - %a = call 
@llvm.riscv.vleff.nxv1i16( + %a = call { , i32 } @llvm.riscv.vleff.nxv1i16( * %0, i32 %1) - - ret %a + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv1i16( +declare { , i32 } @llvm.riscv.vleff.mask.nxv1i16( , *, , i32); -define @intrinsic_vleff_mask_v_nxv1i16_nxv1i16( %0, * %1, %2, i32 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv1i16_nxv1i16( %0, * %1, %2, i32 %3, i32* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vle16ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv1i16( + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv1i16( %0, * %1, %2, i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv2i16( +declare { , i32 } @llvm.riscv.vleff.nxv2i16( *, i32); -define @intrinsic_vleff_v_nxv2i16_nxv2i16(* %0, i32 %1) nounwind { +define @intrinsic_vleff_v_nxv2i16_nxv2i16(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vle16ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv2i16( + %a = call { , i32 } @llvm.riscv.vleff.nxv2i16( * %0, i32 %1) - - ret %a + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv2i16( +declare { , i32 } @llvm.riscv.vleff.mask.nxv2i16( , *, , i32); -define @intrinsic_vleff_mask_v_nxv2i16_nxv2i16( %0, * %1, %2, i32 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv2i16_nxv2i16( %0, * %1, %2, i32 %3, i32* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vle16ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv2i16( + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv2i16( %0, * %1, %2, i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv4i16( +declare { , i32 } @llvm.riscv.vleff.nxv4i16( *, i32); -define @intrinsic_vleff_v_nxv4i16_nxv4i16(* %0, i32 %1) nounwind { +define @intrinsic_vleff_v_nxv4i16_nxv4i16(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vle16ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv4i16( + %a = call { , i32 } 
@llvm.riscv.vleff.nxv4i16( * %0, i32 %1) - - ret %a + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv4i16( +declare { , i32 } @llvm.riscv.vleff.mask.nxv4i16( , *, , i32); -define @intrinsic_vleff_mask_v_nxv4i16_nxv4i16( %0, * %1, %2, i32 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv4i16_nxv4i16( %0, * %1, %2, i32 %3, i32* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vle16ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv4i16( + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv4i16( %0, * %1, %2, i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv8i16( +declare { , i32 } @llvm.riscv.vleff.nxv8i16( *, i32); -define @intrinsic_vleff_v_nxv8i16_nxv8i16(* %0, i32 %1) nounwind { +define @intrinsic_vleff_v_nxv8i16_nxv8i16(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vle16ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv8i16( + %a = call { , i32 } @llvm.riscv.vleff.nxv8i16( * %0, i32 %1) - - ret %a + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv8i16( +declare { , i32 } @llvm.riscv.vleff.mask.nxv8i16( , *, , i32); -define @intrinsic_vleff_mask_v_nxv8i16_nxv8i16( %0, * %1, %2, i32 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv8i16_nxv8i16( %0, * %1, %2, i32 %3, i32* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vle16ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv8i16( + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv8i16( %0, * %1, %2, i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv16i16( +declare { , i32 } @llvm.riscv.vleff.nxv16i16( *, i32); -define @intrinsic_vleff_v_nxv16i16_nxv16i16(* %0, i32 %1) nounwind { +define @intrinsic_vleff_v_nxv16i16_nxv16i16(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vle16ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv16i16( + %a = call { , i32 } @llvm.riscv.vleff.nxv16i16( * %0, i32 %1) - 
- ret %a + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv16i16( +declare { , i32 } @llvm.riscv.vleff.mask.nxv16i16( , *, , i32); -define @intrinsic_vleff_mask_v_nxv16i16_nxv16i16( %0, * %1, %2, i32 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv16i16_nxv16i16( %0, * %1, %2, i32 %3, i32* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vle16ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv16i16( + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv16i16( %0, * %1, %2, i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv32i16( +declare { , i32 } @llvm.riscv.vleff.nxv32i16( *, i32); -define @intrinsic_vleff_v_nxv32i16_nxv32i16(* %0, i32 %1) nounwind { +define @intrinsic_vleff_v_nxv32i16_nxv32i16(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu +; CHECK-NEXT: vle16ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv32i16( + %a = call { , i32 } @llvm.riscv.vleff.nxv32i16( * %0, i32 %1) - - ret %a + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv32i16( +declare { , i32 } @llvm.riscv.vleff.mask.nxv32i16( , *, , i32); -define @intrinsic_vleff_mask_v_nxv32i16_nxv32i16( %0, * %1, %2, i32 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv32i16_nxv32i16( %0, * %1, %2, i32 %3, i32* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m8,tu,mu +; CHECK-NEXT: vle16ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv32i16( + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv32i16( %0, * %1, %2, i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv1f16( +declare { , i32 } @llvm.riscv.vleff.nxv1f16( *, i32); -define @intrinsic_vleff_v_nxv1f16_nxv1f16(* %0, i32 %1) nounwind { +define @intrinsic_vleff_v_nxv1half_nxv1f16(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv1half_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vle16ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv1f16( + %a = call { , i32 } @llvm.riscv.vleff.nxv1f16( * %0, i32 %1) - - ret %a + %b 
= extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv1f16( +declare { , i32 } @llvm.riscv.vleff.mask.nxv1f16( , *, , i32); -define @intrinsic_vleff_mask_v_nxv1f16_nxv1f16( %0, * %1, %2, i32 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv1half_nxv1f16( %0, * %1, %2, i32 %3, i32* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1half_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vle16ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv1f16( + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv1f16( %0, * %1, %2, i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv2f16( +declare { , i32 } @llvm.riscv.vleff.nxv2f16( *, i32); -define @intrinsic_vleff_v_nxv2f16_nxv2f16(* %0, i32 %1) nounwind { +define @intrinsic_vleff_v_nxv2half_nxv2f16(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv2half_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vle16ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv2f16( + %a = call { , i32 } @llvm.riscv.vleff.nxv2f16( * %0, i32 %1) - - ret %a + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv2f16( +declare { , i32 } @llvm.riscv.vleff.mask.nxv2f16( , *, , i32); -define @intrinsic_vleff_mask_v_nxv2f16_nxv2f16( %0, * %1, %2, i32 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv2half_nxv2f16( %0, * %1, %2, i32 %3, i32* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2half_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vle16ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv2f16( + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv2f16( %0, * %1, %2, i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv4f16( +declare { , i32 } @llvm.riscv.vleff.nxv4f16( *, i32); -define @intrinsic_vleff_v_nxv4f16_nxv4f16(* %0, i32 %1) nounwind { +define @intrinsic_vleff_v_nxv4half_nxv4f16(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv4half_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vle16ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv4f16( + %a = call { , i32 } @llvm.riscv.vleff.nxv4f16( * %0, i32 %1) - - ret %a + %b = extractvalue { , i32 } %a, 0 + %c = 
extractvalue { , i32 } %a, 1 + store i32 %c, i32* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv4f16( +declare { , i32 } @llvm.riscv.vleff.mask.nxv4f16( , *, , i32); -define @intrinsic_vleff_mask_v_nxv4f16_nxv4f16( %0, * %1, %2, i32 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv4half_nxv4f16( %0, * %1, %2, i32 %3, i32* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4half_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vle16ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv4f16( + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv4f16( %0, * %1, %2, i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv8f16( +declare { , i32 } @llvm.riscv.vleff.nxv8f16( *, i32); -define @intrinsic_vleff_v_nxv8f16_nxv8f16(* %0, i32 %1) nounwind { +define @intrinsic_vleff_v_nxv8half_nxv8f16(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv8half_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vle16ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv8f16( + %a = call { , i32 } @llvm.riscv.vleff.nxv8f16( * %0, i32 %1) - - ret %a + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv8f16( +declare { , i32 } @llvm.riscv.vleff.mask.nxv8f16( , *, , i32); -define @intrinsic_vleff_mask_v_nxv8f16_nxv8f16( %0, * %1, %2, i32 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv8half_nxv8f16( %0, * %1, %2, i32 %3, i32* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8half_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vle16ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv8f16( + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv8f16( %0, * %1, %2, i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv16f16( +declare { , i32 } @llvm.riscv.vleff.nxv16f16( *, i32); -define @intrinsic_vleff_v_nxv16f16_nxv16f16(* %0, i32 %1) nounwind { +define @intrinsic_vleff_v_nxv16half_nxv16f16(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv16half_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vle16ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv16f16( + %a = call { , i32 } @llvm.riscv.vleff.nxv16f16( * %0, i32 %1) - - ret %a + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + 
store i32 %c, i32* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv16f16( +declare { , i32 } @llvm.riscv.vleff.mask.nxv16f16( , *, , i32); -define @intrinsic_vleff_mask_v_nxv16f16_nxv16f16( %0, * %1, %2, i32 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv16half_nxv16f16( %0, * %1, %2, i32 %3, i32* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16half_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vle16ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv16f16( + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv16f16( %0, * %1, %2, i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv32f16( +declare { , i32 } @llvm.riscv.vleff.nxv32f16( *, i32); -define @intrinsic_vleff_v_nxv32f16_nxv32f16(* %0, i32 %1) nounwind { +define @intrinsic_vleff_v_nxv32half_nxv32f16(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv32half_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu +; CHECK-NEXT: vle16ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv32f16_nxv32f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv32f16( + %a = call { , i32 } @llvm.riscv.vleff.nxv32f16( * %0, i32 %1) - - ret %a + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv32f16( +declare { , i32 } @llvm.riscv.vleff.mask.nxv32f16( , *, , i32); -define @intrinsic_vleff_mask_v_nxv32f16_nxv32f16( %0, * %1, %2, i32 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv32half_nxv32f16( %0, * %1, %2, i32 %3, i32* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32half_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m8,tu,mu +; CHECK-NEXT: vle16ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32f16_nxv32f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv32f16( + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv32f16( %0, * %1, %2, i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv1i8( +declare { , i32 } @llvm.riscv.vleff.nxv1i8( *, i32); -define @intrinsic_vleff_v_nxv1i8_nxv1i8(* %0, i32 %1) nounwind { +define @intrinsic_vleff_v_nxv1i8_nxv1i8(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vle8ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu -; CHECK: vle8ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv1i8( + %a = call { , i32 } @llvm.riscv.vleff.nxv1i8( * %0, i32 %1) - - ret %a + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %2 + 
ret %b } -declare @llvm.riscv.vleff.mask.nxv1i8( +declare { , i32 } @llvm.riscv.vleff.mask.nxv1i8( , *, , i32); -define @intrinsic_vleff_mask_v_nxv1i8_nxv1i8( %0, * %1, %2, i32 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv1i8_nxv1i8( %0, * %1, %2, i32 %3, i32* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vle8ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu -; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv1i8( + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv1i8( %0, * %1, %2, i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv2i8( +declare { , i32 } @llvm.riscv.vleff.nxv2i8( *, i32); -define @intrinsic_vleff_v_nxv2i8_nxv2i8(* %0, i32 %1) nounwind { +define @intrinsic_vleff_v_nxv2i8_nxv2i8(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vle8ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vle8ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv2i8( + %a = call { , i32 } @llvm.riscv.vleff.nxv2i8( * %0, i32 %1) - - ret %a + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv2i8( +declare { , i32 } @llvm.riscv.vleff.mask.nxv2i8( , *, , i32); -define @intrinsic_vleff_mask_v_nxv2i8_nxv2i8( %0, * %1, %2, i32 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv2i8_nxv2i8( %0, * %1, %2, i32 %3, i32* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vle8ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu -; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv2i8( + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv2i8( %0, * %1, %2, i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv4i8( +declare { , i32 } @llvm.riscv.vleff.nxv4i8( *, i32); -define @intrinsic_vleff_v_nxv4i8_nxv4i8(* %0, i32 %1) nounwind { +define @intrinsic_vleff_v_nxv4i8_nxv4i8(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vle8ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu -; CHECK: vle8ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv4i8( + %a = call { , i32 } @llvm.riscv.vleff.nxv4i8( * %0, i32 %1) - - ret %a + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv4i8( +declare { , i32 } @llvm.riscv.vleff.mask.nxv4i8( , *, , 
i32); -define @intrinsic_vleff_mask_v_nxv4i8_nxv4i8( %0, * %1, %2, i32 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv4i8_nxv4i8( %0, * %1, %2, i32 %3, i32* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vle8ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu -; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv4i8( + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv4i8( %0, * %1, %2, i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv8i8( +declare { , i32 } @llvm.riscv.vleff.nxv8i8( *, i32); -define @intrinsic_vleff_v_nxv8i8_nxv8i8(* %0, i32 %1) nounwind { +define @intrinsic_vleff_v_nxv8i8_nxv8i8(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vle8ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vle8ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv8i8( + %a = call { , i32 } @llvm.riscv.vleff.nxv8i8( * %0, i32 %1) - - ret %a + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv8i8( +declare { , i32 } @llvm.riscv.vleff.mask.nxv8i8( , *, , i32); -define @intrinsic_vleff_mask_v_nxv8i8_nxv8i8( %0, * %1, %2, i32 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv8i8_nxv8i8( %0, * %1, %2, i32 %3, i32* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vle8ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu -; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv8i8( + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv8i8( %0, * %1, %2, i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv16i8( +declare { , i32 } @llvm.riscv.vleff.nxv16i8( *, i32); -define @intrinsic_vleff_v_nxv16i8_nxv16i8(* %0, i32 %1) nounwind { +define @intrinsic_vleff_v_nxv16i8_nxv16i8(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vle8ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu -; CHECK: vle8ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv16i8( + %a = call { , i32 } @llvm.riscv.vleff.nxv16i8( * %0, i32 %1) - - ret %a + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv16i8( +declare { , i32 } @llvm.riscv.vleff.mask.nxv16i8( , *, , i32); -define @intrinsic_vleff_mask_v_nxv16i8_nxv16i8( %0, * %1, %2, i32 %3) nounwind { +define 
@intrinsic_vleff_mask_v_nxv16i8_nxv16i8( %0, * %1, %2, i32 %3, i32* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vle8ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu -; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv16i8( + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv16i8( %0, * %1, %2, i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv32i8( +declare { , i32 } @llvm.riscv.vleff.nxv32i8( *, i32); -define @intrinsic_vleff_v_nxv32i8_nxv32i8(* %0, i32 %1) nounwind { +define @intrinsic_vleff_v_nxv32i8_nxv32i8(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vle8ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu -; CHECK: vle8ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv32i8( + %a = call { , i32 } @llvm.riscv.vleff.nxv32i8( * %0, i32 %1) - - ret %a + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv32i8( +declare { , i32 } @llvm.riscv.vleff.mask.nxv32i8( , *, , i32); -define @intrinsic_vleff_mask_v_nxv32i8_nxv32i8( %0, * %1, %2, i32 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv32i8_nxv32i8( %0, * %1, %2, i32 %3, i32* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu +; CHECK-NEXT: vle8ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu -; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv32i8( + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv32i8( %0, * %1, %2, i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv64i8( +declare { , i32 } @llvm.riscv.vleff.nxv64i8( *, i32); -define @intrinsic_vleff_v_nxv64i8_nxv64i8(* %0, i32 %1) nounwind { +define @intrinsic_vleff_v_nxv64i8_nxv64i8(* %0, i32 %1, i32* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu +; CHECK-NEXT: vle8ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu -; CHECK: vle8ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv64i8( + %a = call { , i32 } @llvm.riscv.vleff.nxv64i8( * %0, i32 %1) - - ret %a + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv64i8( +declare { , i32 } @llvm.riscv.vleff.mask.nxv64i8( , *, , i32); -define @intrinsic_vleff_mask_v_nxv64i8_nxv64i8( %0, * %1, %2, i32 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv64i8_nxv64i8( %0, * %1, %2, i32 %3, i32* 
%4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m8,tu,mu +; CHECK-NEXT: vle8ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu -; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv64i8( + %a = call { , i32 } @llvm.riscv.vleff.mask.nxv64i8( %0, * %1, %2, i32 %3) + %b = extractvalue { , i32 } %a, 0 + %c = extractvalue { , i32 } %a, 1 + store i32 %c, i32* %4 - ret %a + ret %b } diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll @@ -1,1333 +1,1815 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh,+f,+d -verify-machineinstrs \ -; RUN: --riscv-no-aliases < %s | FileCheck %s -declare @llvm.riscv.vleff.nxv1i64( +; RUN: < %s | FileCheck %s +declare { , i64 } @llvm.riscv.vleff.nxv1f64( *, i64); -define @intrinsic_vleff_v_nxv1i64_nxv1i64(* %0, i64 %1) nounwind { +define @intrinsic_vleff_v_nxv1f64_nxv1f64(* %0, i64 %1, i64* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vle64ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu -; CHECK: vle64ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv1i64( + %a = call { , i64 } @llvm.riscv.vleff.nxv1f64( * %0, i64 %1) - - ret %a + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv1i64( +declare { , i64 } @llvm.riscv.vleff.mask.nxv1f64( , *, , i64); -define @intrinsic_vleff_mask_v_nxv1i64_nxv1i64( %0, * %1, %2, i64 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv1f64_nxv1f64( %0, * %1, %2, i64 %3, i64* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vle64ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu -; CHECK: vle64ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv1i64( + %a = call { , i64 } @llvm.riscv.vleff.mask.nxv1f64( %0, * %1, %2, i64 %3) + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv2i64( +declare { , i64 } @llvm.riscv.vleff.nxv2f64( *, i64); -define @intrinsic_vleff_v_nxv2i64_nxv2i64(* %0, i64 %1) nounwind { +define @intrinsic_vleff_v_nxv2f64_nxv2f64(* %0, i64 %1, i64* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vle64ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu -; CHECK: vle64ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv2i64( + %a = call { , i64 } 
@llvm.riscv.vleff.nxv2f64( * %0, i64 %1) - - ret %a + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv2i64( +declare { , i64 } @llvm.riscv.vleff.mask.nxv2f64( , *, , i64); -define @intrinsic_vleff_mask_v_nxv2i64_nxv2i64( %0, * %1, %2, i64 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv2f64_nxv2f64( %0, * %1, %2, i64 %3, i64* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vle64ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu -; CHECK: vle64ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv2i64( + %a = call { , i64 } @llvm.riscv.vleff.mask.nxv2f64( %0, * %1, %2, i64 %3) + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv4i64( +declare { , i64 } @llvm.riscv.vleff.nxv4f64( *, i64); -define @intrinsic_vleff_v_nxv4i64_nxv4i64(* %0, i64 %1) nounwind { +define @intrinsic_vleff_v_nxv4f64_nxv4f64(* %0, i64 %1, i64* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vle64ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu -; CHECK: vle64ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv4i64( + %a = call { , i64 } @llvm.riscv.vleff.nxv4f64( * %0, i64 %1) - - ret %a + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv4i64( +declare { , i64 } @llvm.riscv.vleff.mask.nxv4f64( , *, , i64); -define @intrinsic_vleff_mask_v_nxv4i64_nxv4i64( %0, * %1, %2, i64 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv4f64_nxv4f64( %0, * %1, %2, i64 %3, i64* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vle64ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu -; CHECK: vle64ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv4i64( + %a = call { , i64 } @llvm.riscv.vleff.mask.nxv4f64( %0, * %1, %2, i64 %3) + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv8i64( +declare { , i64 } @llvm.riscv.vleff.nxv8f64( *, i64); -define @intrinsic_vleff_v_nxv8i64_nxv8i64(* %0, i64 %1) nounwind { +define @intrinsic_vleff_v_nxv8f64_nxv8f64(* %0, i64 %1, i64* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv8f64_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu +; CHECK-NEXT: vle64ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu -; CHECK: vle64ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv8i64( + %a = call { , i64 } @llvm.riscv.vleff.nxv8f64( * %0, i64 %1) - - ret %a + 
%b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv8i64( +declare { , i64 } @llvm.riscv.vleff.mask.nxv8f64( , *, , i64); -define @intrinsic_vleff_mask_v_nxv8i64_nxv8i64( %0, * %1, %2, i64 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv8f64_nxv8f64( %0, * %1, %2, i64 %3, i64* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f64_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu +; CHECK-NEXT: vle64ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu -; CHECK: vle64ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv8i64( + %a = call { , i64 } @llvm.riscv.vleff.mask.nxv8f64( %0, * %1, %2, i64 %3) + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv1f64( +declare { , i64 } @llvm.riscv.vleff.nxv1i64( *, i64); -define @intrinsic_vleff_v_nxv1f64_nxv1f64(* %0, i64 %1) nounwind { +define @intrinsic_vleff_v_nxv1i64_nxv1i64(* %0, i64 %1, i64* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vle64ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv1f64_nxv1f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu -; CHECK: vle64ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv1f64( + %a = call { , i64 } @llvm.riscv.vleff.nxv1i64( * %0, i64 %1) - - ret %a + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv1f64( +declare { , i64 } @llvm.riscv.vleff.mask.nxv1i64( , *, , i64); -define @intrinsic_vleff_mask_v_nxv1f64_nxv1f64( %0, * %1, %2, i64 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv1i64_nxv1i64( %0, * %1, %2, i64 %3, i64* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vle64ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f64_nxv1f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu -; CHECK: vle64ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv1f64( + %a = call { , i64 } @llvm.riscv.vleff.mask.nxv1i64( %0, * %1, %2, i64 %3) + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv2f64( +declare { , i64 } @llvm.riscv.vleff.nxv2i64( *, i64); -define @intrinsic_vleff_v_nxv2f64_nxv2f64(* %0, i64 %1) nounwind { +define @intrinsic_vleff_v_nxv2i64_nxv2i64(* %0, i64 %1, i64* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vle64ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv2f64_nxv2f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu -; CHECK: vle64ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv2f64( + %a = call { , i64 } @llvm.riscv.vleff.nxv2i64( * %0, i64 %1) - - ret %a + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue 
{ , i64 } %a, 1 + store i64 %c, i64* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv2f64( +declare { , i64 } @llvm.riscv.vleff.mask.nxv2i64( , *, , i64); -define @intrinsic_vleff_mask_v_nxv2f64_nxv2f64( %0, * %1, %2, i64 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv2i64_nxv2i64( %0, * %1, %2, i64 %3, i64* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vle64ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f64_nxv2f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu -; CHECK: vle64ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv2f64( + %a = call { , i64 } @llvm.riscv.vleff.mask.nxv2i64( %0, * %1, %2, i64 %3) + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv4f64( +declare { , i64 } @llvm.riscv.vleff.nxv4i64( *, i64); -define @intrinsic_vleff_v_nxv4f64_nxv4f64(* %0, i64 %1) nounwind { +define @intrinsic_vleff_v_nxv4i64_nxv4i64(* %0, i64 %1, i64* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vle64ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv4f64_nxv4f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu -; CHECK: vle64ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv4f64( + %a = call { , i64 } @llvm.riscv.vleff.nxv4i64( * %0, i64 %1) - - ret %a + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv4f64( +declare { , i64 } @llvm.riscv.vleff.mask.nxv4i64( , *, , i64); -define @intrinsic_vleff_mask_v_nxv4f64_nxv4f64( %0, * %1, %2, i64 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv4i64_nxv4i64( %0, * %1, %2, i64 %3, i64* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vle64ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f64_nxv4f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu -; CHECK: vle64ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv4f64( + %a = call { , i64 } @llvm.riscv.vleff.mask.nxv4i64( %0, * %1, %2, i64 %3) + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv8f64( +declare { , i64 } @llvm.riscv.vleff.nxv8i64( *, i64); -define @intrinsic_vleff_v_nxv8f64_nxv8f64(* %0, i64 %1) nounwind { +define @intrinsic_vleff_v_nxv8i64_nxv8i64(* %0, i64 %1, i64* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu +; CHECK-NEXT: vle64ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv8f64_nxv8f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu -; CHECK: vle64ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv8f64( + %a = call { , i64 } @llvm.riscv.vleff.nxv8i64( * %0, i64 %1) - - ret %a + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %2 + ret %b } 
-declare @llvm.riscv.vleff.mask.nxv8f64( +declare { , i64 } @llvm.riscv.vleff.mask.nxv8i64( , *, , i64); -define @intrinsic_vleff_mask_v_nxv8f64_nxv8f64( %0, * %1, %2, i64 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv8i64_nxv8i64( %0, * %1, %2, i64 %3, i64* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu +; CHECK-NEXT: vle64ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f64_nxv8f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu -; CHECK: vle64ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv8f64( + %a = call { , i64 } @llvm.riscv.vleff.mask.nxv8i64( %0, * %1, %2, i64 %3) + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv1i32( +declare { , i64 } @llvm.riscv.vleff.nxv1i32( *, i64); -define @intrinsic_vleff_v_nxv1i32_nxv1i32(* %0, i64 %1) nounwind { +define @intrinsic_vleff_v_nxv1i32_nxv1i32(* %0, i64 %1, i64* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vle32ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv1i32( + %a = call { , i64 } @llvm.riscv.vleff.nxv1i32( * %0, i64 %1) - - ret %a + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv1i32( +declare { , i64 } @llvm.riscv.vleff.mask.nxv1i32( , *, , i64); -define @intrinsic_vleff_mask_v_nxv1i32_nxv1i32( %0, * %1, %2, i64 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv1i32_nxv1i32( %0, * %1, %2, i64 %3, i64* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vle32ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv1i32( + %a = call { , i64 } @llvm.riscv.vleff.mask.nxv1i32( %0, * %1, %2, i64 %3) + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv2i32( +declare { , i64 } @llvm.riscv.vleff.nxv2i32( *, i64); -define @intrinsic_vleff_v_nxv2i32_nxv2i32(* %0, i64 %1) nounwind { +define @intrinsic_vleff_v_nxv2i32_nxv2i32(* %0, i64 %1, i64* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vle32ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv2i32( + %a = call { , i64 } @llvm.riscv.vleff.nxv2i32( * %0, i64 %1) - - ret %a + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv2i32( +declare 
{ , i64 } @llvm.riscv.vleff.mask.nxv2i32( , *, , i64); -define @intrinsic_vleff_mask_v_nxv2i32_nxv2i32( %0, * %1, %2, i64 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv2i32_nxv2i32( %0, * %1, %2, i64 %3, i64* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vle32ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv2i32( + %a = call { , i64 } @llvm.riscv.vleff.mask.nxv2i32( %0, * %1, %2, i64 %3) + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv4i32( +declare { , i64 } @llvm.riscv.vleff.nxv4i32( *, i64); -define @intrinsic_vleff_v_nxv4i32_nxv4i32(* %0, i64 %1) nounwind { +define @intrinsic_vleff_v_nxv4i32_nxv4i32(* %0, i64 %1, i64* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vle32ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv4i32( + %a = call { , i64 } @llvm.riscv.vleff.nxv4i32( * %0, i64 %1) - - ret %a + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv4i32( +declare { , i64 } @llvm.riscv.vleff.mask.nxv4i32( , *, , i64); -define @intrinsic_vleff_mask_v_nxv4i32_nxv4i32( %0, * %1, %2, i64 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv4i32_nxv4i32( %0, * %1, %2, i64 %3, i64* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vle32ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv4i32( + %a = call { , i64 } @llvm.riscv.vleff.mask.nxv4i32( %0, * %1, %2, i64 %3) + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv8i32( +declare { , i64 } @llvm.riscv.vleff.nxv8i32( *, i64); -define @intrinsic_vleff_v_nxv8i32_nxv8i32(* %0, i64 %1) nounwind { +define @intrinsic_vleff_v_nxv8i32_nxv8i32(* %0, i64 %1, i64* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vle32ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv8i32( + %a = call { , i64 } @llvm.riscv.vleff.nxv8i32( * %0, i64 %1) - - ret %a + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv8i32( +declare { , i64 } @llvm.riscv.vleff.mask.nxv8i32( , *, , i64); 
-define @intrinsic_vleff_mask_v_nxv8i32_nxv8i32( %0, * %1, %2, i64 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv8i32_nxv8i32( %0, * %1, %2, i64 %3, i64* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vle32ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv8i32( + %a = call { , i64 } @llvm.riscv.vleff.mask.nxv8i32( %0, * %1, %2, i64 %3) + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv16i32( +declare { , i64 } @llvm.riscv.vleff.nxv16i32( *, i64); -define @intrinsic_vleff_v_nxv16i32_nxv16i32(* %0, i64 %1) nounwind { +define @intrinsic_vleff_v_nxv16i32_nxv16i32(* %0, i64 %1, i64* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; CHECK-NEXT: vle32ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv16i32( + %a = call { , i64 } @llvm.riscv.vleff.nxv16i32( * %0, i64 %1) - - ret %a + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv16i32( +declare { , i64 } @llvm.riscv.vleff.mask.nxv16i32( , *, , i64); -define @intrinsic_vleff_mask_v_nxv16i32_nxv16i32( %0, * %1, %2, i64 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv16i32_nxv16i32( %0, * %1, %2, i64 %3, i64* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu +; CHECK-NEXT: vle32ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv16i32( + %a = call { , i64 } @llvm.riscv.vleff.mask.nxv16i32( %0, * %1, %2, i64 %3) + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv1f32( +declare { , i64 } @llvm.riscv.vleff.nxv1f32( *, i64); -define @intrinsic_vleff_v_nxv1f32_nxv1f32(* %0, i64 %1) nounwind { +define @intrinsic_vleff_v_nxv1f32_nxv1f32(* %0, i64 %1, i64* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vle32ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv1f32( + %a = call { , i64 } @llvm.riscv.vleff.nxv1f32( * %0, i64 %1) - - ret %a + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv1f32( +declare { , i64 } @llvm.riscv.vleff.mask.nxv1f32( , *, , i64); -define 
@intrinsic_vleff_mask_v_nxv1f32_nxv1f32( %0, * %1, %2, i64 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv1f32_nxv1f32( %0, * %1, %2, i64 %3, i64* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vle32ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv1f32( + %a = call { , i64 } @llvm.riscv.vleff.mask.nxv1f32( %0, * %1, %2, i64 %3) + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv2f32( +declare { , i64 } @llvm.riscv.vleff.nxv2f32( *, i64); -define @intrinsic_vleff_v_nxv2f32_nxv2f32(* %0, i64 %1) nounwind { +define @intrinsic_vleff_v_nxv2f32_nxv2f32(* %0, i64 %1, i64* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vle32ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv2f32( + %a = call { , i64 } @llvm.riscv.vleff.nxv2f32( * %0, i64 %1) - - ret %a + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv2f32( +declare { , i64 } @llvm.riscv.vleff.mask.nxv2f32( , *, , i64); -define @intrinsic_vleff_mask_v_nxv2f32_nxv2f32( %0, * %1, %2, i64 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv2f32_nxv2f32( %0, * %1, %2, i64 %3, i64* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vle32ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv2f32( + %a = call { , i64 } @llvm.riscv.vleff.mask.nxv2f32( %0, * %1, %2, i64 %3) + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv4f32( +declare { , i64 } @llvm.riscv.vleff.nxv4f32( *, i64); -define @intrinsic_vleff_v_nxv4f32_nxv4f32(* %0, i64 %1) nounwind { +define @intrinsic_vleff_v_nxv4f32_nxv4f32(* %0, i64 %1, i64* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vle32ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv4f32( + %a = call { , i64 } @llvm.riscv.vleff.nxv4f32( * %0, i64 %1) - - ret %a + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv4f32( +declare { , i64 } @llvm.riscv.vleff.mask.nxv4f32( , *, , i64); -define @intrinsic_vleff_mask_v_nxv4f32_nxv4f32( %0, * %1, 
%2, i64 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv4f32_nxv4f32( %0, * %1, %2, i64 %3, i64* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vle32ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv4f32( + %a = call { , i64 } @llvm.riscv.vleff.mask.nxv4f32( %0, * %1, %2, i64 %3) + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv8f32( +declare { , i64 } @llvm.riscv.vleff.nxv8f32( *, i64); -define @intrinsic_vleff_v_nxv8f32_nxv8f32(* %0, i64 %1) nounwind { +define @intrinsic_vleff_v_nxv8f32_nxv8f32(* %0, i64 %1, i64* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vle32ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv8f32( + %a = call { , i64 } @llvm.riscv.vleff.nxv8f32( * %0, i64 %1) - - ret %a + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv8f32( +declare { , i64 } @llvm.riscv.vleff.mask.nxv8f32( , *, , i64); -define @intrinsic_vleff_mask_v_nxv8f32_nxv8f32( %0, * %1, %2, i64 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv8f32_nxv8f32( %0, * %1, %2, i64 %3, i64* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vle32ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv8f32( + %a = call { , i64 } @llvm.riscv.vleff.mask.nxv8f32( %0, * %1, %2, i64 %3) + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv16f32( +declare { , i64 } @llvm.riscv.vleff.nxv16f32( *, i64); -define @intrinsic_vleff_v_nxv16f32_nxv16f32(* %0, i64 %1) nounwind { +define @intrinsic_vleff_v_nxv16f32_nxv16f32(* %0, i64 %1, i64* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv16f32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; CHECK-NEXT: vle32ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv16f32_nxv16f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv16f32( + %a = call { , i64 } @llvm.riscv.vleff.nxv16f32( * %0, i64 %1) - - ret %a + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv16f32( +declare { , i64 } @llvm.riscv.vleff.mask.nxv16f32( , *, , i64); -define @intrinsic_vleff_mask_v_nxv16f32_nxv16f32( %0, * %1, %2, i64 %3) nounwind { +define 
@intrinsic_vleff_mask_v_nxv16f32_nxv16f32( %0, * %1, %2, i64 %3, i64* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu +; CHECK-NEXT: vle32ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu -; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv16f32( + %a = call { , i64 } @llvm.riscv.vleff.mask.nxv16f32( %0, * %1, %2, i64 %3) + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv1i16( +declare { , i64 } @llvm.riscv.vleff.nxv1i16( *, i64); -define @intrinsic_vleff_v_nxv1i16_nxv1i16(* %0, i64 %1) nounwind { +define @intrinsic_vleff_v_nxv1i16_nxv1i16(* %0, i64 %1, i64* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vle16ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv1i16( + %a = call { , i64 } @llvm.riscv.vleff.nxv1i16( * %0, i64 %1) - - ret %a + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv1i16( +declare { , i64 } @llvm.riscv.vleff.mask.nxv1i16( , *, , i64); -define @intrinsic_vleff_mask_v_nxv1i16_nxv1i16( %0, * %1, %2, i64 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv1i16_nxv1i16( %0, * %1, %2, i64 %3, i64* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vle16ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv1i16( + %a = call { , i64 } @llvm.riscv.vleff.mask.nxv1i16( %0, * %1, %2, i64 %3) + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv2i16( +declare { , i64 } @llvm.riscv.vleff.nxv2i16( *, i64); -define @intrinsic_vleff_v_nxv2i16_nxv2i16(* %0, i64 %1) nounwind { +define @intrinsic_vleff_v_nxv2i16_nxv2i16(* %0, i64 %1, i64* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vle16ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv2i16( + %a = call { , i64 } @llvm.riscv.vleff.nxv2i16( * %0, i64 %1) - - ret %a + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv2i16( +declare { , i64 } @llvm.riscv.vleff.mask.nxv2i16( , *, , i64); -define @intrinsic_vleff_mask_v_nxv2i16_nxv2i16( %0, * %1, %2, i64 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv2i16_nxv2i16( 
%0, * %1, %2, i64 %3, i64* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vle16ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv2i16( + %a = call { , i64 } @llvm.riscv.vleff.mask.nxv2i16( %0, * %1, %2, i64 %3) + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv4i16( +declare { , i64 } @llvm.riscv.vleff.nxv4i16( *, i64); -define @intrinsic_vleff_v_nxv4i16_nxv4i16(* %0, i64 %1) nounwind { +define @intrinsic_vleff_v_nxv4i16_nxv4i16(* %0, i64 %1, i64* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vle16ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv4i16( + %a = call { , i64 } @llvm.riscv.vleff.nxv4i16( * %0, i64 %1) - - ret %a + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv4i16( +declare { , i64 } @llvm.riscv.vleff.mask.nxv4i16( , *, , i64); -define @intrinsic_vleff_mask_v_nxv4i16_nxv4i16( %0, * %1, %2, i64 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv4i16_nxv4i16( %0, * %1, %2, i64 %3, i64* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vle16ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv4i16( + %a = call { , i64 } @llvm.riscv.vleff.mask.nxv4i16( %0, * %1, %2, i64 %3) + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv8i16( +declare { , i64 } @llvm.riscv.vleff.nxv8i16( *, i64); -define @intrinsic_vleff_v_nxv8i16_nxv8i16(* %0, i64 %1) nounwind { +define @intrinsic_vleff_v_nxv8i16_nxv8i16(* %0, i64 %1, i64* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vle16ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv8i16( + %a = call { , i64 } @llvm.riscv.vleff.nxv8i16( * %0, i64 %1) - - ret %a + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv8i16( +declare { , i64 } @llvm.riscv.vleff.mask.nxv8i16( , *, , i64); -define @intrinsic_vleff_mask_v_nxv8i16_nxv8i16( %0, * %1, %2, i64 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv8i16_nxv8i16( %0, * %1, %2, i64 %3, i64* %4) nounwind { +; 
CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vle16ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv8i16( + %a = call { , i64 } @llvm.riscv.vleff.mask.nxv8i16( %0, * %1, %2, i64 %3) + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv16i16( +declare { , i64 } @llvm.riscv.vleff.nxv16i16( *, i64); -define @intrinsic_vleff_v_nxv16i16_nxv16i16(* %0, i64 %1) nounwind { +define @intrinsic_vleff_v_nxv16i16_nxv16i16(* %0, i64 %1, i64* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vle16ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv16i16( + %a = call { , i64 } @llvm.riscv.vleff.nxv16i16( * %0, i64 %1) - - ret %a + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv16i16( +declare { , i64 } @llvm.riscv.vleff.mask.nxv16i16( , *, , i64); -define @intrinsic_vleff_mask_v_nxv16i16_nxv16i16( %0, * %1, %2, i64 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv16i16_nxv16i16( %0, * %1, %2, i64 %3, i64* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vle16ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv16i16( + %a = call { , i64 } @llvm.riscv.vleff.mask.nxv16i16( %0, * %1, %2, i64 %3) + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv32i16( +declare { , i64 } @llvm.riscv.vleff.nxv32i16( *, i64); -define @intrinsic_vleff_v_nxv32i16_nxv32i16(* %0, i64 %1) nounwind { +define @intrinsic_vleff_v_nxv32i16_nxv32i16(* %0, i64 %1, i64* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu +; CHECK-NEXT: vle16ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv32i16( + %a = call { , i64 } @llvm.riscv.vleff.nxv32i16( * %0, i64 %1) - - ret %a + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv32i16( +declare { , i64 } @llvm.riscv.vleff.mask.nxv32i16( , *, , i64); -define @intrinsic_vleff_mask_v_nxv32i16_nxv32i16( %0, * %1, %2, i64 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv32i16_nxv32i16( %0, * %1, %2, i64 %3, i64* %4) nounwind { +; CHECK-LABEL: 
intrinsic_vleff_mask_v_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m8,tu,mu +; CHECK-NEXT: vle16ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv32i16( + %a = call { , i64 } @llvm.riscv.vleff.mask.nxv32i16( %0, * %1, %2, i64 %3) + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv1f16( +declare { , i64 } @llvm.riscv.vleff.nxv1f16( *, i64); -define @intrinsic_vleff_v_nxv1f16_nxv1f16(* %0, i64 %1) nounwind { +define @intrinsic_vleff_v_nxv1half_nxv1f16(* %0, i64 %1, i64* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv1half_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vle16ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv1f16( + %a = call { , i64 } @llvm.riscv.vleff.nxv1f16( * %0, i64 %1) - - ret %a + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv1f16( +declare { , i64 } @llvm.riscv.vleff.mask.nxv1f16( , *, , i64); -define @intrinsic_vleff_mask_v_nxv1f16_nxv1f16( %0, * %1, %2, i64 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv1half_nxv1f16( %0, * %1, %2, i64 %3, i64* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1half_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vle16ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv1f16( + %a = call { , i64 } @llvm.riscv.vleff.mask.nxv1f16( %0, * %1, %2, i64 %3) + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv2f16( +declare { , i64 } @llvm.riscv.vleff.nxv2f16( *, i64); -define @intrinsic_vleff_v_nxv2f16_nxv2f16(* %0, i64 %1) nounwind { +define @intrinsic_vleff_v_nxv2half_nxv2f16(* %0, i64 %1, i64* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv2half_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vle16ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv2f16( + %a = call { , i64 } @llvm.riscv.vleff.nxv2f16( * %0, i64 %1) - - ret %a + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv2f16( +declare { , i64 } @llvm.riscv.vleff.mask.nxv2f16( , *, , i64); -define @intrinsic_vleff_mask_v_nxv2f16_nxv2f16( %0, * %1, %2, i64 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv2half_nxv2f16( %0, * %1, %2, i64 %3, i64* %4) nounwind { +; CHECK-LABEL: 
intrinsic_vleff_mask_v_nxv2half_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vle16ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv2f16( + %a = call { , i64 } @llvm.riscv.vleff.mask.nxv2f16( %0, * %1, %2, i64 %3) + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv4f16( +declare { , i64 } @llvm.riscv.vleff.nxv4f16( *, i64); -define @intrinsic_vleff_v_nxv4f16_nxv4f16(* %0, i64 %1) nounwind { +define @intrinsic_vleff_v_nxv4half_nxv4f16(* %0, i64 %1, i64* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv4half_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vle16ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv4f16( + %a = call { , i64 } @llvm.riscv.vleff.nxv4f16( * %0, i64 %1) - - ret %a + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv4f16( +declare { , i64 } @llvm.riscv.vleff.mask.nxv4f16( , *, , i64); -define @intrinsic_vleff_mask_v_nxv4f16_nxv4f16( %0, * %1, %2, i64 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv4half_nxv4f16( %0, * %1, %2, i64 %3, i64* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4half_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vle16ff.v v16, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t - %a = call @llvm.riscv.vleff.mask.nxv4f16( + %a = call { , i64 } @llvm.riscv.vleff.mask.nxv4f16( %0, * %1, %2, i64 %3) + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %4 - ret %a + ret %b } -declare @llvm.riscv.vleff.nxv8f16( +declare { , i64 } @llvm.riscv.vleff.nxv8f16( *, i64); -define @intrinsic_vleff_v_nxv8f16_nxv8f16(* %0, i64 %1) nounwind { +define @intrinsic_vleff_v_nxv8half_nxv8f16(* %0, i64 %1, i64* %2) nounwind { +; CHECK-LABEL: intrinsic_vleff_v_nxv8half_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vle16ff.v v16, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: ret entry: -; CHECK-LABEL: intrinsic_vleff_v_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vle16ff.v {{v[0-9]+}}, (a0) - %a = call @llvm.riscv.vleff.nxv8f16( + %a = call { , i64 } @llvm.riscv.vleff.nxv8f16( * %0, i64 %1) - - ret %a + %b = extractvalue { , i64 } %a, 0 + %c = extractvalue { , i64 } %a, 1 + store i64 %c, i64* %2 + ret %b } -declare @llvm.riscv.vleff.mask.nxv8f16( +declare { , i64 } @llvm.riscv.vleff.mask.nxv8f16( , *, , i64); -define @intrinsic_vleff_mask_v_nxv8f16_nxv8f16( %0, * %1, %2, i64 %3) nounwind { +define @intrinsic_vleff_mask_v_nxv8half_nxv8f16( %0, * %1, %2, i64 %3, i64* %4) nounwind { +; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8half_nxv8f16: +; 
CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vle16ff.v v16, (a0), v0.t
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f16_nxv8f16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t
-  %a = call <vscale x 8 x half> @llvm.riscv.vleff.mask.nxv8f16(
+  %a = call { <vscale x 8 x half>, i64 } @llvm.riscv.vleff.mask.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half>* %1,
     <vscale x 8 x i1> %2,
     i64 %3)
+  %b = extractvalue { <vscale x 8 x half>, i64 } %a, 0
+  %c = extractvalue { <vscale x 8 x half>, i64 } %a, 1
+  store i64 %c, i64* %4
-  ret <vscale x 8 x half> %a
+  ret <vscale x 8 x half> %b
 }

-declare <vscale x 16 x half> @llvm.riscv.vleff.nxv16f16(
+declare { <vscale x 16 x half>, i64 } @llvm.riscv.vleff.nxv16f16(
   <vscale x 16 x half>*,
   i64);

-define <vscale x 16 x half> @intrinsic_vleff_v_nxv16f16_nxv16f16(<vscale x 16 x half>* %0, i64 %1) nounwind {
+define <vscale x 16 x half> @intrinsic_vleff_v_nxv16half_nxv16f16(<vscale x 16 x half>* %0, i64 %1, i64* %2) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv16half_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vle16ff.v v16, (a0)
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv16f16_nxv16f16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0)
-  %a = call <vscale x 16 x half> @llvm.riscv.vleff.nxv16f16(
+  %a = call { <vscale x 16 x half>, i64 } @llvm.riscv.vleff.nxv16f16(
     <vscale x 16 x half>* %0,
     i64 %1)
-
-  ret <vscale x 16 x half> %a
+  %b = extractvalue { <vscale x 16 x half>, i64 } %a, 0
+  %c = extractvalue { <vscale x 16 x half>, i64 } %a, 1
+  store i64 %c, i64* %2
+  ret <vscale x 16 x half> %b
 }

-declare <vscale x 16 x half> @llvm.riscv.vleff.mask.nxv16f16(
+declare { <vscale x 16 x half>, i64 } @llvm.riscv.vleff.mask.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>*,
   <vscale x 16 x i1>,
   i64);

-define <vscale x 16 x half> @intrinsic_vleff_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x half> @intrinsic_vleff_mask_v_nxv16half_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i1> %2, i64 %3, i64* %4) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16half_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vle16ff.v v16, (a0), v0.t
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16f16_nxv16f16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t
-  %a = call <vscale x 16 x half> @llvm.riscv.vleff.mask.nxv16f16(
+  %a = call { <vscale x 16 x half>, i64 } @llvm.riscv.vleff.mask.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half>* %1,
     <vscale x 16 x i1> %2,
     i64 %3)
+  %b = extractvalue { <vscale x 16 x half>, i64 } %a, 0
+  %c = extractvalue { <vscale x 16 x half>, i64 } %a, 1
+  store i64 %c, i64* %4
-  ret <vscale x 16 x half> %a
+  ret <vscale x 16 x half> %b
 }

-declare <vscale x 32 x half> @llvm.riscv.vleff.nxv32f16(
+declare { <vscale x 32 x half>, i64 } @llvm.riscv.vleff.nxv32f16(
   <vscale x 32 x half>*,
   i64);

-define <vscale x 32 x half> @intrinsic_vleff_v_nxv32f16_nxv32f16(<vscale x 32 x half>* %0, i64 %1) nounwind {
+define <vscale x 32 x half> @intrinsic_vleff_v_nxv32half_nxv32f16(<vscale x 32 x half>* %0, i64 %1, i64* %2) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv32half_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vle16ff.v v16, (a0)
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv32f16_nxv32f16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0)
-  %a = call <vscale x 32 x half> @llvm.riscv.vleff.nxv32f16(
+  %a = call { <vscale x 32 x half>, i64 } @llvm.riscv.vleff.nxv32f16(
     <vscale x 32 x half>* %0,
     i64 %1)
-
-  ret <vscale x 32 x half> %a
+  %b = extractvalue { <vscale x 32 x half>, i64 } %a, 0
+  %c = extractvalue { <vscale x 32 x half>, i64 } %a, 1
+  store i64 %c, i64* %2
+  ret <vscale x 32 x half> %b
 }

-declare <vscale x 32 x half> @llvm.riscv.vleff.mask.nxv32f16(
+declare { <vscale x 32 x half>, i64 } @llvm.riscv.vleff.mask.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>*,
   <vscale x 32 x i1>,
   i64);

-define <vscale x 32 x half> @intrinsic_vleff_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+define <vscale x 32 x half> @intrinsic_vleff_mask_v_nxv32half_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i1> %2, i64 %3, i64* %4) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32half_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
+; CHECK-NEXT:    vle16ff.v v16, (a0), v0.t
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32f16_nxv32f16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t
-  %a = call <vscale x 32 x half> @llvm.riscv.vleff.mask.nxv32f16(
+  %a = call { <vscale x 32 x half>, i64 } @llvm.riscv.vleff.mask.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half>* %1,
     <vscale x 32 x i1> %2,
     i64 %3)
+  %b = extractvalue { <vscale x 32 x half>, i64 } %a, 0
+  %c = extractvalue { <vscale x 32 x half>, i64 } %a, 1
+  store i64 %c, i64* %4
-  ret <vscale x 32 x half> %a
+  ret <vscale x 32 x half> %b
 }

-declare <vscale x 1 x i8> @llvm.riscv.vleff.nxv1i8(
+declare { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.nxv1i8(
   <vscale x 1 x i8>*,
   i64);

-define <vscale x 1 x i8> @intrinsic_vleff_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, i64 %1) nounwind {
+define <vscale x 1 x i8> @intrinsic_vleff_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, i64 %1, i64* %2) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vle8ff.v v16, (a0)
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv1i8_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0)
-  %a = call <vscale x 1 x i8> @llvm.riscv.vleff.nxv1i8(
+  %a = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.nxv1i8(
     <vscale x 1 x i8>* %0,
     i64 %1)
-
-  ret <vscale x 1 x i8> %a
+  %b = extractvalue { <vscale x 1 x i8>, i64 } %a, 0
+  %c = extractvalue { <vscale x 1 x i8>, i64 } %a, 1
+  store i64 %c, i64* %2
+  ret <vscale x 1 x i8> %b
 }

-declare <vscale x 1 x i8> @llvm.riscv.vleff.mask.nxv1i8(
+declare { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
   <vscale x 1 x i1>,
   i64);

-define <vscale x 1 x i8> @intrinsic_vleff_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vleff_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, i64 %3, i64* %4) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vle8ff.v v16, (a0), v0.t
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t
-  %a = call <vscale x 1 x i8> @llvm.riscv.vleff.mask.nxv1i8(
+  %a = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8>* %1,
     <vscale x 1 x i1> %2,
     i64 %3)
+  %b = extractvalue { <vscale x 1 x i8>, i64 } %a, 0
+  %c = extractvalue { <vscale x 1 x i8>, i64 } %a, 1
+  store i64 %c, i64* %4
-  ret <vscale x 1 x i8> %a
+  ret <vscale x 1 x i8> %b
 }

-declare <vscale x 2 x i8> @llvm.riscv.vleff.nxv2i8(
+declare { <vscale x 2 x i8>, i64 } @llvm.riscv.vleff.nxv2i8(
   <vscale x 2 x i8>*,
   i64);

-define <vscale x 2 x i8> @intrinsic_vleff_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, i64 %1) nounwind {
+define <vscale x 2 x i8> @intrinsic_vleff_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, i64 %1, i64* %2) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vle8ff.v v16, (a0)
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv2i8_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0)
-  %a = call <vscale x 2 x i8> @llvm.riscv.vleff.nxv2i8(
+  %a = call { <vscale x 2 x i8>, i64 } @llvm.riscv.vleff.nxv2i8(
     <vscale x 2 x i8>* %0,
     i64 %1)
-
-  ret <vscale x 2 x i8> %a
+  %b = extractvalue { <vscale x 2 x i8>, i64 } %a, 0
+  %c = extractvalue { <vscale x 2 x i8>, i64 } %a, 1
+  store i64 %c, i64* %2
+  ret <vscale x 2 x i8> %b
 }

-declare <vscale x 2 x i8> @llvm.riscv.vleff.mask.nxv2i8(
+declare { <vscale x 2 x i8>, i64 } @llvm.riscv.vleff.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>*,
   <vscale x 2 x i1>,
   i64);

-define <vscale x 2 x i8> @intrinsic_vleff_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vleff_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, i64 %3, i64* %4) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vle8ff.v v16, (a0), v0.t
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t
-  %a = call <vscale x 2 x i8> @llvm.riscv.vleff.mask.nxv2i8(
+  %a = call { <vscale x 2 x i8>, i64 } @llvm.riscv.vleff.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8>* %1,
     <vscale x 2 x i1> %2,
     i64 %3)
+  %b = extractvalue { <vscale x 2 x i8>, i64 } %a, 0
+  %c = extractvalue { <vscale x 2 x i8>, i64 } %a, 1
+  store i64 %c, i64* %4
-  ret <vscale x 2 x i8> %a
+  ret <vscale x 2 x i8> %b
 }

-declare <vscale x 4 x i8> @llvm.riscv.vleff.nxv4i8(
+declare { <vscale x 4 x i8>, i64 } @llvm.riscv.vleff.nxv4i8(
   <vscale x 4 x i8>*,
   i64);

-define <vscale x 4 x i8> @intrinsic_vleff_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, i64 %1) nounwind {
+define <vscale x 4 x i8> @intrinsic_vleff_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, i64 %1, i64* %2) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vle8ff.v v16, (a0)
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv4i8_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0)
-  %a = call <vscale x 4 x i8> @llvm.riscv.vleff.nxv4i8(
+  %a = call { <vscale x 4 x i8>, i64 } @llvm.riscv.vleff.nxv4i8(
     <vscale x 4 x i8>* %0,
     i64 %1)
-
-  ret <vscale x 4 x i8> %a
+  %b = extractvalue { <vscale x 4 x i8>, i64 } %a, 0
+  %c = extractvalue { <vscale x 4 x i8>, i64 } %a, 1
+  store i64 %c, i64* %2
+  ret <vscale x 4 x i8> %b
 }

-declare <vscale x 4 x i8> @llvm.riscv.vleff.mask.nxv4i8(
+declare { <vscale x 4 x i8>, i64 } @llvm.riscv.vleff.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>*,
   <vscale x 4 x i1>,
   i64);

-define <vscale x 4 x i8> @intrinsic_vleff_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vleff_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, i64 %3, i64* %4) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vle8ff.v v16, (a0), v0.t
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t
-  %a = call <vscale x 4 x i8> @llvm.riscv.vleff.mask.nxv4i8(
+  %a = call { <vscale x 4 x i8>, i64 } @llvm.riscv.vleff.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8>* %1,
     <vscale x 4 x i1> %2,
     i64 %3)
+  %b = extractvalue { <vscale x 4 x i8>, i64 } %a, 0
+  %c = extractvalue { <vscale x 4 x i8>, i64 } %a, 1
+  store i64 %c, i64* %4
-  ret <vscale x 4 x i8> %a
+  ret <vscale x 4 x i8> %b
 }

-declare <vscale x 8 x i8> @llvm.riscv.vleff.nxv8i8(
+declare { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(
   <vscale x 8 x i8>*,
   i64);

-define <vscale x 8 x i8> @intrinsic_vleff_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, i64 %1) nounwind {
+define <vscale x 8 x i8> @intrinsic_vleff_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, i64 %1, i64* %2) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vle8ff.v v16, (a0)
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv8i8_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0)
-  %a = call <vscale x 8 x i8> @llvm.riscv.vleff.nxv8i8(
+  %a = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(
     <vscale x 8 x i8>* %0,
     i64 %1)
-
-  ret <vscale x 8 x i8> %a
+  %b = extractvalue { <vscale x 8 x i8>, i64 } %a, 0
+  %c = extractvalue { <vscale x 8 x i8>, i64 } %a, 1
+  store i64 %c, i64* %2
+  ret <vscale x 8 x i8> %b
 }

-declare <vscale x 8 x i8> @llvm.riscv.vleff.mask.nxv8i8(
+declare { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>*,
   <vscale x 8 x i1>,
   i64);

-define <vscale x 8 x i8> @intrinsic_vleff_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vleff_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, i64 %3, i64* %4) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vle8ff.v v16, (a0), v0.t
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vleff.mask.nxv8i8(
+  %a = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8>* %1,
     <vscale x 8 x i1> %2,
     i64 %3)
+  %b = extractvalue { <vscale x 8 x i8>, i64 } %a, 0
+  %c = extractvalue { <vscale x 8 x i8>, i64 } %a, 1
+  store i64 %c, i64* %4
-  ret <vscale x 8 x i8> %a
+  ret <vscale x 8 x i8> %b
 }

-declare <vscale x 16 x i8> @llvm.riscv.vleff.nxv16i8(
+declare { <vscale x 16 x i8>, i64 } @llvm.riscv.vleff.nxv16i8(
   <vscale x 16 x i8>*,
   i64);

-define <vscale x 16 x i8> @intrinsic_vleff_v_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, i64 %1) nounwind {
+define <vscale x 16 x i8> @intrinsic_vleff_v_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, i64 %1, i64* %2) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vle8ff.v v16, (a0)
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv16i8_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0)
-  %a = call <vscale x 16 x i8> @llvm.riscv.vleff.nxv16i8(
+  %a = call { <vscale x 16 x i8>, i64 } @llvm.riscv.vleff.nxv16i8(
     <vscale x 16 x i8>* %0,
     i64 %1)
-
-  ret <vscale x 16 x i8> %a
+  %b = extractvalue { <vscale x 16 x i8>, i64 } %a, 0
+  %c = extractvalue { <vscale x 16 x i8>, i64 } %a, 1
+  store i64 %c, i64* %2
+  ret <vscale x 16 x i8> %b
 }

-declare <vscale x 16 x i8> @llvm.riscv.vleff.mask.nxv16i8(
+declare { <vscale x 16 x i8>, i64 } @llvm.riscv.vleff.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>*,
   <vscale x 16 x i1>,
   i64);

-define <vscale x 16 x i8> @intrinsic_vleff_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vleff_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, i64 %3, i64* %4) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vle8ff.v v16, (a0), v0.t
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t
-  %a = call <vscale x 16 x i8> @llvm.riscv.vleff.mask.nxv16i8(
+  %a = call { <vscale x 16 x i8>, i64 } @llvm.riscv.vleff.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8>* %1,
     <vscale x 16 x i1> %2,
     i64 %3)
+  %b = extractvalue { <vscale x 16 x i8>, i64 } %a, 0
+  %c = extractvalue { <vscale x 16 x i8>, i64 } %a, 1
+  store i64 %c, i64* %4
-  ret <vscale x 16 x i8> %a
+  ret <vscale x 16 x i8> %b
 }

-declare <vscale x 32 x i8> @llvm.riscv.vleff.nxv32i8(
+declare { <vscale x 32 x i8>, i64 } @llvm.riscv.vleff.nxv32i8(
   <vscale x 32 x i8>*,
   i64);

-define <vscale x 32 x i8> @intrinsic_vleff_v_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, i64 %1) nounwind {
+define <vscale x 32 x i8> @intrinsic_vleff_v_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, i64 %1, i64* %2) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT:    vle8ff.v v16, (a0)
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv32i8_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0)
-  %a = call <vscale x 32 x i8> @llvm.riscv.vleff.nxv32i8(
+  %a = call { <vscale x 32 x i8>, i64 } @llvm.riscv.vleff.nxv32i8(
     <vscale x 32 x i8>* %0,
     i64 %1)
-
-  ret <vscale x 32 x i8> %a
+  %b = extractvalue { <vscale x 32 x i8>, i64 } %a, 0
+  %c = extractvalue { <vscale x 32 x i8>, i64 } %a, 1
+  store i64 %c, i64* %2
+  ret <vscale x 32 x i8> %b
 }

-declare <vscale x 32 x i8> @llvm.riscv.vleff.mask.nxv32i8(
+declare { <vscale x 32 x i8>, i64 } @llvm.riscv.vleff.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>*,
   <vscale x 32 x i1>,
   i64);

-define <vscale x 32 x i8> @intrinsic_vleff_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vleff_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, i64 %3, i64* %4) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vle8ff.v v16, (a0), v0.t
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t
-  %a = call <vscale x 32 x i8> @llvm.riscv.vleff.mask.nxv32i8(
+  %a = call { <vscale x 32 x i8>, i64 } @llvm.riscv.vleff.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8>* %1,
     <vscale x 32 x i1> %2,
     i64 %3)
+  %b = extractvalue { <vscale x 32 x i8>, i64 } %a, 0
+  %c = extractvalue { <vscale x 32 x i8>, i64 } %a, 1
+  store i64 %c, i64* %4
-  ret <vscale x 32 x i8> %a
+  ret <vscale x 32 x i8> %b
 }

-declare <vscale x 64 x i8> @llvm.riscv.vleff.nxv64i8(
+declare { <vscale x 64 x i8>, i64 } @llvm.riscv.vleff.nxv64i8(
   <vscale x 64 x i8>*,
   i64);

-define <vscale x 64 x i8> @intrinsic_vleff_v_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, i64 %1) nounwind {
+define <vscale x 64 x i8> @intrinsic_vleff_v_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, i64 %1, i64* %2) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv64i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
+; CHECK-NEXT:    vle8ff.v v16, (a0)
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv64i8_nxv64i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0)
-  %a = call <vscale x 64 x i8> @llvm.riscv.vleff.nxv64i8(
+  %a = call { <vscale x 64 x i8>, i64 } @llvm.riscv.vleff.nxv64i8(
     <vscale x 64 x i8>* %0,
     i64 %1)
-
-  ret <vscale x 64 x i8> %a
+  %b = extractvalue { <vscale x 64 x i8>, i64 } %a, 0
+  %c = extractvalue { <vscale x 64 x i8>, i64 } %a, 1
+  store i64 %c, i64* %2
+  ret <vscale x 64 x i8> %b
 }

-declare <vscale x 64 x i8> @llvm.riscv.vleff.mask.nxv64i8(
+declare { <vscale x 64 x i8>, i64 } @llvm.riscv.vleff.mask.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>*,
   <vscale x 64 x i1>,
   i64);

-define <vscale x 64 x i8> @intrinsic_vleff_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+define <vscale x 64 x i8> @intrinsic_vleff_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, i64 %3, i64* %4) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
+; CHECK-NEXT:    vle8ff.v v16, (a0), v0.t
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    sd a0, 0(a2)
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t
-  %a = call <vscale x 64 x i8> @llvm.riscv.vleff.mask.nxv64i8(
+  %a = call { <vscale x 64 x i8>, i64 } @llvm.riscv.vleff.mask.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8>* %1,
     <vscale x 64 x i1> %2,
     i64 %3)
+  %b = extractvalue { <vscale x 64 x i8>, i64 } %a, 0
+  %c = extractvalue { <vscale x 64 x i8>, i64 } %a, 1
+  store i64 %c, i64* %4
-  ret <vscale x 64 x i8> %a
+  ret <vscale x 64 x i8> %b
 }
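For reference, a minimal standalone IR sketch (not part of the patch) of the consumer pattern the updated tests exercise for the struct-returning vleff intrinsic, using the nxv1i8 variant; the function name @vleff_usage_sketch is illustrative only:

declare { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.nxv1i8(<vscale x 1 x i8>*, i64)

define <vscale x 1 x i8> @vleff_usage_sketch(<vscale x 1 x i8>* %p, i64 %avl, i64* %newvl) {
entry:
  ; Fault-only-first load: returns the loaded data plus the vl actually produced,
  ; which may be trimmed if an element past the first faults.
  %r = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.nxv1i8(<vscale x 1 x i8>* %p, i64 %avl)
  %data = extractvalue { <vscale x 1 x i8>, i64 } %r, 0
  %vl = extractvalue { <vscale x 1 x i8>, i64 } %r, 1
  ; Hand the new vl back to the caller, mirroring the tests above.
  store i64 %vl, i64* %newvl
  ret <vscale x 1 x i8> %data
}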