diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -219,7 +219,7 @@ let VLOperand = 2; } // For unit stride load with mask - // Input: (maskedoff, pointer, mask, vl, ta) + // Input: (maskedoff, pointer, mask, vl, policy) class RISCVUSLoadMask : Intrinsic<[llvm_anyvector_ty ], [LLVMMatchType<0>, @@ -231,7 +231,7 @@ let VLOperand = 3; } // For unit stride fault-only-first load with mask - // Input: (maskedoff, pointer, mask, vl, ta) + // Input: (maskedoff, pointer, mask, vl, policy) // Output: (data, vl) // NOTE: We model this with default memory properties since we model writing // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work. @@ -255,7 +255,7 @@ let VLOperand = 3; } // For strided load with mask - // Input: (maskedoff, pointer, stride, mask, vl, ta) + // Input: (maskedoff, pointer, stride, mask, vl, policy) class RISCVSLoadMask : Intrinsic<[llvm_anyvector_ty ], [LLVMMatchType<0>, @@ -277,7 +277,7 @@ let VLOperand = 3; } // For indexed load with mask - // Input: (maskedoff, pointer, index, mask, vl, ta) + // Input: (maskedoff, pointer, index, mask, vl, policy) class RISCVILoadMask : Intrinsic<[llvm_anyvector_ty ], [LLVMMatchType<0>, @@ -358,7 +358,7 @@ let VLOperand = 2; } // For destination vector type is the same as first source vector (with mask). - // Input: (vector_in, mask, vl, ta) + // Input: (vector_in, vector_in, mask, vl, policy) class RISCVUnaryAAMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, @@ -367,7 +367,8 @@ [ImmArg>, IntrNoMem]>, RISCVVIntrinsic { let VLOperand = 3; } - class RISCVUnaryAAMaskTU + // Input: (passthru, vector_in, vector_in, mask, vl) + class RISCVCompress : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], @@ -392,7 +393,7 @@ let VLOperand = 3; } // For destination vector type is the same as first and second source vector. - // Input: (vector_in, vector_in, int_vector_in, vl, ta) + // Input: (vector_in, vector_in, int_vector_in, vl, policy) class RISCVRGatherVVMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>, @@ -411,7 +412,7 @@ let VLOperand = 3; } // For destination vector type is the same as first and second source vector. - // Input: (vector_in, vector_in, int16_vector_in, vl, ta) + // Input: (vector_in, vector_in, int16_vector_in, vl, policy) class RISCVRGatherEI16VVMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, @@ -426,14 +427,14 @@ // Input: (passthru, vector_in, xlen_in, vl) class RISCVGatherVXNoMask : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, LLVMMatchType<1>], [IntrNoMem]>, RISCVVIntrinsic { let VLOperand = 3; } // For destination vector type is the same as first source vector (with mask). // Second operand is XLen. - // Input: (maskedoff, vector_in, xlen_in, mask, vl, ta) + // Input: (maskedoff, vector_in, xlen_in, mask, vl, policy) class RISCVGatherVXMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, @@ -453,7 +454,7 @@ let VLOperand = 3; } // For destination vector type is the same as first source vector (with mask). 
- // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta) + // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy) class RISCVBinaryAAXMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, @@ -475,7 +476,7 @@ } // For destination vector type is the same as first source vector (with mask). // The second source operand must match the destination type or be an XLen scalar. - // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta) + // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy) class RISCVBinaryAAShiftMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, @@ -495,7 +496,7 @@ let VLOperand = 3; } // For destination vector type is NOT the same as first source vector (with mask). - // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta) + // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy) class RISCVBinaryABXMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, @@ -517,7 +518,7 @@ } // For destination vector type is NOT the same as first source vector (with mask). // The second source operand must match the destination type or be an XLen scalar. - // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta) + // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy) class RISCVBinaryABShiftMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, @@ -615,7 +616,7 @@ } // For Saturating binary operations with mask. // The destination vector type is the same as first source vector. - // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta) + // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy) class RISCVSaturatingBinaryAAXMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, @@ -639,7 +640,7 @@ // For Saturating binary operations with mask. // The destination vector type is the same as first source vector. // The second source operand matches the destination type or is an XLen scalar. - // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta) + // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy) class RISCVSaturatingBinaryAAShiftMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, @@ -662,7 +663,7 @@ // For Saturating binary operations with mask. // The destination vector type is NOT the same as first source vector (with mask). // The second source operand matches the destination type or is an XLen scalar. 
- // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta) + // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy) class RISCVSaturatingBinaryABShiftMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, @@ -671,6 +672,7 @@ [ImmArg>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic { let VLOperand = 4; } + // Input: (vector_in, vector_in, vector_in/scalar_in, vl) class RISCVTernaryAAAXNoMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, @@ -678,6 +680,7 @@ [IntrNoMem]>, RISCVVIntrinsic { let VLOperand = 3; } + // Input: (vector_in, vector_in, vector_in/scalar_in, mask, vl, policy) class RISCVTernaryAAAXMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, @@ -686,6 +689,7 @@ [ImmArg>, IntrNoMem]>, RISCVVIntrinsic { let VLOperand = 4; } + // NoMask Vector Multiply-Add operations, its first operand can not be undef. // Input: (vector_in, vector_in/scalar, vector_in, vl, policy) class RISCVTernaryAAXANoMask : Intrinsic<[llvm_anyvector_ty], @@ -695,6 +699,7 @@ let ScalarOperand = 1; let VLOperand = 3; } + // Mask Vector Multiply-Add operations, its first operand can not be undef. // Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy class RISCVTernaryAAXAMask : Intrinsic<[llvm_anyvector_ty], @@ -705,6 +710,7 @@ let ScalarOperand = 1; let VLOperand = 4; } + // NoMask Widening Vector Multiply-Add operations, its first operand can not be undef. // Input: (vector_in, vector_in/scalar, vector_in, vl, policy) class RISCVTernaryWideNoMask : Intrinsic< [llvm_anyvector_ty], @@ -714,6 +720,7 @@ let ScalarOperand = 1; let VLOperand = 3; } + // Mask Widening Vector Multiply-Add operations, its first operand can not be undef. // Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy class RISCVTernaryWideMask : Intrinsic< [llvm_anyvector_ty], @@ -772,7 +779,7 @@ let VLOperand = 2; } // For destination vector type is NOT the same as source vector (with mask). - // Input: (maskedoff, vector_in, mask, vl, ta) + // Input: (maskedoff, vector_in, mask, vl, policy) class RISCVUnaryABMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyvector_ty, @@ -824,7 +831,7 @@ let VLOperand = 2; } // For Conversion unary operations with mask. - // Input: (maskedoff, vector_in, mask, vl, ta) + // Input: (maskedoff, vector_in, mask, vl, policy) class RISCVConversionMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyvector_ty, @@ -844,7 +851,7 @@ let VLOperand = 1; } // For unit stride segment load with mask - // Input: (maskedoff, pointer, mask, vl, ta) + // Input: (maskedoff, pointer, mask, vl, policy) class RISCVUSSegLoadMask : Intrinsic, !add(nf, -1))), @@ -870,7 +877,7 @@ let VLOperand = 1; } // For unit stride fault-only-first segment load with mask - // Input: (maskedoff, pointer, mask, vl, ta) + // Input: (maskedoff, pointer, mask, vl, policy) // Output: (data, vl) // NOTE: We model this with default memory properties since we model writing // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work. 
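The masked intrinsic classes above replace the old tail-agnostic flag (ta) with a combined policy operand; the vsetvli insertion changes and the masked-tama.ll/masked-tamu.ll tests further down rely on its encoding. As a minimal sketch, assuming only the TAIL_AGNOSTIC and MASK_AGNOSTIC values defined in RISCVBaseInfo.h below (the decodePolicy helper is hypothetical, used purely for illustration): policy 0 selects tu, mu; 1 selects ta, mu; 2 selects tu, ma; and 3 selects ta, ma.

// Minimal sketch, not part of this patch: it mirrors the RISCVII policy bits
// added in RISCVBaseInfo.h below so the vsetvli suffixes in the new tests are
// easy to read. decodePolicy is a hypothetical helper, not an LLVM API.
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <string>

namespace RISCVII {
enum : uint64_t {
  TAIL_AGNOSTIC = 1, // bit 0 -> vta
  MASK_AGNOSTIC = 2, // bit 1 -> vma
};
} // namespace RISCVII

// Maps the policy immediate passed as the last operand of a masked intrinsic
// to the tail/mask policy the inserted vsetvli ends up using.
static std::string decodePolicy(uint64_t Policy) {
  assert(Policy <= (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC) &&
         "Invalid Policy Value");
  bool TailAgnostic = Policy & RISCVII::TAIL_AGNOSTIC;
  bool MaskAgnostic = Policy & RISCVII::MASK_AGNOSTIC;
  return std::string(TailAgnostic ? "ta" : "tu") + ", " +
         (MaskAgnostic ? "ma" : "mu");
}

int main() {
  // masked-tama.ll passes iXLen 3 with an undef maskedoff: "ta, ma".
  std::printf("policy 3 -> %s\n", decodePolicy(3).c_str());
  // masked-tamu.ll passes iXLen 1 with a real maskedoff: "ta, mu".
  std::printf("policy 1 -> %s\n", decodePolicy(1).c_str());
  return 0;
}

Pseudos whose UsesMaskPolicy flag is clear (for example unmasked forms, stores, and reductions) are not affected by the mask-policy bit, so the pass remains free to pick either mask setting for them.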
@@ -896,7 +903,7 @@ let VLOperand = 2; } // For stride segment load with mask - // Input: (maskedoff, pointer, offset, mask, vl, ta) + // Input: (maskedoff, pointer, offset, mask, vl, policy) class RISCVSSegLoadMask : Intrinsic, !add(nf, -1))), @@ -920,7 +927,7 @@ let VLOperand = 2; } // For indexed segment load with mask - // Input: (maskedoff, pointer, index, mask, vl, ta) + // Input: (maskedoff, pointer, index, mask, vl, policy) class RISCVISegLoadMask : Intrinsic, !add(nf, -1))), @@ -1360,7 +1367,7 @@ defm vrgather_vx : RISCVRGatherVX; defm vrgatherei16_vv : RISCVRGatherEI16VV; - def "int_riscv_vcompress" : RISCVUnaryAAMaskTU; + def "int_riscv_vcompress" : RISCVCompress; defm vaaddu : RISCVSaturatingBinaryAAX; defm vaadd : RISCVSaturatingBinaryAAX; diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h --- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h +++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h @@ -88,6 +88,13 @@ // Pseudos. IsRVVWideningReductionShift = HasVecPolicyOpShift + 1, IsRVVWideningReductionMask = 1 << IsRVVWideningReductionShift, + + // Indicates whether this instruction cares about mask policy. If it does not, + // the mask policy can be either agnostic or undisturbed. For example, the + // results of unmasked, store, and reduction operations are not affected by + // mask policy, so the compiler is free to select either one. + UsesMaskPolicyShift = IsRVVWideningReductionShift + 1, + UsesMaskPolicyMask = 1 << UsesMaskPolicyShift, }; // Match with the definitions in RISCVInstrFormatsV.td @@ -110,8 +117,8 @@ }; enum { - TAIL_UNDISTURBED = 0, TAIL_AGNOSTIC = 1, + MASK_AGNOSTIC = 2, }; // Helper functions to read TSFlags. @@ -156,6 +163,10 @@ static inline bool isRVVWideningReduction(uint64_t TSFlags) { return TSFlags & IsRVVWideningReductionMask; } +/// \returns true if mask policy is valid for the instruction. +static inline bool UsesMaskPolicy(uint64_t TSFlags) { + return TSFlags & UsesMaskPolicyMask; +} // RISC-V Specific Machine Operand Flags enum { diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp --- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp +++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp @@ -502,29 +502,46 @@ unsigned NumOperands = MI.getNumExplicitOperands(); bool HasPolicy = RISCVII::hasVecPolicyOp(TSFlags); - // Default to tail agnostic unless the destination is tied to a source. - // Unless the source is undef. In that case the user would have some control - // over the tail values. Some pseudo instructions force a tail agnostic policy - // despite having a tied def. + // If the instruction has a policy argument, use it. + // If there is no policy argument, default to tail agnostic unless the + // destination is tied to a source and that source is not undef (in that + // case the user has some control over the policy values). Some pseudo + // instructions force a tail agnostic policy despite having a tied def. bool ForceTailAgnostic = RISCVII::doesForceTailAgnostic(TSFlags); bool TailAgnostic = true; - // If the instruction has policy argument, use the argument. + bool UsesMaskPolicy = RISCVII::UsesMaskPolicy(TSFlags); + // FIXME: Could we look at the instructions above or below to choose a + // matching mask policy and reduce the number of vsetvli instructions? The + // default mask policy is agnostic if the instruction uses mask policy and
+ // undisturbed otherwise. Since most masked operations are mask undisturbed, + // we could possibly remove vsetvli toggles between masked and unmasked code. + bool MaskAgnostic = UsesMaskPolicy; + unsigned UseOpIdx; if (HasPolicy) { const MachineOperand &Op = MI.getOperand(MI.getNumExplicitOperands() - 1); - TailAgnostic = Op.getImm() & 0x1; - } - - unsigned UseOpIdx; - if (!(ForceTailAgnostic || (HasPolicy && TailAgnostic)) && - MI.isRegTiedToUseOperand(0, &UseOpIdx)) { + uint64_t Policy = Op.getImm(); + assert(Policy <= (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC) && + "Invalid Policy Value"); + // In some cases a passthru/maskedoff operand that is mismatched with the + // policy does not make sense (e.g. a tied operand that is IMPLICIT_DEF with + // a non-TAMA policy, or one that is not IMPLICIT_DEF with a TAMA policy), + // but the user set the policy value explicitly, so the compiler does not fix it. + TailAgnostic = Policy & RISCVII::TAIL_AGNOSTIC; + MaskAgnostic = Policy & RISCVII::MASK_AGNOSTIC; + } else if (!ForceTailAgnostic && MI.isRegTiedToUseOperand(0, &UseOpIdx)) { TailAgnostic = false; + if (UsesMaskPolicy) + MaskAgnostic = false; // If the tied operand is an IMPLICIT_DEF we can keep TailAgnostic. const MachineOperand &UseMO = MI.getOperand(UseOpIdx); MachineInstr *UseMI = MRI->getVRegDef(UseMO.getReg()); if (UseMI) { UseMI = elideCopies(UseMI, MRI); - if (UseMI && UseMI->isImplicitDef()) + if (UseMI && UseMI->isImplicitDef()) { TailAgnostic = true; + if (UsesMaskPolicy) + MaskAgnostic = true; + } } } @@ -559,8 +576,8 @@ } } else InstrInfo.setAVLReg(RISCV::NoRegister); - InstrInfo.setVTYPE(VLMul, SEW, /*TailAgnostic*/ TailAgnostic, - /*MaskAgnostic*/ false, MaskRegOp, StoreOp, ScalarMovOp); + InstrInfo.setVTYPE(VLMul, SEW, TailAgnostic, MaskAgnostic, MaskRegOp, StoreOp, + ScalarMovOp); return InstrInfo; } diff --git a/llvm/lib/Target/RISCV/RISCVInstrFormats.td b/llvm/lib/Target/RISCV/RISCVInstrFormats.td --- a/llvm/lib/Target/RISCV/RISCVInstrFormats.td +++ b/llvm/lib/Target/RISCV/RISCVInstrFormats.td @@ -196,7 +196,10 @@ let TSFlags{16} = HasVecPolicyOp; bit IsRVVWideningReduction = 0; - let TSFlags{17} = IsRVVWideningReduction; + let TSFlags{17} = IsRVVWideningReduction; + + bit UsesMaskPolicy = 0; + let TSFlags{18} = UsesMaskPolicy; } // Pseudo instructions diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -685,6 +685,7 @@ let HasSEWOp = 1; let HasMergeOp = 1; let HasVecPolicyOp = 1; + let UsesMaskPolicy = 1; let BaseInstr = !cast(PseudoToVInst.VInst); } @@ -733,6 +734,7 @@ let HasSEWOp = 1; let HasMergeOp = 1; let HasVecPolicyOp = 1; + let UsesMaskPolicy = 1; let BaseInstr = !cast(PseudoToVInst.VInst); } @@ -787,6 +789,7 @@ let HasSEWOp = 1; let HasMergeOp = 1; let HasVecPolicyOp = 1; + let UsesMaskPolicy = 1; let BaseInstr = !cast(PseudoToVInst.VInst); } @@ -912,6 +915,7 @@ let HasVLOp = 1; let HasSEWOp = 1; let HasMergeOp = 1; + let UsesMaskPolicy = 1; let BaseInstr = !cast(PseudoToVInst.VInst); } @@ -973,6 +977,7 @@ let HasVLOp = 1; let HasSEWOp = 1; let HasMergeOp = 1; + let UsesMaskPolicy = 1; let BaseInstr = !cast(PseudoToVInst.VInst); } @@ -989,6 +994,7 @@ let HasVLOp = 1; let HasSEWOp = 1; let HasMergeOp = 1; let HasVecPolicyOp = 1; + let UsesMaskPolicy = 1; let BaseInstr = !cast(PseudoToVInst.VInst); } @@ -1164,6 +1170,7 @@ let HasSEWOp = 1; let HasMergeOp = 1; let HasVecPolicyOp = 1; + let UsesMaskPolicy = 1; let BaseInstr = 
!cast(PseudoToVInst.VInst); } @@ -1184,6 +1191,9 @@ let HasVLOp = 1; let HasSEWOp = 1; let HasMergeOp = 1; + // FIXME: In current design, we would not change the mask policy, so + // UsesMaskPolicy is false. We could fix after add the policy operand. + let UsesMaskPolicy = 0; let BaseInstr = !cast(PseudoToVInst.VInst); } @@ -1206,6 +1216,7 @@ let HasSEWOp = 1; let HasMergeOp = 0; // Merge is also rs2. let HasVecPolicyOp = 1; + let UsesMaskPolicy = 1; let BaseInstr = !cast(PseudoToVInst.VInst); } @@ -1325,6 +1336,7 @@ let HasSEWOp = 1; let HasMergeOp = 1; let HasVecPolicyOp = 1; + let UsesMaskPolicy = 1; let BaseInstr = !cast(PseudoToVInst.VInst); } @@ -1358,6 +1370,7 @@ let HasSEWOp = 1; let HasMergeOp = 1; let HasVecPolicyOp = 1; + let UsesMaskPolicy = 1; let BaseInstr = !cast(PseudoToVInst.VInst); } @@ -1397,6 +1410,7 @@ let HasSEWOp = 1; let HasMergeOp = 1; let HasVecPolicyOp = 1; + let UsesMaskPolicy = 1; let BaseInstr = !cast(PseudoToVInst.VInst); } diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll @@ -0,0 +1,1269 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+experimental-zvfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+experimental-zvfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64 + +declare @llvm.riscv.vle.mask.nxv1i64( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_nxv1i64_nxv1i64(* %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv1i64( + undef, + * %0, + %1, + iXLen %2, iXLen 3) + + ret %a +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv1i64( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(* %0, %1, iXLen %2, iXLen* %3) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV32-NEXT: vle64ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vle64ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv1i64( + undef, + * %0, + %1, + iXLen %2, iXLen 3) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %3 + + ret %b +} + +declare @llvm.riscv.vlse.mask.nxv1i64( + , + *, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(* %0, iXLen %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv1i64( + undef, + * %0, + iXLen %1, + %2, + iXLen %3, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen( + , + *, + , + , + iXLen, + iXLen); + +define 
@intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1iXLen(* %0, %1, %2, iXLen %3) nounwind { +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen( + undef, + * %0, + %1, + %2, + iXLen %3, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( + undef, + %0, + %1, + %2, + iXLen %3, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vwadd.vv v10, v8, v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8( + undef, + %0, + %1, + %2, + iXLen %3, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv1i8.i8( + , + , + i8, + , + iXLen, + iXLen); + +define @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrsub.mask.nxv1i8.i8( + undef, + %0, + i8 %1, + %2, + iXLen %3, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv1i8.i8( + , + , + i8, + , + iXLen, + iXLen); + +define @intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsub.mask.nxv1i8.i8( + undef, + %0, + i8 -9, + %1, + iXLen %2, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vzext_mask_vf2_nxv1i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vzext.vf2 v9, v8, v0.t +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32( + undef, + %0, + %1, + iXLen %2, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vzext_mask_vf4_nxv1i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vzext.vf4 v9, v8, v0.t +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16( + undef, + %0, + %1, + iXLen %2, iXLen 3) + + ret %a +} +declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vzext_mask_vf8_nxv1i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vzext.vf8 v9, v8, v0.t +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vzext.mask.nxv1i64.nxv1i8( + undef, + %0, + %1, + iXLen %2, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vand.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8( + undef, + %0, + %1, + %2, + iXLen %3, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vsll.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8( + undef, + %0, + %1, + %2, + iXLen %3, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vnsra.wv v8, v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8( + undef, + %0, + %1, + %2, + iXLen %3, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vmin.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmin.mask.nxv1i8.nxv1i8( + undef, + %0, + %1, + %2, + iXLen %3, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8( + undef, + %0, + %1, + %2, + iXLen %3, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vwmul.vv v10, v8, v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8( + undef, + %0, + %1, + %2, + iXLen %3, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vmacc.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8( + undef, + %0, + %1, + 
%2, + iXLen %3, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vwmacc.vv v10, v8, v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8( + undef, + %0, + %1, + %2, + iXLen %3, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8( + undef, + %0, + %1, + %2, + iXLen %3, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vaadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8( + undef, + %0, + %1, + %2, + iXLen %3, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vsmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8( + undef, + %0, + %1, + %2, + iXLen %3, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8( + undef, + %0, + %1, + %2, + iXLen %3, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vnclip.wv v8, v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8( + undef, + %0, + %1, + %2, + iXLen %3, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16( + undef, + %0, + %1, + %2, + iXLen %3, iXLen 3) + + 
ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vfwadd.vv v10, v8, v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16( + undef, + %0, + %1, + %2, + iXLen %3, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16( + undef, + %0, + %1, + %2, + iXLen %3, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16( + undef, + %0, + %1, + %2, + iXLen %3, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vfrdiv.mask.nxv1f16.f16( + , + , + half, + , + iXLen, + iXLen); + +define @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16( %0, half %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrdiv.mask.nxv1f16.f16( + undef, + %0, + half %1, + %2, + iXLen %3, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vfwmul.vv v10, v8, v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16( + undef, + %0, + %1, + %2, + iXLen %3, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vfmacc.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16( + undef, + %0, + %1, + %2, + iXLen %3, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vfwmacc.vv v10, v8, v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; 
CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16( + undef, + %0, + %1, + %2, + iXLen %3, iXLen 3); + + ret %a +} + +declare @llvm.riscv.vfsqrt.mask.nxv1f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vfsqrt.v v8, v8, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsqrt.mask.nxv1f16( + undef, + %0, + %1, + iXLen %2, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vfrsqrt7.mask.nxv1f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vfrsqrt7.v v8, v8, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsqrt7.mask.nxv1f16( + undef, + %0, + %1, + iXLen %2, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vfrec7.mask.nxv1f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vfrec7.v v8, v8, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrec7.mask.nxv1f16( + undef, + %0, + %1, + iXLen %2, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16( + undef, + %0, + %1, + %2, + iXLen %3, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16( + undef, + %0, + %1, + %2, + iXLen %3, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv1i16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vfclass.v v8, v8, v0.t +; CHECK-NEXT: ret + %0, + %1, + iXLen %2) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv1i16( + undef, + %0, + %1, + iXLen %2, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vfcvt.xu.f.v v8, v8, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16( + undef, + %0, + %1, + iXLen %2, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16( + , 
+ , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16( + undef, + %0, + %1, + iXLen %2, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vfwcvt.xu.f.v v9, v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16( + undef, + %0, + %1, + iXLen %2, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vfwcvt.f.x.v v9, v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8( + undef, + %0, + %1, + iXLen %2, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vfwcvt.f.f.v v9, v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16( + undef, + %0, + %1, + iXLen %2, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vfncvt.xu.f.w v9, v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16( + undef, + %0, + %1, + iXLen %2, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vfncvt.f.x.w v9, v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32( + undef, + %0, + %1, + iXLen %2, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vfncvt.f.f.w v9, v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32( + undef, + %0, + %1, + 
iXLen %2, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vslideup.mask.nxv1i8( + , + , + iXLen, + , + iXLen, iXLen); + +define @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8( %0, iXLen %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vslideup.vx v9, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vslideup.mask.nxv1i8( + undef, + %0, + iXLen %1, + %2, + iXLen %3, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vslide1up.mask.nxv1i8.i8( + , + , + i8, + , + iXLen, + iXLen); + +define @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vslide1up.vx v9, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vslide1up.mask.nxv1i8.i8( + undef, + %0, + i8 %1, + %2, + iXLen %3, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv1f16.f16( + , + , + half, + , + iXLen, + iXLen); + +define @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16( %0, half %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vfslide1up.vf v9, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1up.mask.nxv1f16.f16( + undef, + %0, + half %1, + %2, + iXLen %3, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vrgather.vv v10, v8, v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen( + undef, + %0, + %1, + %2, + iXLen %3, iXLen 3) + + ret %a +} + +declare @llvm.riscv.vrgatherei16.vv.mask.nxv1i8( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vrgatherei16.vv v10, v8, v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8( + undef, + %0, + %1, + %2, + iXLen %3, iXLen 3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll @@ -0,0 +1,1198 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+experimental-zvfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+experimental-zvfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64 + +declare @llvm.riscv.vle.mask.nxv1i64( + , + *, + , + iXLen, + iXLen) +define @intrinsic_vle_mask_nxv1i64_nxv1i64( %0, * %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: 
intrinsic_vle_mask_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv1i64( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv1i64( + , + *, + , + iXLen, + iXLen) +define @intrinsic_vleff_mask_v_nxv1i64_nxv1i64( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV32-NEXT: vle64ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vle64ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv1i64( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare @llvm.riscv.vlse.mask.nxv1i64( + , + *, + iXLen, + , + iXLen, + iXLen) +define @intrinsic_vlse_mask_v_nxv1i64_nxv1i64( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv1i64( + %0, + * %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen( + , + *, + , + , + iXLen, + iXLen) +define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1iXLen( %0, * %1, %2, %3, iXLen %4) nounwind { +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen( + %0, + * %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, iXLen) +define @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vwadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv1i8.i8( + , + , + i8, + , + iXLen, + iXLen) +define @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrsub.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv1i8.i8( + , + , + i8, + , + iXLen, + iXLen) +define 
@intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsub.mask.nxv1i8.i8( + %0, + %1, + i8 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i32( + , + , + , + iXLen, + iXLen) +define @intrinsic_vzext_mask_vf2_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vzext.vf2 v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i16( + , + , + , + iXLen, + iXLen) +define @intrinsic_vzext_mask_vf4_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vzext.vf4 v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} +declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i8( + , + , + , + iXLen, + iXLen) +define @intrinsic_vzext_mask_vf8_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vzext.vf8 v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vand.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vand.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vsll.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vnsra.wv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vmin.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: 
vmin.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmin.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vwmul.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, iXLen) +define @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmacc.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8( + , + , + , + , + iXLen, iXLen) +define @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vwmacc.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vsmul.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfrdiv.mask.nxv1f16.f16( + , + , + half, + , + iXLen, + iXLen) +define @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vfrdiv.mask.nxv1f16.f16( + %0, + %1, + half %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwmul.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16( + , + , + , + , + iXLen, iXLen) +define @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmacc.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16( + , + , + , + , + iXLen, iXLen) +define @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwmacc.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + ret %a +} + +declare @llvm.riscv.vfsqrt.mask.nxv1f16( + , + , + , + iXLen, + iXLen) +define @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsqrt.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsqrt.mask.nxv1f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfrsqrt7.mask.nxv1f16( + , + , + , + iXLen, + iXLen) +define @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsqrt7.mask.nxv1f16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfrec7.mask.nxv1f16( + , + , + , + iXLen, + iXLen) +define @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfrec7.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrec7.mask.nxv1f16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare 
@llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv1i16( + , + , + , + iXLen, iXLen) +define @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfclass.v v8, v9, v0.t +; CHECK-NEXT: ret + %0, + %1, + %2, + iXLen %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv1i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16( + , + , + , + iXLen, + iXLen) +define @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16( + , + , + , + iXLen, + iXLen) +define @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16( + , + , + , + iXLen, + iXLen) +define @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8( + , + , + , + iXLen, + iXLen) +define @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16( + , + , + , + iXLen, + iXLen) +define @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwcvt.f.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16( + , + , + , + iXLen, + iXLen) +define 
@intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32( + , + , + , + iXLen, + iXLen) +define @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32( + , + , + , + iXLen, + iXLen) +define @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vslideup.mask.nxv1i8( + , + , + iXLen, + , + iXLen, iXLen) +define @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vslideup.mask.nxv1i8( + %0, + %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vslide1up.mask.nxv1i8.i8( + , + , + i8, + , + iXLen, + iXLen) +define @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vslide1up.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv1f16.f16( + , + , + half, + , + iXLen, + iXLen) +define @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1up.mask.nxv1f16.f16( + %0, + %1, + half %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrgatherei16.vv.mask.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define 
@intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll
@@ -0,0 +1,1198 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64
+
+declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>*,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+define <vscale x 1 x i64> @intrinsic_vle_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vle_mask_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, ma
+; CHECK-NEXT: vle64.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64>* %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>*,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e64, m1, tu, ma
+; RV32-NEXT: vle64ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT: vle64ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+  %a = call { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64>* %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 2)
+  %b = extractvalue { <vscale x 1 x i64>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 1 x i64>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 1 x i64> %b
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>*,
+  iXLen,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, ma
+; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64>* %1,
+    iXLen %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>*,
+  <vscale x 1 x iXLen>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1iXLen(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x iXLen> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8>* %1,
+    <vscale x 1 x iXLen> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  iXLen, iXLen)
+define <vscale x 1 x i8> @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry: + %a = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: vwadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv1i8.i8( + , + , + i8, + , + iXLen, + iXLen) +define @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma +; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrsub.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv1i8.i8( + , + , + i8, + , + iXLen, + iXLen) +define @intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsub.mask.nxv1i8.i8( + %0, + %1, + i8 -9, + %2, + iXLen %3, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i32( + , + , + , + iXLen, + iXLen) +define @intrinsic_vzext_mask_vf2_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma +; CHECK-NEXT: vzext.vf2 v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32( + %1, + %2, + %0, + iXLen %3, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i16( + , + , + , + iXLen, + iXLen) +define @intrinsic_vzext_mask_vf4_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma +; CHECK-NEXT: vzext.vf4 v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16( + %1, + %2, + %0, + iXLen %3, iXLen 2) + + ret %a +} +declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i8( + , + , + , + iXLen, + iXLen) +define @intrinsic_vzext_mask_vf8_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma +; CHECK-NEXT: vzext.vf8 v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8( + %1, + %2, + %0, + iXLen %3, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vand.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: vand.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsll.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define 
@intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: vnsra.wv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vmin.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmin.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: vwmul.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, iXLen) +define @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: vmacc.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8( + , + , + , + , + iXLen, iXLen) +define @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: vwmacc.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { 
+; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vfwadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: 
intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vfdiv.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vfrdiv.mask.nxv1f16.f16( + , + , + half, + , + iXLen, + iXLen) +define @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vfrdiv.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrdiv.mask.nxv1f16.f16( + %0, + %1, + half %2, + %3, + iXLen %4, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vfwmul.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16( + , + , + , + , + iXLen, iXLen) +define @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vfmacc.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16( + , + , + , + , + iXLen, iXLen) +define @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vfwmacc.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 2) + ret %a +} + +declare @llvm.riscv.vfsqrt.mask.nxv1f16( + , + , + , + iXLen, + iXLen) +define @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vfsqrt.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsqrt.mask.nxv1f16( + %0, + %1, + %2, + iXLen %3, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vfrsqrt7.mask.nxv1f16( + , + , + , + iXLen, + iXLen) +define @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsqrt7.mask.nxv1f16( + %1, + %2, + %0, + iXLen %3, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vfrec7.mask.nxv1f16( + , + , + , + iXLen, + iXLen) +define @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vfrec7.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrec7.mask.nxv1f16( + %1, + %2, + %0, + iXLen %3, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv1i16( + , + , + , + iXLen, iXLen) +define @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vfclass.v v8, v9, v0.t +; CHECK-NEXT: ret + %0, + %1, + %2, + iXLen %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv1i16( + %0, + %1, + %2, + iXLen %3, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16( + , + , + , + iXLen, + iXLen) +define @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16( + %0, + %1, + %2, + iXLen %3, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16( + , + , + , + iXLen, + iXLen) +define @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16( + %0, + %1, + %2, + iXLen %3, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16( + , + , + , + iXLen, + iXLen) +define @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vfwcvt.xu.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16( + %0, + %1, + %2, + iXLen %3, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8( + , + , + , + iXLen, + iXLen) +define @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8( + %0, + %1, + %2, + iXLen %3, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16( + , + , + , + iXLen, + iXLen) +define @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vfwcvt.f.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16( + %0, + %1, + %2, + iXLen %3, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16( + , + , + , + iXLen, + iXLen) +define @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16( + %0, + %1, + %2, + iXLen %3, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32( + , + , + , + iXLen, + iXLen) +define @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vfncvt.f.x.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32( + %0, + %1, + %2, + iXLen %3, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32( + , + , + , + iXLen, + iXLen) +define @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vfncvt.f.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32( + %0, + %1, + %2, + iXLen %3, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vslideup.mask.nxv1i8( + , + , + iXLen, + , + iXLen, iXLen) +define @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma +; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vslideup.mask.nxv1i8( + %0, + %1, + iXLen %2, + %3, + iXLen %4, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vslide1up.mask.nxv1i8.i8( + , + , + i8, + , + iXLen, + iXLen) +define @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma +; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vslide1up.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 2) + + 
ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  half,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+define <vscale x 1 x half> @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    half %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 2)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+define <vscale x 1 x i8> @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+define <vscale x 1 x i8> @intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 2)
+
+  ret <vscale x 1 x i8> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll
@@ -0,0 +1,1198 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64
+
+declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>*,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+define <vscale x 1 x i64> @intrinsic_vle_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vle_mask_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu
+; CHECK-NEXT: vle64.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64>* %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>*,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e64, m1, tu, mu
+; RV32-NEXT: vle64ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu
+; RV64-NEXT: vle64ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+  %a = call { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64>* %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 0)
+  %b = extractvalue { <vscale x 1 x i64>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 1 x i64>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 1 x i64> %b
+}
+
+declare <vscale x 1 x i64>
@llvm.riscv.vlse.mask.nxv1i64( + , + *, + iXLen, + , + iXLen, + iXLen) +define @intrinsic_vlse_mask_v_nxv1i64_nxv1i64( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv1i64( + %0, + * %1, + iXLen %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen( + , + *, + , + , + iXLen, + iXLen) +define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1iXLen( %0, * %1, %2, %3, iXLen %4) nounwind { +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen( + %0, + * %1, + %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, iXLen) +define @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vwadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv1i8.i8( + , + , + i8, + , + iXLen, + iXLen) +define @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrsub.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv1i8.i8( + , + , + i8, + , + iXLen, + iXLen) +define @intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsub.mask.nxv1i8.i8( + %0, + %1, + i8 -9, + %2, + iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i32( + , + , + , + iXLen, + iXLen) +define @intrinsic_vzext_mask_vf2_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vzext.vf2 v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32( + %1, + %2, + %0, + iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i16( + , + , + , + iXLen, + iXLen) +define @intrinsic_vzext_mask_vf4_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vzext.vf4 v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vzext.mask.nxv1i64.nxv1i16( + %1, + %2, + %0, + iXLen %3, iXLen 0) + + ret %a +} +declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i8( + , + , + , + iXLen, + iXLen) +define @intrinsic_vzext_mask_vf8_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vzext.vf8 v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8( + %1, + %2, + %0, + iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vand.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vand.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vsll.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vnsra.wv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vmin.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmin.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vwmul.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + +declare 
@llvm.riscv.vmacc.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, iXLen) +define @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vmacc.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8( + , + , + , + , + iXLen, iXLen) +define @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vwmacc.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16( + , + , + , + , + iXLen, + iXLen) +define 
@intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfwadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfdiv.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfrdiv.mask.nxv1f16.f16( + , + , + half, + , + iXLen, + iXLen) +define @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfrdiv.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrdiv.mask.nxv1f16.f16( + %0, + %1, + half %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16( + , + , + , + , + iXLen, + iXLen) +define @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfwmul.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16( + , + , + , + , + iXLen, iXLen) +define @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfmacc.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16( + , + , + , + , + iXLen, 
+  iXLen)
+define <vscale x 1 x float> @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfwmacc.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x half> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 0)
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+define <vscale x 1 x half> @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfrsqrt7.mask.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+define <vscale x 1 x half> @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.mask.nxv1f16(
+    <vscale x 1 x half> %1,
+    <vscale x 1 x half> %2,
+    <vscale x 1 x i1> %0,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+define <vscale x 1 x half> @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
+    <vscale x 1 x half> %1,
+    <vscale x 1 x half> %2,
+    <vscale x 1 x i1> %0,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+define <vscale x 1 x half> @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x half> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+define <vscale x 1 x half> @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x half> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  iXLen, iXLen)
+define <vscale x 1 x i16> @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16(
+; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfclass.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  <vscale x 1 x i16> %0,
+  <vscale x 1 x half> %1,
+  <vscale x 1 x i1> %2,
+  iXLen %3) nounwind {
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+define <vscale x 1 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16(
+  <vscale x 1 x half>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+define <vscale x 1 x half> @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(
+  <vscale x 1 x i32>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfwcvt.xu.f.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8(
+  <vscale x 1 x half>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+define <vscale x 1 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(
+  <vscale x 1 x float>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+define <vscale x 1 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfwcvt.f.f.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16(
+  <vscale x 1 x i8>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+define <vscale x 1 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32(
+  <vscale x 1 x half>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfncvt.f.x.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32(
+  <vscale x 1 x half>,
+  <vscale x 1 x float>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32(<vscale x 1 x half> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfncvt.f.f.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen,
+  <vscale x 1 x i1>,
+  iXLen, iXLen)
+define <vscale x 1 x i8> @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
+; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i8,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+define <vscale x 1 x i8> @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
+; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  half,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+define <vscale x 1 x half> @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfslide1up.vf v8, v9, fa0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    half %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+define <vscale x 1 x i8> @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+define <vscale x 1 x i8> @intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 1 x i8> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll
deleted file mode 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll
+++ /dev/null
@@ -1,67 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
-
-declare <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i8> @intrinsic_vadd_mask_tu(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vadd_mask_tu:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
-; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vadd_mask_ta(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vadd_mask_ta:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-