diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -226,6 +226,11 @@
                            [["vv", "v", "vvv"],
                             ["vf", "v", "vve"]]>;
 
+multiclass RVVFloatingBinBuiltinSetRoundingMode
+    : RVVOutOp1BuiltinSet<NAME, "xfd",
+                          [["vv", "v", "vvvu"],
+                           ["vf", "v", "vveu"]]>;
+
 multiclass RVVFloatingBinVFBuiltinSet
     : RVVOutOp1BuiltinSet<NAME, "xfd",
                           [["vf", "v", "vve"]]>;
@@ -2277,10 +2282,71 @@
 defm vnclipu : RVVUnsignedNShiftBuiltinSetRoundingMode;
 defm vnclip : RVVSignedNShiftBuiltinSetRoundingMode;
 }
+}
 
 // 14. Vector Floating-Point Instructions
+let HeaderCode =
+[{
+enum __RISCV_FRM {
+  __RISCV_FRM_RNE = 0,
+  __RISCV_FRM_RTZ = 1,
+  __RISCV_FRM_RDN = 2,
+  __RISCV_FRM_RUP = 3,
+  __RISCV_FRM_RMM = 4,
+};
+}] in def frm_enum : RVVHeader;
+
+let UnMaskedPolicyScheme = HasPassthruOperand in {
 // 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
-defm vfadd : RVVFloatingBinBuiltinSet;
+let ManualCodegen = [{
+  {
+    // LLVM intrinsic
+    // Unmasked: (passthru, op0, op1, frm, vl)
+    // Masked:   (passthru, op0, op1, mask, frm, vl, policy)
+
+    SmallVector<llvm::Value*, 7> Operands;
+    bool HasMaskedOff = !(
+        (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+        (!IsMasked && PolicyAttrs & RVV_VTA));
+    bool HasRoundModeOp = IsMasked ?
+      (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5) :
+      (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);
+
+    unsigned Offset = IsMasked ?
+        (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
+
+    if (!HasMaskedOff)
+      Operands.push_back(llvm::PoisonValue::get(ResultType));
+    else
+      Operands.push_back(Ops[IsMasked ? 1 : 0]);
+
+    Operands.push_back(Ops[Offset]);     // op0
+    Operands.push_back(Ops[Offset + 1]); // op1
+
+    if (IsMasked)
+      Operands.push_back(Ops[0]); // mask
+
+    if (HasRoundModeOp) {
+      Operands.push_back(Ops[Offset + 2]); // frm
+      Operands.push_back(Ops[Offset + 3]); // vl
+    } else {
+      Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 99)); // frm
+      Operands.push_back(Ops[Offset + 2]); // vl
+    }
+
+    if (IsMasked)
+      Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+    IntrinsicTypes = {ResultType, Ops[Offset + 1]->getType(), Ops.back()->getType()};
+    llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+    return Builder.CreateCall(F, Operands, "");
+  }
+}] in {
+  let HasFRMRoundModeOp = true in {
+    defm vfadd : RVVFloatingBinBuiltinSetRoundingMode;
+  }
+  defm vfadd : RVVFloatingBinBuiltinSet;
+}
 defm vfsub : RVVFloatingBinBuiltinSet;
 defm vfrsub : RVVFloatingBinVFBuiltinSet;
diff --git a/clang/include/clang/Basic/riscv_vector_common.td b/clang/include/clang/Basic/riscv_vector_common.td
--- a/clang/include/clang/Basic/riscv_vector_common.td
+++ b/clang/include/clang/Basic/riscv_vector_common.td
@@ -234,6 +234,10 @@
   // Set to true if the builtin is associated with tuple types.
   bit IsTuple = false;
+
+  // Set to true if the builtin has a parameter that models the floating-point
+  // rounding-mode control (frm).
+  bit HasFRMRoundModeOp = false;
 }
 
 // This is the code emitted in the header.
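Taken together, the header enum and the new `_rm` builtins give callers a per-operation rounding-mode override. A minimal usage sketch (the intrinsic and enum names are exactly those exercised by the autogenerated tests below; the wrapper function names are illustrative only):

```c
#include <riscv_vector.h>

// Explicit rounding mode: the __RISCV_FRM_* argument becomes the new i64 frm
// operand that precedes vl in the llvm.riscv.vfadd intrinsic call.
vfloat32m1_t add_round_down(vfloat32m1_t a, vfloat32m1_t b, size_t vl) {
  return __riscv_vfadd_vv_f32m1_rm(a, b, __RISCV_FRM_RDN, vl);
}

// The plain builtin is unchanged at the source level; ManualCodegen fills in
// the sentinel frm value (99) so both forms share one intrinsic signature.
vfloat32m1_t add_default(vfloat32m1_t a, vfloat32m1_t b, size_t vl) {
  return __riscv_vfadd_vv_f32m1(a, b, vl);
}
```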
diff --git a/clang/include/clang/Support/RISCVVIntrinsicUtils.h b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
--- a/clang/include/clang/Support/RISCVVIntrinsicUtils.h
+++ b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
@@ -387,6 +387,7 @@
   std::vector<int64_t> IntrinsicTypes;
   unsigned NF = 1;
   Policy PolicyAttrs;
+  bool HasFRMRoundModeOp;
 
 public:
   RVVIntrinsic(llvm::StringRef Name, llvm::StringRef Suffix,
@@ -397,7 +398,7 @@
                const RVVTypes &Types,
                const std::vector<int64_t> &IntrinsicTypes,
                const std::vector<llvm::StringRef> &RequiredFeatures,
-               unsigned NF, Policy PolicyAttrs);
+               unsigned NF, Policy PolicyAttrs, bool HasFRMRoundModeOp);
   ~RVVIntrinsic() = default;
 
   RVVTypePtr getOutputType() const { return OutputType; }
@@ -467,7 +468,7 @@
   static void updateNamesAndPolicy(bool IsMasked, bool HasPolicy,
                                    std::string &Name, std::string &BuiltinName,
                                    std::string &OverloadedName,
-                                   Policy &PolicyAttrs);
+                                   Policy &PolicyAttrs, bool HasFRMRoundModeOp);
 };
 
 // RVVRequire should be sync'ed with target features, but only
@@ -526,6 +527,7 @@
   bool HasMaskedOffOperand : 1;
   bool HasTailPolicy : 1;
   bool HasMaskPolicy : 1;
+  bool HasFRMRoundModeOp : 1;
   bool IsTuple : 1;
   uint8_t UnMaskedPolicyScheme : 2;
   uint8_t MaskedPolicyScheme : 2;
diff --git a/clang/lib/Sema/SemaRISCVVectorLookup.cpp b/clang/lib/Sema/SemaRISCVVectorLookup.cpp
--- a/clang/lib/Sema/SemaRISCVVectorLookup.cpp
+++ b/clang/lib/Sema/SemaRISCVVectorLookup.cpp
@@ -349,7 +349,8 @@
       std::string BuiltinName = "__builtin_rvv_" + std::string(Record.Name);
 
       RVVIntrinsic::updateNamesAndPolicy(IsMasked, HasPolicy, Name, BuiltinName,
-                                         OverloadedName, PolicyAttrs);
+                                         OverloadedName, PolicyAttrs,
+                                         Record.HasFRMRoundModeOp);
 
       // Put into IntrinsicList.
       size_t Index = IntrinsicList.size();
diff --git a/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
--- a/clang/lib/Support/RISCVVIntrinsicUtils.cpp
+++ b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -875,20 +875,19 @@
 //===----------------------------------------------------------------------===//
 // RVVIntrinsic implementation
 //===----------------------------------------------------------------------===//
-RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
-                           StringRef NewOverloadedName,
-                           StringRef OverloadedSuffix, StringRef IRName,
-                           bool IsMasked, bool HasMaskedOffOperand, bool HasVL,
-                           PolicyScheme Scheme, bool SupportOverloading,
-                           bool HasBuiltinAlias, StringRef ManualCodegen,
-                           const RVVTypes &OutInTypes,
-                           const std::vector<int64_t> &NewIntrinsicTypes,
-                           const std::vector<StringRef> &RequiredFeatures,
-                           unsigned NF, Policy NewPolicyAttrs)
+RVVIntrinsic::RVVIntrinsic(
+    StringRef NewName, StringRef Suffix, StringRef NewOverloadedName,
+    StringRef OverloadedSuffix, StringRef IRName, bool IsMasked,
+    bool HasMaskedOffOperand, bool HasVL, PolicyScheme Scheme,
+    bool SupportOverloading, bool HasBuiltinAlias, StringRef ManualCodegen,
+    const RVVTypes &OutInTypes, const std::vector<int64_t> &NewIntrinsicTypes,
+    const std::vector<StringRef> &RequiredFeatures, unsigned NF,
+    Policy NewPolicyAttrs, bool HasFRMRoundModeOp)
     : IRName(IRName), IsMasked(IsMasked),
       HasMaskedOffOperand(HasMaskedOffOperand), HasVL(HasVL), Scheme(Scheme),
       SupportOverloading(SupportOverloading), HasBuiltinAlias(HasBuiltinAlias),
-      ManualCodegen(ManualCodegen.str()), NF(NF), PolicyAttrs(NewPolicyAttrs) {
+      ManualCodegen(ManualCodegen.str()), NF(NF), PolicyAttrs(NewPolicyAttrs),
+      HasFRMRoundModeOp(HasFRMRoundModeOp) {
 
   // Init BuiltinName, Name and OverloadedName
   BuiltinName = NewName.str();
@@ -903,7 +902,7 @@
     OverloadedName += "_" +
        OverloadedSuffix.str();
 
   updateNamesAndPolicy(IsMasked, hasPolicy(), Name, BuiltinName, OverloadedName,
-                       PolicyAttrs);
+                       PolicyAttrs, HasFRMRoundModeOp);
 
   // Init OutputType and InputTypes
   OutputType = OutInTypes[0];
@@ -1045,11 +1044,9 @@
                        "and mask policy");
 }
 
-void RVVIntrinsic::updateNamesAndPolicy(bool IsMasked, bool HasPolicy,
-                                        std::string &Name,
-                                        std::string &BuiltinName,
-                                        std::string &OverloadedName,
-                                        Policy &PolicyAttrs) {
+void RVVIntrinsic::updateNamesAndPolicy(
+    bool IsMasked, bool HasPolicy, std::string &Name, std::string &BuiltinName,
+    std::string &OverloadedName, Policy &PolicyAttrs, bool HasFRMRoundModeOp) {
 
   auto appendPolicySuffix = [&](const std::string &suffix) {
     Name += suffix;
@@ -1086,6 +1083,11 @@
   } else
     llvm_unreachable("Unhandled policy condition");
   }
+
+  if (HasFRMRoundModeOp) {
+    Name += "_rm";
+    BuiltinName += "_rm";
+  }
 }
 
 SmallVector<PrototypeDescriptor> parsePrototypes(StringRef Prototypes) {
@@ -1132,6 +1134,7 @@
   OS << (int)Record.HasMaskedOffOperand << ",";
   OS << (int)Record.HasTailPolicy << ",";
   OS << (int)Record.HasMaskPolicy << ",";
+  OS << (int)Record.HasFRMRoundModeOp << ",";
   OS << (int)Record.IsTuple << ",";
   OS << (int)Record.UnMaskedPolicyScheme << ",";
   OS << (int)Record.MaskedPolicyScheme << ",";
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfadd.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfadd.c
@@ -10,7 +10,7 @@
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vv_f16mf4
 // CHECK-RV64-SAME: (<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], i64 99, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -20,7 +20,7 @@
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vf_f16mf4
 // CHECK-RV64-SAME: (<vscale x 1 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], half [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], half [[OP2]], i64 99, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -30,7 +30,7 @@
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vv_f16mf2
 // CHECK-RV64-SAME: (<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], i64 99, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -40,7 +40,7 @@
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vf_f16mf2
 // CHECK-RV64-SAME: (<vscale x 2 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call
@llvm.riscv.vfadd.nxv2f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -50,7 +50,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -70,7 +70,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -80,7 +80,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -100,7 +100,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 99, i64 [[VL]]) 
// CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -110,7 +110,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -120,7 +120,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { @@ -130,7 +130,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( poison, [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { @@ -140,7 +140,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { @@ -160,7 +160,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: define dso_local 
@test_vfadd_vv_f32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -190,7 +190,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { @@ -200,7 +200,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -210,7 +210,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -220,7 +220,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -230,7 +230,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -250,7 +250,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { @@ -260,7 +260,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { @@ -280,7 +280,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -290,7 +290,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], i64 99, 
i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -300,7 +300,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -310,7 +310,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -320,7 +320,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -340,7 +340,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -350,7 +350,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( 
poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -370,7 +370,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -380,7 +380,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -390,7 +390,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -400,7 +400,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -410,7 +410,7 @@ // 
CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { @@ -430,7 +430,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { @@ -440,7 +440,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { @@ -460,7 +460,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { @@ -470,7 +470,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { @@ -480,7 +480,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { @@ -490,7 +490,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { @@ -500,7 +500,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -520,7 +520,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { @@ -530,7 +530,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { @@ -550,7 +550,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { @@ -560,7 +560,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { @@ -570,7 +570,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 
99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { @@ -580,7 +580,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { @@ -590,7 +590,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -600,10 +600,610 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { return __riscv_vfadd_vf_f64m8_m(mask, op1, op2, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vv_f16mf4_rm(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd_vv_f16mf4_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vf_f16mf4_rm(vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16mf4_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vv_f16mf2_rm(vfloat16mf2_t op1, 
vfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_f16mf2_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vf_f16mf2_rm(vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16mf2_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vv_f16m1_rm(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vf_f16m1_rm(vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vv_f16m2_rm(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vf_f16m2_rm(vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vv_f16m4_rm(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vf_f16m4_rm(vfloat16m4_t op1, _Float16 op2, 
size_t vl) { + return __riscv_vfadd_vf_f16m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vv_f16m8_rm(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vf_f16m8_rm(vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_rm(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_f32mf2_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_rm(vfloat32mf2_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32mf2_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vv_f32m1_rm(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vf_f32m1_rm(vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vv_f32m2_rm(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return 
__riscv_vfadd_vv_f32m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vf_f32m2_rm(vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vv_f32m4_rm(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vf_f32m4_rm(vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vv_f32m8_rm(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vf_f32m8_rm(vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1_rm(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vf_f64m1_rm(vfloat64m1_t op1, double op2, size_t vl) { + return 
__riscv_vfadd_vf_f64m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2_rm(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vf_f64m2_rm(vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vv_f64m4_rm(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vf_f64m4_rm(vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vv_f64m8_rm(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vf_f64m8_rm(vfloat64m8_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vv_f16mf4_m_rm(vbool64_t mask, vfloat16mf4_t op1, 
vfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd_vv_f16mf4_m_rm(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vf_f16mf4_m_rm(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16mf4_m_rm(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vv_f16mf2_m_rm(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_f16mf2_m_rm(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vf_f16mf2_m_rm(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16mf2_m_rm(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vv_f16m1_m_rm(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m1_m_rm(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vf_f16m1_m_rm(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m1_m_rm(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vv_f16m2_m_rm(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m2_m_rm(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local @test_vfadd_vf_f16m2_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vf_f16m2_m_rm(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m2_m_rm(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vv_f16m4_m_rm(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m4_m_rm(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vf_f16m4_m_rm(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m4_m_rm(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vv_f16m8_m_rm(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m8_m_rm(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vf_f16m8_m_rm(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m8_m_rm(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_m_rm(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_f32mf2_m_rm(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_m_rm(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32mf2_m_rm(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vv_f32m1_m_rm(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m1_m_rm(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vf_f32m1_m_rm(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m1_m_rm(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vv_f32m2_m_rm(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m2_m_rm(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vf_f32m2_m_rm(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m2_m_rm(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vv_f32m4_m_rm(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m4_m_rm(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( poison, [[OP1]], float 
[[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vf_f32m4_m_rm(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m4_m_rm(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vv_f32m8_m_rm(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m8_m_rm(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vf_f32m8_m_rm(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m8_m_rm(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1_m_rm(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m1_m_rm(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vf_f64m1_m_rm(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m1_m_rm(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2_m_rm(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m2_m_rm(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vf_f64m2_m_rm(vbool32_t mask, vfloat64m2_t 
op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m2_m_rm(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vv_f64m4_m_rm(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m4_m_rm(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vf_f64m4_m_rm(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m4_m_rm(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vv_f64m8_m_rm(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m8_m_rm(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vf_f64m8_m_rm(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m8_m_rm(mask, op1, op2, __RISCV_FRM_RNE, vl); +} +
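The masked tests above all pass __RISCV_FRM_RNE through the explicitly suffixed _m_rm intrinsics, and their CHECK lines show the enum value becoming the immediate frm operand (i64 0) with the tail/mask-agnostic policy operand (i64 3) appended. For illustration, here is a minimal usage sketch; it is not part of the patch, and the function name add_f64m2_rtz and the choice of RTZ are assumptions for the example:

#include <riscv_vector.h>

// Masked add of two f64m2 vectors, rounding toward zero for active lanes.
// Mirroring the CHECK lines above, this should lower to
// @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64 with i64 1 (RTZ) as the frm
// operand and i64 3 (tail/mask agnostic) as the policy operand.
vfloat64m2_t add_f64m2_rtz(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2,
                           size_t vl) {
  return __riscv_vfadd_vv_f64m2_m_rm(mask, op1, op2, __RISCV_FRM_RTZ, vl);
}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfadd.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( poison, [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -20,7 +20,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 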
@llvm.riscv.vfadd.nxv1f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -30,7 +30,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( poison, [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -40,7 +40,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -50,7 +50,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -70,7 +70,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -80,7 +80,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 99, i64 
[[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -100,7 +100,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -110,7 +110,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -120,7 +120,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { @@ -130,7 +130,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( poison, [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { @@ -140,7 +140,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: define dso_local 
@test_vfadd_vv_f32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { @@ -160,7 +160,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -190,7 +190,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { @@ -200,7 +200,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -210,7 +210,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -220,7 +220,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -230,7 +230,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -250,7 +250,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { @@ -260,7 +260,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], i64 
99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { @@ -280,7 +280,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -290,7 +290,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -300,7 +300,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -310,7 +310,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -320,7 +320,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( poison, [[OP1]], 
[[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -340,7 +340,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -350,7 +350,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -370,7 +370,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -380,7 +380,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -390,7 +390,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -400,7 +400,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -410,7 +410,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { @@ -430,7 +430,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { @@ -440,7 +440,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { @@ -460,7 +460,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { @@ -470,7 +470,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { @@ -480,7 +480,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { @@ -490,7 +490,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { @@ -500,7 +500,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.mask.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -520,7 +520,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { @@ -530,7 +530,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { @@ -550,7 +550,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, 
size_t vl) { @@ -560,7 +560,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { @@ -570,7 +570,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { @@ -580,7 +580,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { @@ -590,7 +590,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -600,10 +600,610 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { return __riscv_vfadd(mask, op1, op2, vl); } 
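The regenerated checks above show the existing unsuffixed overloaded intrinsics now lowering with an frm operand of i64 99, the sentinel this patch uses for "defer to the dynamic frm CSR", while the _rm overloads added next pin that operand to the enum value passed by the caller. A minimal side-by-side sketch, not part of the patch and with illustrative function names:

#include <riscv_vector.h>

// Same overloaded __riscv_vfadd entry point; the extra frm argument selects
// the fixed-rounding variant.
vfloat16mf4_t add_dyn(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
  return __riscv_vfadd(op1, op2, vl);                  // frm operand: i64 99
}
vfloat16mf4_t add_rne(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
  return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); // frm operand: i64 0
}

+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 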
[[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vv_f16mf4_rm(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vf_f16mf4_rm(vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vv_f16mf2_rm(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vf_f16mf2_rm(vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vv_f16m1_rm(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vf_f16m1_rm(vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vv_f16m2_rm(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t 
test_vfadd_vf_f16m2_rm(vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vv_f16m4_rm(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vf_f16m4_rm(vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vv_f16m8_rm(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vf_f16m8_rm(vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_rm(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_rm(vfloat32mf2_t op1, float op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vv_f32m1_rm(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, 
__RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vf_f32m1_rm(vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vv_f32m2_rm(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vf_f32m2_rm(vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vv_f32m4_rm(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vf_f32m4_rm(vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vv_f32m8_rm(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vf_f32m8_rm(vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_rm +// CHECK-RV64-SAME: ( 
[[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1_rm(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vf_f64m1_rm(vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2_rm(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vf_f64m2_rm(vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vv_f64m4_rm(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vf_f64m4_rm(vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vv_f64m8_rm(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vf_f64m8_rm(vfloat64m8_t op1, double op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vv_f16mf4_m_rm(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vf_f16mf4_m_rm(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vv_f16mf2_m_rm(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vf_f16mf2_m_rm(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vv_f16m1_m_rm(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vf_f16m1_m_rm(vbool16_t mask, vfloat16m1_t op1, _Float16 
op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vv_f16m2_m_rm(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vf_f16m2_m_rm(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vv_f16m4_m_rm(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vf_f16m4_m_rm(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vv_f16m8_m_rm(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vf_f16m8_m_rm(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_m_rm(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_m_rm(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vv_f32m1_m_rm(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vf_f32m1_m_rm(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vv_f32m2_m_rm(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vf_f32m2_m_rm(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
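+// ---------------------------------------------------------------------------
+// Illustrative sketch (editor's note, not emitted by update_cc_test_checks.py):
+// the new _rm overloads take an explicit rounding mode from the __RISCV_FRM
+// enum in the generated header, and that value is forwarded verbatim as the
+// frm operand of the LLVM intrinsic (the `i64 0` in the calls above is
+// __RISCV_FRM_RNE). Overloads without a rounding-mode argument pass the
+// sentinel 99 instead, which appears to mean "use the dynamic rounding mode
+// currently held in the frm CSR". The helper below is hypothetical and only
+// demonstrates the calling convention.
+vfloat32m1_t add_round_down_sketch(vfloat32m1_t a, vfloat32m1_t b, size_t vl) {
+  // __RISCV_FRM_RDN (= 2) statically selects round-toward-negative-infinity,
+  // independent of whatever rounding mode the frm CSR holds at run time.
+  return __riscv_vfadd(a, b, __RISCV_FRM_RDN, vl);
+}
+// ---------------------------------------------------------------------------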
+vfloat32m4_t test_vfadd_vv_f32m4_m_rm(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vf_f32m4_m_rm(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vv_f32m8_m_rm(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vf_f32m8_m_rm(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1_m_rm(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vf_f64m1_m_rm(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2_m_rm(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_m_rm 
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vf_f64m2_m_rm(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vv_f64m4_m_rm(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vf_f64m4_m_rm(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vv_f64m8_m_rm(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_m_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vf_f64m8_m_rm(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfadd.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -20,7 +20,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -30,7 +30,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -40,7 +40,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -50,7 +50,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -70,7 +70,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -80,7 +80,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -100,7 +100,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -110,7 +110,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -120,7 +120,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { @@ -130,7 +130,7 @@ // CHECK-RV64-LABEL: define dso_local 
@test_vfadd_vv_f32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { @@ -140,7 +140,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { @@ -160,7 +160,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 99, i64 [[VL]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { @@ -190,7 +190,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { @@ -200,7 +200,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { @@ -210,7 +210,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -220,7 +220,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { @@ -230,7 +230,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( 
[[MASKEDOFF]], [[OP1]], double [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { @@ -250,7 +250,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { @@ -260,7 +260,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { @@ -280,7 +280,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { @@ -290,7 +290,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -300,7 +300,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_tu // CHECK-RV64-SAME: ( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { @@ -310,7 +310,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -320,7 +320,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -340,7 +340,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -350,7 +350,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -370,7 +370,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -380,7 +380,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -390,7 +390,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -400,7 +400,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -410,7 +410,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { @@ -430,7 +430,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { @@ -440,7 +440,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( 
[[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { @@ -460,7 +460,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { @@ -470,7 +470,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { @@ -480,7 +480,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { @@ -490,7 +490,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { @@ -500,7 +500,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], 
i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -520,7 +520,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { @@ -530,7 +530,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { @@ -550,7 +550,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { @@ -560,7 +560,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { @@ -570,7 +570,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { @@ -580,7 +580,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { @@ -590,7 +590,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -600,7 +600,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( 
[[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { @@ -610,7 +610,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -620,7 +620,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -630,7 +630,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -640,7 +640,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -650,7 +650,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], 
[[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -660,7 +660,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -670,7 +670,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -680,7 +680,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -690,7 +690,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -700,7 +700,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -710,7 +710,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { @@ -730,7 +730,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { @@ -740,7 +740,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { @@ -750,7 +750,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t 
test_vfadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { @@ -760,7 +760,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { @@ -770,7 +770,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { @@ -780,7 +780,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { @@ -790,7 +790,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { @@ -800,7 +800,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4_tumu(vbool8_t mask, 
vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { @@ -810,7 +810,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -820,7 +820,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { @@ -830,7 +830,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { @@ -840,7 +840,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { @@ -850,7 +850,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, 
size_t vl) { @@ -860,7 +860,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { @@ -870,7 +870,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { @@ -880,7 +880,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { @@ -890,7 +890,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -900,7 +900,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { @@ -910,7 +910,7 @@ // 
CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -920,7 +920,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -930,7 +930,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -940,7 +940,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -950,7 +950,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -960,7 +960,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_mu 
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -970,7 +970,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -980,7 +980,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -990,7 +990,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -1000,7 +1000,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -1010,7 +1010,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -1020,7 +1020,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { @@ -1030,7 +1030,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { @@ -1040,7 +1040,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { @@ -1050,7 +1050,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { @@ -1060,7 +1060,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { @@ -1070,7 +1070,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { @@ -1080,7 +1080,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { @@ -1090,7 +1090,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { @@ -1100,7 +1100,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { @@ -1110,7 +1110,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -1120,7 +1120,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { @@ -1130,7 +1130,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { @@ -1140,7 +1140,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { @@ -1150,7 +1150,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { @@ -1160,7 +1160,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { @@ -1170,7 +1170,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { @@ -1180,7 +1180,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { @@ -1190,7 +1190,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -1200,10 +1200,1210 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { return __riscv_vfadd_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_tu_rm +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vv_f16mf4_tu_rm(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd_vv_f16mf4_tu_rm(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_tu_rm +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vf_f16mf4_tu_rm(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16mf4_tu_rm(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_tu_rm +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vv_f16mf2_tu_rm(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_f16mf2_tu_rm(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_tu_rm +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vf_f16mf2_tu_rm(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16mf2_tu_rm(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_tu_rm +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vv_f16m1_tu_rm(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m1_tu_rm(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_tu_rm +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vf_f16m1_tu_rm(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m1_tu_rm(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_tu_rm +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vv_f16m2_tu_rm(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m2_tu_rm(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_tu_rm +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vf_f16m2_tu_rm(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m2_tu_rm(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_tu_rm +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vv_f16m4_tu_rm(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m4_tu_rm(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_tu_rm +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vf_f16m4_tu_rm(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m4_tu_rm(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_tu_rm +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vv_f16m8_tu_rm(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m8_tu_rm(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_tu_rm +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vf_f16m8_tu_rm(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m8_tu_rm(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_tu_rm +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_tu_rm(vfloat32mf2_t maskedoff, 
vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_f32mf2_tu_rm(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_tu_rm +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_tu_rm(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32mf2_tu_rm(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_tu_rm +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vv_f32m1_tu_rm(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m1_tu_rm(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_tu_rm +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vf_f32m1_tu_rm(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m1_tu_rm(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_tu_rm +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vv_f32m2_tu_rm(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m2_tu_rm(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_tu_rm +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vf_f32m2_tu_rm(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m2_tu_rm(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_tu_rm +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vv_f32m4_tu_rm(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m4_tu_rm(maskedoff, op1, op2, 
__RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_tu_rm +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vf_f32m4_tu_rm(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m4_tu_rm(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_tu_rm +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vv_f32m8_tu_rm(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m8_tu_rm(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_tu_rm +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vf_f32m8_tu_rm(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m8_tu_rm(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_tu_rm +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1_tu_rm(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m1_tu_rm(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_tu_rm +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vf_f64m1_tu_rm(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m1_tu_rm(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_tu_rm +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2_tu_rm(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m2_tu_rm(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_tu_rm +// CHECK-RV64-SAME: 
( [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vf_f64m2_tu_rm(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m2_tu_rm(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_tu_rm +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vv_f64m4_tu_rm(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m4_tu_rm(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_tu_rm +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vf_f64m4_tu_rm(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m4_tu_rm(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_tu_rm +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vv_f64m8_tu_rm(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m8_tu_rm(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_tu_rm +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vf_f64m8_tu_rm(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m8_tu_rm(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_tum_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vv_f16mf4_tum_rm(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd_vv_f16mf4_tum_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_tum_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vf_f16mf4_tum_rm(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16mf4_tum_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_tum_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vv_f16mf2_tum_rm(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_f16mf2_tum_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_tum_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vf_f16mf2_tum_rm(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16mf2_tum_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_tum_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vv_f16m1_tum_rm(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m1_tum_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_tum_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vf_f16m1_tum_rm(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m1_tum_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_tum_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vv_f16m2_tum_rm(vbool8_t mask, vfloat16m2_t 
maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m2_tum_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_tum_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vf_f16m2_tum_rm(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m2_tum_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_tum_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vv_f16m4_tum_rm(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m4_tum_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_tum_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vf_f16m4_tum_rm(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m4_tum_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_tum_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vv_f16m8_tum_rm(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m8_tum_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_tum_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vf_f16m8_tum_rm(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m8_tum_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_tum_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_tum_rm(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_f32mf2_tum_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_tum_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_tum_rm(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32mf2_tum_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_tum_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vv_f32m1_tum_rm(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m1_tum_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_tum_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vf_f32m1_tum_rm(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m1_tum_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_tum_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vv_f32m2_tum_rm(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m2_tum_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_tum_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vf_f32m2_tum_rm(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m2_tum_rm(mask, 
maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_tum_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vv_f32m4_tum_rm(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m4_tum_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_tum_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vf_f32m4_tum_rm(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m4_tum_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_tum_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vv_f32m8_tum_rm(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m8_tum_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_tum_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vf_f32m8_tum_rm(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m8_tum_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_tum_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1_tum_rm(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m1_tum_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_tum_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double 
[[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vf_f64m1_tum_rm(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m1_tum_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_tum_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2_tum_rm(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m2_tum_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_tum_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vf_f64m2_tum_rm(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m2_tum_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_tum_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vv_f64m4_tum_rm(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m4_tum_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_tum_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vf_f64m4_tum_rm(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m4_tum_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_tum_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vv_f64m8_tum_rm(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m8_tum_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_tum_rm +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vf_f64m8_tum_rm(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m8_tum_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vv_f16mf4_tumu_rm(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd_vv_f16mf4_tumu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vf_f16mf4_tumu_rm(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16mf4_tumu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vv_f16mf2_tumu_rm(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_f16mf2_tumu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vf_f16mf2_tumu_rm(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16mf2_tumu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat16m1_t test_vfadd_vv_f16m1_tumu_rm(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m1_tumu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vf_f16m1_tumu_rm(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m1_tumu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vv_f16m2_tumu_rm(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m2_tumu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vf_f16m2_tumu_rm(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m2_tumu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vv_f16m4_tumu_rm(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m4_tumu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vf_f16m4_tumu_rm(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m4_tumu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vv_f16m8_tumu_rm(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m8_tumu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vf_f16m8_tumu_rm(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m8_tumu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_tumu_rm(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_f32mf2_tumu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_tumu_rm(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32mf2_tumu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vv_f32m1_tumu_rm(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m1_tumu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vf_f32m1_tumu_rm(vbool32_t mask, vfloat32m1_t 
maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m1_tumu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vv_f32m2_tumu_rm(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m2_tumu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vf_f32m2_tumu_rm(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m2_tumu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vv_f32m4_tumu_rm(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m4_tumu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vf_f32m4_tumu_rm(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m4_tumu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vv_f32m8_tumu_rm(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m8_tumu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vf_f32m8_tumu_rm(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m8_tumu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1_tumu_rm(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m1_tumu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vf_f64m1_tumu_rm(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m1_tumu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2_tumu_rm(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m2_tumu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vf_f64m2_tumu_rm(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m2_tumu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vv_f64m4_tumu_rm(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m4_tumu_rm(mask, 
maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vf_f64m4_tumu_rm(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m4_tumu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vv_f64m8_tumu_rm(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m8_tumu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vf_f64m8_tumu_rm(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m8_tumu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vv_f16mf4_mu_rm(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd_vv_f16mf4_mu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vf_f16mf4_mu_rm(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16mf4_mu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], 
[[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vv_f16mf2_mu_rm(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_f16mf2_mu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vf_f16mf2_mu_rm(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16mf2_mu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vv_f16m1_mu_rm(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m1_mu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vf_f16m1_mu_rm(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m1_mu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vv_f16m2_mu_rm(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m2_mu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vf_f16m2_mu_rm(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m2_mu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_mu_rm +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vv_f16m4_mu_rm(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m4_mu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vf_f16m4_mu_rm(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m4_mu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vv_f16m8_mu_rm(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m8_mu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vf_f16m8_mu_rm(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m8_mu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_mu_rm(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_f32mf2_mu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfadd_vf_f32mf2_mu_rm(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32mf2_mu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vv_f32m1_mu_rm(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m1_mu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vf_f32m1_mu_rm(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m1_mu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vv_f32m2_mu_rm(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m2_mu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vf_f32m2_mu_rm(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m2_mu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vv_f32m4_mu_rm(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m4_mu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vf_f32m4_mu_rm(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m4_mu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vv_f32m8_mu_rm(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m8_mu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vf_f32m8_mu_rm(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m8_mu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1_mu_rm(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m1_mu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vf_f64m1_mu_rm(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m1_mu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2_mu_rm(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m2_mu_rm(mask, maskedoff, op1, 
op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vf_f64m2_mu_rm(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m2_mu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vv_f64m4_mu_rm(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m4_mu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vf_f64m4_mu_rm(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m4_mu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vv_f64m8_mu_rm(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m8_mu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vf_f64m8_mu_rm(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m8_mu_rm(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfadd.c @@ -10,7 
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfadd.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfadd.c
@@ -10,7 +10,7 @@
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vv_f16mf4_tu
 // CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], i64 99, i64 [[VL]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -20,7 +20,7 @@
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vf_f16mf4_tu
 // CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], half [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], half [[OP2]], i64 99, i64 [[VL]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -30,7 +30,7 @@
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vv_f16mf2_tu
 // CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], i64 99, i64 [[VL]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -40,7 +40,7 @@
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vf_f16mf2_tu
 // CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], half [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], half [[OP2]], i64 99, i64 [[VL]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -50,7 +50,7 @@
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vv_f16m1_tu
 // CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], i64 99, i64 [[VL]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfadd_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
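The hunks in this file all repeat the same one-line update: the pre-existing policy tests keep their C source unchanged and only gain the new `i64 99` operand before `vl`, the placeholder value passed when a call site requests no explicit rounding mode. A minimal sketch of the source side being checked; hedged, since the hunk context stops at each function's opening brace, so the overloaded `__riscv_vfadd_tu` spelling and the body below are inferred from the intrinsic naming scheme rather than shown in the diff:

#include <riscv_vector.h>
#include <stddef.h>

// Tail-undisturbed vfadd without an explicit rounding mode: the same C
// source as before this patch, now expected to lower with the extra
// "i64 99" placeholder operand shown in the +/- pairs of these hunks.
vfloat16mf4_t tu_add(vfloat16mf4_t maskedoff, vfloat16mf4_t op1,
                     vfloat16mf4_t op2, size_t vl) {
  return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
}
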
@@ -60,7 +60,7 @@
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vf_f16m1_tu
 // CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], half [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], half [[OP2]], i64 99, i64 [[VL]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfadd_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -70,7 +70,7 @@
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vv_f16m2_tu
 // CHECK-RV64-SAME: (<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], i64 99, i64 [[VL]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfadd_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -80,7 +80,7 @@
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vf_f16m2_tu
 // CHECK-RV64-SAME: (<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], half [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], half [[OP2]], i64 99, i64 [[VL]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfadd_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -90,7 +90,7 @@
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vv_f16m4_tu
 // CHECK-RV64-SAME: (<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], i64 99, i64 [[VL]])
 // CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfadd_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -100,7 +100,7 @@
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vf_f16m4_tu
 // CHECK-RV64-SAME: (<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], half [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], half [[OP2]], i64 99, i64 [[VL]])
 // CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfadd_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -110,7 +110,7 @@
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vv_f16m8_tu
 // CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], i64 99, i64 [[VL]])
 // CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfadd_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -120,7 +120,7 @@
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vf_f16m8_tu
 // CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] =
call @llvm.riscv.vfadd.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { @@ -130,7 +130,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { @@ -140,7 +140,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { @@ -160,7 +160,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: define dso_local 
@test_vfadd_vf_f32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { @@ -190,7 +190,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { @@ -200,7 +200,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { @@ -210,7 +210,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -220,7 +220,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { @@ -230,7 +230,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { @@ -250,7 +250,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { @@ -260,7 +260,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { @@ -280,7 +280,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { @@ -290,7 +290,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( 
[[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -300,7 +300,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 99, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { @@ -310,7 +310,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -320,7 +320,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -340,7 +340,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], 
half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -350,7 +350,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -370,7 +370,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -380,7 +380,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -390,7 +390,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vfloat16m4_t test_vfadd_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -400,7 +400,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -410,7 +410,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { @@ -430,7 +430,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { @@ -440,7 +440,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t 
test_vfadd_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { @@ -460,7 +460,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { @@ -470,7 +470,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { @@ -480,7 +480,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { @@ -490,7 +490,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t 
op1, vfloat32m4_t op2, size_t vl) { @@ -500,7 +500,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -520,7 +520,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { @@ -530,7 +530,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { @@ -550,7 +550,7 
@@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { @@ -560,7 +560,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { @@ -570,7 +570,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { @@ -580,7 +580,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { @@ -590,7 +590,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -600,7 +600,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_tum 
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { @@ -610,7 +610,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -620,7 +620,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -630,7 +630,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -640,7 +640,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -650,7 +650,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_tumu // CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -660,7 +660,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -670,7 +670,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -680,7 +680,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -690,7 +690,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -700,7 +700,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -710,7 +710,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { @@ -730,7 +730,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { @@ -740,7 +740,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { @@ -750,7 +750,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { @@ -760,7 +760,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { @@ -770,7 +770,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { @@ -780,7 +780,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { @@ -790,7 +790,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { @@ -800,7 +800,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { @@ -810,7 +810,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -820,7 +820,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { @@ -830,7 +830,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { @@ -840,7 +840,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { @@ -850,7 +850,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { @@ -860,7 +860,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { @@ -870,7 +870,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { @@ -880,7 +880,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { @@ -890,7 +890,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -900,7 +900,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( 
[[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfadd_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
@@ -910,7 +910,7 @@
// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16mf4_t test_vfadd_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -920,7 +920,7 @@
// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16mf4_t test_vfadd_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -930,7 +930,7 @@
// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16mf2_t test_vfadd_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -940,7 +940,7 @@
// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16mf2_t test_vfadd_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -950,7 +950,7 @@
// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]],
i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m1_t test_vfadd_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -960,7 +960,7 @@
// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m1_t test_vfadd_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -970,7 +970,7 @@
// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m2_t test_vfadd_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -980,7 +980,7 @@
// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m2_t test_vfadd_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -990,7 +990,7 @@
// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m4_t test_vfadd_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -1000,7 +1000,7 @@
// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m4_t test_vfadd_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -1010,7 +1010,7 @@
// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m8_t test_vfadd_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -1020,7 +1020,7 @@
// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m8_t test_vfadd_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
@@ -1030,7 +1030,7 @@
// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32mf2_t test_vfadd_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -1040,7 +1040,7 @@
// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32mf2_t test_vfadd_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
@@ -1050,7 +1050,7 @@
// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]],
[[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vfadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
@@ -1060,7 +1060,7 @@
// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vfadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
@@ -1070,7 +1070,7 @@
// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m2_t test_vfadd_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
@@ -1080,7 +1080,7 @@
// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m2_t test_vfadd_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
@@ -1090,7 +1090,7 @@
// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m4_t test_vfadd_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
@@ -1100,7 +1100,7 @@
// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m4_t test_vfadd_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
@@ -1110,7 +1110,7 @@
// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m8_t test_vfadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
@@ -1120,7 +1120,7 @@
// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m8_t test_vfadd_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
@@ -1130,7 +1130,7 @@
// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfadd_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
@@ -1140,7 +1140,7 @@
// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfadd_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
@@ -1150,7 +1150,7 @@
// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t
test_vfadd_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
@@ -1160,7 +1160,7 @@
// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vfadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
@@ -1170,7 +1170,7 @@
// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vfadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
@@ -1180,7 +1180,7 @@
// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vfadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
@@ -1190,7 +1190,7 @@
// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
@@ -1200,10 +1200,1210 @@
// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 99, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfadd_vf_f64m8_mu(vbool8_t mask,
vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
}
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_tu_rm
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vv_f16mf4_tu_rm(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_tu_rm
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vf_f16mf4_tu_rm(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_tu_rm
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vv_f16mf2_tu_rm(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_tu_rm
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vf_f16mf2_tu_rm(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_tu_rm
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vv_f16m1_tu_rm(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_tu_rm
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vf_f16m1_tu_rm(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local
@test_vfadd_vv_f16m2_tu_rm
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vv_f16m2_tu_rm(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_tu_rm
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vf_f16m2_tu_rm(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_tu_rm
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vv_f16m4_tu_rm(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_tu_rm
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vf_f16m4_tu_rm(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_tu_rm
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vv_f16m8_tu_rm(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_tu_rm
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vf_f16m8_tu_rm(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_tu_rm
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT:
[[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vv_f32mf2_tu_rm(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_tu_rm
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vf_f32mf2_tu_rm(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_tu_rm
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vv_f32m1_tu_rm(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_tu_rm
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vf_f32m1_tu_rm(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_tu_rm
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vv_f32m2_tu_rm(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_tu_rm
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vf_f32m2_tu_rm(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_tu_rm
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m4_t
test_vfadd_vv_f32m4_tu_rm(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_tu_rm
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vf_f32m4_tu_rm(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_tu_rm
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vv_f32m8_tu_rm(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_tu_rm
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vf_f32m8_tu_rm(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_tu_rm
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vv_f64m1_tu_rm(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_tu_rm
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vf_f64m1_tu_rm(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_tu_rm
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vv_f64m2_tu_rm(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+//
CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_tu_rm
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vf_f64m2_tu_rm(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_tu_rm
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vv_f64m4_tu_rm(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_tu_rm
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vf_f64m4_tu_rm(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_tu_rm
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vv_f64m8_tu_rm(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_tu_rm
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vf_f64m8_tu_rm(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_tum_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vv_f16mf4_tum_rm(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_tum_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]],
[[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vf_f16mf4_tum_rm(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_tum_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vv_f16mf2_tum_rm(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_tum_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vf_f16mf2_tum_rm(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_tum_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vv_f16m1_tum_rm(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_tum_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vf_f16m1_tum_rm(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_tum_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vv_f16m2_tum_rm(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t
vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_tum_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vf_f16m2_tum_rm(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_tum_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vv_f16m4_tum_rm(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_tum_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vf_f16m4_tum_rm(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_tum_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vv_f16m8_tum_rm(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_tum_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vf_f16m8_tum_rm(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_tum_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0,
i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vv_f32mf2_tum_rm(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_tum_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vf_f32mf2_tum_rm(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_tum_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vv_f32m1_tum_rm(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_tum_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vf_f32m1_tum_rm(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_tum_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vv_f32m2_tum_rm(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_tum_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vf_f32m2_tum_rm(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_tum_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef
[[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vv_f32m4_tum_rm(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_tum_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vf_f32m4_tum_rm(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_tum_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vv_f32m8_tum_rm(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_tum_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vf_f32m8_tum_rm(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_tum_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vv_f64m1_tum_rm(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_tum_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vf_f64m1_tum_rm(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_tum_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vv_f64m2_tum_rm(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_tum_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vf_f64m2_tum_rm(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_tum_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vv_f64m4_tum_rm(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_tum_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vf_f64m4_tum_rm(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_tum_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vv_f64m8_tum_rm(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_tum_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vf_f64m8_tum_rm(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_tumu_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vv_f16mf4_tumu_rm(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_tumu_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vf_f16mf4_tumu_rm(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_tumu_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vv_f16mf2_tumu_rm(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_tumu_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vf_f16mf2_tumu_rm(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_tumu_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vv_f16m1_tumu_rm(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_tumu_rm
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+//
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vf_f16m1_tumu_rm(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vv_f16m2_tumu_rm(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vf_f16m2_tumu_rm(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vv_f16m4_tumu_rm(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vf_f16m4_tumu_rm(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vv_f16m8_tumu_rm(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vf_f16m8_tumu_rm(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_tumu_rm(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_tumu_rm(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vv_f32m1_tumu_rm(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vf_f32m1_tumu_rm(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat32m2_t test_vfadd_vv_f32m2_tumu_rm(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vf_f32m2_tumu_rm(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vv_f32m4_tumu_rm(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vf_f32m4_tumu_rm(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vv_f32m8_tumu_rm(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vf_f32m8_tumu_rm(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1_tumu_rm(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vf_f64m1_tumu_rm(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2_tumu_rm(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vf_f64m2_tumu_rm(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vv_f64m4_tumu_rm(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vf_f64m4_tumu_rm(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vv_f64m8_tumu_rm(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_tumu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vf_f64m8_tumu_rm(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vv_f16mf4_mu_rm(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vf_f16mf4_mu_rm(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vv_f16mf2_mu_rm(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t 
test_vfadd_vf_f16mf2_mu_rm(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vv_f16m1_mu_rm(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vf_f16m1_mu_rm(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vv_f16m2_mu_rm(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vf_f16m2_mu_rm(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vv_f16m4_mu_rm(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vf_f16m4_mu_rm(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vv_f16m8_mu_rm(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vf_f16m8_mu_rm(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_mu_rm(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_mu_rm(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vv_f32m1_mu_rm(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_mu_rm +// CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vf_f32m1_mu_rm(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vv_f32m2_mu_rm(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vf_f32m2_mu_rm(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vv_f32m4_mu_rm(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vf_f32m4_mu_rm(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vv_f32m8_mu_rm(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return 
__riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vf_f32m8_mu_rm(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1_mu_rm(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vf_f64m1_mu_rm(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2_mu_rm(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vf_f64m2_mu_rm(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vv_f64m4_mu_rm(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vf_f64m4_mu_rm(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vv_f64m8_mu_rm(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_mu_rm +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vf_f64m8_mu_rm(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp --- a/clang/utils/TableGen/RISCVVEmitter.cpp +++ b/clang/utils/TableGen/RISCVVEmitter.cpp @@ -65,6 +65,7 @@ bool HasMaskedOffOperand :1; bool HasTailPolicy : 1; bool HasMaskPolicy : 1; + bool HasFRMRoundModeOp : 1; bool IsTuple : 1; uint8_t UnMaskedPolicyScheme : 2; uint8_t MaskedPolicyScheme : 2; @@ -530,6 +531,7 @@ StringRef MaskedIRName = R->getValueAsString("MaskedIRName"); unsigned NF = R->getValueAsInt("NF"); bool IsTuple = R->getValueAsBit("IsTuple"); + bool HasFRMRoundModeOp = R->getValueAsBit("HasFRMRoundModeOp"); const Policy DefaultPolicy; SmallVector SupportedUnMaskedPolicies = @@ -577,7 +579,7 @@ /*IsMasked=*/false, /*HasMaskedOffOperand=*/false, HasVL, UnMaskedPolicyScheme, SupportOverloading, HasBuiltinAlias, ManualCodegen, *Types, IntrinsicTypes, RequiredFeatures, NF, - DefaultPolicy)); + DefaultPolicy, HasFRMRoundModeOp)); if (UnMaskedPolicyScheme != PolicyScheme::SchemeNone) for (auto P : SupportedUnMaskedPolicies) { SmallVector PolicyPrototype = @@ -592,7 +594,7 @@ /*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL, UnMaskedPolicyScheme, SupportOverloading, HasBuiltinAlias, ManualCodegen, *PolicyTypes, IntrinsicTypes, RequiredFeatures, - NF, P)); + NF, P, HasFRMRoundModeOp)); } if (!HasMasked) continue; @@ -603,7 +605,8 @@ Name, SuffixStr, OverloadedName, OverloadedSuffixStr, MaskedIRName, /*IsMasked=*/true, 
HasMaskedOffOperand, HasVL, MaskedPolicyScheme,
            SupportOverloading, HasBuiltinAlias, ManualCodegen, *MaskTypes,
-           IntrinsicTypes, RequiredFeatures, NF, DefaultPolicy));
+           IntrinsicTypes, RequiredFeatures, NF, DefaultPolicy,
+           HasFRMRoundModeOp));
 if (MaskedPolicyScheme == PolicyScheme::SchemeNone)
   continue;
 for (auto P : SupportedMaskedPolicies) {
@@ -618,7 +621,7 @@
            MaskedIRName, /*IsMasked=*/true, HasMaskedOffOperand, HasVL,
            MaskedPolicyScheme, SupportOverloading, HasBuiltinAlias,
            ManualCodegen, *PolicyTypes, IntrinsicTypes, RequiredFeatures, NF,
-           P));
+           P, HasFRMRoundModeOp));
 }
 } // End for Log2LMULList
 } // End for TypeRange
@@ -671,6 +674,7 @@
   SR.Suffix = parsePrototypes(SuffixProto);
   SR.OverloadedSuffix = parsePrototypes(OverloadedSuffixProto);
   SR.IsTuple = IsTuple;
+  SR.HasFRMRoundModeOp = HasFRMRoundModeOp;

   SemaRecords->push_back(SR);
 }
@@ -713,6 +717,7 @@
   R.UnMaskedPolicyScheme = SR.UnMaskedPolicyScheme;
   R.MaskedPolicyScheme = SR.MaskedPolicyScheme;
   R.IsTuple = SR.IsTuple;
+  R.HasFRMRoundModeOp = SR.HasFRMRoundModeOp;

   assert(R.PrototypeIndex !=
          static_cast<unsigned>(SemaSignatureTable::INVALID_INDEX));
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -420,6 +420,27 @@
     let ScalarOperand = 2;
     let VLOperand = 4;
   }
+  // For destination vector type is the same as first source vector.
+  // Input: (passthru, vector_in, vector_in/scalar_in, frm, vl)
+  class RISCVBinaryAAXUnMaskedRoundingMode
+        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+                                [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
+                                 llvm_anyint_ty, LLVMMatchType<2>],
+                                [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
+    let ScalarOperand = 2;
+    let VLOperand = 4;
+  }
+  // For destination vector type is the same as first source vector (with mask).
+  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
+  class RISCVBinaryAAXMaskedRoundingMode
+        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+                                [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
+                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
+                                 LLVMMatchType<2>, LLVMMatchType<2>],
+                                [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, RISCVVIntrinsic {
+    let ScalarOperand = 2;
+    let VLOperand = 5;
+  }
   // For destination vector type is the same as first source vector. The
   // second source operand must match the destination type or be an XLen scalar.
   // Input: (passthru, vector_in, vector_in/scalar_in, vl)
@@ -1088,6 +1109,10 @@
     def "int_riscv_" # NAME : RISCVBinaryAAXUnMasked;
     def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMasked;
   }
+  multiclass RISCVBinaryAAXRoundingMode {
+    def "int_riscv_" # NAME : RISCVBinaryAAXUnMaskedRoundingMode;
+    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMaskedRoundingMode;
+  }
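+  // Illustrative only (not generated by this patch): with the classes above,
+  // an unmasked vfadd call in IR carries the rounding mode as an immediate
+  // directly before vl, e.g.
+  //   %r = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64(
+  //              <vscale x 1 x half> poison, <vscale x 1 x half> %a,
+  //              <vscale x 1 x half> %b, i64 0, i64 %vl)
+  // where frm immediate 0 selects RNE; the compiler internally uses 99 to
+  // mean "leave frm unchanged" (dynamic rounding mode).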
  // Like RISCVBinaryAAX, but the second operand is used as a shift amount so
  // it must be a vector or an XLen scalar.
  multiclass RISCVBinaryAAShift {
@@ -1296,7 +1321,7 @@
   defm vwmaccus : RISCVTernaryWide;
   defm vwmaccsu : RISCVTernaryWide;

-  defm vfadd : RISCVBinaryAAX;
+  defm vfadd : RISCVBinaryAAXRoundingMode;
   defm vfsub : RISCVBinaryAAX;
   defm vfrsub : RISCVBinaryAAX;
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
@@ -110,6 +110,9 @@
   HasRoundModeOpShift = IsSignExtendingOpWShift + 1,
   HasRoundModeOpMask = 1 << HasRoundModeOpShift,
+
+  IsRVVFixedPointShift = HasRoundModeOpShift + 1,
+  IsRVVFixedPointMask = 1 << IsRVVFixedPointShift,
 };

 enum VLMUL : uint8_t {
@@ -172,6 +175,11 @@
   return TSFlags & HasRoundModeOpMask;
 }

+/// \returns true if this instruction is a RISC-V Vector fixed-point
+/// instruction.
+static inline bool isRVVFixedPoint(uint64_t TSFlags) {
+  return TSFlags & IsRVVFixedPointMask;
+}
+
 static inline unsigned getVLOpNum(const MCInstrDesc &Desc) {
   const uint64_t TSFlags = Desc.TSFlags;
   // This method is only called if we expect to have a VL operand, and all
diff --git a/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp b/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp
--- a/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp
@@ -13,6 +13,8 @@
 //
 //===----------------------------------------------------------------------===//

+#include "MCTargetDesc/RISCVBaseInfo.h"
+#include "MCTargetDesc/RISCVMCTargetDesc.h"
 #include "RISCV.h"
 #include "RISCVSubtarget.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
@@ -45,7 +47,7 @@
   }

 private:
-  bool emitWriteVXRM(MachineBasicBlock &MBB);
+  bool emitWriteRoundingMode(MachineBasicBlock &MBB);
   std::optional<unsigned> getRoundModeIdx(const MachineInstr &MI);
 };

@@ -74,21 +76,37 @@
-// This function inserts a write to vxrm when encountering an RVV fixed-point
-// instruction.
-bool RISCVInsertReadWriteCSR::emitWriteVXRM(MachineBasicBlock &MBB) {
+// This function inserts a write to vxrm or frm when encountering an RVV
+// fixed-point or floating-point instruction that carries a static rounding
+// mode.
+bool RISCVInsertReadWriteCSR::emitWriteRoundingMode(MachineBasicBlock &MBB) {
   bool Changed = false;
   for (MachineInstr &MI : MBB) {
     if (auto RoundModeIdx = getRoundModeIdx(MI)) {
-      unsigned VXRMImm = MI.getOperand(*RoundModeIdx).getImm();
+      if (RISCVII::isRVVFixedPoint(MI.getDesc().TSFlags)) {
+        unsigned VXRMImm = MI.getOperand(*RoundModeIdx).getImm();

-      // The value '99' is a hint to this pass to not alter the vxrm value.
-      if (VXRMImm == 99)
-        continue;
+        // The value '99' is a hint to this pass to not alter the vxrm value.
+        if (VXRMImm == 99)
+          continue;

-      Changed = true;
-      BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(RISCV::WriteVXRMImm))
-          .addImm(VXRMImm);
-      MI.addOperand(MachineOperand::CreateReg(RISCV::VXRM, /*IsDef*/ false,
-                                              /*IsImp*/ true));
+        Changed = true;
+
+        BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(RISCV::WriteVXRMImm))
+            .addImm(VXRMImm);
+        MI.addOperand(MachineOperand::CreateReg(RISCV::VXRM, /*IsDef*/ false,
+                                                /*IsImp*/ true));
+      } else { // FRM
+        unsigned FRMImm = MI.getOperand(*RoundModeIdx).getImm();
+
+        // The value '99' is a hint to this pass to not alter the frm value.
+        if (FRMImm == 99)
+          continue;
+
+        Changed = true;
+
+        BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(RISCV::WriteFRMImm))
+            .addImm(FRMImm);
+        MI.addOperand(MachineOperand::CreateReg(RISCV::FRM, /*IsDef*/ false,
+                                                /*IsImp*/ true));
+      }
     }
   }
   return Changed;
 }
@@ -105,7 +123,7 @@
   bool Changed = false;

   for (MachineBasicBlock &MBB : MF)
-    Changed |= emitWriteVXRM(MBB);
+    Changed |= emitWriteRoundingMode(MBB);

   return Changed;
 }
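For intuition, the net effect on emitted code: when a builtin passes a static
rounding mode, the pass above materializes it with an fsrmi right before the
vector instruction, roughly as follows (sketch; register choices illustrative,
and note this patch does not save/restore the previous frm value):

    vsetvli  zero, a0, e32, m1, ta, ma
    fsrmi    1                         # frm = RTZ (__RISCV_FRM_RTZ)
    vfadd.vv v8, v8, v9

With the internal sentinel 99 no fsrmi is emitted and the instruction rounds
according to whatever frm currently holds.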
+ if (FRMImm == 99) + continue; + + Changed = true; + + BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(RISCV::WriteFRMImm)) + .addImm(FRMImm); + MI.addOperand(MachineOperand::CreateReg(RISCV::FRM, /*IsDef*/ false, + /*IsImp*/ true)); + } } } return Changed; @@ -105,7 +123,7 @@ bool Changed = false; for (MachineBasicBlock &MBB : MF) - Changed |= emitWriteVXRM(MBB); + Changed |= emitWriteRoundingMode(MBB); return Changed; } diff --git a/llvm/lib/Target/RISCV/RISCVInstrFormats.td b/llvm/lib/Target/RISCV/RISCVInstrFormats.td --- a/llvm/lib/Target/RISCV/RISCVInstrFormats.td +++ b/llvm/lib/Target/RISCV/RISCVInstrFormats.td @@ -217,6 +217,14 @@ bit HasRoundModeOp = 0; let TSFlags{19} = HasRoundModeOp; + + // This is only valid when HasRoundModeOp is set to 1. HasRoundModeOp is set + // to 1 for vector fixed-point or floating-point intrinsics. This bit is + // processed under pass 'RISCVInsertReadWriteCSR' pass to distinguish between + // fixed-point / floating-point instructions and emit appropriate read/write + // to the correct CSR. + bit IsRVVFixedPoint = 0; + let TSFlags{20} = IsRVVFixedPoint; } // Pseudo instructions diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -1113,7 +1113,8 @@ class VPseudoBinaryNoMaskRoundingMode : + string Constraint, + int RVVFixedPoint = 1> : Pseudo<(outs RetClass:$rd), (ins Op1Class:$rs2, Op2Class:$rs1, ixlenimm:$rm, AVL:$vl, ixlenimm:$sew), []>, RISCVVPseudo { @@ -1123,12 +1124,14 @@ let HasVLOp = 1; let HasSEWOp = 1; let HasRoundModeOp = 1; + let IsRVVFixedPoint = RVVFixedPoint; } class VPseudoBinaryNoMaskTURoundingMode : + string Constraint, + int RVVFixedPoint> : Pseudo<(outs RetClass:$rd), (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, ixlenimm:$rm, AVL:$vl, ixlenimm:$sew), []>, @@ -1139,12 +1142,14 @@ let HasVLOp = 1; let HasSEWOp = 1; let HasRoundModeOp = 1; + let IsRVVFixedPoint = RVVFixedPoint; } class VPseudoBinaryMaskPolicyRoundingMode : + string Constraint, + int RVVFixedPoint> : Pseudo<(outs GetVRegNoV0.R:$rd), (ins GetVRegNoV0.R:$merge, Op1Class:$rs2, Op2Class:$rs1, @@ -1159,6 +1164,7 @@ let HasVecPolicyOp = 1; let UsesMaskPolicy = 1; let HasRoundModeOp = 1; + let IsRVVFixedPoint = RVVFixedPoint; } // Special version of VPseudoBinaryNoMask where we pretend the first source is @@ -1944,16 +1950,18 @@ VReg Op1Class, DAGOperand Op2Class, LMULInfo MInfo, - string Constraint = ""> { + string Constraint = "", + int IsRVVFixedPoint = 1> { let VLMul = MInfo.value in { def "_" # MInfo.MX : - VPseudoBinaryNoMaskRoundingMode; + VPseudoBinaryNoMaskRoundingMode; def "_" # MInfo.MX # "_TU" : VPseudoBinaryNoMaskTURoundingMode; + Constraint, IsRVVFixedPoint>; def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicyRoundingMode, + Constraint, IsRVVFixedPoint>, RISCVMaskedPseudo; } } @@ -2017,6 +2025,11 @@ defm _VV : VPseudoBinary; } +multiclass VPseudoBinaryFV_VV_RM { + defm _VV : VPseudoBinaryRoundingMode; +} + multiclass VPseudoVGTR_VV_EEW { foreach m = MxList in { defvar mx = m.MX; @@ -2065,6 +2078,12 @@ f.fprclass, m, Constraint, sew>; } +multiclass VPseudoBinaryV_VF_RM { + defm "_V" # f.FX : VPseudoBinaryRoundingMode; +} + multiclass VPseudoVSLD1_VF { foreach f = FPList in { foreach m = f.MxList in { @@ -2799,6 +2818,28 @@ } } +multiclass VPseudoVALU_VV_VF_RM { + foreach m = MxListF in { + defvar mx = m.MX; + defvar WriteVFALUV_MX = !cast("WriteVFALUV_" # mx); + defvar ReadVFALUV_MX = 
!cast("ReadVFALUV_" # mx); + + defm "" : VPseudoBinaryFV_VV_RM, + Sched<[WriteVFALUV_MX, ReadVFALUV_MX, ReadVFALUV_MX, ReadVMask]>; + } + + foreach f = FPList in { + foreach m = f.MxList in { + defvar mx = m.MX; + defvar WriteVFALUF_MX = !cast("WriteVFALUF_" # mx); + defvar ReadVFALUV_MX = !cast("ReadVFALUV_" # mx); + defvar ReadVFALUF_MX = !cast("ReadVFALUF_" # mx); + defm "" : VPseudoBinaryV_VF_RM, + Sched<[WriteVFALUF_MX, ReadVFALUV_MX, ReadVFALUF_MX, ReadVMask]>; + } + } +} + multiclass VPseudoVALU_VF { foreach f = FPList in { foreach m = f.MxList in { @@ -5911,8 +5952,11 @@ //===----------------------------------------------------------------------===// // 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions //===----------------------------------------------------------------------===// +let mayRaiseFPException = true in { +defm PseudoVFADD : VPseudoVALU_VV_VF_RM; +} + let Uses = [FRM], mayRaiseFPException = true in { -defm PseudoVFADD : VPseudoVALU_VV_VF; defm PseudoVFSUB : VPseudoVALU_VV_VF; defm PseudoVFRSUB : VPseudoVALU_VF; } @@ -6585,7 +6629,8 @@ //===----------------------------------------------------------------------===// // 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions //===----------------------------------------------------------------------===// -defm : VPatBinaryV_VV_VX<"int_riscv_vfadd", "PseudoVFADD", AllFloatVectors>; +defm : VPatBinaryV_VV_VX_RM<"int_riscv_vfadd", "PseudoVFADD", + AllFloatVectors>; defm : VPatBinaryV_VV_VX<"int_riscv_vfsub", "PseudoVFSUB", AllFloatVectors>; defm : VPatBinaryV_VX<"int_riscv_vfrsub", "PseudoVFRSUB", AllFloatVectors>; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td @@ -243,6 +243,30 @@ (xop_type xop_kind:$rs2), avl, log2sew)>; +class VPatBinarySDNode_VF_RM : + Pat<(result_type (vop (vop_type vop_reg_class:$rs1), + (vop_type (SplatFPOp xop_kind:$rs2)))), + (!cast( + !if(isSEWAware, + instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew), + instruction_name#"_"#vlmul.MX)) + vop_reg_class:$rs1, + (xop_type xop_kind:$rs2), + // Value to indicate no rounding mode change in + // RISCVInertReadWriteCSR + (XLenVT 99), + avl, log2sew)>; + multiclass VPatBinaryFPSDNode_VV_VF { foreach vti = AllFloatVectors in { @@ -258,6 +282,21 @@ } } +multiclass VPatBinaryFPSDNode_VV_VF_RM { + foreach vti = AllFloatVectors in { + let Predicates = GetVTypePredicates.Predicates in { + def : VPatBinarySDNode_VV_RM; + def : VPatBinarySDNode_VF_RM; + } + } +} + multiclass VPatBinaryFPSDNode_R_VF { foreach fvti = AllFloatVectors in @@ -1000,7 +1039,7 @@ // 13. Vector Floating-Point Instructions // 13.2. 
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -243,6 +243,30 @@
                      (xop_type xop_kind:$rs2),
                      avl, log2sew)>;

+class VPatBinarySDNode_VF_RM :
+    Pat<(result_type (vop (vop_type vop_reg_class:$rs1),
+                          (vop_type (SplatFPOp xop_kind:$rs2)))),
+        (!cast<Instruction>(
+             !if(isSEWAware,
+                 instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
+                 instruction_name#"_"#vlmul.MX))
+             vop_reg_class:$rs1,
+             (xop_type xop_kind:$rs2),
+             // Value to indicate no rounding mode change in
+             // RISCVInsertReadWriteCSR
+             (XLenVT 99),
+             avl, log2sew)>;
+
 multiclass VPatBinaryFPSDNode_VV_VF {
   foreach vti = AllFloatVectors in {
@@ -258,6 +282,21 @@
   }
 }

+multiclass VPatBinaryFPSDNode_VV_VF_RM {
+  foreach vti = AllFloatVectors in {
+    let Predicates = GetVTypePredicates<vti>.Predicates in {
+      def : VPatBinarySDNode_VV_RM;
+      def : VPatBinarySDNode_VF_RM;
+    }
+  }
+}
+
 multiclass VPatBinaryFPSDNode_R_VF {
   foreach fvti = AllFloatVectors in
@@ -1000,7 +1039,7 @@
 // 13. Vector Floating-Point Instructions

 // 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
-defm : VPatBinaryFPSDNode_VV_VF;
+defm : VPatBinaryFPSDNode_VV_VF_RM;
 defm : VPatBinaryFPSDNode_VV_VF;
 defm : VPatBinaryFPSDNode_R_VF;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -870,6 +870,36 @@
                      scalar_reg_class:$rs2,
                      (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;

+class VPatBinaryVL_VF_RM
+    : Pat<(result_type (vop (vop1_type vop_reg_class:$rs1),
+                            (vop2_type (SplatFPOp scalar_reg_class:$rs2)),
+                            (result_type result_reg_class:$merge),
+                            (mask_type V0),
+                            VLOpFrag)),
+          (!cast<Instruction>(
+               !if(isSEWAware,
+                   instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
+                   instruction_name#"_"#vlmul.MX#"_MASK"))
+               result_reg_class:$merge,
+               vop_reg_class:$rs1,
+               scalar_reg_class:$rs2,
+               (mask_type V0),
+               // Value to indicate no rounding mode change in
+               // RISCVInsertReadWriteCSR
+               (XLenVT 99),
+               GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
+
 multiclass VPatBinaryFPVL_VV_VF {
   foreach vti = AllFloatVectors in {
@@ -886,6 +916,22 @@
   }
 }

+multiclass VPatBinaryFPVL_VV_VF_RM {
+  foreach vti = AllFloatVectors in {
+    let Predicates = GetVTypePredicates<vti>.Predicates in {
+      def : VPatBinaryVL_V_RM;
+      def : VPatBinaryVL_VF_RM;
+    }
+  }
+}
+
 multiclass VPatBinaryFPVL_R_VF {
   foreach fvti = AllFloatVectors in {
@@ -1920,7 +1966,7 @@
 // 13. Vector Floating-Point Instructions

 // 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
-defm : VPatBinaryFPVL_VV_VF;
+defm : VPatBinaryFPVL_VV_VF_RM;
 defm : VPatBinaryFPVL_VV_VF;
 defm : VPatBinaryFPVL_R_VF;
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd.ll
@@ -11,12 +11,13 @@
   ,
   ,
   ,
-  iXLen);
+  iXLen, iXLen);

 define @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    fsrmi 0
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -24,7 +25,7 @@
     undef,
     %0,
     %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)

   ret %a
 }
@@ -34,13 +35,13 @@
   ,
   ,
   ,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);

 define @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    fsrmi 0
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -49,7 +50,7 @@
     %1,
     %2,
     %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)

   ret %a
 }
@@ -58,12 +59,13 @@
   ,
   ,
   ,
-  iXLen);
+  iXLen, iXLen);

 define @intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    fsrmi 0
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -71,7 +73,7 @@
     undef,
     %0,
     %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)

   ret %a
 }
@@ -81,13 +83,13 @@
   ,
   ,
   ,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);

 define @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    fsrmi 0
 ; CHECK-NEXT:
vfadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -96,7 +98,7 @@
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -105,12 +107,13 @@
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x half> @intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vv v8, v8, v9
 ; CHECK-NEXT: ret
 entry:
@@ -118,7 +121,7 @@
     <vscale x 4 x half> undef,
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -128,13 +131,13 @@
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 4 x half> @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -143,7 +146,7 @@
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -152,12 +155,13 @@
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x half> @intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vv v8, v8, v10
 ; CHECK-NEXT: ret
 entry:
@@ -165,7 +169,7 @@
     <vscale x 8 x half> undef,
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -175,13 +179,13 @@
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 8 x half> @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vv v8, v10, v12, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -190,7 +194,7 @@
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -199,12 +203,13 @@
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x half>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x half> @intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vv v8, v8, v12
 ; CHECK-NEXT: ret
 entry:
@@ -212,7 +217,7 @@
     <vscale x 16 x half> undef,
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -222,13 +227,13 @@
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 16 x half> @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vv v8, v12, v16, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -237,7 +242,7 @@
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -246,12 +251,13 @@
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   <vscale x 32 x half>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 32 x half> @intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vv v8, v8, v16
 ; CHECK-NEXT: ret
 entry:
@@ -259,7 +265,7 @@
     <vscale x 32 x half> undef,
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -269,14 +275,14 @@
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   <vscale x 32 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 32 x half> @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -285,7 +291,7 @@
     <vscale x 32 x half> %1,
     <vscale x 32 x half> %2,
     <vscale x 32 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -294,12 +300,13 @@
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x float>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x float> @intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vv v8, v8, v9
 ; CHECK-NEXT: ret
 entry:
@@ -307,7 +314,7 @@
     <vscale x 1 x float> undef,
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -317,13 +324,13 @@
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 1 x float> @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -332,7 +339,7 @@
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -341,12 +348,13 @@
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x float> @intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vv v8, v8, v9
 ; CHECK-NEXT: ret
 entry:
@@ -354,7 +362,7 @@
     <vscale x 2 x float> undef,
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -364,13 +372,13 @@
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 2 x float> @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -379,7 +387,7 @@
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -388,12 +396,13 @@
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x float>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x float> @intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vv v8, v8, v10
 ; CHECK-NEXT: ret
 entry:
@@ -401,7 +410,7 @@
     <vscale x 4 x float> undef,
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -411,13 +420,13 @@
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 4 x float> @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vv v8, v10, v12, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -426,7 +435,7 @@
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -435,12 +444,13 @@
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x float>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x float> @intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vv v8, v8, v12
 ; CHECK-NEXT: ret
 entry:
@@ -448,7 +458,7 @@
     <vscale x 8 x float> undef,
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -458,13 +468,13 @@
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 8 x float> @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vv v8, v12, v16, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -473,7 +483,7 @@
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -482,12 +492,13 @@
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x float>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x float> @intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vv v8, v8, v16
 ; CHECK-NEXT: ret
 entry:
@@ -495,7 +506,7 @@
     <vscale x 16 x float> undef,
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -505,14 +516,14 @@
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 16 x float> @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -521,7 +532,7 @@
     <vscale x 16 x float> %1,
     <vscale x 16 x float> %2,
     <vscale x 16 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -530,12 +541,13 @@
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x double>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x double> @intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vv v8, v8, v9
 ; CHECK-NEXT: ret
 entry:
@@ -543,7 +555,7 @@
     <vscale x 1 x double> undef,
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -553,13 +565,13 @@
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 1 x double> @intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -568,7 +580,7 @@
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -577,12 +589,13 @@
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x double>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x double> @intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vv v8, v8, v10
 ; CHECK-NEXT: ret
 entry:
@@ -590,7 +603,7 @@
     <vscale x 2 x double> undef,
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -600,13 +613,13 @@
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 2 x double> @intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vv v8, v10, v12, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -615,7 +628,7 @@
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -624,12 +637,13 @@
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x double>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x double> @intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vv v8, v8, v12
 ; CHECK-NEXT: ret
 entry:
@@ -637,7 +651,7 @@
     <vscale x 4 x double> undef,
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -647,13 +661,13 @@
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 4 x double> @intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vv v8, v12, v16, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -662,7 +676,7 @@
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -671,12 +685,13 @@
   <vscale x 8 x double>,
   <vscale x 8 x double>,
   <vscale x 8 x double>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x double> @intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vv v8, v8, v16
 ; CHECK-NEXT: ret
 entry:
@@ -684,7 +699,7 @@
     <vscale x 8 x double> undef,
     <vscale x 8 x double> %0,
     <vscale x 8 x double> %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -694,14 +709,14 @@
   <vscale x 8 x double>,
   <vscale x 8 x double>,
   <vscale x 8 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 8 x double> @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -710,7 +725,7 @@
     <vscale x 8 x double> %1,
     <vscale x 8 x double> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
@@ -719,12 +734,13 @@
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   half,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0
 ; CHECK-NEXT: ret
 entry:
@@ -732,7 +748,7 @@
     <vscale x 1 x half> undef,
     <vscale x 1 x half> %0,
     half %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -742,13 +758,13 @@
   <vscale x 1 x half>,
   half,
   <vscale x 1 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -757,7 +773,7 @@
     <vscale x 1 x half> %1,
     half %2,
     <vscale x 1 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -766,12 +782,13 @@
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   half,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x half> @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0
 ; CHECK-NEXT: ret
 entry:
@@ -779,7 +796,7 @@
     <vscale x 2 x half> undef,
     <vscale x 2 x half> %0,
     half %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -789,13 +806,13 @@
   <vscale x 2 x half>,
   half,
   <vscale x 2 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 2 x half> @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -804,7 +821,7 @@
     <vscale x 2 x half> %1,
     half %2,
     <vscale x 2 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -813,12 +830,13 @@
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   half,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x half> @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0
 ; CHECK-NEXT: ret
 entry:
@@ -826,7 +844,7 @@
     <vscale x 4 x half> undef,
     <vscale x 4 x half> %0,
     half %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -836,13 +854,13 @@
   <vscale x 4 x half>,
   half,
   <vscale x 4 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 4 x half> @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -851,7 +869,7 @@
     <vscale x 4 x half> %1,
     half %2,
     <vscale x 4 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -860,12 +878,13 @@
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   half,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x half> @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0
 ; CHECK-NEXT: ret
 entry:
@@ -873,7 +892,7 @@
     <vscale x 8 x half> undef,
     <vscale x 8 x half> %0,
     half %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -883,13 +902,13 @@
   <vscale x 8 x half>,
   half,
   <vscale x 8 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 8 x half> @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vf v8, v10, fa0, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -898,7 +917,7 @@
     <vscale x 8 x half> %1,
     half %2,
     <vscale x 8 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -907,12 +926,13 @@
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   half,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x half> @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0
 ; CHECK-NEXT: ret
 entry:
@@ -920,7 +940,7 @@
     <vscale x 16 x half> undef,
     <vscale x 16 x half> %0,
     half %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -930,13 +950,13 @@
   <vscale x 16 x half>,
   half,
   <vscale x 16 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 16 x half> @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vf v8, v12, fa0, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -945,7 +965,7 @@
     <vscale x 16 x half> %1,
     half %2,
     <vscale x 16 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -954,12 +974,13 @@
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   half,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 32 x half> @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0
 ; CHECK-NEXT: ret
 entry:
@@ -967,7 +988,7 @@
     <vscale x 32 x half> undef,
     <vscale x 32 x half> %0,
     half %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -977,13 +998,13 @@
   <vscale x 32 x half>,
   half,
   <vscale x 32 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 32 x half> @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vf v8, v16, fa0, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -992,7 +1013,7 @@
     <vscale x 32 x half> %1,
     half %2,
     <vscale x 32 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -1001,12 +1022,13 @@
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   float,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x float> @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0
 ; CHECK-NEXT: ret
 entry:
@@ -1014,7 +1036,7 @@
     <vscale x 1 x float> undef,
     <vscale x 1 x float> %0,
     float %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -1024,13 +1046,13 @@
   <vscale x 1 x float>,
   float,
   <vscale x 1 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 1 x float> @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -1039,7 +1061,7 @@
     <vscale x 1 x float> %1,
     float %2,
     <vscale x 1 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -1048,12 +1070,13 @@
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   float,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x float> @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0
 ; CHECK-NEXT: ret
 entry:
@@ -1061,7 +1084,7 @@
     <vscale x 2 x float> undef,
     <vscale x 2 x float> %0,
     float %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -1071,13 +1094,13 @@
   <vscale x 2 x float>,
   float,
   <vscale x 2 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 2 x float> @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -1086,7 +1109,7 @@
     <vscale x 2 x float> %1,
     float %2,
     <vscale x 2 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -1095,12 +1118,13 @@
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   float,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x float> @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0
 ; CHECK-NEXT: ret
 entry:
@@ -1108,7 +1132,7 @@
     <vscale x 4 x float> undef,
     <vscale x 4 x float> %0,
     float %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -1118,13 +1142,13 @@
   <vscale x 4 x float>,
   float,
   <vscale x 4 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 4 x float> @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vf v8, v10, fa0, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -1133,7 +1157,7 @@
     <vscale x 4 x float> %1,
     float %2,
     <vscale x 4 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -1142,12 +1166,13 @@
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   float,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x float> @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0
 ; CHECK-NEXT: ret
 entry:
@@ -1155,7 +1180,7 @@
     <vscale x 8 x float> undef,
     <vscale x 8 x float> %0,
     float %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -1165,13 +1190,13 @@
   <vscale x 8 x float>,
   float,
   <vscale x 8 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 8 x float> @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vf v8, v12, fa0, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -1180,7 +1205,7 @@
     <vscale x 8 x float> %1,
     float %2,
     <vscale x 8 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -1189,12 +1214,13 @@
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   float,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x float> @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0
 ; CHECK-NEXT: ret
 entry:
@@ -1202,7 +1228,7 @@
     <vscale x 16 x float> undef,
     <vscale x 16 x float> %0,
     float %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -1212,13 +1238,13 @@
   <vscale x 16 x float>,
   float,
   <vscale x 16 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 16 x float> @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vf v8, v16, fa0, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -1227,7 +1253,7 @@
     <vscale x 16 x float> %1,
     float %2,
     <vscale x 16 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -1236,12 +1262,13 @@
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   double,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x double> @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0
 ; CHECK-NEXT: ret
 entry:
@@ -1249,7 +1276,7 @@
     <vscale x 1 x double> undef,
     <vscale x 1 x double> %0,
     double %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -1259,13 +1286,13 @@
   <vscale x 1 x double>,
   double,
   <vscale x 1 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 1 x double> @intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -1274,7 +1301,7 @@
     <vscale x 1 x double> %1,
     double %2,
     <vscale x 1 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -1283,12 +1310,13 @@
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   double,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x double> @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0
 ; CHECK-NEXT: ret
 entry:
@@ -1296,7 +1324,7 @@
     <vscale x 2 x double> undef,
     <vscale x 2 x double> %0,
     double %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -1306,13 +1334,13 @@
   <vscale x 2 x double>,
   double,
   <vscale x 2 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 2 x double> @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vf v8, v10, fa0, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -1321,7 +1349,7 @@
     <vscale x 2 x double> %1,
     double %2,
     <vscale x 2 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -1330,12 +1358,13 @@
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   double,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x double> @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0
 ; CHECK-NEXT: ret
 entry:
@@ -1343,7 +1372,7 @@
     <vscale x 4 x double> undef,
     <vscale x 4 x double> %0,
     double %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -1353,13 +1382,13 @@
   <vscale x 4 x double>,
   double,
   <vscale x 4 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 4 x double> @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vf v8, v12, fa0, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -1368,7 +1397,7 @@
     <vscale x 4 x double> %1,
     double %2,
     <vscale x 4 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -1377,12 +1406,13 @@
   <vscale x 8 x double>,
   <vscale x 8 x double>,
   double,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x double> @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0
 ; CHECK-NEXT: ret
 entry:
@@ -1390,7 +1420,7 @@
     <vscale x 8 x double> undef,
     <vscale x 8 x double> %0,
     double %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -1400,13 +1430,13 @@
   <vscale x 8 x double>,
   double,
   <vscale x 8 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 8 x double> @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: fsrmi 0
 ; CHECK-NEXT: vfadd.vf v8, v16, fa0, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -1415,7 +1445,7 @@
     <vscale x 8 x double> %1,
     double %2,
     <vscale x 8 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
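What these test updates encode: every `llvm.riscv.vfadd.*` call now carries a rounding-mode operand ahead of `vl` (here the constant `iXLen 0`, i.e. round-to-nearest-even), and codegen materializes the static mode with an `fsrmi` before the `vfadd`. For reference, a minimal C-level sketch of how this operand is expected to be reached from source; the `__riscv_vfadd_vv_f32m1_rm` spelling and the `<riscv_vector.h>` header are assumptions about the intrinsic surface this series ultimately exposes, not part of this patch:

#include <riscv_vector.h>

// Hypothetical usage sketch: add two f32m1 vectors with a statically
// selected rounding mode. __RISCV_FRM_RNE (0) matches the "iXLen 0"
// frm operand asserted by the CHECK lines above.
vfloat32m1_t add_rne(vfloat32m1_t a, vfloat32m1_t b, size_t vl) {
  return __riscv_vfadd_vv_f32m1_rm(a, b, __RISCV_FRM_RNE, vl);
}

Lowered through the intrinsic form shown in these tests, such a call should produce the same `fsrmi`/`vfadd.vv` pairing that the updated CHECK lines verify.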