diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -173,6 +173,13 @@
   // This builtin has a granted vector length parameter in the last position.
   bit HasVL = true;
 
+  // Normally, a masked intrinsic takes a tail policy argument and an
+  // unmasked one does not. When HasPolicy is false, the intrinsic has no
+  // tail policy argument regardless of whether it is masked. For example,
+  // when the result is a mask type or a scalar type, there is no need to
+  // specify the tail policy.
+  bit HasPolicy = true;
+
   // This builtin supports non-masked function overloading api.
   // All masked operations support overloading api.
   bit HasNoMaskedOverloaded = true;
@@ -557,6 +564,7 @@
 }
 
 let HasNoMaskedOverloaded = false,
+    HasPolicy = false,
     ManualCodegen = [{
       IntrinsicTypes = {ResultType, Ops[1]->getType()};
       Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
@@ -572,6 +580,19 @@
     let IRName = "vle1";
     let HasMask = false;
   }
+}
+
+let HasNoMaskedOverloaded = false,
+    ManualCodegen = [{
+      IntrinsicTypes = {ResultType, Ops[1]->getType()};
+      Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
+    }],
+    ManualCodegenMask = [{
+      // Move mask to right before vl.
+      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
+      IntrinsicTypes = {ResultType, Ops[3]->getType()};
+      Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
+    }] in {
 multiclass RVVVLEBuiltin<list<string> types> {
   let Name = NAME # "_v",
       IRName = "vle",
@@ -611,7 +632,7 @@
     ManualCodegenMask = [{
       {
         // Move mask to right before vl.
-        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
         IntrinsicTypes = {ResultType, Ops[4]->getType()};
         Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
         Value *NewVL = Ops[2];
@@ -648,7 +669,7 @@
     }],
     ManualCodegenMask = [{
       // Move mask to right before vl.
-      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
       IntrinsicTypes = {ResultType, Ops[4]->getType()};
       Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
     }] in {
@@ -668,7 +689,7 @@
     }],
     ManualCodegenMask = [{
       // Move mask to right before vl.
-      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
       IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops[4]->getType()};
       Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
     }] in {
@@ -688,6 +709,7 @@
 }
 
 let HasMaskedOffOperand = false,
+    HasPolicy = false,
     ManualCodegen = [{
       // Builtin: (ptr, value, vl). Intrinsic: (value, ptr, vl)
      std::swap(Ops[0], Ops[1]);
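The end() - 2 rotates above are the mechanical core of the change: the tail policy operand is appended after vl, so the mask now has to land two slots from the end instead of one. A minimal standalone sketch of the reordering, using illustrative operand names rather than real LLVM values:

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

int main() {
  // Assumed masked builtin operand order: (mask, maskedoff, ptr, vl, policy).
  std::vector<std::string> Ops = {"mask", "maskedoff", "ptr", "vl", "policy"};
  // Rotate the mask to right before vl; end() - 2 leaves both vl and the
  // new trailing policy operand where they are.
  std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
  // Intrinsic operand order: (maskedoff, ptr, mask, vl, policy).
  assert(Ops[2] == "mask" && Ops[3] == "vl" && Ops[4] == "policy");
  return 0;
}

With the old end() - 1 bound, the rotate would drag the policy operand into the middle of the list, which is why every ManualCodegenMask body is updated in lockstep.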
@@ -705,6 +727,22 @@
     let IRName = "vse1";
     let HasMask = false;
   }
+}
+
+let HasMaskedOffOperand = false,
+    HasPolicy = false,
+    ManualCodegen = [{
+      // Builtin: (ptr, value, vl). Intrinsic: (value, ptr, vl)
+      std::swap(Ops[0], Ops[1]);
+      Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo());
+      IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType()};
+    }],
+    ManualCodegenMask = [{
+      // Builtin: (mask, ptr, value, vl). Intrinsic: (value, ptr, mask, vl)
+      std::swap(Ops[0], Ops[2]);
+      Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo());
+      IntrinsicTypes = {Ops[0]->getType(), Ops[3]->getType()};
+    }] in {
 multiclass RVVVSEBuiltin<list<string> types> {
   let Name = NAME # "_v",
       IRName = "vse",
@@ -724,6 +762,7 @@
       IRName = "vsse",
       IRNameMask = "vsse_mask",
       HasMaskedOffOperand = false,
+      HasPolicy = false,
       ManualCodegen = [{
         // Builtin: (ptr, stride, value, vl). Intrinsic: (value, ptr, stride, vl)
         std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3);
@@ -747,6 +786,7 @@
 multiclass RVVIndexedStore<string op> {
   let HasMaskedOffOperand = false,
+      HasPolicy = false,
       ManualCodegen = [{
         // Builtin: (ptr, index, value, vl). Intrinsic: (value, ptr, index, vl)
         std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3);
@@ -833,7 +873,8 @@
         Operands.push_back(Ops[2 * NF + 1]);
         Operands.push_back(Ops[NF]);
         Operands.push_back(Ops[2 * NF + 2]);
-        assert(Operands.size() == NF + 3);
+        Operands.push_back(Ops[2 * NF + 3]);
+        assert(Operands.size() == NF + 4);
         llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
         llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
         clang::CharUnits Align =
@@ -905,8 +946,9 @@
         Operands.push_back(Ops[2 * NF + 1]);
         Operands.push_back(Ops[NF]);
         Operands.push_back(Ops[2 * NF + 3]);
+        Operands.push_back(Ops[2 * NF + 4]);
         Value *NewVL = Ops[2 * NF + 2];
-        assert(Operands.size() == NF + 3);
+        assert(Operands.size() == NF + 4);
         llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
         llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
         clang::CharUnits Align =
@@ -978,7 +1020,8 @@
         Operands.push_back(Ops[2 * NF + 2]);
         Operands.push_back(Ops[NF]);
         Operands.push_back(Ops[2 * NF + 3]);
-        assert(Operands.size() == NF + 4);
+        Operands.push_back(Ops[2 * NF + 4]);
+        assert(Operands.size() == NF + 5);
         llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
         llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
         clang::CharUnits Align =
@@ -1044,7 +1087,8 @@
         Operands.push_back(Ops[2 * NF + 2]);
         Operands.push_back(Ops[NF]);
         Operands.push_back(Ops[2 * NF + 3]);
-        assert(Operands.size() == NF + 4);
+        Operands.push_back(Ops[2 * NF + 4]);
+        assert(Operands.size() == NF + 5);
         llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
         llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
         clang::CharUnits Align =
@@ -1094,6 +1138,7 @@
       IRNameMask = op # nf # "_mask",
       NF = nf,
       HasMaskedOffOperand = false,
+      HasPolicy = false,
       ManualCodegen = [{
       {
         // Builtin: (ptr, val0, val1, ..., vl)
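In the segment-load codegen above, the newly pushed operand is Ops[2 * NF + 3] and the masked intrinsic grows to NF + 4 operands. The index arithmetic, worked through for NF = 2, under the assumed masked builtin order (val0, val1, mask, maskedoff0, maskedoff1, ptr, vl, policy) that the indices imply:

#include <cassert>

int main() {
  const int NF = 2;                 // two segment values, e.g. a vlseg2 builtin
  const int MaskIdx   = NF;         // Ops[NF]         -> mask
  const int PtrIdx    = 2 * NF + 1; // Ops[2 * NF + 1] -> base pointer
  const int VLIdx     = 2 * NF + 2; // Ops[2 * NF + 2] -> vl
  const int PolicyIdx = 2 * NF + 3; // Ops[2 * NF + 3] -> new tail policy
  assert(MaskIdx == 2 && PtrIdx == 5 && VLIdx == 6 && PolicyIdx == 7);
  // Intrinsic operand count: NF maskedoff values + ptr + mask + vl + policy.
  assert(NF + 4 == 6);
  return 0;
}

The FF (fault-only-first) variant reads the same way, except vl sits at Ops[2 * NF + 2] and is returned separately via NewVL, so the policy lands at Ops[2 * NF + 3] there as well, and the indexed variants shift everything by one for the index operand, giving the NF + 5 asserts.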
@@ -1139,6 +1184,7 @@
       IRNameMask = op # nf # "_mask",
       NF = nf,
       HasMaskedOffOperand = false,
+      HasPolicy = false,
       ManualCodegen = [{
       {
         // Builtin: (ptr, stride, val0, val1, ..., vl).
@@ -1180,6 +1226,7 @@
       IRNameMask = op # nf # "_mask",
       NF = nf,
       HasMaskedOffOperand = false,
+      HasPolicy = false,
       ManualCodegen = [{
       {
         // Builtin: (ptr, index, val0, val1, ..., vl)
@@ -1224,6 +1271,7 @@
       IRName = NAME,
       IRNameMask = NAME # "_mask",
       HasMaskedOffOperand = false,
+      HasPolicy = false,
       ManualCodegen = [{
         // base, bindex, value, vl
         IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[3]->getType()};
@@ -1258,7 +1306,7 @@
     }],
     ManualCodegenMask = [{
       {
-        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
         // maskedoff, op1, mask, vl
         IntrinsicTypes = {ResultType,
                           cast<llvm::VectorType>(ResultType)->getElementType(),
@@ -1288,7 +1336,7 @@
     }],
     ManualCodegenMask = [{
       {
-        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
         // maskedoff, op1, mask, vl
         IntrinsicTypes = {ResultType,
                           cast<llvm::VectorType>(ResultType)->getElementType(),
@@ -1335,7 +1383,7 @@
     }],
     ManualCodegenMask = [{
       {
-        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
         // maskedoff, op1, mask, vl
         IntrinsicTypes = {ResultType,
                           Ops[1]->getType(),
@@ -1367,7 +1415,7 @@
     }],
     ManualCodegenMask = [{
       {
-        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
         // maskedoff, op1, mask, vl
         IntrinsicTypes = {ResultType,
                           Ops[1]->getType(),
@@ -1402,7 +1450,7 @@
     }],
     ManualCodegenMask = [{
       {
-        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
         // maskedoff, op1, mask, vl
         IntrinsicTypes = {ResultType,
                           Ops[1]->getType(),
@@ -1423,6 +1471,7 @@
 let HasVL = false,
     HasMask = false,
     HasSideEffects = true,
+    HasPolicy = false,
     Log2LMUL = [0],
     ManualCodegen = [{IntrinsicTypes = {ResultType};}] in // Set XLEN type
 {
@@ -1596,7 +1645,7 @@
 }
 
 // 12.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
-let HasMask = false in {
+let HasMask = false, HasPolicy = false in {
 defm vadc : RVVCarryinBuiltinSet;
 defm vmadc : RVVCarryOutInBuiltinSet<"vmadc_carry_in">;
 defm vmadc : RVVIntMaskOutBuiltinSet;
@@ -1624,6 +1673,7 @@
                                          ["Uv", "UvUw"]]>;
 
 // 12.8. Vector Integer Comparison Instructions
+let HasPolicy = false in {
 defm vmseq : RVVIntMaskOutBuiltinSet;
 defm vmsne : RVVIntMaskOutBuiltinSet;
 defm vmsltu : RVVUnsignedMaskOutBuiltinSet;
@@ -1634,6 +1684,7 @@
 defm vmsgt : RVVSignedMaskOutBuiltinSet;
 defm vmsgeu : RVVUnsignedMaskOutBuiltinSet;
 defm vmsge : RVVSignedMaskOutBuiltinSet;
+}
 
 // 12.9. Vector Integer Min/Max Instructions
 defm vminu : RVVUnsignedBinBuiltinSet;
@@ -1669,6 +1720,7 @@
 }
 
 // 12.13. Vector Single-Width Integer Multiply-Add Instructions
+let HasPolicy = false in {
 defm vmacc : RVVIntTerBuiltinSet;
 defm vnmsac : RVVIntTerBuiltinSet;
 defm vmadd : RVVIntTerBuiltinSet;
@@ -1689,10 +1741,11 @@
 defm vwmaccus : RVVOutOp1Op2BuiltinSet<"vwmaccus", "csi",
                                        [["vx", "w", "wwUev"]]>;
 }
+}
 
 // 12.15. Vector Integer Merge Instructions
 // C/C++ Operand: (mask, op1, op2, vl), Intrinsic: (op1, op2, mask, vl)
-let HasMask = false,
+let HasMask = false, HasPolicy = false,
     ManualCodegen = [{
       std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3);
       IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[3]->getType()};
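The vmerge codegen above rotates within a fixed three-element window rather than relative to the end: vmerge is declared with HasPolicy = false, so its operand list never carries the trailing policy and the old bound stays correct. A sketch of that reordering, again with illustrative names:

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

int main() {
  // vmerge C/C++ operand order: (mask, op1, op2, vl).
  std::vector<std::string> Ops = {"mask", "op1", "op2", "vl"};
  // Rotate the mask past op1 and op2; vl is untouched at the end.
  std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3);
  // Intrinsic operand order: (op1, op2, mask, vl).
  assert(Ops[0] == "op1" && Ops[1] == "op2" && Ops[2] == "mask");
  return 0;
}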
@@ -1705,7 +1758,7 @@
 }
 
 // 12.16. Vector Integer Move Instructions
-let HasMask = false in {
+let HasMask = false, HasPolicy = false in {
   let MangledName = "vmv_v" in {
     defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "csil",
                                   [["v", "Uv", "UvUv"]]>;
@@ -1769,6 +1822,7 @@
 }
 
 // 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
+let HasPolicy = false in {
 defm vfmacc : RVVFloatingTerBuiltinSet;
 defm vfnmacc : RVVFloatingTerBuiltinSet;
 defm vfmsac : RVVFloatingTerBuiltinSet;
@@ -1783,6 +1837,7 @@
 defm vfwnmacc : RVVFloatingWidenTerBuiltinSet;
 defm vfwmsac : RVVFloatingWidenTerBuiltinSet;
 defm vfwnmsac : RVVFloatingWidenTerBuiltinSet;
+}
 
 // 14.8. Vector Floating-Point Square-Root Instruction
 def vfsqrt : RVVFloatingUnaryVVBuiltin;
@@ -1805,20 +1860,22 @@
 defm vfabs_v : RVVPseudoVFUnaryBuiltin<"vfsgnjx", "xfd">;
 
 // 14.13. Vector Floating-Point Compare Instructions
+let HasPolicy = false in {
 defm vmfeq : RVVFloatingMaskOutBuiltinSet;
 defm vmfne : RVVFloatingMaskOutBuiltinSet;
 defm vmflt : RVVFloatingMaskOutBuiltinSet;
 defm vmfle : RVVFloatingMaskOutBuiltinSet;
 defm vmfgt : RVVFloatingMaskOutBuiltinSet;
 defm vmfge : RVVFloatingMaskOutBuiltinSet;
+}
 
 // 14.14. Vector Floating-Point Classify Instruction
-let Name = "vfclass_v" in
+let Name = "vfclass_v", HasPolicy = false in
   def vfclass : RVVOp0Builtin<"Uv", "Uvv", "xfd">;
 
 // 14.15. Vector Floating-Point Merge Instruction
 // C/C++ Operand: (mask, op1, op2, vl), Builtin: (op1, op2, mask, vl)
-let HasMask = false,
+let HasMask = false, HasPolicy = false,
     ManualCodegen = [{
       std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3);
       IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[3]->getType()};
@@ -1830,7 +1887,7 @@
 }
 
 // 14.16. Vector Floating-Point Move Instruction
-let HasMask = false, HasNoMaskedOverloaded = false in
+let HasMask = false, HasNoMaskedOverloaded = false, HasPolicy = false in
   defm vfmv_v : RVVOutBuiltinSet<"vfmv_v_f", "xfd",
                                  [["f", "v", "ve"]]>;
@@ -1867,6 +1924,7 @@
 
 // 15. Vector Reduction Operations
 // 15.1. Vector Single-Width Integer Reduction Instructions
+let HasPolicy = false in {
 defm vredsum : RVVIntReductionBuiltinSet;
 defm vredmaxu : RVVUnsignedReductionBuiltin;
 defm vredmax : RVVSignedReductionBuiltin;
@@ -1894,6 +1952,7 @@
 // 15.4. Vector Widening Floating-Point Reduction Instructions
 defm vfwredsum : RVVFloatingWidenReductionBuiltin;
 defm vfwredosum : RVVFloatingWidenReductionBuiltin;
+}
 
 // 16. Vector Mask Instructions
 // 16.1. Vector Mask-Register Logical Instructions
@@ -1911,6 +1970,7 @@
 defm vmmv_m : RVVPseudoMaskBuiltin<"vmand", "c">;
 defm vmnot_m : RVVPseudoMaskBuiltin<"vmnand", "c">;
 
+let HasPolicy = false in {
 // 16.2. Vector mask population count vpopc
 def vpopc : RVVMaskOp0Builtin<"um">;
@@ -1934,10 +1994,11 @@
 defm vid : RVVOutBuiltinSet<"vid", "csil", [["v", "v", "v"],
                                             ["v", "Uv", "Uv"]]>;
 }
+}
 
 // 17. Vector Permutation Instructions
 // 17.1. Integer Scalar Move Instructions
-let HasMask = false in {
+let HasMask = false, HasPolicy = false in {
   let HasVL = false, MangledName = "vmv_x" in
     defm vmv_x : RVVOp0BuiltinSet<"vmv_x_s", "csil",
                                   [["s", "ve", "ev"],
@@ -1949,7 +2010,7 @@
 }
 
 // 17.2. Floating-Point Scalar Move Instructions
-let HasMask = false in {
+let HasMask = false, HasPolicy = false in {
   let HasVL = false, MangledName = "vfmv_f" in
     defm vfmv_f : RVVOp0BuiltinSet<"vfmv_f_s", "xfd",
                                    [["s", "ve", "ev"]]>;
@@ -1960,10 +2021,12 @@
 }
 
 // 17.3. Vector Slide Instructions
+let HasPolicy = false in {
 // 17.3.1. Vector Slideup Instructions
 defm vslideup : RVVSlideBuiltinSet;
 // 17.3.2. Vector Slidedown Instructions
 defm vslidedown : RVVSlideBuiltinSet;
+}
 
 // 17.3.3. Vector Slide1up Instructions
 defm vslide1up : RVVSlideOneBuiltinSet;
@@ -1990,7 +2053,7 @@
                                      [["vv", "Uv", "UvUv(Log2EEW:4)Uv"]]>;
 
 // 17.5. Vector Compress Instruction
-let HasMask = false,
+let HasMask = false, HasPolicy = false,
     ManualCodegen = [{
       std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3);
       IntrinsicTypes = {ResultType, Ops[3]->getType()};
@@ -2005,7 +2068,7 @@
 
 // Miscellaneous
 let HasMask = false, HasVL = false, IRName = "" in {
-  let Name = "vreinterpret_v",
+  let Name = "vreinterpret_v", HasPolicy = false,
       ManualCodegen = [{
         return Builder.CreateBitCast(Ops[0], ResultType);
       }] in {
@@ -2027,7 +2090,7 @@
     }
   }
 
-  let Name = "vundefined", HasNoMaskedOverloaded = false,
+  let Name = "vundefined", HasNoMaskedOverloaded = false, HasPolicy = false,
      ManualCodegen = [{
        return llvm::UndefValue::get(ResultType);
      }] in {
@@ -2037,7 +2100,7 @@
 
   // LMUL truncation
   // C/C++ Operand: VecTy, IR Operand: VecTy, Index
-  let Name = "vlmul_trunc_v", MangledName = "vlmul_trunc",
+  let Name = "vlmul_trunc_v", MangledName = "vlmul_trunc", HasPolicy = false,
      ManualCodegen = [{ {
        ID = Intrinsic::experimental_vector_extract;
        IntrinsicTypes = {ResultType, Ops[0]->getType()};
@@ -2055,7 +2118,7 @@
 
   // LMUL extension
   // C/C++ Operand: SubVecTy, IR Operand: VecTy, SubVecTy, Index
-  let Name = "vlmul_ext_v", MangledName = "vlmul_ext",
+  let Name = "vlmul_ext_v", MangledName = "vlmul_ext", HasPolicy = false,
      ManualCodegen = [{
        ID = Intrinsic::experimental_vector_insert;
        IntrinsicTypes = {ResultType, Ops[0]->getType()};
@@ -2073,7 +2136,7 @@
     }
   }
 
-  let Name = "vget_v",
+  let Name = "vget_v", HasPolicy = false,
      ManualCodegen = [{
      {
        ID = Intrinsic::experimental_vector_extract;
@@ -2091,7 +2154,7 @@
     }
   }
 
-  let Name = "vset_v", Log2LMUL = [0, 1, 2],
+  let Name = "vset_v", Log2LMUL = [0, 1, 2], HasPolicy = false,
      ManualCodegen = [{
      {
        ID = Intrinsic::experimental_vector_insert;
@@ -2110,3 +2173,14 @@
   }
 }
 }
+
+class RVVHeader
+{
+  code HeaderCode;
+}
+
+let HeaderCode = [{
+#define VE_TAIL_UNDISTURBED 0
+#define VE_TAIL_AGNOSTIC 1
+}] in
+def policy : RVVHeader;
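The RVVHeader fragment above is what surfaces the two policy names to users of the generated riscv_vector.h. A usage sketch of the resulting masked signature, based on the updated vadd tests below (the overloaded vadd and the vector types come from the existing API; the trailing policy argument is the new part, and per the RVV tail-policy semantics, tail-undisturbed keeps the destination's maskedoff values in elements past vl, while tail-agnostic, used throughout the tests, lets them take any value):

#include <riscv_vector.h>

// Compiled for a riscv64 target with the V extension enabled.
vint8m1_t add_keep_tail(vbool8_t mask, vint8m1_t maskedoff,
                        vint8m1_t op1, vint8m1_t op2, size_t vl) {
  // New trailing argument: lowers to an extra i64 policy operand on the
  // llvm.riscv.vadd.mask intrinsic (i64 0 here, i64 1 in the tests below).
  return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_UNDISTURBED);
}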
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@
   return vadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@
   return vadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@
   return vadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) @@ -55,7 +50,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -65,7 +59,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -75,7 +68,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -85,7 +77,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -95,7 +86,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -105,7 +95,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -115,7 +104,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -125,7 +113,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -135,7 +122,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -145,7 +131,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -155,7 +140,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -165,7 +149,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -175,7 +158,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -185,7 +167,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -195,7 +176,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -205,7 +185,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -215,7 +194,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -225,7 +203,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -235,7 +212,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -245,7 +221,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -255,7 +230,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -265,7 +239,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -275,7 +248,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -285,7 +257,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -295,7 +266,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -305,7 +275,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -315,7 +284,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -325,7 +293,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -335,7 +302,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -345,7 +311,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: 
@test_vadd_vv_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -355,7 +320,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -365,7 +329,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -375,7 +338,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -385,7 +347,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -395,7 +356,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -405,7 +365,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -415,7 +374,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -425,7 +383,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -435,7 +392,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -445,7 +401,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -455,7 +410,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -465,7 +419,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -475,7 +428,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -485,7 +437,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ 
-495,7 +446,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -505,7 +455,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -515,7 +464,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -525,7 +473,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -535,7 +482,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -545,7 +491,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -555,7 +500,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -565,7 +509,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -575,7 +518,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -585,7 +527,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -595,7 +536,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -605,7 +545,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -615,7 +554,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -625,7 +563,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -635,7 +572,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -645,7 +581,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -655,7 +590,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -665,7 +599,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -675,7 +608,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -685,7 +617,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -695,7 +626,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -705,7 +635,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -715,7 +644,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -725,7 +653,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -735,7 +662,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -745,7 +671,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -755,7 +680,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -765,7 +689,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -775,7 +698,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -785,7 +707,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: 
@test_vadd_vv_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -795,7 +716,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -805,7 +725,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -815,7 +734,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -825,7 +743,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -835,7 +752,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -845,7 +761,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -855,7 +770,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -865,7 +779,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -875,7 +788,6 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) @@ -885,883 +797,795 @@ return vadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t 
maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, 
VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t 
test_vadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: 
@test_vadd_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, 
vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, 
vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t 
op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return vadd(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c
@@ -1,1767 +1,2384 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
-// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg \
+// RUN: | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

-//
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+vint8mf8_t test_vadd_vv_i8mf8 (vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
  return vadd_vv_i8mf8(op1, op2, vl);
}
-//
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+vint8mf8_t test_vadd_vx_i8mf8 (vint8mf8_t op1, int8_t op2, size_t vl) {
  return vadd_vx_i8mf8(op1, op2, vl);
}
-//
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+vint8mf4_t test_vadd_vv_i8mf4 (vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
  return vadd_vv_i8mf4(op1, op2, vl);
}
-//
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+vint8mf4_t test_vadd_vx_i8mf4 (vint8mf4_t op1, int8_t op2, size_t vl) {
  return vadd_vx_i8mf4(op1, op2, vl);
}
-//
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+vint8mf2_t test_vadd_vv_i8mf2 (vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
  return vadd_vv_i8mf2(op1, op2, vl);
}
-//
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+vint8mf2_t test_vadd_vx_i8mf2
(vint8mf2_t op1, int8_t op2, size_t vl) { return vadd_vx_i8mf2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { +vint8m1_t test_vadd_vv_i8m1 (vint8m1_t op1, vint8m1_t op2, size_t vl) { return vadd_vv_i8m1(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { +vint8m1_t test_vadd_vx_i8m1 (vint8m1_t op1, int8_t op2, size_t vl) { return vadd_vx_i8m1(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { +vint8m2_t test_vadd_vv_i8m2 (vint8m2_t op1, vint8m2_t op2, size_t vl) { return vadd_vv_i8m2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { +vint8m2_t test_vadd_vx_i8m2 (vint8m2_t op1, int8_t op2, size_t vl) { return vadd_vx_i8m2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { +vint8m4_t test_vadd_vv_i8m4 (vint8m4_t op1, vint8m4_t op2, size_t vl) { return vadd_vv_i8m4(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { +vint8m4_t test_vadd_vx_i8m4 (vint8m4_t op1, int8_t op2, size_t vl) { return vadd_vx_i8m4(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { +vint8m8_t test_vadd_vv_i8m8 (vint8m8_t op1, vint8m8_t op2, size_t vl) { return vadd_vv_i8m8(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { +vint8m8_t test_vadd_vx_i8m8 (vint8m8_t op1, int8_t op2, size_t vl) { return vadd_vx_i8m8(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { +vint16mf4_t test_vadd_vv_i16mf4 (vint16mf4_t op1, vint16mf4_t op2, size_t vl) { return vadd_vv_i16mf4(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { +vint16mf4_t test_vadd_vx_i16mf4 (vint16mf4_t op1, int16_t op2, size_t vl) { return vadd_vx_i16mf4(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { +vint16mf2_t test_vadd_vv_i16mf2 (vint16mf2_t op1, vint16mf2_t op2, size_t vl) { return vadd_vv_i16mf2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { +vint16mf2_t test_vadd_vx_i16mf2 (vint16mf2_t op1, int16_t op2, size_t vl) { return vadd_vx_i16mf2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { +vint16m1_t test_vadd_vv_i16m1 (vint16m1_t op1, vint16m1_t op2, size_t vl) { return vadd_vv_i16m1(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { +vint16m1_t test_vadd_vx_i16m1 (vint16m1_t op1, int16_t op2, size_t vl) { return vadd_vx_i16m1(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { +vint16m2_t test_vadd_vv_i16m2 (vint16m2_t op1, vint16m2_t op2, size_t vl) { return vadd_vv_i16m2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { +vint16m2_t test_vadd_vx_i16m2 (vint16m2_t op1, int16_t op2, size_t vl) { return vadd_vx_i16m2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { +vint16m4_t test_vadd_vv_i16m4 (vint16m4_t op1, vint16m4_t op2, size_t vl) { return vadd_vv_i16m4(op1, 
op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { +vint16m4_t test_vadd_vx_i16m4 (vint16m4_t op1, int16_t op2, size_t vl) { return vadd_vx_i16m4(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { +vint16m8_t test_vadd_vv_i16m8 (vint16m8_t op1, vint16m8_t op2, size_t vl) { return vadd_vv_i16m8(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { +vint16m8_t test_vadd_vx_i16m8 (vint16m8_t op1, int16_t op2, size_t vl) { return vadd_vx_i16m8(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { +vint32mf2_t test_vadd_vv_i32mf2 (vint32mf2_t op1, vint32mf2_t op2, size_t vl) { return vadd_vv_i32mf2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { +vint32mf2_t test_vadd_vx_i32mf2 (vint32mf2_t op1, int32_t op2, size_t vl) { return vadd_vx_i32mf2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { +vint32m1_t test_vadd_vv_i32m1 (vint32m1_t op1, vint32m1_t op2, size_t vl) { return vadd_vv_i32m1(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { +vint32m1_t test_vadd_vx_i32m1 (vint32m1_t op1, int32_t op2, size_t vl) { return vadd_vx_i32m1(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { +vint32m2_t test_vadd_vv_i32m2 (vint32m2_t op1, vint32m2_t op2, size_t vl) { return vadd_vv_i32m2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { +vint32m2_t test_vadd_vx_i32m2 (vint32m2_t op1, int32_t op2, size_t vl) { return vadd_vx_i32m2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { +vint32m4_t test_vadd_vv_i32m4 (vint32m4_t op1, vint32m4_t op2, size_t vl) { return vadd_vv_i32m4(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { +vint32m4_t test_vadd_vx_i32m4 (vint32m4_t op1, int32_t op2, size_t vl) { return vadd_vx_i32m4(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { +vint32m8_t test_vadd_vv_i32m8 (vint32m8_t op1, vint32m8_t op2, size_t vl) { return vadd_vv_i32m8(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { +vint32m8_t test_vadd_vx_i32m8 (vint32m8_t op1, int32_t op2, size_t vl) { return vadd_vx_i32m8(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { +vint64m1_t test_vadd_vv_i64m1 (vint64m1_t op1, vint64m1_t op2, size_t vl) { return vadd_vv_i64m1(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { +vint64m1_t test_vadd_vx_i64m1 (vint64m1_t op1, int64_t op2, size_t vl) { return vadd_vx_i64m1(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { +vint64m2_t test_vadd_vv_i64m2 (vint64m2_t op1, vint64m2_t op2, size_t vl) { return vadd_vv_i64m2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { +vint64m2_t test_vadd_vx_i64m2 (vint64m2_t op1, int64_t op2, size_t vl) { return 
vadd_vx_i64m2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { +vint64m4_t test_vadd_vv_i64m4 (vint64m4_t op1, vint64m4_t op2, size_t vl) { return vadd_vv_i64m4(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { +vint64m4_t test_vadd_vx_i64m4 (vint64m4_t op1, int64_t op2, size_t vl) { return vadd_vx_i64m4(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { +vint64m8_t test_vadd_vv_i64m8 (vint64m8_t op1, vint64m8_t op2, size_t vl) { return vadd_vv_i64m8(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { +vint64m8_t test_vadd_vx_i64m8 (vint64m8_t op1, int64_t op2, size_t vl) { return vadd_vx_i64m8(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { +vuint8mf8_t test_vadd_vv_u8mf8 (vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { return vadd_vv_u8mf8(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { +vuint8mf8_t test_vadd_vx_u8mf8 (vuint8mf8_t op1, uint8_t op2, size_t vl) { return vadd_vx_u8mf8(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { +vuint8mf4_t test_vadd_vv_u8mf4 (vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { return vadd_vv_u8mf4(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { +vuint8mf4_t test_vadd_vx_u8mf4 (vuint8mf4_t op1, uint8_t op2, size_t vl) { return vadd_vx_u8mf4(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { +vuint8mf2_t test_vadd_vv_u8mf2 (vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { return vadd_vv_u8mf2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { +vuint8mf2_t test_vadd_vx_u8mf2 (vuint8mf2_t op1, uint8_t op2, size_t vl) { return vadd_vx_u8mf2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { +vuint8m1_t test_vadd_vv_u8m1 (vuint8m1_t op1, vuint8m1_t op2, size_t vl) { return vadd_vv_u8m1(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { +vuint8m1_t test_vadd_vx_u8m1 (vuint8m1_t op1, uint8_t op2, size_t vl) { return vadd_vx_u8m1(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { +vuint8m2_t test_vadd_vv_u8m2 (vuint8m2_t op1, vuint8m2_t op2, size_t vl) { return vadd_vv_u8m2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { +vuint8m2_t test_vadd_vx_u8m2 (vuint8m2_t op1, uint8_t op2, size_t vl) { return vadd_vx_u8m2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { +vuint8m4_t test_vadd_vv_u8m4 (vuint8m4_t op1, vuint8m4_t op2, size_t vl) { return vadd_vv_u8m4(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { +vuint8m4_t test_vadd_vx_u8m4 (vuint8m4_t op1, uint8_t op2, size_t vl) { return vadd_vx_u8m4(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { +vuint8m8_t test_vadd_vv_u8m8 (vuint8m8_t op1, vuint8m8_t op2, size_t vl) { return vadd_vv_u8m8(op1, op2, vl); } -// 
// CHECK-RV64-LABEL: @test_vadd_vx_u8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { +vuint8m8_t test_vadd_vx_u8m8 (vuint8m8_t op1, uint8_t op2, size_t vl) { return vadd_vx_u8m8(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { +vuint16mf4_t test_vadd_vv_u16mf4 (vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { return vadd_vv_u16mf4(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { +vuint16mf4_t test_vadd_vx_u16mf4 (vuint16mf4_t op1, uint16_t op2, size_t vl) { return vadd_vx_u16mf4(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { +vuint16mf2_t test_vadd_vv_u16mf2 (vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { return vadd_vv_u16mf2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { +vuint16mf2_t test_vadd_vx_u16mf2 (vuint16mf2_t op1, uint16_t op2, size_t vl) { return vadd_vx_u16mf2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { +vuint16m1_t test_vadd_vv_u16m1 (vuint16m1_t op1, vuint16m1_t op2, size_t vl) { return vadd_vv_u16m1(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { +vuint16m1_t test_vadd_vx_u16m1 (vuint16m1_t op1, uint16_t op2, size_t vl) { return vadd_vx_u16m1(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { +vuint16m2_t test_vadd_vv_u16m2 (vuint16m2_t op1, vuint16m2_t op2, size_t vl) { return vadd_vv_u16m2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( 
[[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { +vuint16m2_t test_vadd_vx_u16m2 (vuint16m2_t op1, uint16_t op2, size_t vl) { return vadd_vx_u16m2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { +vuint16m4_t test_vadd_vv_u16m4 (vuint16m4_t op1, vuint16m4_t op2, size_t vl) { return vadd_vv_u16m4(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { +vuint16m4_t test_vadd_vx_u16m4 (vuint16m4_t op1, uint16_t op2, size_t vl) { return vadd_vx_u16m4(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { +vuint16m8_t test_vadd_vv_u16m8 (vuint16m8_t op1, vuint16m8_t op2, size_t vl) { return vadd_vv_u16m8(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { +vuint16m8_t test_vadd_vx_u16m8 (vuint16m8_t op1, uint16_t op2, size_t vl) { return vadd_vx_u16m8(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { +vuint32mf2_t test_vadd_vv_u32mf2 (vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { return vadd_vv_u32mf2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { +vuint32mf2_t test_vadd_vx_u32mf2 (vuint32mf2_t op1, uint32_t op2, size_t vl) { return vadd_vx_u32mf2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { +vuint32m1_t test_vadd_vv_u32m1 (vuint32m1_t op1, vuint32m1_t op2, size_t vl) { return vadd_vv_u32m1(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, 
size_t vl) { +vuint32m1_t test_vadd_vx_u32m1 (vuint32m1_t op1, uint32_t op2, size_t vl) { return vadd_vx_u32m1(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { +vuint32m2_t test_vadd_vv_u32m2 (vuint32m2_t op1, vuint32m2_t op2, size_t vl) { return vadd_vv_u32m2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { +vuint32m2_t test_vadd_vx_u32m2 (vuint32m2_t op1, uint32_t op2, size_t vl) { return vadd_vx_u32m2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { +vuint32m4_t test_vadd_vv_u32m4 (vuint32m4_t op1, vuint32m4_t op2, size_t vl) { return vadd_vv_u32m4(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { +vuint32m4_t test_vadd_vx_u32m4 (vuint32m4_t op1, uint32_t op2, size_t vl) { return vadd_vx_u32m4(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { +vuint32m8_t test_vadd_vv_u32m8 (vuint32m8_t op1, vuint32m8_t op2, size_t vl) { return vadd_vv_u32m8(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { +vuint32m8_t test_vadd_vx_u32m8 (vuint32m8_t op1, uint32_t op2, size_t vl) { return vadd_vx_u32m8(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { +vuint64m1_t test_vadd_vv_u64m1 (vuint64m1_t op1, vuint64m1_t op2, size_t vl) { return vadd_vv_u64m1(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { +vuint64m1_t test_vadd_vx_u64m1 (vuint64m1_t op1, uint64_t op2, size_t vl) { return vadd_vx_u64m1(op1, op2, vl); } -// // CHECK-RV64-LABEL: 
@test_vadd_vv_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { +vuint64m2_t test_vadd_vv_u64m2 (vuint64m2_t op1, vuint64m2_t op2, size_t vl) { return vadd_vv_u64m2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { +vuint64m2_t test_vadd_vx_u64m2 (vuint64m2_t op1, uint64_t op2, size_t vl) { return vadd_vx_u64m2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { +vuint64m4_t test_vadd_vv_u64m4 (vuint64m4_t op1, vuint64m4_t op2, size_t vl) { return vadd_vv_u64m4(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { +vuint64m4_t test_vadd_vx_u64m4 (vuint64m4_t op1, uint64_t op2, size_t vl) { return vadd_vx_u64m4(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { +vuint64m8_t test_vadd_vv_u64m8 (vuint64m8_t op1, vuint64m8_t op2, size_t vl) { return vadd_vv_u64m8(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { +vuint64m8_t test_vadd_vx_u64m8 (vuint64m8_t op1, uint64_t op2, size_t vl) { return vadd_vx_u64m8(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { +vint8mf8_t test_vadd_vv_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { return vadd_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { +vint8mf8_t test_vadd_vx_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { return vadd_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { +vint8mf4_t test_vadd_vv_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { return vadd_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { +vint8mf4_t test_vadd_vx_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { return vadd_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { +vint8mf2_t test_vadd_vv_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { return vadd_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { +vint8mf2_t test_vadd_vx_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { return vadd_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { +vint8m1_t test_vadd_vv_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { return vadd_vv_i8m1_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { +vint8m1_t test_vadd_vx_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { return vadd_vx_i8m1_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { +vint8m2_t test_vadd_vv_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { return vadd_vv_i8m2_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { +vint8m2_t test_vadd_vx_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { return vadd_vx_i8m2_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { +vint8m4_t test_vadd_vv_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { return vadd_vv_i8m4_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { +vint8m4_t test_vadd_vx_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { return vadd_vx_i8m4_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { +vint8m8_t test_vadd_vv_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { return vadd_vv_i8m8_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { +vint8m8_t test_vadd_vx_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { return vadd_vx_i8m8_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { +vint16mf4_t test_vadd_vv_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { return vadd_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { +vint16mf4_t test_vadd_vx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { return vadd_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { +vint16mf2_t test_vadd_vv_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { return vadd_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { +vint16mf2_t test_vadd_vx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { return vadd_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { +vint16m1_t test_vadd_vv_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { return vadd_vv_i16m1_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { +vint16m1_t test_vadd_vx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { return vadd_vx_i16m1_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { +vint16m2_t test_vadd_vv_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { return vadd_vv_i16m2_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { +vint16m2_t test_vadd_vx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { return vadd_vx_i16m2_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { +vint16m4_t test_vadd_vv_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { return vadd_vv_i16m4_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { +vint16m4_t test_vadd_vx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { return vadd_vx_i16m4_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { +vint16m8_t test_vadd_vv_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { return vadd_vv_i16m8_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { +vint16m8_t test_vadd_vx_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { return vadd_vx_i16m8_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { +vint32mf2_t test_vadd_vv_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { return vadd_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { +vint32mf2_t test_vadd_vx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { return vadd_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { +vint32m1_t test_vadd_vv_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { return vadd_vv_i32m1_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { +vint32m1_t test_vadd_vx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { return vadd_vx_i32m1_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { +vint32m2_t test_vadd_vv_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { return vadd_vv_i32m2_m(mask, maskedoff, op1, op2, 
vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { +vint32m2_t test_vadd_vx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { return vadd_vx_i32m2_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { +vint32m4_t test_vadd_vv_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { return vadd_vv_i32m4_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { +vint32m4_t test_vadd_vx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { return vadd_vx_i32m4_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { +vint32m8_t test_vadd_vv_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return vadd_vv_i32m8_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { +vint32m8_t test_vadd_vx_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t 
vl) { return vadd_vx_i32m8_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { +vint64m1_t test_vadd_vv_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { return vadd_vv_i64m1_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { +vint64m1_t test_vadd_vx_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { return vadd_vx_i64m1_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { +vint64m2_t test_vadd_vv_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { return vadd_vv_i64m2_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { +vint64m2_t test_vadd_vx_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { return vadd_vx_i64m2_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { +vint64m4_t test_vadd_vv_i64m4_m (vbool16_t 
mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { return vadd_vv_i64m4_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { +vint64m4_t test_vadd_vx_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { return vadd_vx_i64m4_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { +vint64m8_t test_vadd_vv_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { return vadd_vv_i64m8_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { +vint64m8_t test_vadd_vx_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { return vadd_vx_i64m8_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { +vuint8mf8_t test_vadd_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { return vadd_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, 
size_t vl) { +vuint8mf8_t test_vadd_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { return vadd_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { +vuint8mf4_t test_vadd_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { return vadd_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { +vuint8mf4_t test_vadd_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { return vadd_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { +vuint8mf2_t test_vadd_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { return vadd_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { +vuint8mf2_t test_vadd_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { return vadd_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t 
test_vadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { +vuint8m1_t test_vadd_vv_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { return vadd_vv_u8m1_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { +vuint8m1_t test_vadd_vx_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { return vadd_vx_u8m1_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { +vuint8m2_t test_vadd_vv_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { return vadd_vv_u8m2_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { +vuint8m2_t test_vadd_vx_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { return vadd_vx_u8m2_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { +vuint8m4_t test_vadd_vv_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { return vadd_vv_u8m4_m(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret 
[[TMP0]]
//
-vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+vuint8m4_t test_vadd_vx_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vadd_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+vuint8m8_t test_vadd_vv_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vadd_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+vuint8m8_t test_vadd_vx_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vadd_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+vuint16mf4_t test_vadd_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
   return vadd_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+vuint16mf4_t test_vadd_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vadd_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+vuint16mf2_t test_vadd_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
   return vadd_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+vuint16mf2_t test_vadd_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vadd_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+vuint16m1_t test_vadd_vv_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vadd_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+vuint16m1_t test_vadd_vx_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vadd_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+vuint16m2_t test_vadd_vv_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vadd_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+vuint16m2_t test_vadd_vx_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vadd_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+vuint16m4_t test_vadd_vv_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vadd_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+vuint16m4_t test_vadd_vx_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vadd_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+vuint16m8_t test_vadd_vv_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vadd_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+vuint16m8_t test_vadd_vx_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vadd_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+vuint32mf2_t test_vadd_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
   return vadd_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+vuint32mf2_t test_vadd_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vadd_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+vuint32m1_t test_vadd_vv_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vadd_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+vuint32m1_t test_vadd_vx_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vadd_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+vuint32m2_t test_vadd_vv_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vadd_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+vuint32m2_t test_vadd_vx_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vadd_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+vuint32m4_t test_vadd_vv_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vadd_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+vuint32m4_t test_vadd_vx_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vadd_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+vuint32m8_t test_vadd_vv_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vadd_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+vuint32m8_t test_vadd_vx_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vadd_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+vuint64m1_t test_vadd_vv_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vadd_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+vuint64m1_t test_vadd_vx_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vadd_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+vuint64m2_t test_vadd_vv_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vadd_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+vuint64m2_t test_vadd_vx_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vadd_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+vuint64m4_t test_vadd_vv_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vadd_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+vuint64m4_t test_vadd_vx_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vadd_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+vuint64m8_t test_vadd_vv_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vadd_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint64m8_t test_vadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+vuint64m8_t test_vadd_vx_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vadd_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
 }
+// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vadd_vv_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vadd_vx_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vadd_vv_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vadd_vx_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vadd_vv_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vadd_vx_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vadd_vv_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_i8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vadd_vx_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vadd_vv_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_i8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vadd_vx_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m4_t test_vadd_vv_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_i8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m4_t test_vadd_vx_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m8_t test_vadd_vv_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_i8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m8_t test_vadd_vx_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vadd_vv_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vadd_vx_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vadd_vv_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vadd_vx_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vadd_vv_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vadd_vx_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vadd_vv_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vadd_vx_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vadd_vv_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vadd_vx_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vadd_vv_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_i16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vadd_vx_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vadd_vv_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vadd_vx_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vadd_vv_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vadd_vx_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vadd_vv_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vadd_vx_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vadd_vv_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vadd_vx_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vadd_vv_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_i32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vadd_vx_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vadd_vv_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vadd_vx_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vadd_vv_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vadd_vx_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vadd_vv_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vadd_vx_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vadd_vv_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_i64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vadd_vx_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vadd_vv_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vadd_vx_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf4_t test_vadd_vv_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf4_t test_vadd_vx_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vadd_vv_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vadd_vx_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_u8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vadd_vv_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_u8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vadd_vx_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_u8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vadd_vv_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_u8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vadd_vx_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_u8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vadd_vv_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_u8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vadd_vx_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_u8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vadd_vv_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_u8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vadd_vx_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vadd_vv_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vadd_vx_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vadd_vv_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vadd_vx_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_u16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vadd_vv_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_u16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vadd_vx_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_u16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vadd_vv_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_u16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vadd_vx_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_u16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vadd_vv_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_u16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vadd_vx_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_u16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vadd_vv_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_u16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vadd_vx_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vadd_vv_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vadd_vx_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_u32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vadd_vv_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_u32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vadd_vx_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_u32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vadd_vv_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_u32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vadd_vx_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_u32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vadd_vv_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_u32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vadd_vx_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_u32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vadd_vv_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_u32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vadd_vx_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_u64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vadd_vv_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_u64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vadd_vx_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_u64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vadd_vv_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_u64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vadd_vx_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_u64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vadd_vv_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_u64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vadd_vx_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_u64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vadd_vv_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_u64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vadd_vx_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vle.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vle.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vle.c
@@ -2,7 +2,8 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
 // RUN: -target-feature +experimental-v -target-feature +experimental-zfh \
-// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg \
+// RUN: | FileCheck --check-prefix=CHECK-RV64 %s

 #include 

@@ -12,7 +13,7 @@
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i8.i64(* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint8mf8_t test_vle8_v_i8mf8(const int8_t *base, size_t vl) {
+vint8mf8_t test_vle8_v_i8mf8 (const int8_t *base, size_t vl) {
   return vle8_v_i8mf8(base, vl);
 }

@@ -22,7 +23,7 @@
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i8.i64(* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint8mf4_t test_vle8_v_i8mf4(const int8_t *base, size_t vl) {
+vint8mf4_t test_vle8_v_i8mf4 (const int8_t *base, size_t vl) {
   return vle8_v_i8mf4(base, vl);
 }

@@ -32,7 +33,7 @@
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i8.i64(* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint8mf2_t test_vle8_v_i8mf2(const int8_t *base, size_t vl) {
+vint8mf2_t test_vle8_v_i8mf2 (const int8_t *base, size_t vl) {
   return vle8_v_i8mf2(base, vl);
 }

@@ -42,7 +43,7 @@
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i8.i64(* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint8m1_t test_vle8_v_i8m1(const int8_t *base, size_t vl) {
+vint8m1_t test_vle8_v_i8m1 (const int8_t *base, size_t vl) {
   return vle8_v_i8m1(base, vl);
 }

@@ -52,7 +53,7 @@
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16i8.i64(* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint8m2_t test_vle8_v_i8m2(const int8_t *base, size_t vl) {
+vint8m2_t test_vle8_v_i8m2 (const int8_t *base, size_t vl) {
   return vle8_v_i8m2(base, vl);
 }

@@ -62,7 +63,7 @@
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv32i8.i64(* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint8m4_t test_vle8_v_i8m4(const int8_t *base, size_t vl) {
+vint8m4_t test_vle8_v_i8m4 (const int8_t *base, size_t vl) {
   return vle8_v_i8m4(base, vl);
 }

@@ -72,7 +73,7 @@
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv64i8.i64(* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint8m8_t test_vle8_v_i8m8(const int8_t *base, size_t vl) {
+vint8m8_t test_vle8_v_i8m8 (const int8_t *base, size_t vl) {
   return vle8_v_i8m8(base, vl);
 }

@@ -82,7 +83,7 @@
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i16.i64(* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint16mf4_t test_vle16_v_i16mf4(const int16_t *base, size_t vl) {
+vint16mf4_t test_vle16_v_i16mf4 (const int16_t *base, size_t vl) {
   return vle16_v_i16mf4(base, vl);
 }

@@ -92,7 +93,7 @@
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i16.i64(* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint16mf2_t test_vle16_v_i16mf2(const int16_t *base, size_t vl) {
+vint16mf2_t test_vle16_v_i16mf2 (const int16_t *base, size_t vl) {
   return vle16_v_i16mf2(base, vl);
 }

@@ -102,7 +103,7 @@
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i16.i64(* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint16m1_t test_vle16_v_i16m1(const int16_t *base, size_t vl) {
+vint16m1_t test_vle16_v_i16m1 (const int16_t *base, size_t vl) {
   return vle16_v_i16m1(base, vl);
 }

@@ -112,7 +113,7 @@
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i16.i64(* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint16m2_t test_vle16_v_i16m2(const int16_t *base, size_t vl) {
+vint16m2_t test_vle16_v_i16m2 (const int16_t *base, size_t vl) {
   return vle16_v_i16m2(base, vl);
 }

@@ -122,7 +123,7 @@
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16i16.i64(* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint16m4_t test_vle16_v_i16m4(const int16_t *base, size_t vl) {
+vint16m4_t test_vle16_v_i16m4 (const int16_t *base, size_t vl) {
   return vle16_v_i16m4(base, vl);
 }

@@ -132,7 +133,7 @@
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv32i16.i64(* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint16m8_t test_vle16_v_i16m8(const int16_t *base, size_t vl) {
+vint16m8_t test_vle16_v_i16m8 (const int16_t *base, size_t vl) {
   return vle16_v_i16m8(base, vl);
 }

@@ -142,7 +143,7 @@
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i32.i64(* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint32mf2_t test_vle32_v_i32mf2(const int32_t *base, size_t vl) {
+vint32mf2_t test_vle32_v_i32mf2 (const int32_t *base, size_t vl) {
   return vle32_v_i32mf2(base, vl);
 }

@@ -152,7 +153,7 @@
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i32.i64(* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint32m1_t test_vle32_v_i32m1(const int32_t *base, size_t vl) {
+vint32m1_t test_vle32_v_i32m1 (const int32_t *base, size_t vl) {
   return vle32_v_i32m1(base, vl);
 }

@@ -162,7 +163,7 @@
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i32.i64(* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint32m2_t test_vle32_v_i32m2(const int32_t *base, size_t vl) {
+vint32m2_t test_vle32_v_i32m2 (const int32_t *base, size_t vl) {
   return vle32_v_i32m2(base, vl);
 }

@@ -172,7 +173,7 @@
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i32.i64(* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint32m4_t test_vle32_v_i32m4(const int32_t *base, size_t vl) {
+vint32m4_t test_vle32_v_i32m4 (const int32_t *base, size_t vl) {
   return vle32_v_i32m4(base, vl);
 }

@@ -182,7 +183,7 @@
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16i32.i64(* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint32m8_t test_vle32_v_i32m8(const int32_t *base, size_t vl) {
+vint32m8_t test_vle32_v_i32m8 (const int32_t *base, size_t vl) {
   return vle32_v_i32m8(base, vl);
 }

@@ -192,7 +193,7 @@
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i64.i64(* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint64m1_t test_vle64_v_i64m1(const int64_t *base, size_t vl) {
+vint64m1_t test_vle64_v_i64m1 (const int64_t *base, size_t vl) {
   return vle64_v_i64m1(base, vl);
 }

@@ -202,7 +203,7 @@
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i64.i64(* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint64m2_t test_vle64_v_i64m2(const int64_t *base, size_t vl) {
+vint64m2_t test_vle64_v_i64m2 (const int64_t *base, size_t vl) {
   return vle64_v_i64m2(base, vl);
 }

@@ -212,7 +213,7 @@
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i64.i64(* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint64m4_t test_vle64_v_i64m4(const int64_t *base, size_t vl) {
+vint64m4_t test_vle64_v_i64m4 (const int64_t *base, size_t vl) {
   return vle64_v_i64m4(base, vl);
 }

@@ -222,7 +223,7 @@
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i64.i64(* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint64m8_t test_vle64_v_i64m8(const int64_t *base, size_t vl) {
+vint64m8_t test_vle64_v_i64m8 (const int64_t *base, size_t vl) {
   return vle64_v_i64m8(base, vl);
 }

@@ -232,7 +233,7 @@
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i8.i64(* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vuint8mf8_t test_vle8_v_u8mf8(const uint8_t *base, size_t vl) {
+vuint8mf8_t test_vle8_v_u8mf8
(const uint8_t *base, size_t vl) { return vle8_v_u8mf8(base, vl); } @@ -242,7 +243,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i8.i64(* [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf4_t test_vle8_v_u8mf4(const uint8_t *base, size_t vl) { +vuint8mf4_t test_vle8_v_u8mf4 (const uint8_t *base, size_t vl) { return vle8_v_u8mf4(base, vl); } @@ -252,7 +253,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i8.i64(* [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf2_t test_vle8_v_u8mf2(const uint8_t *base, size_t vl) { +vuint8mf2_t test_vle8_v_u8mf2 (const uint8_t *base, size_t vl) { return vle8_v_u8mf2(base, vl); } @@ -262,7 +263,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i8.i64(* [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m1_t test_vle8_v_u8m1(const uint8_t *base, size_t vl) { +vuint8m1_t test_vle8_v_u8m1 (const uint8_t *base, size_t vl) { return vle8_v_u8m1(base, vl); } @@ -272,7 +273,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16i8.i64(* [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m2_t test_vle8_v_u8m2(const uint8_t *base, size_t vl) { +vuint8m2_t test_vle8_v_u8m2 (const uint8_t *base, size_t vl) { return vle8_v_u8m2(base, vl); } @@ -282,7 +283,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv32i8.i64(* [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m4_t test_vle8_v_u8m4(const uint8_t *base, size_t vl) { +vuint8m4_t test_vle8_v_u8m4 (const uint8_t *base, size_t vl) { return vle8_v_u8m4(base, vl); } @@ -292,7 +293,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv64i8.i64(* [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m8_t test_vle8_v_u8m8(const uint8_t *base, size_t vl) { +vuint8m8_t test_vle8_v_u8m8 (const uint8_t *base, size_t vl) { return vle8_v_u8m8(base, vl); } @@ -302,7 +303,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i16.i64(* [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf4_t test_vle16_v_u16mf4(const uint16_t *base, size_t vl) { +vuint16mf4_t test_vle16_v_u16mf4 (const uint16_t *base, size_t vl) { return vle16_v_u16mf4(base, vl); } @@ -312,7 +313,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i16.i64(* [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf2_t test_vle16_v_u16mf2(const uint16_t *base, size_t vl) { +vuint16mf2_t test_vle16_v_u16mf2 (const uint16_t *base, size_t vl) { return vle16_v_u16mf2(base, vl); } @@ -322,7 +323,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i16.i64(* [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m1_t test_vle16_v_u16m1(const uint16_t *base, size_t vl) { +vuint16m1_t test_vle16_v_u16m1 (const uint16_t *base, size_t vl) { return vle16_v_u16m1(base, vl); } @@ -332,7 +333,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i16.i64(* [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m2_t test_vle16_v_u16m2(const uint16_t *base, size_t vl) { +vuint16m2_t test_vle16_v_u16m2 (const uint16_t *base, size_t vl) { return vle16_v_u16m2(base, vl); } @@ -342,7 +343,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16i16.i64(* [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m4_t test_vle16_v_u16m4(const uint16_t *base, size_t vl) { +vuint16m4_t test_vle16_v_u16m4 (const uint16_t *base, size_t vl) { return vle16_v_u16m4(base, vl); 
} @@ -352,7 +353,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv32i16.i64(* [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m8_t test_vle16_v_u16m8(const uint16_t *base, size_t vl) { +vuint16m8_t test_vle16_v_u16m8 (const uint16_t *base, size_t vl) { return vle16_v_u16m8(base, vl); } @@ -362,7 +363,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i32.i64(* [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vle32_v_u32mf2(const uint32_t *base, size_t vl) { +vuint32mf2_t test_vle32_v_u32mf2 (const uint32_t *base, size_t vl) { return vle32_v_u32mf2(base, vl); } @@ -372,7 +373,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i32.i64(* [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vle32_v_u32m1(const uint32_t *base, size_t vl) { +vuint32m1_t test_vle32_v_u32m1 (const uint32_t *base, size_t vl) { return vle32_v_u32m1(base, vl); } @@ -382,7 +383,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i32.i64(* [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vle32_v_u32m2(const uint32_t *base, size_t vl) { +vuint32m2_t test_vle32_v_u32m2 (const uint32_t *base, size_t vl) { return vle32_v_u32m2(base, vl); } @@ -392,7 +393,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i32.i64(* [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vle32_v_u32m4(const uint32_t *base, size_t vl) { +vuint32m4_t test_vle32_v_u32m4 (const uint32_t *base, size_t vl) { return vle32_v_u32m4(base, vl); } @@ -402,7 +403,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16i32.i64(* [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vle32_v_u32m8(const uint32_t *base, size_t vl) { +vuint32m8_t test_vle32_v_u32m8 (const uint32_t *base, size_t vl) { return vle32_v_u32m8(base, vl); } @@ -412,7 +413,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i64.i64(* [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vle64_v_u64m1(const uint64_t *base, size_t vl) { +vuint64m1_t test_vle64_v_u64m1 (const uint64_t *base, size_t vl) { return vle64_v_u64m1(base, vl); } @@ -422,7 +423,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i64.i64(* [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vle64_v_u64m2(const uint64_t *base, size_t vl) { +vuint64m2_t test_vle64_v_u64m2 (const uint64_t *base, size_t vl) { return vle64_v_u64m2(base, vl); } @@ -432,7 +433,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i64.i64(* [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vle64_v_u64m4(const uint64_t *base, size_t vl) { +vuint64m4_t test_vle64_v_u64m4 (const uint64_t *base, size_t vl) { return vle64_v_u64m4(base, vl); } @@ -442,17 +443,77 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i64.i64(* [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vle64_v_u64m8(const uint64_t *base, size_t vl) { +vuint64m8_t test_vle64_v_u64m8 (const uint64_t *base, size_t vl) { return vle64_v_u64m8(base, vl); } +// CHECK-RV64-LABEL: @test_vle16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1f16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vle16_v_f16mf4 (const _Float16 *base, 
size_t vl) { + return vle16_v_f16mf4(base, vl); +} + +// CHECK-RV64-LABEL: @test_vle16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2f16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vle16_v_f16mf2 (const _Float16 *base, size_t vl) { + return vle16_v_f16mf2(base, vl); +} + +// CHECK-RV64-LABEL: @test_vle16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4f16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vle16_v_f16m1 (const _Float16 *base, size_t vl) { + return vle16_v_f16m1(base, vl); +} + +// CHECK-RV64-LABEL: @test_vle16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8f16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vle16_v_f16m2 (const _Float16 *base, size_t vl) { + return vle16_v_f16m2(base, vl); +} + +// CHECK-RV64-LABEL: @test_vle16_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16f16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vle16_v_f16m4 (const _Float16 *base, size_t vl) { + return vle16_v_f16m4(base, vl); +} + +// CHECK-RV64-LABEL: @test_vle16_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv32f16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m8_t test_vle16_v_f16m8 (const _Float16 *base, size_t vl) { + return vle16_v_f16m8(base, vl); +} + // CHECK-RV64-LABEL: @test_vle32_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1f32.i64(* [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32mf2_t test_vle32_v_f32mf2(const float *base, size_t vl) { +vfloat32mf2_t test_vle32_v_f32mf2 (const float *base, size_t vl) { return vle32_v_f32mf2(base, vl); } @@ -462,7 +523,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2f32.i64(* [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m1_t test_vle32_v_f32m1(const float *base, size_t vl) { +vfloat32m1_t test_vle32_v_f32m1 (const float *base, size_t vl) { return vle32_v_f32m1(base, vl); } @@ -472,7 +533,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4f32.i64(* [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m2_t test_vle32_v_f32m2(const float *base, size_t vl) { +vfloat32m2_t test_vle32_v_f32m2 (const float *base, size_t vl) { return vle32_v_f32m2(base, vl); } @@ -482,7 +543,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8f32.i64(* [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m4_t test_vle32_v_f32m4(const float *base, size_t vl) { +vfloat32m4_t test_vle32_v_f32m4 (const float *base, size_t vl) { return vle32_v_f32m4(base, vl); } @@ -492,7 +553,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16f32.i64(* [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m8_t test_vle32_v_f32m8(const 
float *base, size_t vl) { +vfloat32m8_t test_vle32_v_f32m8 (const float *base, size_t vl) { return vle32_v_f32m8(base, vl); } @@ -502,7 +563,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1f64.i64(* [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m1_t test_vle64_v_f64m1(const double *base, size_t vl) { +vfloat64m1_t test_vle64_v_f64m1 (const double *base, size_t vl) { return vle64_v_f64m1(base, vl); } @@ -512,7 +573,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2f64.i64(* [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m2_t test_vle64_v_f64m2(const double *base, size_t vl) { +vfloat64m2_t test_vle64_v_f64m2 (const double *base, size_t vl) { return vle64_v_f64m2(base, vl); } @@ -522,7 +583,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4f64.i64(* [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m4_t test_vle64_v_f64m4(const double *base, size_t vl) { +vfloat64m4_t test_vle64_v_f64m4 (const double *base, size_t vl) { return vle64_v_f64m4(base, vl); } @@ -532,666 +593,1187 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8f64.i64(* [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m8_t test_vle64_v_f64m8(const double *base, size_t vl) { +vfloat64m8_t test_vle64_v_f64m8 (const double *base, size_t vl) { return vle64_v_f64m8(base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf8_t test_vle8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl) { +vint8mf8_t test_vle8_v_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl) { return vle8_v_i8mf8_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf4_t test_vle8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t vl) { +vint8mf4_t test_vle8_v_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t vl) { return vle8_v_i8mf4_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf2_t test_vle8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t vl) { +vint8mf2_t test_vle8_v_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t vl) { return vle8_v_i8mf2_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: 
@test_vle8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m1_t test_vle8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t vl) { +vint8m1_t test_vle8_v_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t vl) { return vle8_v_i8m1_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m2_t test_vle8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t vl) { +vint8m2_t test_vle8_v_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t vl) { return vle8_v_i8m2_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m4_t test_vle8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t vl) { +vint8m4_t test_vle8_v_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t vl) { return vle8_v_i8m4_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m8_t test_vle8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t vl) { +vint8m8_t test_vle8_v_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t vl) { return vle8_v_i8m8_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf4_t test_vle16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t vl) { +vint16mf4_t test_vle16_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t vl) { return vle16_v_i16mf4_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16mf2_m( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf2_t test_vle16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t vl) { +vint16mf2_t test_vle16_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t vl) { return vle16_v_i16mf2_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m1_t test_vle16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t vl) { +vint16m1_t test_vle16_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t vl) { return vle16_v_i16m1_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m2_t test_vle16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t vl) { +vint16m2_t test_vle16_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t vl) { return vle16_v_i16m2_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m4_t test_vle16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t vl) { +vint16m4_t test_vle16_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t vl) { return vle16_v_i16m4_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m8_t test_vle16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t vl) { +vint16m8_t test_vle16_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t vl) { return vle16_v_i16m8_m(mask, maskedoff, base, vl); } // 
CHECK-RV64-LABEL: @test_vle32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vle32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t vl) { +vint32mf2_t test_vle32_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t vl) { return vle32_v_i32mf2_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vle32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t vl) { +vint32m1_t test_vle32_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t vl) { return vle32_v_i32m1_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vle32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t vl) { +vint32m2_t test_vle32_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t vl) { return vle32_v_i32m2_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vle32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t vl) { +vint32m4_t test_vle32_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t vl) { return vle32_v_i32m4_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vle32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t vl) { +vint32m8_t test_vle32_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t vl) { return 
vle32_v_i32m8_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vle64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t vl) { +vint64m1_t test_vle64_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t vl) { return vle64_v_i64m1_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vle64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t vl) { +vint64m2_t test_vle64_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t vl) { return vle64_v_i64m2_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vle64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t vl) { +vint64m4_t test_vle64_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t vl) { return vle64_v_i64m4_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vle64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t vl) { +vint64m8_t test_vle64_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t vl) { return vle64_v_i64m8_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf8_t test_vle8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t vl) { +vuint8mf8_t test_vle8_v_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t 
*base, size_t vl) { return vle8_v_u8mf8_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf4_t test_vle8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t vl) { +vuint8mf4_t test_vle8_v_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t vl) { return vle8_v_u8mf4_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf2_t test_vle8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t vl) { +vuint8mf2_t test_vle8_v_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t vl) { return vle8_v_u8mf2_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m1_t test_vle8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t vl) { +vuint8m1_t test_vle8_v_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t vl) { return vle8_v_u8m1_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m2_t test_vle8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t vl) { +vuint8m2_t test_vle8_v_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t vl) { return vle8_v_u8m2_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m4_t test_vle8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t vl) { +vuint8m4_t test_vle8_v_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t 
vl) { return vle8_v_u8m4_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m8_t test_vle8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t vl) { +vuint8m8_t test_vle8_v_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t vl) { return vle8_v_u8m8_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf4_t test_vle16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t vl) { +vuint16mf4_t test_vle16_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t vl) { return vle16_v_u16mf4_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf2_t test_vle16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t vl) { +vuint16mf2_t test_vle16_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t vl) { return vle16_v_u16mf2_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m1_t test_vle16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t vl) { +vuint16m1_t test_vle16_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t vl) { return vle16_v_u16m1_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m2_t test_vle16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t vl) { +vuint16m2_t test_vle16_v_u16m2_m (vbool8_t mask, 
vuint16m2_t maskedoff, const uint16_t *base, size_t vl) { return vle16_v_u16m2_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m4_t test_vle16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t vl) { +vuint16m4_t test_vle16_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t vl) { return vle16_v_u16m4_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m8_t test_vle16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t vl) { +vuint16m8_t test_vle16_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t vl) { return vle16_v_u16m8_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vle32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t vl) { +vuint32mf2_t test_vle32_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t vl) { return vle32_v_u32mf2_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vle32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t vl) { +vuint32m1_t test_vle32_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t vl) { return vle32_v_u32m1_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vle32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, 
size_t vl) { +vuint32m2_t test_vle32_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t vl) { return vle32_v_u32m2_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vle32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t vl) { +vuint32m4_t test_vle32_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t vl) { return vle32_v_u32m4_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vle32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t vl) { +vuint32m8_t test_vle32_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t vl) { return vle32_v_u32m8_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vle64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t vl) { +vuint64m1_t test_vle64_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t vl) { return vle64_v_u64m1_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vle64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t vl) { +vuint64m2_t test_vle64_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t vl) { return vle64_v_u64m2_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t 
test_vle64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t vl) { +vuint64m4_t test_vle64_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t vl) { return vle64_v_u64m4_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vle64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t vl) { +vuint64m8_t test_vle64_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t vl) { return vle64_v_u64m8_m(mask, maskedoff, base, vl); } +// CHECK-RV64-LABEL: @test_vle16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vle16_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, size_t vl) { + return vle16_v_f16mf4_m(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: @test_vle16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vle16_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, size_t vl) { + return vle16_v_f16mf2_m(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: @test_vle16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vle16_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, size_t vl) { + return vle16_v_f16m1_m(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: @test_vle16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vle16_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, size_t vl) { + return vle16_v_f16m2_m(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: @test_vle16_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vle16_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, size_t vl) { + return vle16_v_f16m4_m(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: @test_vle16_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m8_t test_vle16_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, size_t vl) { + return vle16_v_f16m8_m(mask, maskedoff, base, vl); +} + // CHECK-RV64-LABEL: @test_vle32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32mf2_t test_vle32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t vl) { +vfloat32mf2_t test_vle32_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t vl) { return vle32_v_f32mf2_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m1_t test_vle32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t vl) { +vfloat32m1_t test_vle32_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t vl) { return vle32_v_f32m1_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m2_t test_vle32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t vl) { +vfloat32m2_t test_vle32_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t vl) { return vle32_v_f32m2_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m4_t test_vle32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t vl) { +vfloat32m4_t test_vle32_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t vl) { return vle32_v_f32m4_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m8_t test_vle32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t vl) { +vfloat32m8_t test_vle32_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t vl) { return vle32_v_f32m8_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m1_t test_vle64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t vl) { +vfloat64m1_t test_vle64_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t vl) { return vle64_v_f64m1_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m2_t test_vle64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t vl) { +vfloat64m2_t test_vle64_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t vl) { return vle64_v_f64m2_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m4_t test_vle64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t vl) { +vfloat64m4_t test_vle64_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t vl) { return vle64_v_f64m4_m(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m8_t test_vle64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t vl) { +vfloat64m8_t test_vle64_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t vl) { return vle64_v_f64m8_m(mask, maskedoff, base, vl); } -// CHECK-RV64-LABEL: @test_vle1_v_b1( +// CHECK-RV64-LABEL: @test_vle8_v_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf8_t test_vle8_v_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl, uint8_t ta) { + return vle8_v_i8mf8_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle8_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf4_t test_vle8_v_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t vl, uint8_t ta) { + return vle8_v_i8mf4_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle1.nxv64i1.i64(* [[TMP0]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vbool1_t test_vle1_v_b1(const uint8_t *base, size_t vl) { - return vle1_v_b1(base, vl); +vint8mf2_t test_vle8_v_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t vl, uint8_t ta) { + return vle8_v_i8mf2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle1_v_b2( +// CHECK-RV64-LABEL: @test_vle8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle1.nxv32i1.i64(* [[TMP0]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vbool2_t test_vle1_v_b2(const uint8_t *base, size_t vl) { - return vle1_v_b2(base, vl); +vint8m1_t test_vle8_v_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t vl, uint8_t ta) { + return vle8_v_i8m1_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle1_v_b4( +// CHECK-RV64-LABEL: @test_vle8_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle1.nxv16i1.i64(* [[TMP0]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vbool4_t test_vle1_v_b4(const uint8_t *base, size_t vl) { - return vle1_v_b4(base, vl); +vint8m2_t test_vle8_v_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t vl, uint8_t ta) { + return vle8_v_i8m2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle1_v_b8( +// CHECK-RV64-LABEL: @test_vle8_v_i8m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vle1.nxv8i1.i64(* [[TMP0]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vbool8_t test_vle1_v_b8(const uint8_t *base, size_t vl) { - return vle1_v_b8(base, vl); +vint8m4_t test_vle8_v_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t vl, uint8_t ta) { + return vle8_v_i8m4_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle1_v_b16( +// CHECK-RV64-LABEL: @test_vle8_v_i8m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle1.nxv4i1.i64(* [[TMP0]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vbool16_t test_vle1_v_b16(const uint8_t *base, size_t vl) { - return vle1_v_b16(base, vl); +vint8m8_t test_vle8_v_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t vl, uint8_t ta) { + return vle8_v_i8m8_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle1_v_b32( +// CHECK-RV64-LABEL: @test_vle16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle1.nxv2i1.i64(* [[TMP0]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vbool32_t test_vle1_v_b32(const uint8_t *base, size_t vl) { - return vle1_v_b32(base, vl); +vint16mf4_t test_vle16_v_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t vl, uint8_t ta) { + return vle16_v_i16mf4_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle1_v_b64( +// CHECK-RV64-LABEL: @test_vle16_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle1.nxv1i1.i64(* [[TMP0]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vbool64_t test_vle1_v_b64(const uint8_t *base, size_t vl) { - return vle1_v_b64(base, vl); +vint16mf2_t test_vle16_v_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t vl, uint8_t ta) { + return vle16_v_i16mf2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle16_v_f16mf4( +// CHECK-RV64-LABEL: @test_vle16_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m1_t test_vle16_v_i16m1_mt (vbool16_t mask, 
vint16m1_t maskedoff, const int16_t *base, size_t vl, uint8_t ta) { + return vle16_v_i16m1_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle16_v_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m2_t test_vle16_v_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t vl, uint8_t ta) { + return vle16_v_i16m2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle16_v_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m4_t test_vle16_v_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t vl, uint8_t ta) { + return vle16_v_i16m4_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle16_v_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m8_t test_vle16_v_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t vl, uint8_t ta) { + return vle16_v_i16m8_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32mf2_t test_vle32_v_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t vl, uint8_t ta) { + return vle32_v_i32mf2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m1_t test_vle32_v_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t vl, uint8_t ta) { + return vle32_v_i32m1_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32_v_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m2_t test_vle32_v_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t vl, uint8_t ta) { + return vle32_v_i32m2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32_v_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vle32_v_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t vl, uint8_t ta) { + return vle32_v_i32m4_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32_v_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m8_t test_vle32_v_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t vl, uint8_t ta) { + return vle32_v_i32m8_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle64_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m1_t test_vle64_v_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t vl, uint8_t ta) { + return vle64_v_i64m1_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle64_v_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m2_t test_vle64_v_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t vl, uint8_t ta) { + return vle64_v_i64m2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle64_v_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m4_t test_vle64_v_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t vl, uint8_t ta) { + return vle64_v_i64m4_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle64_v_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m8_t test_vle64_v_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t vl, uint8_t ta) { + return vle64_v_i64m8_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle8_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf8_t test_vle8_v_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t vl, uint8_t ta) { + return vle8_v_u8mf8_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle8_v_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( 
[[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf4_t test_vle8_v_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t vl, uint8_t ta) { + return vle8_v_u8mf4_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle8_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf2_t test_vle8_v_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t vl, uint8_t ta) { + return vle8_v_u8mf2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle8_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m1_t test_vle8_v_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t vl, uint8_t ta) { + return vle8_v_u8m1_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle8_v_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m2_t test_vle8_v_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t vl, uint8_t ta) { + return vle8_v_u8m2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle8_v_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m4_t test_vle8_v_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t vl, uint8_t ta) { + return vle8_v_u8m4_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle8_v_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m8_t test_vle8_v_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t vl, uint8_t ta) { + return vle8_v_u8m8_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle16_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf4_t test_vle16_v_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t vl, uint8_t ta) { + return vle16_v_u16mf4_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle16_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf2_t test_vle16_v_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t vl, uint8_t ta) { + return vle16_v_u16mf2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle16_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m1_t test_vle16_v_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t vl, uint8_t ta) { + return vle16_v_u16m1_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle16_v_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m2_t test_vle16_v_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t vl, uint8_t ta) { + return vle16_v_u16m2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle16_v_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m4_t test_vle16_v_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t vl, uint8_t ta) { + return vle16_v_u16m4_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle16_v_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m8_t test_vle16_v_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t vl, uint8_t ta) { + return vle16_v_u16m8_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32mf2_t test_vle32_v_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t vl, uint8_t ta) { + return vle32_v_u32mf2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m1_t test_vle32_v_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t vl, uint8_t ta) { + return vle32_v_u32m1_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32_v_u32m2_mt( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m2_t test_vle32_v_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t vl, uint8_t ta) { + return vle32_v_u32m2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32_v_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m4_t test_vle32_v_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t vl, uint8_t ta) { + return vle32_v_u32m4_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32_v_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m8_t test_vle32_v_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t vl, uint8_t ta) { + return vle32_v_u32m8_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle64_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m1_t test_vle64_v_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t vl, uint8_t ta) { + return vle64_v_u64m1_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle64_v_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m2_t test_vle64_v_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t vl, uint8_t ta) { + return vle64_v_u64m2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle64_v_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m4_t test_vle64_v_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t vl, uint8_t ta) { + return vle64_v_u64m4_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle64_v_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m8_t test_vle64_v_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t vl, uint8_t ta) { + return vle64_v_u64m8_mt(mask, 
maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle16_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1f16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf4_t test_vle16_v_f16mf4(const _Float16 *base, size_t vl) { - return vle16_v_f16mf4(base, vl); +vfloat16mf4_t test_vle16_v_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, size_t vl, uint8_t ta) { + return vle16_v_f16mf4_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vle16_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2f16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf2_t test_vle16_v_f16mf2(const _Float16 *base, size_t vl) { - return vle16_v_f16mf2(base, vl); +vfloat16mf2_t test_vle16_v_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, size_t vl, uint8_t ta) { + return vle16_v_f16mf2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle16_v_f16m1( +// CHECK-RV64-LABEL: @test_vle16_v_f16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4f16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m1_t test_vle16_v_f16m1(const _Float16 *base, size_t vl) { - return vle16_v_f16m1(base, vl); +vfloat16m1_t test_vle16_v_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, size_t vl, uint8_t ta) { + return vle16_v_f16m1_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle16_v_f16m2( +// CHECK-RV64-LABEL: @test_vle16_v_f16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8f16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m2_t test_vle16_v_f16m2(const _Float16 *base, size_t vl) { - return vle16_v_f16m2(base, vl); +vfloat16m2_t test_vle16_v_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, size_t vl, uint8_t ta) { + return vle16_v_f16m2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle16_v_f16m4( +// CHECK-RV64-LABEL: @test_vle16_v_f16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16f16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m4_t test_vle16_v_f16m4(const 
_Float16 *base, size_t vl) { - return vle16_v_f16m4(base, vl); +vfloat16m4_t test_vle16_v_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, size_t vl, uint8_t ta) { + return vle16_v_f16m4_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle16_v_f16m8( +// CHECK-RV64-LABEL: @test_vle16_v_f16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv32f16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m8_t test_vle16_v_f16m8(const _Float16 *base, size_t vl) { - return vle16_v_f16m8(base, vl); +vfloat16m8_t test_vle16_v_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, size_t vl, uint8_t ta) { + return vle16_v_f16m8_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32mf2_t test_vle32_v_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t vl, uint8_t ta) { + return vle32_v_f32mf2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vle32_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m1_t test_vle32_v_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t vl, uint8_t ta) { + return vle32_v_f32m1_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32_v_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m2_t test_vle32_v_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t vl, uint8_t ta) { + return vle32_v_f32m2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32_v_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m4_t test_vle32_v_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t vl, uint8_t ta) { + return vle32_v_f32m4_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32_v_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m8_t test_vle32_v_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t vl, uint8_t 
ta) {
+  return vle32_v_f32m8_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle64_v_f64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vle.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vle64_v_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t vl, uint8_t ta) {
+  return vle64_v_f64m1_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle64_v_f64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vle.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vle64_v_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t vl, uint8_t ta) {
+  return vle64_v_f64m2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle64_v_f64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vle.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vle64_v_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t vl, uint8_t ta) {
+  return vle64_v_f64m4_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle64_v_f64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vle.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vle64_v_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t vl, uint8_t ta) {
+  return vle64_v_f64m8_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vse.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vse.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vse.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vse.c
@@ -2,7 +2,8 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
 // RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
-// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg \
+// RUN:   | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
@@ -12,7 +13,7 @@
 // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vse8_v_i8mf8(int8_t *base, vint8mf8_t value, size_t vl) {
+void test_vse8_v_i8mf8 (int8_t *base, vint8mf8_t value, size_t vl) {
   return vse8_v_i8mf8(base, value, vl);
 }
 
@@ -22,7 +23,7 @@
 // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vse8_v_i8mf4(int8_t *base, vint8mf4_t value, size_t vl) {
+void test_vse8_v_i8mf4 (int8_t *base, vint8mf4_t value, size_t vl) {
   return vse8_v_i8mf4(base, value, vl);
 }
 
@@ -32,7 +33,7 @@
 // CHECK-RV64-NEXT: call void 
@llvm.riscv.vse.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8mf2(int8_t *base, vint8mf2_t value, size_t vl) { +void test_vse8_v_i8mf2 (int8_t *base, vint8mf2_t value, size_t vl) { return vse8_v_i8mf2(base, value, vl); } @@ -42,7 +43,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8m1(int8_t *base, vint8m1_t value, size_t vl) { +void test_vse8_v_i8m1 (int8_t *base, vint8m1_t value, size_t vl) { return vse8_v_i8m1(base, value, vl); } @@ -52,7 +53,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8m2(int8_t *base, vint8m2_t value, size_t vl) { +void test_vse8_v_i8m2 (int8_t *base, vint8m2_t value, size_t vl) { return vse8_v_i8m2(base, value, vl); } @@ -62,7 +63,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8m4(int8_t *base, vint8m4_t value, size_t vl) { +void test_vse8_v_i8m4 (int8_t *base, vint8m4_t value, size_t vl) { return vse8_v_i8m4(base, value, vl); } @@ -72,7 +73,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8m8(int8_t *base, vint8m8_t value, size_t vl) { +void test_vse8_v_i8m8 (int8_t *base, vint8m8_t value, size_t vl) { return vse8_v_i8m8(base, value, vl); } @@ -82,7 +83,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_i16mf4(int16_t *base, vint16mf4_t value, size_t vl) { +void test_vse16_v_i16mf4 (int16_t *base, vint16mf4_t value, size_t vl) { return vse16_v_i16mf4(base, value, vl); } @@ -92,7 +93,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_i16mf2(int16_t *base, vint16mf2_t value, size_t vl) { +void test_vse16_v_i16mf2 (int16_t *base, vint16mf2_t value, size_t vl) { return vse16_v_i16mf2(base, value, vl); } @@ -102,7 +103,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_i16m1(int16_t *base, vint16m1_t value, size_t vl) { +void test_vse16_v_i16m1 (int16_t *base, vint16m1_t value, size_t vl) { return vse16_v_i16m1(base, value, vl); } @@ -112,7 +113,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_i16m2(int16_t *base, vint16m2_t value, size_t vl) { +void test_vse16_v_i16m2 (int16_t *base, vint16m2_t value, size_t vl) { return vse16_v_i16m2(base, value, vl); } @@ -122,7 +123,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_i16m4(int16_t *base, vint16m4_t value, size_t vl) { +void test_vse16_v_i16m4 (int16_t *base, vint16m4_t value, size_t vl) { return vse16_v_i16m4(base, value, vl); } @@ -132,7 +133,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_i16m8(int16_t *base, vint16m8_t value, size_t vl) { +void test_vse16_v_i16m8 (int16_t *base, 
vint16m8_t value, size_t vl) { return vse16_v_i16m8(base, value, vl); } @@ -142,7 +143,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_i32mf2(int32_t *base, vint32mf2_t value, size_t vl) { +void test_vse32_v_i32mf2 (int32_t *base, vint32mf2_t value, size_t vl) { return vse32_v_i32mf2(base, value, vl); } @@ -152,7 +153,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_i32m1(int32_t *base, vint32m1_t value, size_t vl) { +void test_vse32_v_i32m1 (int32_t *base, vint32m1_t value, size_t vl) { return vse32_v_i32m1(base, value, vl); } @@ -162,7 +163,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_i32m2(int32_t *base, vint32m2_t value, size_t vl) { +void test_vse32_v_i32m2 (int32_t *base, vint32m2_t value, size_t vl) { return vse32_v_i32m2(base, value, vl); } @@ -172,7 +173,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_i32m4(int32_t *base, vint32m4_t value, size_t vl) { +void test_vse32_v_i32m4 (int32_t *base, vint32m4_t value, size_t vl) { return vse32_v_i32m4(base, value, vl); } @@ -182,7 +183,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_i32m8(int32_t *base, vint32m8_t value, size_t vl) { +void test_vse32_v_i32m8 (int32_t *base, vint32m8_t value, size_t vl) { return vse32_v_i32m8(base, value, vl); } @@ -192,7 +193,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_i64m1(int64_t *base, vint64m1_t value, size_t vl) { +void test_vse64_v_i64m1 (int64_t *base, vint64m1_t value, size_t vl) { return vse64_v_i64m1(base, value, vl); } @@ -202,7 +203,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_i64m2(int64_t *base, vint64m2_t value, size_t vl) { +void test_vse64_v_i64m2 (int64_t *base, vint64m2_t value, size_t vl) { return vse64_v_i64m2(base, value, vl); } @@ -212,7 +213,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_i64m4(int64_t *base, vint64m4_t value, size_t vl) { +void test_vse64_v_i64m4 (int64_t *base, vint64m4_t value, size_t vl) { return vse64_v_i64m4(base, value, vl); } @@ -222,7 +223,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_i64m8(int64_t *base, vint64m8_t value, size_t vl) { +void test_vse64_v_i64m8 (int64_t *base, vint64m8_t value, size_t vl) { return vse64_v_i64m8(base, value, vl); } @@ -232,7 +233,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8mf8(uint8_t *base, vuint8mf8_t value, size_t vl) { +void test_vse8_v_u8mf8 (uint8_t *base, vuint8mf8_t value, size_t vl) { return vse8_v_u8mf8(base, value, vl); } @@ -242,7 +243,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i8.i64( [[VALUE:%.*]], * 
[[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8mf4(uint8_t *base, vuint8mf4_t value, size_t vl) { +void test_vse8_v_u8mf4 (uint8_t *base, vuint8mf4_t value, size_t vl) { return vse8_v_u8mf4(base, value, vl); } @@ -252,7 +253,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8mf2(uint8_t *base, vuint8mf2_t value, size_t vl) { +void test_vse8_v_u8mf2 (uint8_t *base, vuint8mf2_t value, size_t vl) { return vse8_v_u8mf2(base, value, vl); } @@ -262,7 +263,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8m1(uint8_t *base, vuint8m1_t value, size_t vl) { +void test_vse8_v_u8m1 (uint8_t *base, vuint8m1_t value, size_t vl) { return vse8_v_u8m1(base, value, vl); } @@ -272,7 +273,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8m2(uint8_t *base, vuint8m2_t value, size_t vl) { +void test_vse8_v_u8m2 (uint8_t *base, vuint8m2_t value, size_t vl) { return vse8_v_u8m2(base, value, vl); } @@ -282,7 +283,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8m4(uint8_t *base, vuint8m4_t value, size_t vl) { +void test_vse8_v_u8m4 (uint8_t *base, vuint8m4_t value, size_t vl) { return vse8_v_u8m4(base, value, vl); } @@ -292,7 +293,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8m8(uint8_t *base, vuint8m8_t value, size_t vl) { +void test_vse8_v_u8m8 (uint8_t *base, vuint8m8_t value, size_t vl) { return vse8_v_u8m8(base, value, vl); } @@ -302,7 +303,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_u16mf4(uint16_t *base, vuint16mf4_t value, size_t vl) { +void test_vse16_v_u16mf4 (uint16_t *base, vuint16mf4_t value, size_t vl) { return vse16_v_u16mf4(base, value, vl); } @@ -312,7 +313,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_u16mf2(uint16_t *base, vuint16mf2_t value, size_t vl) { +void test_vse16_v_u16mf2 (uint16_t *base, vuint16mf2_t value, size_t vl) { return vse16_v_u16mf2(base, value, vl); } @@ -322,7 +323,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_u16m1(uint16_t *base, vuint16m1_t value, size_t vl) { +void test_vse16_v_u16m1 (uint16_t *base, vuint16m1_t value, size_t vl) { return vse16_v_u16m1(base, value, vl); } @@ -332,7 +333,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_u16m2(uint16_t *base, vuint16m2_t value, size_t vl) { +void test_vse16_v_u16m2 (uint16_t *base, vuint16m2_t value, size_t vl) { return vse16_v_u16m2(base, value, vl); } @@ -342,7 +343,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_u16m4(uint16_t *base, vuint16m4_t value, size_t vl) { +void test_vse16_v_u16m4 (uint16_t *base, 
vuint16m4_t value, size_t vl) { return vse16_v_u16m4(base, value, vl); } @@ -352,7 +353,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_u16m8(uint16_t *base, vuint16m8_t value, size_t vl) { +void test_vse16_v_u16m8 (uint16_t *base, vuint16m8_t value, size_t vl) { return vse16_v_u16m8(base, value, vl); } @@ -362,7 +363,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_u32mf2(uint32_t *base, vuint32mf2_t value, size_t vl) { +void test_vse32_v_u32mf2 (uint32_t *base, vuint32mf2_t value, size_t vl) { return vse32_v_u32mf2(base, value, vl); } @@ -372,7 +373,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_u32m1(uint32_t *base, vuint32m1_t value, size_t vl) { +void test_vse32_v_u32m1 (uint32_t *base, vuint32m1_t value, size_t vl) { return vse32_v_u32m1(base, value, vl); } @@ -382,7 +383,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_u32m2(uint32_t *base, vuint32m2_t value, size_t vl) { +void test_vse32_v_u32m2 (uint32_t *base, vuint32m2_t value, size_t vl) { return vse32_v_u32m2(base, value, vl); } @@ -392,7 +393,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_u32m4(uint32_t *base, vuint32m4_t value, size_t vl) { +void test_vse32_v_u32m4 (uint32_t *base, vuint32m4_t value, size_t vl) { return vse32_v_u32m4(base, value, vl); } @@ -402,7 +403,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_u32m8(uint32_t *base, vuint32m8_t value, size_t vl) { +void test_vse32_v_u32m8 (uint32_t *base, vuint32m8_t value, size_t vl) { return vse32_v_u32m8(base, value, vl); } @@ -412,7 +413,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_u64m1(uint64_t *base, vuint64m1_t value, size_t vl) { +void test_vse64_v_u64m1 (uint64_t *base, vuint64m1_t value, size_t vl) { return vse64_v_u64m1(base, value, vl); } @@ -422,7 +423,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_u64m2(uint64_t *base, vuint64m2_t value, size_t vl) { +void test_vse64_v_u64m2 (uint64_t *base, vuint64m2_t value, size_t vl) { return vse64_v_u64m2(base, value, vl); } @@ -432,7 +433,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_u64m4(uint64_t *base, vuint64m4_t value, size_t vl) { +void test_vse64_v_u64m4 (uint64_t *base, vuint64m4_t value, size_t vl) { return vse64_v_u64m4(base, value, vl); } @@ -442,17 +443,77 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_u64m8(uint64_t *base, vuint64m8_t value, size_t vl) { +void test_vse64_v_u64m8 (uint64_t *base, vuint64m8_t value, size_t vl) { return vse64_v_u64m8(base, value, vl); } +// CHECK-RV64-LABEL: @test_vse16_v_f16mf4( +// 
CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1f16.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_f16mf4 (_Float16 *base, vfloat16mf4_t value, size_t vl) {
+  return vse16_v_f16mf4(base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2f16.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_f16mf2 (_Float16 *base, vfloat16mf2_t value, size_t vl) {
+  return vse16_v_f16mf2(base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4f16.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_f16m1 (_Float16 *base, vfloat16m1_t value, size_t vl) {
+  return vse16_v_f16m1(base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8f16.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_f16m2 (_Float16 *base, vfloat16m2_t value, size_t vl) {
+  return vse16_v_f16m2(base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16f16.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_f16m4 (_Float16 *base, vfloat16m4_t value, size_t vl) {
+  return vse16_v_f16m4(base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32f16.i64(<vscale x 32 x half> [[VALUE:%.*]], <vscale x 32 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_f16m8 (_Float16 *base, vfloat16m8_t value, size_t vl) {
+  return vse16_v_f16m8(base, value, vl);
+}
+
 // CHECK-RV64-LABEL: @test_vse32_v_f32mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
 // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1f32.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vse32_v_f32mf2(float *base, vfloat32mf2_t value, size_t vl) {
+void test_vse32_v_f32mf2 (float *base, vfloat32mf2_t value, size_t vl) {
   return vse32_v_f32mf2(base, value, vl);
 }
 
@@ -462,7 +523,7 @@
 // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2f32.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vse32_v_f32m1(float *base, vfloat32m1_t value, size_t vl) {
+void test_vse32_v_f32m1 (float *base, vfloat32m1_t value, size_t vl) {
   return vse32_v_f32m1(base, value, vl);
 }
 
@@ -472,7 +533,7 @@
 // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4f32.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vse32_v_f32m2(float *base, vfloat32m2_t value, size_t vl) {
+void test_vse32_v_f32m2 (float *base, vfloat32m2_t value, size_t vl) {
   return vse32_v_f32m2(base, value, vl);
 }
 
@@ -482,7 +543,7 @@
 // CHECK-RV64-NEXT: call void 
@llvm.riscv.vse.nxv8f32.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse32_v_f32m4(float *base, vfloat32m4_t value, size_t vl) {
+void test_vse32_v_f32m4 (float *base, vfloat32m4_t value, size_t vl) {
   return vse32_v_f32m4(base, value, vl);
 }

@@ -492,7 +553,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.nxv16f32.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse32_v_f32m8(float *base, vfloat32m8_t value, size_t vl) {
+void test_vse32_v_f32m8 (float *base, vfloat32m8_t value, size_t vl) {
   return vse32_v_f32m8(base, value, vl);
 }

@@ -502,7 +563,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.nxv1f64.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse64_v_f64m1(double *base, vfloat64m1_t value, size_t vl) {
+void test_vse64_v_f64m1 (double *base, vfloat64m1_t value, size_t vl) {
   return vse64_v_f64m1(base, value, vl);
 }

@@ -512,7 +573,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.nxv2f64.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse64_v_f64m2(double *base, vfloat64m2_t value, size_t vl) {
+void test_vse64_v_f64m2 (double *base, vfloat64m2_t value, size_t vl) {
   return vse64_v_f64m2(base, value, vl);
 }

@@ -522,7 +583,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.nxv4f64.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse64_v_f64m4(double *base, vfloat64m4_t value, size_t vl) {
+void test_vse64_v_f64m4 (double *base, vfloat64m4_t value, size_t vl) {
   return vse64_v_f64m4(base, value, vl);
 }

@@ -532,7 +593,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.nxv8f64.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse64_v_f64m8(double *base, vfloat64m8_t value, size_t vl) {
+void test_vse64_v_f64m8 (double *base, vfloat64m8_t value, size_t vl) {
   return vse64_v_f64m8(base, value, vl);
 }

@@ -542,7 +603,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t value, size_t vl) {
+void test_vse8_v_i8mf8_m (vbool64_t mask, int8_t *base, vint8mf8_t value, size_t vl) {
   return vse8_v_i8mf8_m(mask, base, value, vl);
 }

@@ -552,7 +613,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t value, size_t vl) {
+void test_vse8_v_i8mf4_m (vbool32_t mask, int8_t *base, vint8mf4_t value, size_t vl) {
   return vse8_v_i8mf4_m(mask, base, value, vl);
 }

@@ -562,7 +623,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t value, size_t vl) {
+void test_vse8_v_i8mf2_m (vbool16_t mask, int8_t *base, vint8mf2_t value, size_t vl) {
   return vse8_v_i8mf2_m(mask, base, value, vl);
 }

@@ -572,7 +633,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t value, size_t vl) {
+void test_vse8_v_i8m1_m (vbool8_t mask, int8_t *base, vint8m1_t value, size_t vl) {
   return vse8_v_i8m1_m(mask, base, value, vl);
 }

@@ -582,7 +643,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse8_v_i8m2_m(vbool4_t mask, int8_t *base, vint8m2_t value, size_t vl) {
+void test_vse8_v_i8m2_m (vbool4_t mask, int8_t *base, vint8m2_t value, size_t vl) {
   return vse8_v_i8m2_m(mask, base, value, vl);
 }

@@ -592,7 +653,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse8_v_i8m4_m(vbool2_t mask, int8_t *base, vint8m4_t value, size_t vl) {
+void test_vse8_v_i8m4_m (vbool2_t mask, int8_t *base, vint8m4_t value, size_t vl) {
   return vse8_v_i8m4_m(mask, base, value, vl);
 }

@@ -602,7 +663,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse8_v_i8m8_m(vbool1_t mask, int8_t *base, vint8m8_t value, size_t vl) {
+void test_vse8_v_i8m8_m (vbool1_t mask, int8_t *base, vint8m8_t value, size_t vl) {
   return vse8_v_i8m8_m(mask, base, value, vl);
 }

@@ -612,7 +673,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t value, size_t vl) {
+void test_vse16_v_i16mf4_m (vbool64_t mask, int16_t *base, vint16mf4_t value, size_t vl) {
   return vse16_v_i16mf4_m(mask, base, value, vl);
 }

@@ -622,7 +683,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t value, size_t vl) {
+void test_vse16_v_i16mf2_m (vbool32_t mask, int16_t *base, vint16mf2_t value, size_t vl) {
   return vse16_v_i16mf2_m(mask, base, value, vl);
 }

@@ -632,7 +693,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t value, size_t vl) {
+void test_vse16_v_i16m1_m (vbool16_t mask, int16_t *base, vint16m1_t value, size_t vl) {
   return vse16_v_i16m1_m(mask, base, value, vl);
 }

@@ -642,7 +703,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse16_v_i16m2_m(vbool8_t mask, int16_t *base, vint16m2_t value, size_t vl) {
+void test_vse16_v_i16m2_m (vbool8_t mask, int16_t *base, vint16m2_t value, size_t vl) {
   return vse16_v_i16m2_m(mask, base, value, vl);
 }

@@ -652,7 +713,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse16_v_i16m4_m(vbool4_t mask, int16_t *base, vint16m4_t value, size_t vl) {
+void test_vse16_v_i16m4_m (vbool4_t mask, int16_t *base, vint16m4_t value, size_t vl) {
   return vse16_v_i16m4_m(mask, base, value, vl);
 }

@@ -662,7 +723,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse16_v_i16m8_m(vbool2_t mask, int16_t *base, vint16m8_t value, size_t vl) {
+void test_vse16_v_i16m8_m (vbool2_t mask, int16_t *base, vint16m8_t value, size_t vl) {
   return vse16_v_i16m8_m(mask, base, value, vl);
 }

@@ -672,7 +733,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t value, size_t vl) {
+void test_vse32_v_i32mf2_m (vbool64_t mask, int32_t *base, vint32mf2_t value, size_t vl) {
   return vse32_v_i32mf2_m(mask, base, value, vl);
 }

@@ -682,7 +743,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t value, size_t vl) {
+void test_vse32_v_i32m1_m (vbool32_t mask, int32_t *base, vint32m1_t value, size_t vl) {
   return vse32_v_i32m1_m(mask, base, value, vl);
 }

@@ -692,7 +753,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse32_v_i32m2_m(vbool16_t mask, int32_t *base, vint32m2_t value, size_t vl) {
+void test_vse32_v_i32m2_m (vbool16_t mask, int32_t *base, vint32m2_t value, size_t vl) {
   return vse32_v_i32m2_m(mask, base, value, vl);
 }

@@ -702,7 +763,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse32_v_i32m4_m(vbool8_t mask, int32_t *base, vint32m4_t value, size_t vl) {
+void test_vse32_v_i32m4_m (vbool8_t mask, int32_t *base, vint32m4_t value, size_t vl) {
   return vse32_v_i32m4_m(mask, base, value, vl);
 }

@@ -712,7 +773,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse32_v_i32m8_m(vbool4_t mask, int32_t *base, vint32m8_t value, size_t vl) {
+void test_vse32_v_i32m8_m (vbool4_t mask, int32_t *base, vint32m8_t value, size_t vl) {
   return vse32_v_i32m8_m(mask, base, value, vl);
 }

@@ -722,7 +783,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t value, size_t vl) {
+void test_vse64_v_i64m1_m (vbool64_t mask, int64_t *base, vint64m1_t value, size_t vl) {
   return vse64_v_i64m1_m(mask, base, value, vl);
 }

@@ -732,7 +793,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse64_v_i64m2_m(vbool32_t mask, int64_t *base, vint64m2_t value, size_t vl) {
+void test_vse64_v_i64m2_m (vbool32_t mask, int64_t *base, vint64m2_t value, size_t vl) {
   return vse64_v_i64m2_m(mask, base, value, vl);
 }

@@ -742,7 +803,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse64_v_i64m4_m(vbool16_t mask, int64_t *base, vint64m4_t value, size_t vl) {
+void test_vse64_v_i64m4_m (vbool16_t mask, int64_t *base, vint64m4_t value, size_t vl) {
   return vse64_v_i64m4_m(mask, base, value, vl);
 }

@@ -752,7 +813,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse64_v_i64m8_m(vbool8_t mask, int64_t *base, vint64m8_t value, size_t vl) {
+void test_vse64_v_i64m8_m (vbool8_t mask, int64_t *base, vint64m8_t value, size_t vl) {
   return vse64_v_i64m8_m(mask, base, value, vl);
 }

@@ -762,7 +823,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t value, size_t vl) {
+void test_vse8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t value, size_t vl) {
   return vse8_v_u8mf8_m(mask, base, value, vl);
 }

@@ -772,7 +833,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t value, size_t vl) {
+void test_vse8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t value, size_t vl) {
   return vse8_v_u8mf4_m(mask, base, value, vl);
 }

@@ -782,7 +843,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t value, size_t vl) {
+void test_vse8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t value, size_t vl) {
   return vse8_v_u8mf2_m(mask, base, value, vl);
 }

@@ -792,7 +853,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t value, size_t vl) {
+void test_vse8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t value, size_t vl) {
   return vse8_v_u8m1_m(mask, base, value, vl);
 }

@@ -802,7 +863,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t value, size_t vl) {
+void test_vse8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t value, size_t vl) {
   return vse8_v_u8m2_m(mask, base, value, vl);
 }

@@ -812,7 +873,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t value, size_t vl) {
+void test_vse8_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint8m4_t value, size_t vl) {
   return vse8_v_u8m4_m(mask, base, value, vl);
 }

@@ -822,7 +883,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t value, size_t vl) {
+void test_vse8_v_u8m8_m (vbool1_t mask, uint8_t *base, vuint8m8_t value, size_t vl) {
   return vse8_v_u8m8_m(mask, base, value, vl);
 }

@@ -832,7 +893,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t value, size_t vl) {
+void test_vse16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t value, size_t vl) {
   return vse16_v_u16mf4_m(mask, base, value, vl);
 }

@@ -842,7 +903,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t value, size_t vl) {
+void test_vse16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t value, size_t vl) {
   return vse16_v_u16mf2_m(mask, base, value, vl);
 }

@@ -852,7 +913,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t value, size_t vl) {
+void test_vse16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t value, size_t vl) {
   return vse16_v_u16m1_m(mask, base, value, vl);
 }

@@ -862,7 +923,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t value, size_t vl) {
+void test_vse16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t value, size_t vl) {
   return vse16_v_u16m2_m(mask, base, value, vl);
 }

@@ -872,7 +933,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t value, size_t vl) {
+void test_vse16_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint16m4_t value, size_t vl) {
   return vse16_v_u16m4_m(mask, base, value, vl);
 }

@@ -882,7 +943,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t value, size_t vl) {
+void test_vse16_v_u16m8_m (vbool2_t mask, uint16_t *base, vuint16m8_t value, size_t vl) {
   return vse16_v_u16m8_m(mask, base, value, vl);
 }

@@ -892,7 +953,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t value, size_t vl) {
+void test_vse32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t value, size_t vl) {
   return vse32_v_u32mf2_m(mask, base, value, vl);
 }

@@ -902,7 +963,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t value, size_t vl) {
+void test_vse32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t value, size_t vl) {
   return vse32_v_u32m1_m(mask, base, value, vl);
 }

@@ -912,7 +973,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t value, size_t vl) {
+void test_vse32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t value, size_t vl) {
   return vse32_v_u32m2_m(mask, base, value, vl);
 }

@@ -922,7 +983,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t value, size_t vl) {
+void test_vse32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t value, size_t vl) {
   return vse32_v_u32m4_m(mask, base, value, vl);
 }

@@ -932,7 +993,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t value, size_t vl) {
+void test_vse32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t value, size_t vl) {
   return vse32_v_u32m8_m(mask, base, value, vl);
 }

@@ -942,7 +1003,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t value, size_t vl) {
+void test_vse64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t value, size_t vl) {
   return vse64_v_u64m1_m(mask, base, value, vl);
 }

@@ -952,7 +1013,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t value, size_t vl) {
+void test_vse64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t value, size_t vl) {
   return vse64_v_u64m2_m(mask, base, value, vl);
 }

@@ -962,7 +1023,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t value, size_t vl) {
+void test_vse64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t value, size_t vl) {
   return vse64_v_u64m4_m(mask, base, value, vl);
 }

@@ -972,17 +1033,77 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t value, size_t vl) {
+void test_vse64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t value, size_t vl) {
   return vse64_v_u64m8_m(mask, base, value, vl);
 }

+// CHECK-RV64-LABEL: @test_vse16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:   [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv1f16.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:   ret void
+//
+void test_vse16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t value, size_t vl) {
+  return vse16_v_f16mf4_m(mask, base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:   [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv2f16.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:   ret void
+//
+void test_vse16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t value, size_t vl) {
+  return vse16_v_f16mf2_m(mask, base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:   [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv4f16.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:   ret void
+//
+void test_vse16_v_f16m1_m (vbool16_t mask, _Float16 *base, vfloat16m1_t value, size_t vl) {
+  return vse16_v_f16m1_m(mask, base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:   [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv8f16.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:   ret void
+//
+void test_vse16_v_f16m2_m (vbool8_t mask, _Float16 *base, vfloat16m2_t value, size_t vl) {
+  return vse16_v_f16m2_m(mask, base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:   [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv16f16.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:   ret void
+//
+void test_vse16_v_f16m4_m (vbool4_t mask, _Float16 *base, vfloat16m4_t value, size_t vl) {
+  return vse16_v_f16m4_m(mask, base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:   [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv32f16.i64(<vscale x 32 x half> [[VALUE:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:   ret void
+//
+void test_vse16_v_f16m8_m (vbool2_t mask, _Float16 *base, vfloat16m8_t value, size_t vl) {
+  return vse16_v_f16m8_m(mask, base, value, vl);
+}
+
 // CHECK-RV64-LABEL: @test_vse32_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:   [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv1f32.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t value, size_t vl) {
+void test_vse32_v_f32mf2_m (vbool64_t mask, float *base, vfloat32mf2_t value, size_t vl) {
   return vse32_v_f32mf2_m(mask, base, value, vl);
 }

@@ -992,7 +1113,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv2f32.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t value, size_t vl) {
+void test_vse32_v_f32m1_m (vbool32_t mask, float *base, vfloat32m1_t value, size_t vl) {
   return vse32_v_f32m1_m(mask, base, value, vl);
 }

@@ -1002,7 +1123,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv4f32.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse32_v_f32m2_m(vbool16_t mask, float *base, vfloat32m2_t value, size_t vl) {
+void test_vse32_v_f32m2_m (vbool16_t mask, float *base, vfloat32m2_t value, size_t vl) {
   return vse32_v_f32m2_m(mask, base, value, vl);
 }

@@ -1012,7 +1133,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv8f32.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse32_v_f32m4_m(vbool8_t mask, float *base, vfloat32m4_t value, size_t vl) {
+void test_vse32_v_f32m4_m (vbool8_t mask, float *base, vfloat32m4_t value, size_t vl) {
   return vse32_v_f32m4_m(mask, base, value, vl);
 }

@@ -1022,7 +1143,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv16f32.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse32_v_f32m8_m(vbool4_t mask, float *base, vfloat32m8_t value, size_t vl) {
+void test_vse32_v_f32m8_m (vbool4_t mask, float *base, vfloat32m8_t value, size_t vl) {
   return vse32_v_f32m8_m(mask, base, value, vl);
 }

@@ -1032,7 +1153,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv1f64.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t value, size_t vl) {
+void test_vse64_v_f64m1_m (vbool64_t mask, double *base, vfloat64m1_t value, size_t vl) {
   return vse64_v_f64m1_m(mask, base, value, vl);
 }

@@ -1042,7 +1163,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv2f64.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse64_v_f64m2_m(vbool32_t mask, double *base, vfloat64m2_t value, size_t vl) {
+void test_vse64_v_f64m2_m (vbool32_t mask, double *base, vfloat64m2_t value, size_t vl) {
   return vse64_v_f64m2_m(mask, base, value, vl);
 }

@@ -1052,7 +1173,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv4f64.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse64_v_f64m4_m(vbool16_t mask, double *base, vfloat64m4_t value, size_t vl) {
+void test_vse64_v_f64m4_m (vbool16_t mask, double *base, vfloat64m4_t value, size_t vl) {
   return vse64_v_f64m4_m(mask, base, value, vl);
 }

@@ -1062,136 +1183,7 @@
 // CHECK-RV64-NEXT:   call void @llvm.riscv.vse.mask.nxv8f64.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:   ret void
 //
-void test_vse64_v_f64m8_m(vbool8_t mask, double *base, vfloat64m8_t value, size_t vl) {
+void test_vse64_v_f64m8_m (vbool8_t mask, double *base, vfloat64m8_t value, size_t vl) {
   return vse64_v_f64m8_m(mask, base, value, vl);
 }

-// CHECK-RV64-LABEL: @test_vse1_v_b1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:   [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i1>*
-// CHECK-RV64-NEXT:   call void @llvm.riscv.vse1.nxv64i1.i64(<vscale x 64 x i1> [[VALUE:%.*]], <vscale x 64 x i1>* [[TMP0]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:   ret void
-//
-void test_vse1_v_b1(uint8_t *base, vbool1_t value, size_t vl) {
-  return vse1_v_b1(base, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vse1_v_b2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:   [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i1>*
-// CHECK-RV64-NEXT:   call void @llvm.riscv.vse1.nxv32i1.i64(<vscale x 32 x i1> [[VALUE:%.*]], <vscale x 32 x i1>* [[TMP0]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:   ret void
-//
-void test_vse1_v_b2(uint8_t *base, vbool2_t value, size_t vl) {
-  return vse1_v_b2(base, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vse1_v_b4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:   [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i1>*
-// CHECK-RV64-NEXT:   call void @llvm.riscv.vse1.nxv16i1.i64(<vscale x 16 x i1> [[VALUE:%.*]], <vscale x 16 x i1>* [[TMP0]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:   ret void
-//
-void test_vse1_v_b4(uint8_t *base, vbool4_t value, size_t vl) {
-  return vse1_v_b4(base, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vse1_v_b8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:   [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i1>*
-// CHECK-RV64-NEXT:   call void @llvm.riscv.vse1.nxv8i1.i64(<vscale x 8 x i1> [[VALUE:%.*]], <vscale x 8 x i1>* [[TMP0]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:   ret void
-//
-void test_vse1_v_b8(uint8_t *base, vbool8_t value, size_t vl) {
-  return vse1_v_b8(base, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vse1_v_b16(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:   [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i1>*
-// CHECK-RV64-NEXT:   call void @llvm.riscv.vse1.nxv4i1.i64(<vscale x 4 x i1> [[VALUE:%.*]], <vscale x 4 x i1>* [[TMP0]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:   ret void
-//
-void test_vse1_v_b16(uint8_t *base, vbool16_t value, size_t vl) {
-  return vse1_v_b16(base, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vse1_v_b32(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:   [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i1>*
-// CHECK-RV64-NEXT:   call void @llvm.riscv.vse1.nxv2i1.i64(<vscale x 2 x i1> [[VALUE:%.*]], <vscale x 2 x i1>* [[TMP0]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:   ret void
-//
-void test_vse1_v_b32(uint8_t *base, vbool32_t value, size_t vl) {
-  return vse1_v_b32(base, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vse1_v_b64(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:   [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i1>*
-// CHECK-RV64-NEXT:   call void @llvm.riscv.vse1.nxv1i1.i64(<vscale x 1 x i1> [[VALUE:%.*]], <vscale x 1 x i1>* [[TMP0]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:   ret void
-//
-void test_vse1_v_b64(uint8_t *base, vbool64_t value, size_t vl) {
-  return vse1_v_b64(base, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vse16_v_f16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:   [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
-// CHECK-RV64-NEXT:   call void @llvm.riscv.vse.nxv1f16.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:   ret void
-//
-void test_vse16_v_f16mf4(_Float16 *base, vfloat16mf4_t value, size_t vl) {
-  return vse16_v_f16mf4(base, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vse16_v_f16mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:   [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
-// CHECK-RV64-NEXT:   call void @llvm.riscv.vse.nxv2f16.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:   ret void
-//
-void test_vse16_v_f16mf2(_Float16 *base, vfloat16mf2_t value, size_t vl) {
-  return vse16_v_f16mf2(base, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vse16_v_f16m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:   [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
-// CHECK-RV64-NEXT:   call void @llvm.riscv.vse.nxv4f16.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:   ret void
-//
-void test_vse16_v_f16m1(_Float16 *base, vfloat16m1_t value, size_t vl) {
-  return vse16_v_f16m1(base, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vse16_v_f16m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:   [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
-// CHECK-RV64-NEXT:   call void @llvm.riscv.vse.nxv8f16.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:   ret void
-//
-void test_vse16_v_f16m2(_Float16 *base, vfloat16m2_t value, size_t vl) {
-  return vse16_v_f16m2(base, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vse16_v_f16m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:   [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
-// CHECK-RV64-NEXT:   call void @llvm.riscv.vse.nxv16f16.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:   ret void
-//
-void test_vse16_v_f16m4(_Float16 *base, vfloat16m4_t value, size_t vl) {
-  return vse16_v_f16m4(base, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vse16_v_f16m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:   [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
-// CHECK-RV64-NEXT:   call void @llvm.riscv.vse.nxv32f16.i64(<vscale x 32 x half> [[VALUE:%.*]], <vscale x 32 x half>* [[TMP0]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:   ret void
-//
-void test_vse16_v_f16m8(_Float16 *base, vfloat16m8_t value, size_t vl) {
-  return vse16_v_f16m8(base, value, vl);
-}
diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -157,6 +157,7 @@
   bool IsMask;
   bool HasMaskedOffOperand;
   bool HasVL;
+  bool HasPolicy;
   bool HasNoMaskedOverloaded;
   bool HasAutoDef; // There is automatic definition in header
   std::string ManualCodegen;
@@ -172,7 +173,7 @@
   RVVIntrinsic(StringRef Name, StringRef Suffix, StringRef MangledName,
                StringRef MangledSuffix, StringRef IRName, bool HasSideEffects,
                bool IsMask, bool HasMaskedOffOperand, bool HasVL,
-               bool HasNoMaskedOverloaded, bool HasAutoDef,
+               bool HasPolicy, bool HasNoMaskedOverloaded, bool HasAutoDef,
                StringRef ManualCodegen, const RVVTypes &Types,
                const std::vector<int64_t> &IntrinsicTypes,
                StringRef RequiredExtension, unsigned NF);
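(Illustrative sketch, not part of the patch: with HasPolicy set, the masked
variant's name receives an extra trailing "t" ("_m" becomes "_mt") and its
prototype receives a trailing size_t tail-policy operand. Assuming a
hypothetical masked vadd whose operands are (mask, maskedoff, op1, op2, vl),
a direct call to the builtin would look like:

  vint8m1_t r = __builtin_rvv_vadd_vv_i8m1_mt(mask, maskedoff, op1, op2, vl,
                                              (size_t)VE_TAIL_AGNOSTIC);

The vadd_vv_i8m1 name and operand list are assumptions for illustration;
VE_TAIL_AGNOSTIC is the tail-policy constant the emitter below writes into the
generated header.)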
@@ -183,6 +184,7 @@
   bool hasSideEffects() const { return HasSideEffects; }
   bool hasMaskedOffOperand() const { return HasMaskedOffOperand; }
   bool hasVL() const { return HasVL; }
+  bool hasPolicy() const { return HasPolicy; }
   bool hasNoMaskedOverloaded() const { return HasNoMaskedOverloaded; }
   bool hasManualCodegen() const { return !ManualCodegen.empty(); }
   bool hasAutoDef() const { return HasAutoDef; }
@@ -199,6 +201,9 @@
   // init the RVVIntrinsic ID and IntrinsicTypes.
   void emitCodeGenSwitchBody(raw_ostream &o) const;

+  // Emit the define macros for mask intrinsics using _mt intrinsics.
+  void emitIntrinsicMaskMacro(raw_ostream &o) const;
+
   // Emit the macros for mapping C/C++ intrinsic function to builtin functions.
   void emitIntrinsicMacro(raw_ostream &o) const;

@@ -231,6 +236,8 @@
 private:
   /// Create all intrinsics and add them to \p Out
   void createRVVIntrinsics(std::vector<std::unique_ptr<RVVIntrinsic>> &Out);
+  /// Print HeaderCode in RVVHeader Record to \p OS
+  void createRVVHeaders(raw_ostream &OS);
   /// Compute output and input types by applying different config (basic type
   /// and LMUL with type transformers). It also record result of type in legal
   /// or illegal set to avoid compute the same config again. The result maybe
@@ -756,15 +763,15 @@
 RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
                            StringRef NewMangledName, StringRef MangledSuffix,
                            StringRef IRName, bool HasSideEffects, bool IsMask,
-                           bool HasMaskedOffOperand, bool HasVL,
+                           bool HasMaskedOffOperand, bool HasVL, bool HasPolicy,
                            bool HasNoMaskedOverloaded, bool HasAutoDef,
                            StringRef ManualCodegen, const RVVTypes &OutInTypes,
                            const std::vector<int64_t> &NewIntrinsicTypes,
                            StringRef RequiredExtension, unsigned NF)
     : IRName(IRName), HasSideEffects(HasSideEffects), IsMask(IsMask),
       HasMaskedOffOperand(HasMaskedOffOperand), HasVL(HasVL),
-      HasNoMaskedOverloaded(HasNoMaskedOverloaded), HasAutoDef(HasAutoDef),
-      ManualCodegen(ManualCodegen.str()), NF(NF) {
+      HasPolicy(HasPolicy), HasNoMaskedOverloaded(HasNoMaskedOverloaded),
+      HasAutoDef(HasAutoDef), ManualCodegen(ManualCodegen.str()), NF(NF) {

   // Init Name and MangledName
   Name = NewName.str();
@@ -778,6 +785,8 @@
     MangledName += "_" + MangledSuffix.str();
   if (IsMask) {
     Name += "_m";
+    if (HasPolicy)
+      Name += "t";
   }
   // Init RISC-V extensions
   for (const auto &T : OutInTypes) {
@@ -830,7 +839,10 @@
   if (isMask()) {
     if (hasVL()) {
-      OS << "  std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);\n";
+      if (hasPolicy())
+        OS << "  std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);\n";
+      else
+        OS << "  std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);\n";
     } else {
       OS << "  std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());\n";
     }
@@ -870,6 +882,24 @@
   OS << ")\n";
 }

+void RVVIntrinsic::emitIntrinsicMaskMacro(raw_ostream &OS) const {
+  OS << "#define " << getName().drop_back() << "(";
+  if (!InputTypes.empty()) {
+    ListSeparator LS;
+    for (unsigned i = 0, e = InputTypes.size() - 1; i != e; ++i)
+      OS << LS << "op" << i;
+  }
+  OS << ") \\\n";
+  OS << "__builtin_rvv_" << getName() << "(";
+  ListSeparator LS;
+  if (!InputTypes.empty()) {
+    for (unsigned i = 0, e = InputTypes.size() - 1; i != e; ++i)
+      OS << LS << "(" << InputTypes[i]->getTypeStr() << ")(op" << i << ")";
+  }
+  OS << LS << "(size_t)VE_TAIL_AGNOSTIC";
+  OS << ")\n";
+}
+
 void RVVIntrinsic::emitMangledFuncDef(raw_ostream &OS) const {
   OS << "__attribute__((clang_builtin_alias(";
   OS << "__builtin_rvv_" << getName() << ")))\n";
@@ -915,6 +945,8 @@
   OS << "extern \"C\" {\n";
   OS << "#endif\n\n";

+  createRVVHeaders(OS);
+
   std::vector<std::unique_ptr<RVVIntrinsic>> Defs;
   createRVVIntrinsics(Defs);
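(Illustrative sketch, continuing the hypothetical vadd_vv_i8m1_mt builtin from
above: emitIntrinsicMaskMacro forms the macro name with getName().drop_back(),
which strips the trailing "t", and substitutes VE_TAIL_AGNOSTIC for the last
input operand, the tail policy. It would therefore print a define along these
lines:

  #define vadd_vv_i8m1_m(op0, op1, op2, op3, op4) \
  __builtin_rvv_vadd_vv_i8m1_mt((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4), (size_t)VE_TAIL_AGNOSTIC)

This is how the plain _m intrinsics are implemented on top of the _mt ones,
with a tail-agnostic default policy.)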
@@ -982,6 +1014,12 @@
     Inst.emitIntrinsicMacro(OS);
   });

+  // Use _mt to implement _m intrinsics.
+  emitArchMacroAndBody(Defs, OS, [](raw_ostream &OS, const RVVIntrinsic &Inst) {
+    if (Inst.isMask() && Inst.hasPolicy())
+      Inst.emitIntrinsicMaskMacro(OS);
+  });
+
   OS << "#define __riscv_v_intrinsic_overloading 1\n";

   // Print Overloaded APIs
@@ -1084,6 +1122,7 @@
   bool HasMask = R->getValueAsBit("HasMask");
   bool HasMaskedOffOperand = R->getValueAsBit("HasMaskedOffOperand");
   bool HasVL = R->getValueAsBit("HasVL");
+  bool HasPolicy = R->getValueAsBit("HasPolicy");
   bool HasNoMaskedOverloaded = R->getValueAsBit("HasNoMaskedOverloaded");
   bool HasSideEffects = R->getValueAsBit("HasSideEffects");
   std::vector<int64_t> Log2LMULList = R->getValueAsListOfInts("Log2LMUL");
@@ -1144,6 +1183,10 @@
       ProtoMaskSeq.push_back("z");
     }

+    if (HasPolicy) {
+      ProtoMaskSeq.push_back("z");
+    }
+
   // Create Intrinsics for each type and LMUL.
   for (char I : TypeRange) {
     for (int Log2LMUL : Log2LMULList) {
@@ -1158,7 +1201,7 @@
         Out.push_back(std::make_unique<RVVIntrinsic>(
             Name, SuffixStr, MangledName, MangledSuffixStr, IRName,
             HasSideEffects, /*IsMask=*/false, /*HasMaskedOffOperand=*/false,
-            HasVL, HasNoMaskedOverloaded, HasAutoDef, ManualCodegen,
+            HasVL, HasPolicy, HasNoMaskedOverloaded, HasAutoDef, ManualCodegen,
            Types.getValue(), IntrinsicTypes, RequiredExtension, NF));
        if (HasMask) {
          // Create a mask intrinsic
@@ -1167,7 +1210,7 @@
          Out.push_back(std::make_unique<RVVIntrinsic>(
              Name, SuffixStr, MangledName, MangledSuffixStr, IRNameMask,
              HasSideEffects, /*IsMask=*/true, HasMaskedOffOperand, HasVL,
-             HasNoMaskedOverloaded, HasAutoDef, ManualCodegenMask,
+             HasPolicy, HasNoMaskedOverloaded, HasAutoDef, ManualCodegenMask,
              MaskTypes.getValue(), IntrinsicTypes, RequiredExtension, NF));
        }
      } // end for Log2LMULList
@@ -1175,6 +1218,15 @@
   }
 }

+void RVVEmitter::createRVVHeaders(raw_ostream &OS) {
+  std::vector<Record *> RVVHeaders =
+      Records.getAllDerivedDefinitions("RVVHeader");
+  for (auto *R : RVVHeaders) {
+    StringRef HeaderCodeStr = R->getValueAsString("HeaderCode");
+    OS << HeaderCodeStr.str();
+  }
+}
+
 Optional<RVVTypes> RVVEmitter::computeTypes(BasicType BT, int Log2LMUL,
                                             unsigned NF,
                                             ArrayRef<std::string> PrototypeSeq) {