diff --git a/clang/include/clang/Support/RISCVVIntrinsicUtils.h b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
--- a/clang/include/clang/Support/RISCVVIntrinsicUtils.h
+++ b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
@@ -99,7 +99,7 @@
     Agnostic,
   };
   PolicyType TailPolicy = Agnostic;
-  PolicyType MaskPolicy = Undisturbed;
+  PolicyType MaskPolicy = Agnostic;
   Policy() : IsUnspecified(true) {}
   Policy(PolicyType TailPolicy) : TailPolicy(TailPolicy) {}
   Policy(PolicyType TailPolicy, PolicyType MaskPolicy)
diff --git a/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
--- a/clang/lib/Support/RISCVVIntrinsicUtils.cpp
+++ b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -1014,16 +1014,11 @@
   PolicyAttrs.IsUnspecified = false;
   if (IsMasked) {
     Name += "_m";
-    // FIXME: Currently _m default policy implementation is different with
-    // RVV intrinsic spec (TUMA)
-    PolicyAttrs.TailPolicy = Policy::PolicyType::Undisturbed;
-    PolicyAttrs.MaskPolicy = Policy::PolicyType::Undisturbed;
     if (HasPolicy)
-      BuiltinName += "_tumu";
+      BuiltinName += "_tama";
     else
       BuiltinName += "_m";
   } else {
-    PolicyAttrs.TailPolicy = Policy::PolicyType::Agnostic;
     if (HasPolicy)
       BuiltinName += "_ta";
   }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaadd.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaadd.c
@@ -404,397 +404,397 @@
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vaadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return vaadd_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vaadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vaadd_vv_i8mf8_m(mask, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vaadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return vaadd_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vaadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vaadd_vx_i8mf8_m(mask, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vaadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return vaadd_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vaadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vaadd_vv_i8mf4_m(mask, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vaadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return vaadd_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vaadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vaadd_vx_i8mf4_m(mask, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vaadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return vaadd_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vaadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vaadd_vv_i8mf2_m(mask, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vaadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return vaadd_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vaadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vaadd_vx_i8mf2_m(mask, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vaadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return vaadd_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vaadd_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vaadd_vv_i8m1_m(mask, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call
@llvm.riscv.vaadd.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vaadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vaadd_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vaadd_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vaadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vaadd_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vaadd_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vaadd_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vaadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vaadd_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vaadd_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vaadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vaadd_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vaadd_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vaadd_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vaadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vaadd_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vaadd_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vaadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vaadd_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vaadd_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vaadd_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vaadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vaadd_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vaadd_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vaadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vaadd_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vaadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vaadd_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vaadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vaadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vaadd_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vaadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vaadd_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vaadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vaadd_vv_i16mf2_m(mask, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vaadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vaadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vaadd_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vaadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vaadd_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vaadd_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vaadd_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vaadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vaadd_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vaadd_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vaadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vaadd_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vaadd_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vaadd_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vaadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - 
return vaadd_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vaadd_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vaadd_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vaadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vaadd_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vaadd_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vaadd_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vaadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vaadd_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vaadd_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vaadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vaadd_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vaadd_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vaadd_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vaadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vaadd_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vaadd_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vaadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vaadd_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vaadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vaadd_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vaadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vaadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vaadd_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vaadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vaadd_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vaadd_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vaadd_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vaadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vaadd_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vaadd_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vaadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vaadd_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vaadd_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vaadd_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vaadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vaadd_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vaadd_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vaadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vaadd_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vaadd_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vaadd_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vaadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vaadd_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vaadd_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vaadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vaadd_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vaadd_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vaadd_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vaadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vaadd_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return 
vaadd_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vaadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vaadd_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vaadd_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vaadd_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vaadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vaadd_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vaadd_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vaadd_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vaadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vaadd_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vaadd_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vaadd_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vaadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vaadd_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vaadd_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vaadd_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vaadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, 
vint64m4_t op2, size_t vl) { - return vaadd_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vaadd_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vaadd_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vaadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vaadd_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vaadd_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vaadd_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vaadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vaadd_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vaadd_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vaadd_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vaadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vaadd_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vaadd_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vaadd_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaaddu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaaddu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaaddu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaaddu.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vaaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vaaddu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vaaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, 
vuint8mf8_t op2, size_t vl) { + return vaaddu_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vaaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vaaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vaaddu_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vaaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vaaddu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vaaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vaaddu_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vaaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vaaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vaaddu_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vaaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vaaddu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vaaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vaaddu_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t 
test_vaaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vaaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vaaddu_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vaaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vaaddu_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vaaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vaaddu_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vaaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vaaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vaaddu_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vaaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vaaddu_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vaaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vaaddu_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vaaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vaaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vaaddu_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vaaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vaaddu_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vaaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vaaddu_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vaaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vaaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vaaddu_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vaaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vaaddu_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vaaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vaaddu_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vaaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vaaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vaaddu_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vaaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vaaddu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vaaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vaaddu_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vaaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vaaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vaaddu_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vaaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vaaddu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vaaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vaaddu_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vaaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vaaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vaaddu_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vaaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vaaddu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vaaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vaaddu_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vaaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, 
uint16_t op2, size_t vl) { - return vaaddu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vaaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vaaddu_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vaaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vaaddu_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vaaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vaaddu_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vaaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vaaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vaaddu_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vaaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vaaddu_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vaaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vaaddu_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vaaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vaaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vaaddu_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vaaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vaaddu_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vaaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vaaddu_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vaaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vaaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vaaddu_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vaaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vaaddu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vaaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vaaddu_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vaaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vaaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vaaddu_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vaaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vaaddu_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vaaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vaaddu_vv_u32m1_m(mask, op1, op2, vl); } 
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vaaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vaaddu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vaaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vaaddu_vx_u32m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m2_t test_vaaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vaaddu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vaaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vaaddu_vv_u32m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m2_t test_vaaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vaaddu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vaaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vaaddu_vx_u32m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m4_t test_vaaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vaaddu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vaaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vaaddu_vv_u32m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m4_t test_vaaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vaaddu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vaaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vaaddu_vx_u32m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m8_t test_vaaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vaaddu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vaaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vaaddu_vv_u32m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m8_t test_vaaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vaaddu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vaaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vaaddu_vx_u32m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vaaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vaaddu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vaaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vaaddu_vv_u64m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vaaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vaaddu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vaaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vaaddu_vx_u64m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m2_t test_vaaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vaaddu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vaaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vaaddu_vv_u64m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m2_t test_vaaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vaaddu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vaaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vaaddu_vx_u64m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vaaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vaaddu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vaaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vaaddu_vv_u64m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vaaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vaaddu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vaaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vaaddu_vx_u64m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vaaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vaaddu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vaaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vaaddu_vv_u64m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vaaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vaaddu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vaaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vaaddu_vx_u64m8_m(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vadd.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vadd.c
@@ -800,793 +800,793 @@
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vadd_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vadd_vv_i8mf8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vadd_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vadd_vx_i8mf8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vadd_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vadd_vv_i8mf4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vadd_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vadd_vx_i8mf4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vadd_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vadd_vv_i8mf2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vadd_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vadd_vx_i8mf2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vadd_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vadd_vv_i8m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m1_t test_vadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vadd_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vadd_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vadd_vx_i8m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m2_t test_vadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vadd_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vadd_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vadd_vv_i8m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m2_t test_vadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vadd_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vadd_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vadd_vx_i8m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m4_t test_vadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vadd_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vadd_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vadd_vv_i8m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m4_t test_vadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vadd_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vadd_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vadd_vx_i8m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m8_t test_vadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vadd_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vadd_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vadd_vv_i8m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m8_t test_vadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vadd_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vadd_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vadd_vx_i8m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vadd_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vadd_vv_i16mf4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vadd_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vadd_vx_i16mf4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vadd_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vadd_vv_i16mf2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vadd_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vadd_vx_i16mf2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m1_t test_vadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vadd_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vadd_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vadd_vv_i16m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m1_t test_vadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vadd_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vadd_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vadd_vx_i16m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m2_t test_vadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vadd_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vadd_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vadd_vv_i16m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m2_t test_vadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vadd_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vadd_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vadd_vx_i16m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m4_t test_vadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vadd_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vadd_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vadd_vv_i16m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m4_t test_vadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vadd_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vadd_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vadd_vx_i16m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m8_t test_vadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vadd_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vadd_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vadd_vv_i16m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m8_t test_vadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vadd_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vadd_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vadd_vx_i16m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vadd_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vadd_vv_i32mf2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vadd_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vadd_vx_i32mf2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m1_t test_vadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vadd_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vadd_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vadd_vv_i32m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m1_t test_vadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vadd_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vadd_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vadd_vx_i32m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m2_t test_vadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vadd_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vadd_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vadd_vv_i32m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m2_t test_vadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vadd_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vadd_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vadd_vx_i32m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m4_t test_vadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vadd_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vadd_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vadd_vv_i32m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m4_t test_vadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vadd_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vadd_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vadd_vx_i32m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m8_t test_vadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vadd_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vadd_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vadd_vv_i32m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m8_t test_vadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vadd_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vadd_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vadd_vx_i32m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m1_t test_vadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vadd_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vadd_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vadd_vv_i64m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m1_t test_vadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vadd_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vadd_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vadd_vx_i64m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m2_t test_vadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vadd_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vadd_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vadd_vv_i64m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m2_t test_vadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vadd_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vadd_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vadd_vx_i64m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m4_t test_vadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vadd_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vadd_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vadd_vv_i64m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m4_t test_vadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vadd_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vadd_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vadd_vx_i64m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m8_t test_vadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vadd_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vadd_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vadd_vv_i64m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m8_t test_vadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vadd_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vadd_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vadd_vx_i64m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vadd_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vadd_vv_u8mf8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vadd_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vadd_vx_u8mf8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vadd_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vadd_vv_u8mf4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vadd_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vadd_vx_u8mf4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vadd_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vadd_vv_u8mf2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vadd_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vadd_vx_u8mf2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vadd_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vadd_vv_u8m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vadd_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vadd_vx_u8m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vadd_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vadd_vv_u8m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vadd_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vadd_vx_u8m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vadd_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vadd_vv_u8m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vadd_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vadd_vx_u8m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vadd_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vadd_vv_u8m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vadd_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vadd_vx_u8m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vadd_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vadd_vv_u16mf4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vadd_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vadd_vx_u16mf4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vadd_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vadd_vv_u16mf2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vadd_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vadd_vx_u16mf2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vadd_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vadd_vv_u16m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vadd_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vadd_vx_u16m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vadd_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vadd_vv_u16m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vadd_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vadd_vx_u16m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vadd_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vadd_vv_u16m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vadd_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vadd_vx_u16m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vadd_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vadd_vv_u16m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vadd_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vadd_vx_u16m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vadd_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vadd_vv_u32mf2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vadd_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vadd_vx_u32mf2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vadd_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vadd_vv_u32m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vadd_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vadd_vx_u32m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vadd_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vadd_vv_u32m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64(
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vadd_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vadd_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vadd_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vadd_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vadd_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vadd_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return 
vadd_vx_u32m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vadd_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vadd_vv_u64m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vadd_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vadd_vx_u64m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vadd_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vadd_vv_u64m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vadd_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vadd_vx_u64m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vadd_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vadd_vv_u64m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vadd_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vadd_vx_u64m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vadd_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vadd_vv_u64m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vadd_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vadd_vx_u64m8_m(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vand.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vand.c
@@ -800,793 +800,793 @@
// CHECK-RV64-LABEL: @test_vand_vv_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf8_t test_vand_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vand_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vand_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl)
{ + return vand_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vand_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vand_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vand_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vand_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vand_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vand_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vand_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vand_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vand_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vand_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vand_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vand_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vand_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vand_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vand_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vand_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vand_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vand_vx_i8mf2_m(mask, 
maskedoff, op1, op2, vl); +vint8mf2_t test_vand_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vand_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vand_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vand_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vand_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vand_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vand_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vand_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vand_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vand_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vand_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vand_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vand_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vand_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vand_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vand_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vand_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vand_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vand_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, 
vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vand_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vand_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vand_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vand_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vand_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vand_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vand_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vand_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vand_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vand_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vand_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vand_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vand_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vand_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vand_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vand_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vand_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vand_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vand_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vand_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vand_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vand_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vand_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vand_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vand_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vand_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vand_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vand_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vand_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vand_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vand_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vand_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vand_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vand_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vand_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vand_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vand_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vand_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vand_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vand_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vand_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vand_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vand_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vand_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vand_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vand_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vand_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vand_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vand_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vand_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vand_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vand_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vand_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vand_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vand_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vand_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vand_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vand_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vand_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m8_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vand_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vand_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vand_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vand_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vand_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vand_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vand_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vand_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vand_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vand_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vand_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vand_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vand_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vand_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vand_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vand_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vand_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vand_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t 
test_vand_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vand_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vand_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vand_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vand_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vand_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vand_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vand_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vand_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vand_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vand_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vand_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vand_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vand_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vand_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vand_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vand_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vand_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t 
test_vand_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vand_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vand_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vand_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vand_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vand_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vand_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vand_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vand_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vand_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vand_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vand_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vand_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vand_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vand_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vand_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vand_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vand_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vand_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vand_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vand_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vand_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vand_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vand_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vand_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vand_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vand_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vand_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vand_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vand_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vand_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vand_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vand_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vand_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vand_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vand_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vand_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vand_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vand_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vand_vx_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vand_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vand_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vand_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vand_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vand_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vand_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vand_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vand_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vand_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vand_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vand_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vand_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vand_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vand_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vand_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vand_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vand_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + 
return vand_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vand_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vand_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vand_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vand_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vand_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vand_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vand_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vand_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vand_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vand_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vand_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vand_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vand_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vand_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vand_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8m2_m(mask, maskedoff, op1, 
op2, vl); +vuint8m2_t test_vand_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vand_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vand_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vand_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vand_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vand_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vand_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vand_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vand_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vand_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vand_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vand_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vand_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vand_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vand_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vand_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vand_vv_u16mf4_m(vbool64_t mask, 
vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vand_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vand_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vand_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vand_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vand_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vand_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vand_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vand_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vand_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vand_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vand_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vand_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vand_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vand_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vand_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vand_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vand_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vand_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vand_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vand_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vand_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vand_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vand_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vand_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vand_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vand_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vand_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vand_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vand_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vand_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vand_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vand_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vand_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vand_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m8_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vand_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vand_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vand_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vand_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vand_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vand_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vand_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vand_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vand_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vand_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vand_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vand_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vand_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vand_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vand_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vand_vv_u32m1_m(mask, maskedoff, op1, op2, vl); 
+vuint32m1_t test_vand_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vand_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vand_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vand_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vand_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vand_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vand_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vand_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vand_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vand_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vand_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vand_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vand_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vand_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vand_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vand_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vuint32m4_t test_vand_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vand_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vand_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vand_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vand_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vand_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vand_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vand_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vand_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vand_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vand_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vand_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vand_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vand_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vand_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vand_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vand_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vand_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vand_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vand_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vand_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vand_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vand_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vand_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vand_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vand_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vand_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vand_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vand_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vand_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vand_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vand_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vand_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vand_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vand_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vand_vv_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vand_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vand_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m8_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vand_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vand_vx_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vand_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vand_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vasub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vasub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vasub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vasub.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vasub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vasub_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vasub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vasub_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vasub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vasub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vasub_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vasub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vasub_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vasub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vasub_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vasub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vasub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vasub_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vasub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vasub_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vasub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vasub_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vasub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vasub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vasub_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vasub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vasub_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vasub_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vasub_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vasub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vasub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vasub_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vasub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vasub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vasub_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vasub_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vasub_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vasub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vasub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vasub_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vasub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vasub_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vasub_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vasub_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vasub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vasub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vasub_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vasub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vasub_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vasub_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vasub_vv_i8m8_m(mask, op1, op2, vl); } 
// CHECK-RV64-LABEL: @test_vasub_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vasub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vasub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vasub_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vasub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vasub_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vasub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vasub_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vasub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vasub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vasub_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vasub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vasub_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vasub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vasub_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vasub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { 
- return vasub_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vasub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vasub_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vasub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vasub_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vasub_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vasub_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vasub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vasub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vasub_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vasub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vasub_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vasub_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vasub_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vasub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vasub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vasub_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vasub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vasub_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vasub_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vasub_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vasub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vasub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vasub_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vasub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vasub_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vasub_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vasub_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vasub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vasub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vasub_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vasub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vasub_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vasub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vasub_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vasub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vasub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vasub_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vasub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vasub_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vasub_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vasub_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vasub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vasub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vasub_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vasub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vasub_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vasub_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vasub_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vasub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vasub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return 
vasub_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vasub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vasub_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vasub_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vasub_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vasub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vasub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vasub_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vasub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vasub_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vasub_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vasub_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vasub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vasub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vasub_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vasub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, 
vint64m1_t op2, size_t vl) { - return vasub_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vasub_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vasub_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vasub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vasub_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vasub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vasub_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vasub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vasub_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vasub_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vasub_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vasub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vasub_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vasub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vasub_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vasub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vasub_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vasub_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vasub_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.i64.i64( poison, 
[[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vasub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vasub_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vasub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vasub_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vasub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vasub_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vasub_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vasub_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vasub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vasub_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vasub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vasub_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vasubu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vasubu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vasubu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vasubu.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vasubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vasubu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vasubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vasubu_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vasubu_vx_u8mf8_m(vbool64_t mask, 
vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vasubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vasubu_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vasubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vasubu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vasubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vasubu_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vasubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vasubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vasubu_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vasubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vasubu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vasubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vasubu_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vasubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vasubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vasubu_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vasubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vasubu_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vasubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vasubu_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vasubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vasubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vasubu_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vasubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vasubu_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vasubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vasubu_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vasubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vasubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vasubu_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vasubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vasubu_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vasubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vasubu_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vasubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vasubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vasubu_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vasubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vasubu_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vasubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vasubu_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vasubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vasubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vasubu_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vasubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vasubu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vasubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vasubu_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vasubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); 
+vuint16mf4_t test_vasubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vasubu_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vasubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vasubu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vasubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vasubu_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vasubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vasubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vasubu_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vasubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vasubu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vasubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vasubu_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vasubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vasubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vasubu_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16.i64( poison, 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vasubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vasubu_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vasubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vasubu_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vasubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vasubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vasubu_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vasubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vasubu_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vasubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vasubu_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vasubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vasubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vasubu_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vasubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vasubu_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vasubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vasubu_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vasubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vasubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vasubu_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vasubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vasubu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vasubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vasubu_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vasubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vasubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vasubu_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vasubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vasubu_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vasubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vasubu_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vasubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return 
vasubu_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vasubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vasubu_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vasubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vasubu_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vasubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vasubu_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vasubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vasubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vasubu_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vasubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vasubu_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vasubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vasubu_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vasubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vasubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vasubu_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vasubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vasubu_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vasubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vasubu_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vasubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vasubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vasubu_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vasubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vasubu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vasubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vasubu_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vasubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vasubu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vasubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vasubu_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vasubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vasubu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vasubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vasubu_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vasubu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vasubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vasubu_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vasubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vasubu_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vasubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vasubu_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vasubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vasubu_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vasubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vasubu_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vasubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vasubu_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vasubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vasubu_vv_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vasubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vasubu_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vasubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t 
op2, size_t vl) { - return vasubu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vasubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vasubu_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vdiv.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vdiv.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vdiv_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vdiv_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vdiv_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vdiv_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vdiv_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vdiv_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vdiv_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vdiv_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vdiv_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vdiv_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vdiv_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vdiv_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vdiv_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vdiv_vx_i8mf4_m(mask, op1, op2, vl); 
} // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vdiv_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vdiv_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vdiv_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vdiv_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vdiv_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vdiv_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vdiv_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vdiv_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vdiv_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vdiv_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vdiv_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vdiv_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vdiv_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vdiv_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vdiv_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vdiv_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vdiv_vv_i8m2_m(vbool4_t mask, 
vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vdiv_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vdiv_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vdiv_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vdiv_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vdiv_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vdiv_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vdiv_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vdiv_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vdiv_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vdiv_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vdiv_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vdiv_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vdiv_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vdiv_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vdiv_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vdiv_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8m8_m(mask, 
maskedoff, op1, op2, vl); +vint8m8_t test_vdiv_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vdiv_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vdiv_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vdiv_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vdiv_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vdiv_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vdiv_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vdiv_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vdiv_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vdiv_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vdiv_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vdiv_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vdiv_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vdiv_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vdiv_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vdiv_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) 
// CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vdiv_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vdiv_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vdiv_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vdiv_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vdiv_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vdiv_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vdiv_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vdiv_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vdiv_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vdiv_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vdiv_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vdiv_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vdiv_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vdiv_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vdiv_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vdiv_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vdiv_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vdiv_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vdiv_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vdiv_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vdiv_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vdiv_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vdiv_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vdiv_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vdiv_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vdiv_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vdiv_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vdiv_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vdiv_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vdiv_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vdiv_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vdiv_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vdiv_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vdiv_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vdiv_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vdiv_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vdiv_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vdiv_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vdiv_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vdiv_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vdiv_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vdiv_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vdiv_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vdiv_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vdiv_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vdiv_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vdiv_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vdiv_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vdiv_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vdiv_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vdiv_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vdiv_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, 
vint32m4_t op2, size_t vl) { + return vdiv_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vdiv_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vdiv_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vdiv_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vdiv_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vdiv_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vdiv_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vdiv_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vdiv_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vdiv_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vdiv_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vdiv_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vdiv_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vdiv_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vdiv_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vdiv_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, 
int64_t op2, size_t vl) { - return vdiv_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vdiv_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vdiv_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vdiv_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vdiv_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vdiv_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vdiv_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vdiv_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vdiv_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vdiv_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vdiv_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vdiv_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vdiv_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vdiv_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vdiv_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vdiv_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vdiv_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vdiv_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vdiv_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vdiv_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vdiv_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vdiv_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vdiv_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vdiv_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vdiv_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vdiv_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vdiv_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vdivu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vdivu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vdivu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vdivu.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vdivu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vdivu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vdivu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vdivu_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vdivu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vdivu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vdivu_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vdivu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - 
return vdivu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vdivu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vdivu_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vdivu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vdivu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vdivu_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vdivu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vdivu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vdivu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vdivu_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vdivu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vdivu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vdivu_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vdivu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vdivu_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vdivu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vdivu_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vdivu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vdivu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vdivu_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vdivu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vdivu_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vdivu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vdivu_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vdivu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vdivu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vdivu_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vdivu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vdivu_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vdivu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vdivu_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vdivu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vdivu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vdivu_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vdivu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vdivu_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vdivu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vdivu_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vdivu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vdivu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vdivu_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vdivu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vdivu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vdivu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vdivu_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vdivu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vdivu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vdivu_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vdivu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vdivu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vdivu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vdivu_vv_u16mf2_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vdivu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vdivu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vdivu_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vdivu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vdivu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vdivu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vdivu_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vdivu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vdivu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vdivu_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vdivu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vdivu_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vdivu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vdivu_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vdivu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t 
op2, size_t vl) { - return vdivu_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vdivu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vdivu_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vdivu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vdivu_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vdivu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vdivu_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vdivu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vdivu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vdivu_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vdivu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vdivu_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vdivu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vdivu_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vdivu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vdivu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vdivu_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vdivu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vdivu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vdivu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vdivu_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vdivu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vdivu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vdivu_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vdivu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vdivu_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vdivu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vdivu_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vdivu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vdivu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vdivu_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vdivu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vdivu_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vdivu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vdivu_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vdivu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vdivu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vdivu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vdivu_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vdivu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vdivu_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vdivu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vdivu_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vdivu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vdivu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vdivu_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vdivu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vdivu_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vdivu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vdivu_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vdivu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return 
vdivu_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vdivu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vdivu_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vdivu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vdivu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vdivu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vdivu_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vdivu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vdivu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vdivu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vdivu_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vdivu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vdivu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vdivu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vdivu_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vdivu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vdivu_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vdivu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vdivu_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vdivu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vdivu_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vdivu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vdivu_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vdivu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vdivu_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vdivu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vdivu_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vdivu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vdivu_vv_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vdivu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vdivu_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vdivu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vdivu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vdivu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vdivu_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfabs.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfabs.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfabs.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfabs.c @@ -144,136 +144,136 @@ // CHECK-RV64-LABEL: @test_vfabs_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfabs_v_f16mf4_m(vbool64_t mask, 
vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfabs_v_f16mf4_m(mask, maskedoff, op1, vl); +vfloat16mf4_t test_vfabs_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) { + return vfabs_v_f16mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfabs_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfabs_v_f16mf2_m(mask, maskedoff, op1, vl); +vfloat16mf2_t test_vfabs_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) { + return vfabs_v_f16mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfabs_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfabs_v_f16m1_m(mask, maskedoff, op1, vl); +vfloat16m1_t test_vfabs_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { + return vfabs_v_f16m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfabs_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfabs_v_f16m2_m(mask, maskedoff, op1, vl); +vfloat16m2_t test_vfabs_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { + return vfabs_v_f16m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfabs_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfabs_v_f16m4_m(mask, maskedoff, op1, vl); +vfloat16m4_t test_vfabs_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { + return vfabs_v_f16m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t 
test_vfabs_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfabs_v_f16m8_m(mask, maskedoff, op1, vl); +vfloat16m8_t test_vfabs_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { + return vfabs_v_f16m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfabs_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfabs_v_f32mf2_m(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfabs_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { + return vfabs_v_f32mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfabs_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfabs_v_f32m1_m(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfabs_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { + return vfabs_v_f32m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfabs_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfabs_v_f32m2_m(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfabs_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { + return vfabs_v_f32m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfabs_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfabs_v_f32m4_m(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfabs_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { + return vfabs_v_f32m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t 
test_vfabs_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfabs_v_f32m8_m(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfabs_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { + return vfabs_v_f32m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfabs_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfabs_v_f64m1_m(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfabs_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { + return vfabs_v_f64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfabs_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfabs_v_f64m2_m(mask, maskedoff, op1, vl); +vfloat64m2_t test_vfabs_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { + return vfabs_v_f64m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfabs_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfabs_v_f64m4_m(mask, maskedoff, op1, vl); +vfloat64m4_t test_vfabs_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { + return vfabs_v_f64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfabs_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfabs_v_f64m8_m(mask, maskedoff, op1, vl); +vfloat64m8_t test_vfabs_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { + return vfabs_v_f64m8_m(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfadd.c @@ -279,271 +279,271 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfadd_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfadd_vv_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfadd_vf_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfadd_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfadd_vv_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfadd_vf_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, 
vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfadd_vv_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfadd_vv_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfadd_vf_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfadd_vv_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfadd_vv_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfadd_vf_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfadd_vv_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfadd_vv_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfadd_vf_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfadd_vv_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfadd_vv_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfadd_vf_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfadd_vv_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfadd_vv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfadd_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return 
vfadd_vf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfadd_vv_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfadd_vv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfadd_vf_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vfadd_vf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfadd_vv_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfadd_vv_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfadd_vf_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vfadd_vf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t 
mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfadd_vv_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfadd_vv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfadd_vf_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vfadd_vf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfadd_vv_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfadd_vv_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfadd_vf_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vfadd_vf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfadd_vv_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vfadd_vv_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfadd_vf_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vfadd_vf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfadd_vv_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfadd_vv_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfadd_vf_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vfadd_vf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfadd_vv_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vfadd_vv_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfadd_vf_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return 
vfadd_vf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfadd_vv_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vfadd_vv_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfadd_vf_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return vfadd_vf_f64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfclass.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfclass.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfclass.c @@ -144,136 +144,136 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vfclass_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfclass_v_u16mf4_m(mask, maskedoff, op1, vl); +vuint16mf4_t test_vfclass_v_u16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) { + return vfclass_v_u16mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vfclass_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfclass_v_u16mf2_m(mask, maskedoff, op1, vl); +vuint16mf2_t test_vfclass_v_u16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) { + return vfclass_v_u16mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vfclass_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfclass_v_u16m1_m(mask, maskedoff, op1, vl); +vuint16m1_t test_vfclass_v_u16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { + return vfclass_v_u16m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vfclass_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfclass_v_u16m2_m(mask, maskedoff, op1, vl); +vuint16m2_t test_vfclass_v_u16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { + return vfclass_v_u16m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vfclass_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfclass_v_u16m4_m(mask, maskedoff, op1, vl); +vuint16m4_t test_vfclass_v_u16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { + return vfclass_v_u16m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv32f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vfclass_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfclass_v_u16m8_m(mask, maskedoff, op1, vl); +vuint16m8_t test_vfclass_v_u16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { + return vfclass_v_u16m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfclass_v_u32mf2_m(mask, maskedoff, op1, vl); +vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { + return vfclass_v_u32mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfclass_v_u32m1_m(mask, maskedoff, op1, vl); +vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { + return vfclass_v_u32m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfclass_v_u32m2_m(mask, maskedoff, op1, vl); +vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { + return vfclass_v_u32m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfclass_v_u32m4_m(mask, maskedoff, op1, vl); +vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { + return vfclass_v_u32m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfclass_v_u32m8_m(mask, maskedoff, op1, vl); +vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { + return vfclass_v_u32m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfclass_v_u64m1_m(mask, maskedoff, op1, vl); +vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { + return vfclass_v_u64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t 
op1, size_t vl) { - return vfclass_v_u64m2_m(mask, maskedoff, op1, vl); +vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { + return vfclass_v_u64m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfclass_v_u64m4_m(mask, maskedoff, op1, vl); +vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { + return vfclass_v_u64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vfclass_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfclass_v_u64m8_m(mask, maskedoff, op1, vl); +vuint64m8_t test_vfclass_v_u64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { + return vfclass_v_u64m8_m(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfcvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfcvt.c @@ -819,811 +819,811 @@ // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i16.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vfcvt_x_f_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfcvt_x_f_v_i16mf4_m(mask, maskedoff, src, vl); +vint16mf4_t test_vfcvt_x_f_v_i16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return vfcvt_x_f_v_i16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16mf4_m(mask, maskedoff, src, vl); +vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vfcvt_x_f_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfcvt_x_f_v_i16mf2_m(mask, maskedoff, src, vl); +vint16mf2_t test_vfcvt_x_f_v_i16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfcvt_x_f_v_i16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16mf2_m(mask, maskedoff, src, vl); +vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vfcvt_x_f_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfcvt_x_f_v_i16m1_m(mask, maskedoff, src, vl); +vint16m1_t test_vfcvt_x_f_v_i16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfcvt_x_f_v_i16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m1_m(mask, maskedoff, src, vl); +vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vfcvt_x_f_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfcvt_x_f_v_i16m2_m(mask, maskedoff, src, vl); +vint16m2_t test_vfcvt_x_f_v_i16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfcvt_x_f_v_i16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: 
@test_vfcvt_rtz_x_f_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m2_m(mask, maskedoff, src, vl); +vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vfcvt_x_f_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfcvt_x_f_v_i16m4_m(mask, maskedoff, src, vl); +vint16m4_t test_vfcvt_x_f_v_i16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfcvt_x_f_v_i16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m4_m(mask, maskedoff, src, vl); +vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vfcvt_x_f_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfcvt_x_f_v_i16m8_m(mask, maskedoff, src, vl); +vint16m8_t test_vfcvt_x_f_v_i16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { + return vfcvt_x_f_v_i16m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m8_m(mask, maskedoff, src, vl); +vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_m(vbool2_t mask, 
vfloat16m8_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i16m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfcvt_xu_f_v_u16mf4_m(mask, maskedoff, src, vl); +vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return vfcvt_xu_f_v_u16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16mf4_m(mask, maskedoff, src, vl); +vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfcvt_xu_f_v_u16mf2_m(mask, maskedoff, src, vl); +vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfcvt_xu_f_v_u16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16mf2_m(mask, maskedoff, src, vl); +vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vfcvt_xu_f_v_u16m1_m(vbool16_t mask, 
vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfcvt_xu_f_v_u16m1_m(mask, maskedoff, src, vl); +vuint16m1_t test_vfcvt_xu_f_v_u16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfcvt_xu_f_v_u16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m1_m(mask, maskedoff, src, vl); +vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vfcvt_xu_f_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfcvt_xu_f_v_u16m2_m(mask, maskedoff, src, vl); +vuint16m2_t test_vfcvt_xu_f_v_u16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfcvt_xu_f_v_u16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m2_m(mask, maskedoff, src, vl); +vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vfcvt_xu_f_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfcvt_xu_f_v_u16m4_m(mask, maskedoff, src, vl); +vuint16m4_t test_vfcvt_xu_f_v_u16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfcvt_xu_f_v_u16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16.i64( poison, 
[[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m4_m(mask, maskedoff, src, vl); +vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vfcvt_xu_f_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfcvt_xu_f_v_u16m8_m(mask, maskedoff, src, vl); +vuint16m8_t test_vfcvt_xu_f_v_u16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { + return vfcvt_xu_f_v_u16m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m8_m(mask, maskedoff, src, vl); +vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u16m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) { - return vfcvt_f_x_v_f16mf4_m(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_m(vbool64_t mask, vint16mf4_t src, size_t vl) { + return vfcvt_f_x_v_f16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f16.nxv2i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) { - return vfcvt_f_x_v_f16mf2_m(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_m(vbool32_t mask, vint16mf2_t src, size_t vl) { + return vfcvt_f_x_v_f16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f16.nxv4i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfcvt_f_x_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) { - return vfcvt_f_x_v_f16m1_m(mask, maskedoff, src, vl); +vfloat16m1_t test_vfcvt_f_x_v_f16m1_m(vbool16_t mask, vint16m1_t src, size_t vl) { + return vfcvt_f_x_v_f16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f16.nxv8i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfcvt_f_x_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) { - return vfcvt_f_x_v_f16m2_m(mask, maskedoff, src, vl); +vfloat16m2_t test_vfcvt_f_x_v_f16m2_m(vbool8_t mask, vint16m2_t src, size_t vl) { + return vfcvt_f_x_v_f16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f16.nxv16i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfcvt_f_x_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) { - return vfcvt_f_x_v_f16m4_m(mask, maskedoff, src, vl); +vfloat16m4_t test_vfcvt_f_x_v_f16m4_m(vbool4_t mask, vint16m4_t src, size_t vl) { + return vfcvt_f_x_v_f16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv32f16.nxv32i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfcvt_f_x_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) { - return vfcvt_f_x_v_f16m8_m(mask, maskedoff, src, vl); +vfloat16m8_t test_vfcvt_f_x_v_f16m8_m(vbool2_t mask, vint16m8_t src, size_t vl) { + return vfcvt_f_x_v_f16m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) { - return vfcvt_f_xu_v_f16mf4_m(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_m(vbool64_t mask, vuint16mf4_t src, size_t vl) { + return vfcvt_f_xu_v_f16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) { - return vfcvt_f_xu_v_f16mf2_m(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_m(vbool32_t mask, vuint16mf2_t src, size_t vl) { + return vfcvt_f_xu_v_f16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfcvt_f_xu_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) { - return vfcvt_f_xu_v_f16m1_m(mask, maskedoff, src, vl); +vfloat16m1_t test_vfcvt_f_xu_v_f16m1_m(vbool16_t mask, vuint16m1_t src, size_t vl) { + return vfcvt_f_xu_v_f16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfcvt_f_xu_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) { - return vfcvt_f_xu_v_f16m2_m(mask, maskedoff, src, vl); +vfloat16m2_t test_vfcvt_f_xu_v_f16m2_m(vbool8_t mask, vuint16m2_t src, size_t vl) { + return vfcvt_f_xu_v_f16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfcvt_f_xu_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) { - return vfcvt_f_xu_v_f16m4_m(mask, maskedoff, src, vl); +vfloat16m4_t test_vfcvt_f_xu_v_f16m4_m(vbool4_t mask, vuint16m4_t src, size_t vl) { + return vfcvt_f_xu_v_f16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfcvt_f_xu_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) { - return vfcvt_f_xu_v_f16m8_m(mask, maskedoff, src, vl); +vfloat16m8_t test_vfcvt_f_xu_v_f16m8_m(vbool2_t mask, vuint16m8_t src, size_t vl) { + return vfcvt_f_xu_v_f16m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vfcvt_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfcvt_x_f_v_i32mf2_m(mask, maskedoff, src, vl); +vint32mf2_t test_vfcvt_x_f_v_i32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfcvt_x_f_v_i32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32mf2_m(mask, maskedoff, src, vl); +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vfcvt_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfcvt_x_f_v_i32m1_m(mask, maskedoff, src, vl); +vint32m1_t test_vfcvt_x_f_v_i32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfcvt_x_f_v_i32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m1_m(mask, maskedoff, src, vl); +vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vfcvt_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfcvt_x_f_v_i32m2_m(mask, maskedoff, src, vl); +vint32m2_t test_vfcvt_x_f_v_i32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfcvt_x_f_v_i32m2_m(mask, src, 
vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m2_m(mask, maskedoff, src, vl); +vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vfcvt_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfcvt_x_f_v_i32m4_m(mask, maskedoff, src, vl); +vint32m4_t test_vfcvt_x_f_v_i32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfcvt_x_f_v_i32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m4_m(mask, maskedoff, src, vl); +vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vfcvt_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfcvt_x_f_v_i32m8_m(mask, maskedoff, src, vl); +vint32m8_t test_vfcvt_x_f_v_i32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return vfcvt_x_f_v_i32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m8_m(mask, maskedoff, src, vl); +vint32m8_t 
test_vfcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfcvt_xu_f_v_u32mf2_m(mask, maskedoff, src, vl); +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfcvt_xu_f_v_u32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32mf2_m(mask, maskedoff, src, vl); +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfcvt_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfcvt_xu_f_v_u32m1_m(mask, maskedoff, src, vl); +vuint32m1_t test_vfcvt_xu_f_v_u32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfcvt_xu_f_v_u32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m1_m(mask, maskedoff, src, vl); +vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t 
test_vfcvt_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfcvt_xu_f_v_u32m2_m(mask, maskedoff, src, vl); +vuint32m2_t test_vfcvt_xu_f_v_u32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfcvt_xu_f_v_u32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m2_m(mask, maskedoff, src, vl); +vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfcvt_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfcvt_xu_f_v_u32m4_m(mask, maskedoff, src, vl); +vuint32m4_t test_vfcvt_xu_f_v_u32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfcvt_xu_f_v_u32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m4_m(mask, maskedoff, src, vl); +vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vfcvt_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfcvt_xu_f_v_u32m8_m(mask, maskedoff, src, vl); +vuint32m8_t test_vfcvt_xu_f_v_u32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return vfcvt_xu_f_v_u32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m8_m(mask, maskedoff, src, vl); +vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl) { - return vfcvt_f_x_v_f32mf2_m(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_m(vbool64_t mask, vint32mf2_t src, size_t vl) { + return vfcvt_f_x_v_f32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfcvt_f_x_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vint32m1_t src, size_t vl) { - return vfcvt_f_x_v_f32m1_m(mask, maskedoff, src, vl); +vfloat32m1_t test_vfcvt_f_x_v_f32m1_m(vbool32_t mask, vint32m1_t src, size_t vl) { + return vfcvt_f_x_v_f32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfcvt_f_x_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) { - return vfcvt_f_x_v_f32m2_m(mask, maskedoff, src, vl); +vfloat32m2_t test_vfcvt_f_x_v_f32m2_m(vbool16_t mask, vint32m2_t src, size_t vl) { + return vfcvt_f_x_v_f32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfcvt_f_x_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) { - return vfcvt_f_x_v_f32m4_m(mask, maskedoff, src, vl); +vfloat32m4_t test_vfcvt_f_x_v_f32m4_m(vbool8_t mask, vint32m4_t src, size_t vl) { + return vfcvt_f_x_v_f32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfcvt_f_x_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) { - return vfcvt_f_x_v_f32m8_m(mask, maskedoff, src, vl); +vfloat32m8_t test_vfcvt_f_x_v_f32m8_m(vbool4_t mask, vint32m8_t src, size_t vl) { + return vfcvt_f_x_v_f32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t vl) { - return vfcvt_f_xu_v_f32mf2_m(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_m(vbool64_t mask, vuint32mf2_t src, size_t vl) { + return vfcvt_f_xu_v_f32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfcvt_f_xu_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) { - return vfcvt_f_xu_v_f32m1_m(mask, maskedoff, src, vl); +vfloat32m1_t test_vfcvt_f_xu_v_f32m1_m(vbool32_t mask, vuint32m1_t src, size_t vl) { + return vfcvt_f_xu_v_f32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfcvt_f_xu_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) { - return vfcvt_f_xu_v_f32m2_m(mask, maskedoff, src, vl); +vfloat32m2_t test_vfcvt_f_xu_v_f32m2_m(vbool16_t mask, vuint32m2_t src, size_t vl) { + return vfcvt_f_xu_v_f32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfcvt_f_xu_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) { - return vfcvt_f_xu_v_f32m4_m(mask, maskedoff, src, vl); +vfloat32m4_t test_vfcvt_f_xu_v_f32m4_m(vbool8_t mask, vuint32m4_t src, size_t vl) { + return vfcvt_f_xu_v_f32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfcvt_f_xu_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl) { - return vfcvt_f_xu_v_f32m8_m(mask, maskedoff, src, vl); +vfloat32m8_t test_vfcvt_f_xu_v_f32m8_m(vbool4_t mask, vuint32m8_t src, size_t vl) { + return vfcvt_f_xu_v_f32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vfcvt_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfcvt_x_f_v_i64m1_m(mask, maskedoff, src, vl); +vint64m1_t test_vfcvt_x_f_v_i64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return vfcvt_x_f_v_i64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m1_m(mask, maskedoff, src, vl); +vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vfcvt_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfcvt_x_f_v_i64m2_m(mask, maskedoff, src, vl); +vint64m2_t test_vfcvt_x_f_v_i64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return vfcvt_x_f_v_i64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m2_m(mask, maskedoff, src, vl); +vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: 
@test_vfcvt_x_f_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vfcvt_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfcvt_x_f_v_i64m4_m(mask, maskedoff, src, vl); +vint64m4_t test_vfcvt_x_f_v_i64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return vfcvt_x_f_v_i64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m4_m(mask, maskedoff, src, vl); +vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vfcvt_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfcvt_x_f_v_i64m8_m(mask, maskedoff, src, vl); +vint64m8_t test_vfcvt_x_f_v_i64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return vfcvt_x_f_v_i64m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m8_m(mask, maskedoff, src, vl); +vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i64m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vfcvt_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfcvt_xu_f_v_u64m1_m(mask, maskedoff, src, vl); +vuint64m1_t test_vfcvt_xu_f_v_u64m1_m(vbool64_t mask, vfloat64m1_t src, size_t 
vl) { + return vfcvt_xu_f_v_u64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m1_m(mask, maskedoff, src, vl); +vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vfcvt_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfcvt_xu_f_v_u64m2_m(mask, maskedoff, src, vl); +vuint64m2_t test_vfcvt_xu_f_v_u64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return vfcvt_xu_f_v_u64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m2_m(mask, maskedoff, src, vl); +vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vfcvt_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfcvt_xu_f_v_u64m4_m(mask, maskedoff, src, vl); +vuint64m4_t test_vfcvt_xu_f_v_u64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return vfcvt_xu_f_v_u64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - 
return vfcvt_rtz_xu_f_v_u64m4_m(mask, maskedoff, src, vl); +vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vfcvt_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfcvt_xu_f_v_u64m8_m(mask, maskedoff, src, vl); +vuint64m8_t test_vfcvt_xu_f_v_u64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return vfcvt_xu_f_v_u64m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m8_m(mask, maskedoff, src, vl); +vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u64m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfcvt_f_x_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vint64m1_t src, size_t vl) { - return vfcvt_f_x_v_f64m1_m(mask, maskedoff, src, vl); +vfloat64m1_t test_vfcvt_f_x_v_f64m1_m(vbool64_t mask, vint64m1_t src, size_t vl) { + return vfcvt_f_x_v_f64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfcvt_f_x_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) { - return vfcvt_f_x_v_f64m2_m(mask, maskedoff, src, vl); +vfloat64m2_t test_vfcvt_f_x_v_f64m2_m(vbool32_t mask, vint64m2_t src, size_t vl) { + return vfcvt_f_x_v_f64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t 
test_vfcvt_f_x_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) { - return vfcvt_f_x_v_f64m4_m(mask, maskedoff, src, vl); +vfloat64m4_t test_vfcvt_f_x_v_f64m4_m(vbool16_t mask, vint64m4_t src, size_t vl) { + return vfcvt_f_x_v_f64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfcvt_f_x_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) { - return vfcvt_f_x_v_f64m8_m(mask, maskedoff, src, vl); +vfloat64m8_t test_vfcvt_f_x_v_f64m8_m(vbool8_t mask, vint64m8_t src, size_t vl) { + return vfcvt_f_x_v_f64m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfcvt_f_xu_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) { - return vfcvt_f_xu_v_f64m1_m(mask, maskedoff, src, vl); +vfloat64m1_t test_vfcvt_f_xu_v_f64m1_m(vbool64_t mask, vuint64m1_t src, size_t vl) { + return vfcvt_f_xu_v_f64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfcvt_f_xu_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) { - return vfcvt_f_xu_v_f64m2_m(mask, maskedoff, src, vl); +vfloat64m2_t test_vfcvt_f_xu_v_f64m2_m(vbool32_t mask, vuint64m2_t src, size_t vl) { + return vfcvt_f_xu_v_f64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfcvt_f_xu_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) { - return vfcvt_f_xu_v_f64m4_m(mask, maskedoff, src, vl); +vfloat64m4_t test_vfcvt_f_xu_v_f64m4_m(vbool16_t mask, vuint64m4_t src, size_t vl) { + return vfcvt_f_xu_v_f64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) 
// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfcvt_f_xu_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) { - return vfcvt_f_xu_v_f64m8_m(mask, maskedoff, src, vl); +vfloat64m8_t test_vfcvt_f_xu_v_f64m8_m(vbool8_t mask, vuint64m8_t src, size_t vl) { + return vfcvt_f_xu_v_f64m8_m(mask, src, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfdiv.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfdiv.c @@ -279,271 +279,271 @@ // CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfdiv_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfdiv_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfdiv_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfdiv_vv_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfdiv_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfdiv_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfdiv_vf_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfdiv_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfdiv_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfdiv_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfdiv_vv_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfdiv_vf_f16mf2_m(vbool32_t mask, 
vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfdiv_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfdiv_vf_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfdiv_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfdiv_vv_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfdiv_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfdiv_vv_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfdiv_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfdiv_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfdiv_vf_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfdiv_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfdiv_vv_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfdiv_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfdiv_vv_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfdiv_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfdiv_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfdiv_vf_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfdiv_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfdiv_vv_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfdiv_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfdiv_vv_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfdiv_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfdiv_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfdiv_vf_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfdiv_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfdiv_vv_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfdiv_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfdiv_vv_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfdiv_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfdiv_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfdiv_vf_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfdiv_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfdiv_vv_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfdiv_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return 
vfdiv_vv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfdiv_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfdiv_vf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfdiv_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfdiv_vv_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfdiv_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfdiv_vv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfdiv_vf_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vfdiv_vf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfdiv_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfdiv_vv_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfdiv_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfdiv_vv_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t 
test_vfdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfdiv_vf_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vfdiv_vf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfdiv_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfdiv_vv_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfdiv_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfdiv_vv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfdiv_vf_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vfdiv_vf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfdiv_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfdiv_vv_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfdiv_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfdiv_vv_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfdiv_vf_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vfdiv_vf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfdiv_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfdiv_vv_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfdiv_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vfdiv_vv_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfdiv_vf_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vfdiv_vf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfdiv_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfdiv_vv_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfdiv_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfdiv_vv_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfdiv_vf_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vfdiv_vf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfdiv_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfdiv_vv_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfdiv_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return 
vfdiv_vv_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfdiv_vf_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return vfdiv_vf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfdiv_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfdiv_vv_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfdiv_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vfdiv_vv_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfdiv_vf_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return vfdiv_vf_f64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmacc.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f16.nxv16f16.i64( 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmacc.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], 
i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t 
vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmadd.c @@ -9,7 +9,7 @@ 
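[Note] The vfmadd.c hunks below and the vfmax.c hunks further down illustrate the two shapes the test updates in this patch take. For the FMA-family intrinsics (vfmacc, vfmadd), the destination vd stays as the accumulator operand and only the trailing policy immediate changes, 0 (tail undisturbed, mask undisturbed) becoming 3 (tail agnostic, mask agnostic), for both the unmasked (previously 1) and masked (previously 0) forms. For non-FMA masked intrinsics such as vfmax, the maskedoff argument is dropped from the C signature entirely and poison is passed as the merge operand. A minimal caller-side sketch of the updated _m signatures follows; it is an illustration under the standard policy-operand encoding (tail agnostic = bit 0, mask agnostic = bit 1, so 3 = TAMA) and is not part of the patch:

#include <riscv_vector.h>

// FMA form: vd is still an explicit operand, but with the new TAMA
// default the tail and masked-off elements of the result may hold any
// value. Per the checks above, this lowers to
// @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32.i64(vd, vs1, vs2, mask, vl, 3).
vfloat32m1_t madd_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1,
                    vfloat32m1_t vs2, size_t vl) {
  return vfmadd_vv_f32m1_m(mask, vd, vs1, vs2, vl);
}

// Non-FMA form: no maskedoff parameter anymore; the merge operand in the
// lowered call is poison, so only the active elements of the result are
// defined.
vfloat32m1_t max_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2,
                   size_t vl) {
  return vfmax_vv_f32m1_m(mask, op1, op2, vl);
}

Callers that relied on the old undisturbed behavior of the plain _m forms presumably need to move to an explicit tail/mask-undisturbed policy variant rather than the defaults exercised by these tests.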
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t 
vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vf_f32mf2(vfloat32mf2_t vd, float 
rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t 
test_vfmadd_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vfloat64m4_t test_vfmadd_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, 
vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmax.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmax.c @@ -279,271 +279,271 @@ // CHECK-RV64-LABEL: @test_vfmax_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmax_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfmax_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmax_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfmax_vv_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmax_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmax_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfmax_vf_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmax_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfmax_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmax_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfmax_vv_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16mf2_t test_vfmax_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfmax_vf_f16mf2_m(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfmax_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+ return vfmax_vf_f16mf2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m1_t test_vfmax_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfmax_vv_f16m1_m(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfmax_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+ return vfmax_vv_f16m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m1_t test_vfmax_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfmax_vf_f16m1_m(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfmax_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+ return vfmax_vf_f16m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m2_t test_vfmax_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfmax_vv_f16m2_m(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfmax_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+ return vfmax_vv_f16m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m2_t test_vfmax_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfmax_vf_f16m2_m(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfmax_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+ return vfmax_vf_f16m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m4_t test_vfmax_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfmax_vv_f16m4_m(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfmax_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+ return vfmax_vv_f16m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m4_t test_vfmax_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfmax_vf_f16m4_m(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfmax_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+ return vfmax_vf_f16m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m8_t test_vfmax_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfmax_vv_f16m8_m(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfmax_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+ return vfmax_vv_f16m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m8_t test_vfmax_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfmax_vf_f16m8_m(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfmax_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+ return vfmax_vf_f16m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32mf2_t test_vfmax_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfmax_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfmax_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+ return vfmax_vv_f32mf2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32mf2_t test_vfmax_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfmax_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfmax_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfmax_vf_f32mf2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m1_t test_vfmax_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfmax_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfmax_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+ return vfmax_vv_f32m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m1_t test_vfmax_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfmax_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfmax_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) {
+ return vfmax_vf_f32m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m2_t test_vfmax_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfmax_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfmax_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
+ return vfmax_vv_f32m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m2_t test_vfmax_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfmax_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfmax_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) {
+ return vfmax_vf_f32m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m4_t test_vfmax_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfmax_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfmax_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+ return vfmax_vv_f32m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m4_t test_vfmax_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfmax_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfmax_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) {
+ return vfmax_vf_f32m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m8_t test_vfmax_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfmax_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfmax_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+ return vfmax_vv_f32m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m8_t test_vfmax_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfmax_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfmax_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) {
+ return vfmax_vf_f32m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m1_t test_vfmax_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfmax_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfmax_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
+ return vfmax_vv_f64m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m1_t test_vfmax_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfmax_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfmax_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) {
+ return vfmax_vf_f64m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m2_t test_vfmax_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfmax_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfmax_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
+ return vfmax_vv_f64m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m2_t test_vfmax_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfmax_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfmax_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) {
+ return vfmax_vf_f64m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m4_t test_vfmax_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfmax_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfmax_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
+ return vfmax_vv_f64m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m4_t test_vfmax_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfmax_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfmax_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) {
+ return vfmax_vf_f64m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m8_t test_vfmax_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfmax_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfmax_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+ return vfmax_vv_f64m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m8_t test_vfmax_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfmax_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfmax_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
+ return vfmax_vf_f64m8_m(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmin.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmin.c
@@ -279,271 +279,271 @@
// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16mf4_t test_vfmin_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfmin_vv_f16mf4_m(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfmin_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+ return vfmin_vv_f16mf4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16mf4_t test_vfmin_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfmin_vf_f16mf4_m(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfmin_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+ return vfmin_vf_f16mf4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16mf2_t test_vfmin_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfmin_vv_f16mf2_m(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfmin_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+ return vfmin_vv_f16mf2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16mf2_t test_vfmin_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfmin_vf_f16mf2_m(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfmin_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+ return vfmin_vf_f16mf2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m1_t test_vfmin_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfmin_vv_f16m1_m(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfmin_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+ return vfmin_vv_f16m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m1_t test_vfmin_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfmin_vf_f16m1_m(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfmin_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+ return vfmin_vf_f16m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m2_t test_vfmin_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfmin_vv_f16m2_m(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfmin_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+ return vfmin_vv_f16m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m2_t test_vfmin_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfmin_vf_f16m2_m(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfmin_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+ return vfmin_vf_f16m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m4_t test_vfmin_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfmin_vv_f16m4_m(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfmin_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+ return vfmin_vv_f16m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m4_t test_vfmin_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfmin_vf_f16m4_m(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfmin_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+ return vfmin_vf_f16m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m8_t test_vfmin_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfmin_vv_f16m8_m(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfmin_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+ return vfmin_vv_f16m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m8_t test_vfmin_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfmin_vf_f16m8_m(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfmin_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+ return vfmin_vf_f16m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32mf2_t test_vfmin_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfmin_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfmin_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+ return vfmin_vv_f32mf2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32mf2_t test_vfmin_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfmin_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfmin_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfmin_vf_f32mf2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m1_t test_vfmin_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfmin_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfmin_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+ return vfmin_vv_f32m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m1_t test_vfmin_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfmin_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfmin_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) {
+ return vfmin_vf_f32m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m2_t test_vfmin_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfmin_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfmin_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
+ return vfmin_vv_f32m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m2_t test_vfmin_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfmin_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfmin_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) {
+ return vfmin_vf_f32m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m4_t test_vfmin_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfmin_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfmin_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+ return vfmin_vv_f32m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m4_t test_vfmin_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfmin_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfmin_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) {
+ return vfmin_vf_f32m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m8_t test_vfmin_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfmin_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfmin_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+ return vfmin_vv_f32m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m8_t test_vfmin_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfmin_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfmin_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) {
+ return vfmin_vf_f32m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m1_t test_vfmin_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfmin_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfmin_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
+ return vfmin_vv_f64m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m1_t test_vfmin_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfmin_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfmin_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) {
+ return vfmin_vf_f64m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m2_t test_vfmin_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfmin_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfmin_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
+ return vfmin_vv_f64m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m2_t test_vfmin_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfmin_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfmin_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) {
+ return vfmin_vf_f64m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m4_t test_vfmin_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfmin_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfmin_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
+ return vfmin_vv_f64m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m4_t test_vfmin_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfmin_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfmin_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) {
+ return vfmin_vf_f64m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m8_t test_vfmin_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfmin_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfmin_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+ return vfmin_vv_f64m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m8_t test_vfmin_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfmin_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfmin_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
+ return vfmin_vf_f64m8_m(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsac.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsac.c
@@ -9,7 +9,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16mf4_t test_vfmsac_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -18,7 +18,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16mf4_t test_vfmsac_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
@@ -27,7 +27,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16mf2_t test_vfmsac_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
@@ -36,7 +36,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16mf2_t test_vfmsac_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
@@ -45,7 +45,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m1_t test_vfmsac_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
@@ -54,7 +54,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m1_t test_vfmsac_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
@@ -63,7 +63,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m2_t test_vfmsac_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
@@ -72,7 +72,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m2_t test_vfmsac_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
@@ -81,7 +81,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m4_t test_vfmsac_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
@@ -90,7 +90,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m4_t test_vfmsac_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
@@ -99,7 +99,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m8_t test_vfmsac_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
@@ -108,7 +108,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m8_t test_vfmsac_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
@@ -117,7 +117,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32mf2_t test_vfmsac_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
@@ -126,7 +126,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32mf2_t test_vfmsac_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
@@ -135,7 +135,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vfmsac_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
@@ -144,7 +144,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vfmsac_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
@@ -153,7 +153,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m2_t test_vfmsac_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
@@ -162,7 +162,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m2_t test_vfmsac_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
@@ -171,7 +171,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m4_t test_vfmsac_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
@@ -180,7 +180,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m4_t test_vfmsac_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
@@ -189,7 +189,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m8_t test_vfmsac_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
@@ -198,7 +198,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m8_t test_vfmsac_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
@@ -207,7 +207,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfmsac_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
@@ -216,7 +216,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfmsac_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
@@ -225,7 +225,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vfmsac_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
@@ -234,7 +234,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vfmsac_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
@@ -243,7 +243,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vfmsac_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
@@ -252,7 +252,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vfmsac_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
@@ -261,7 +261,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfmsac_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
@@ -270,7 +270,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfmsac_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
@@ -279,7 +279,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16mf4_t test_vfmsac_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -288,7 +288,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16mf4_t test_vfmsac_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
@@ -297,7 +297,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16mf2_t test_vfmsac_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
@@ -306,7 +306,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16mf2_t test_vfmsac_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
@@ -315,7 +315,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m1_t test_vfmsac_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
@@ -324,7 +324,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m1_t test_vfmsac_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
@@ -333,7 +333,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m2_t test_vfmsac_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
@@ -342,7 +342,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m2_t test_vfmsac_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
@@ -351,7 +351,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m4_t test_vfmsac_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
@@ -360,7 +360,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m4_t test_vfmsac_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
@@ -369,7 +369,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m8_t test_vfmsac_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
@@ -378,7 +378,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m8_t test_vfmsac_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
@@ -387,7 +387,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32mf2_t test_vfmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
@@ -396,7 +396,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32mf2_t test_vfmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
@@ -405,7 +405,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vfmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
@@ -414,7 +414,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vfmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
@@ -423,7 +423,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m2_t test_vfmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
@@ -432,7 +432,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m2_t test_vfmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
@@ -441,7 +441,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m4_t test_vfmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
@@ -450,7 +450,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m4_t test_vfmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
@@ -459,7 +459,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m8_t test_vfmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
@@ -468,7 +468,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m8_t test_vfmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
@@ -477,7 +477,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
@@ -486,7 +486,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
@@ -495,7 +495,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vfmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
@@ -504,7 +504,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vfmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
@@ -513,7 +513,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vfmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, 
vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
@@ -522,7 +522,7 @@
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m4_t test_vfmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
@@ -531,7 +531,7 @@
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m8_t test_vfmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
@@ -540,7 +540,7 @@
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m8_t test_vfmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsub.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsub.c
@@ -9,7 +9,7 @@
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat16mf4_t test_vfmsub_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -18,7 +18,7 @@
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat16mf4_t test_vfmsub_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
@@ -27,7 +27,7 @@
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
 //
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i64( [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmsub.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsub_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vv_f32mf2_m(vbool64_t mask, 
vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], 
[[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m2_t test_vfmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
@@ -513,7 +513,7 @@
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m4_t test_vfmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
@@ -522,7 +522,7 @@
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m4_t test_vfmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
@@ -531,7 +531,7 @@
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m8_t test_vfmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
@@ -540,7 +540,7 @@
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m8_t test_vfmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmul.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmul.c
@@ -279,271 +279,271 @@
 // CHECK-RV64-LABEL: @test_vfmul_vv_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat16mf4_t test_vfmul_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff,
vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfmul_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmul_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfmul_vv_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmul_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmul_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfmul_vf_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmul_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfmul_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmul_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfmul_vv_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmul_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmul_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfmul_vf_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmul_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfmul_vv_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmul_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfmul_vv_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmul_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmul_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfmul_vf_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmul_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfmul_vv_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmul_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfmul_vv_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmul_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmul_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfmul_vf_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmul_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfmul_vv_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmul_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfmul_vv_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmul_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmul_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + 
return vfmul_vf_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmul_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfmul_vv_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmul_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfmul_vv_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmul_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmul_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfmul_vf_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfmul_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfmul_vv_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmul_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmul_vv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfmul_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfmul_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmul_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmul_vf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vfloat32m1_t test_vfmul_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfmul_vv_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmul_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfmul_vv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmul_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfmul_vf_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmul_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vfmul_vf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmul_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfmul_vv_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmul_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfmul_vv_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmul_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfmul_vf_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmul_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vfmul_vf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmul_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfmul_vv_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmul_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfmul_vv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m4_t test_vfmul_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfmul_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfmul_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) {
+ return vfmul_vf_f32m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m8_t test_vfmul_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfmul_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfmul_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+ return vfmul_vv_f32m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m8_t test_vfmul_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfmul_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfmul_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) {
+ return vfmul_vf_f32m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m1_t test_vfmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfmul_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
+ return vfmul_vv_f64m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m1_t test_vfmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfmul_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) {
+ return vfmul_vf_f64m1_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m2_t test_vfmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfmul_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
+ return vfmul_vv_f64m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m2_t test_vfmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfmul_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) {
+ return vfmul_vf_f64m2_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m4_t test_vfmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfmul_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
+ return vfmul_vv_f64m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m4_t test_vfmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfmul_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) {
+ return vfmul_vf_f64m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m8_t test_vfmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfmul_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+ return vfmul_vv_f64m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m8_t test_vfmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfmul_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
+ return vfmul_vf_f64m8_m(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt.c
@@ -873,865 +873,865 @@
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf8_t test_vfncvt_x_f_w_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfncvt_x_f_w_i8mf8_m(mask, maskedoff, src, vl);
+vint8mf8_t test_vfncvt_x_f_w_i8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
+ return vfncvt_x_f_w_i8mf8_m(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfncvt_rtz_x_f_w_i8mf8_m(mask, maskedoff, src, vl);
+vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
+ return vfncvt_rtz_x_f_w_i8mf8_m(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf4_t test_vfncvt_x_f_w_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfncvt_x_f_w_i8mf4_m(mask, maskedoff, src, vl);
+vint8mf4_t
test_vfncvt_x_f_w_i8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfncvt_x_f_w_i8mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8mf4_m(mask, maskedoff, src, vl); +vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i8mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vfncvt_x_f_w_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfncvt_x_f_w_i8mf2_m(mask, maskedoff, src, vl); +vint8mf2_t test_vfncvt_x_f_w_i8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfncvt_x_f_w_i8mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8mf2_m(mask, maskedoff, src, vl); +vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i8mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vfncvt_x_f_w_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfncvt_x_f_w_i8m1_m(mask, maskedoff, src, vl); +vint8m1_t test_vfncvt_x_f_w_i8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfncvt_x_f_w_i8m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, 
vfloat16m2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8m1_m(mask, maskedoff, src, vl); +vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i8m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vfncvt_x_f_w_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfncvt_x_f_w_i8m2_m(mask, maskedoff, src, vl); +vint8m2_t test_vfncvt_x_f_w_i8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfncvt_x_f_w_i8m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8m2_m(mask, maskedoff, src, vl); +vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i8m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vfncvt_x_f_w_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfncvt_x_f_w_i8m4_m(mask, maskedoff, src, vl); +vint8m4_t test_vfncvt_x_f_w_i8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { + return vfncvt_x_f_w_i8m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8m4_m(mask, maskedoff, src, vl); +vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i8m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] 
// -vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfncvt_xu_f_w_u8mf8_m(mask, maskedoff, src, vl); +vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return vfncvt_xu_f_w_u8mf8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8mf8_m(mask, maskedoff, src, vl); +vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u8mf8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfncvt_xu_f_w_u8mf4_m(mask, maskedoff, src, vl); +vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfncvt_xu_f_w_u8mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8mf4_m(mask, maskedoff, src, vl); +vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u8mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfncvt_xu_f_w_u8mf2_m(mask, maskedoff, src, vl); +vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfncvt_xu_f_w_u8mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8mf2_m(mask, maskedoff, src, vl); +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u8mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vfncvt_xu_f_w_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfncvt_xu_f_w_u8m1_m(mask, maskedoff, src, vl); +vuint8m1_t test_vfncvt_xu_f_w_u8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfncvt_xu_f_w_u8m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8m1_m(mask, maskedoff, src, vl); +vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u8m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vfncvt_xu_f_w_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfncvt_xu_f_w_u8m2_m(mask, maskedoff, src, vl); +vuint8m2_t test_vfncvt_xu_f_w_u8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfncvt_xu_f_w_u8m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8m2_m(mask, maskedoff, src, vl); +vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u8m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vfncvt_xu_f_w_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfncvt_xu_f_w_u8m4_m(mask, maskedoff, src, vl); +vuint8m4_t test_vfncvt_xu_f_w_u8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { + return vfncvt_xu_f_w_u8m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8m4_m(mask, maskedoff, src, vl); +vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u8m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vfncvt_x_f_w_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_x_f_w_i16mf4_m(mask, maskedoff, src, vl); +vint16mf4_t test_vfncvt_x_f_w_i16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfncvt_x_f_w_i16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16mf4_m(mask, maskedoff, src, vl); +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vfncvt_x_f_w_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_x_f_w_i16mf2_m(mask, maskedoff, src, vl); +vint16mf2_t test_vfncvt_x_f_w_i16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return 
vfncvt_x_f_w_i16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16mf2_m(mask, maskedoff, src, vl); +vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vfncvt_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_x_f_w_i16m1_m(mask, maskedoff, src, vl); +vint16m1_t test_vfncvt_x_f_w_i16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfncvt_x_f_w_i16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16m1_m(mask, maskedoff, src, vl); +vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vfncvt_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_x_f_w_i16m2_m(mask, maskedoff, src, vl); +vint16m2_t test_vfncvt_x_f_w_i16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfncvt_x_f_w_i16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return 
vfncvt_rtz_x_f_w_i16m2_m(mask, maskedoff, src, vl); +vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vfncvt_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_x_f_w_i16m4_m(mask, maskedoff, src, vl); +vint16m4_t test_vfncvt_x_f_w_i16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return vfncvt_x_f_w_i16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16m4_m(mask, maskedoff, src, vl); +vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_xu_f_w_u16mf4_m(mask, maskedoff, src, vl); +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfncvt_xu_f_w_u16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16mf4_m(mask, maskedoff, src, vl); +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_xu_f_w_u16mf2_m(mask, maskedoff, src, vl); +vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfncvt_xu_f_w_u16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16mf2_m(mask, maskedoff, src, vl); +vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vfncvt_xu_f_w_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_xu_f_w_u16m1_m(mask, maskedoff, src, vl); +vuint16m1_t test_vfncvt_xu_f_w_u16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfncvt_xu_f_w_u16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16m1_m(mask, maskedoff, src, vl); +vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vfncvt_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_xu_f_w_u16m2_m(mask, maskedoff, src, vl); +vuint16m2_t test_vfncvt_xu_f_w_u16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfncvt_xu_f_w_u16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], 
[[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16m2_m(mask, maskedoff, src, vl); +vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vfncvt_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_xu_f_w_u16m4_m(mask, maskedoff, src, vl); +vuint16m4_t test_vfncvt_xu_f_w_u16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return vfncvt_xu_f_w_u16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16m4_m(mask, maskedoff, src, vl); +vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) { - return vfncvt_f_x_w_f16mf4_m(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_m(vbool64_t mask, vint32mf2_t src, size_t vl) { + return vfncvt_f_x_w_f16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) { - return vfncvt_f_x_w_f16mf2_m(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_m(vbool32_t mask, vint32m1_t src, size_t vl) { + return vfncvt_f_x_w_f16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: 
@test_vfncvt_f_x_w_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfncvt_f_x_w_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) { - return vfncvt_f_x_w_f16m1_m(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_f_x_w_f16m1_m(vbool16_t mask, vint32m2_t src, size_t vl) { + return vfncvt_f_x_w_f16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfncvt_f_x_w_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) { - return vfncvt_f_x_w_f16m2_m(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_f_x_w_f16m2_m(vbool8_t mask, vint32m4_t src, size_t vl) { + return vfncvt_f_x_w_f16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfncvt_f_x_w_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) { - return vfncvt_f_x_w_f16m4_m(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_f_x_w_f16m4_m(vbool4_t mask, vint32m8_t src, size_t vl) { + return vfncvt_f_x_w_f16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { - return vfncvt_f_xu_w_f16mf4_m(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_m(vbool64_t mask, vuint32mf2_t src, size_t vl) { + return vfncvt_f_xu_w_f16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) { - return vfncvt_f_xu_w_f16mf2_m(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_m(vbool32_t mask, 
vuint32m1_t src, size_t vl) { + return vfncvt_f_xu_w_f16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfncvt_f_xu_w_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) { - return vfncvt_f_xu_w_f16m1_m(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_f_xu_w_f16m1_m(vbool16_t mask, vuint32m2_t src, size_t vl) { + return vfncvt_f_xu_w_f16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfncvt_f_xu_w_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) { - return vfncvt_f_xu_w_f16m2_m(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_f_xu_w_f16m2_m(vbool8_t mask, vuint32m4_t src, size_t vl) { + return vfncvt_f_xu_w_f16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfncvt_f_xu_w_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) { - return vfncvt_f_xu_w_f16m4_m(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_f_xu_w_f16m4_m(vbool4_t mask, vuint32m8_t src, size_t vl) { + return vfncvt_f_xu_w_f16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_f_f_w_f16mf4_m(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfncvt_f_f_w_f16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, 
size_t vl) { - return vfncvt_rod_f_f_w_f16mf4_m(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfncvt_rod_f_f_w_f16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_f_f_w_f16mf2_m(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfncvt_f_f_w_f16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16mf2_m(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfncvt_rod_f_f_w_f16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfncvt_f_f_w_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_f_f_w_f16m1_m(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_f_f_w_f16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfncvt_f_f_w_f16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16m1_m(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfncvt_rod_f_f_w_f16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32.i64( poison, [[SRC:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfncvt_f_f_w_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_f_f_w_f16m2_m(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_f_f_w_f16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfncvt_f_f_w_f16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16m2_m(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfncvt_rod_f_f_w_f16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfncvt_f_f_w_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_f_f_w_f16m4_m(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_f_f_w_f16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return vfncvt_f_f_w_f16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16m4_m(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return vfncvt_rod_f_f_w_f16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vfncvt_x_f_w_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_x_f_w_i32mf2_m(mask, maskedoff, src, vl); +vint32mf2_t test_vfncvt_x_f_w_i32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return vfncvt_x_f_w_i32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i32mf2_m(mask, maskedoff, src, vl); +vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vfncvt_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_x_f_w_i32m1_m(mask, maskedoff, src, vl); +vint32m1_t test_vfncvt_x_f_w_i32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return vfncvt_x_f_w_i32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i32m1_m(mask, maskedoff, src, vl); +vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vfncvt_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_x_f_w_i32m2_m(mask, maskedoff, src, vl); +vint32m2_t test_vfncvt_x_f_w_i32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return vfncvt_x_f_w_i32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i32m2_m(mask, maskedoff, src, vl); +vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vfncvt_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_x_f_w_i32m4_m(mask, maskedoff, src, vl); +vint32m4_t test_vfncvt_x_f_w_i32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return vfncvt_x_f_w_i32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i32m4_m(mask, maskedoff, src, vl); +vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_xu_f_w_u32mf2_m(mask, maskedoff, src, vl); +vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return vfncvt_xu_f_w_u32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u32mf2_m(mask, maskedoff, src, vl); +vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfncvt_xu_f_w_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_xu_f_w_u32m1_m(mask, maskedoff, src, vl); +vuint32m1_t test_vfncvt_xu_f_w_u32m1_m(vbool32_t mask, 
vfloat64m2_t src, size_t vl) { + return vfncvt_xu_f_w_u32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u32m1_m(mask, maskedoff, src, vl); +vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfncvt_xu_f_w_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_xu_f_w_u32m2_m(mask, maskedoff, src, vl); +vuint32m2_t test_vfncvt_xu_f_w_u32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return vfncvt_xu_f_w_u32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u32m2_m(mask, maskedoff, src, vl); +vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfncvt_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_xu_f_w_u32m4_m(mask, maskedoff, src, vl); +vuint32m4_t test_vfncvt_xu_f_w_u32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return vfncvt_xu_f_w_u32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t mask, 
vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u32m4_m(mask, maskedoff, src, vl); +vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl) { - return vfncvt_f_x_w_f32mf2_m(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_m(vbool64_t mask, vint64m1_t src, size_t vl) { + return vfncvt_f_x_w_f32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfncvt_f_x_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) { - return vfncvt_f_x_w_f32m1_m(mask, maskedoff, src, vl); +vfloat32m1_t test_vfncvt_f_x_w_f32m1_m(vbool32_t mask, vint64m2_t src, size_t vl) { + return vfncvt_f_x_w_f32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfncvt_f_x_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) { - return vfncvt_f_x_w_f32m2_m(mask, maskedoff, src, vl); +vfloat32m2_t test_vfncvt_f_x_w_f32m2_m(vbool16_t mask, vint64m4_t src, size_t vl) { + return vfncvt_f_x_w_f32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfncvt_f_x_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) { - return vfncvt_f_x_w_f32m4_m(mask, maskedoff, src, vl); +vfloat32m4_t test_vfncvt_f_x_w_f32m4_m(vbool8_t mask, vint64m8_t src, size_t vl) { + return vfncvt_f_x_w_f32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl) { - return vfncvt_f_xu_w_f32mf2_m(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_m(vbool64_t mask, vuint64m1_t src, size_t vl) { + return vfncvt_f_xu_w_f32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfncvt_f_xu_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl) { - return vfncvt_f_xu_w_f32m1_m(mask, maskedoff, src, vl); +vfloat32m1_t test_vfncvt_f_xu_w_f32m1_m(vbool32_t mask, vuint64m2_t src, size_t vl) { + return vfncvt_f_xu_w_f32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfncvt_f_xu_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) { - return vfncvt_f_xu_w_f32m2_m(mask, maskedoff, src, vl); +vfloat32m2_t test_vfncvt_f_xu_w_f32m2_m(vbool16_t mask, vuint64m4_t src, size_t vl) { + return vfncvt_f_xu_w_f32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfncvt_f_xu_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) { - return vfncvt_f_xu_w_f32m4_m(mask, maskedoff, src, vl); +vfloat32m4_t test_vfncvt_f_xu_w_f32m4_m(vbool8_t mask, vuint64m8_t src, size_t vl) { + return vfncvt_f_xu_w_f32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_f_f_w_f32mf2_m(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return vfncvt_f_f_w_f32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_rod_f_f_w_f32mf2_m(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return vfncvt_rod_f_f_w_f32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfncvt_f_f_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_f_f_w_f32m1_m(mask, maskedoff, src, vl); +vfloat32m1_t test_vfncvt_f_f_w_f32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return vfncvt_f_f_w_f32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_rod_f_f_w_f32m1_m(mask, maskedoff, src, vl); +vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return vfncvt_rod_f_f_w_f32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfncvt_f_f_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_f_f_w_f32m2_m(mask, maskedoff, src, vl); +vfloat32m2_t test_vfncvt_f_f_w_f32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return vfncvt_f_f_w_f32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_rod_f_f_w_f32m2_m(mask, maskedoff, src, vl); +vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return vfncvt_rod_f_f_w_f32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m4_t test_vfncvt_f_f_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfncvt_f_f_w_f32m4_m(mask, maskedoff, src, vl);
+vfloat32m4_t test_vfncvt_f_f_w_f32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
+ return vfncvt_f_f_w_f32m4_m(mask, src, vl);
}

// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfncvt_rod_f_f_w_f32m4_m(mask, maskedoff, src, vl);
+vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
+ return vfncvt_rod_f_f_w_f32m4_m(mask, src, vl);
}
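
[Editor's note, not part of the patch: the masked hunks in these conversion tests, and in the vfneg file that follows, all make the same two IR-level changes: the merge operand becomes poison and the trailing policy immediate goes from 0 to 3. That immediate is the vector policy operand the builtin passes to the intrinsic. A minimal sketch of the encoding follows; the constant names are an assumption mirroring what RISCVBaseInfo.h exposes, while the bit values are implied by the 0 -> 3 change itself.

/* Sketch of the policy-operand bit encoding (assumed names). */
enum {
  TAIL_UNDISTURBED_MASK_UNDISTURBED = 0, /* "tumu": the old _m default         */
  TAIL_AGNOSTIC = 1,                     /* bit 0: tail lanes don't-care       */
  MASK_AGNOSTIC = 2                      /* bit 1: masked-off lanes don't-care */
};
/* The new default for plain _m builtins is tail- and mask-agnostic
 * ("tama"): TAIL_AGNOSTIC | MASK_AGNOSTIC == 3. */

Under tumu (0), masked-off and tail lanes had to be copied from the merge operand, so the builtin required a real maskedoff argument; under tama (3) those lanes are unspecified, which is why the merge operand degenerates to poison and maskedoff vanishes from the C signatures.]
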
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfneg.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfneg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfneg.c
@@ -144,136 +144,136 @@
// CHECK-RV64-LABEL: @test_vfneg_v_f16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16mf4_t test_vfneg_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
- return vfneg_v_f16mf4_m(mask, maskedoff, op1, vl);
+vfloat16mf4_t test_vfneg_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) {
+ return vfneg_v_f16mf4_m(mask, op1, vl);
}

// CHECK-RV64-LABEL: @test_vfneg_v_f16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16mf2_t test_vfneg_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
- return vfneg_v_f16mf2_m(mask, maskedoff, op1, vl);
+vfloat16mf2_t test_vfneg_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) {
+ return vfneg_v_f16mf2_m(mask, op1, vl);
}

// CHECK-RV64-LABEL: @test_vfneg_v_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m1_t test_vfneg_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
- return vfneg_v_f16m1_m(mask, maskedoff, op1, vl);
+vfloat16m1_t test_vfneg_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) {
+ return vfneg_v_f16m1_m(mask, op1, vl);
}

// CHECK-RV64-LABEL: @test_vfneg_v_f16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m2_t test_vfneg_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
- return vfneg_v_f16m2_m(mask, maskedoff, op1, vl);
+vfloat16m2_t test_vfneg_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) {
+ return vfneg_v_f16m2_m(mask, op1, vl);
}

// CHECK-RV64-LABEL: @test_vfneg_v_f16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m4_t test_vfneg_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
- return vfneg_v_f16m4_m(mask, maskedoff, op1, vl);
+vfloat16m4_t test_vfneg_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) {
+ return vfneg_v_f16m4_m(mask, op1, vl);
}

// CHECK-RV64-LABEL: @test_vfneg_v_f16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m8_t test_vfneg_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
- return vfneg_v_f16m8_m(mask, maskedoff, op1, vl);
+vfloat16m8_t test_vfneg_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) {
+ return vfneg_v_f16m8_m(mask, op1, vl);
}

// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32mf2_t test_vfneg_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
- return vfneg_v_f32mf2_m(mask, maskedoff, op1, vl);
+vfloat32mf2_t test_vfneg_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {
+ return vfneg_v_f32mf2_m(mask, op1, vl);
}

// CHECK-RV64-LABEL: @test_vfneg_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( poison,
[[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfneg_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfneg_v_f32m1_m(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfneg_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { + return vfneg_v_f32m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfneg_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfneg_v_f32m2_m(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfneg_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { + return vfneg_v_f32m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfneg_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfneg_v_f32m4_m(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfneg_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { + return vfneg_v_f32m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfneg_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfneg_v_f32m8_m(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfneg_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { + return vfneg_v_f32m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfneg_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfneg_v_f64m1_m(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfneg_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { + return vfneg_v_f64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64( poison, 
[[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m2_t test_vfneg_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
- return vfneg_v_f64m2_m(mask, maskedoff, op1, vl);
+vfloat64m2_t test_vfneg_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) {
+ return vfneg_v_f64m2_m(mask, op1, vl);
}

// CHECK-RV64-LABEL: @test_vfneg_v_f64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m4_t test_vfneg_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
- return vfneg_v_f64m4_m(mask, maskedoff, op1, vl);
+vfloat64m4_t test_vfneg_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) {
+ return vfneg_v_f64m4_m(mask, op1, vl);
}

// CHECK-RV64-LABEL: @test_vfneg_v_f64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m8_t test_vfneg_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
- return vfneg_v_f64m8_m(mask, maskedoff, op1, vl);
+vfloat64m8_t test_vfneg_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) {
+ return vfneg_v_f64m8_m(mask, op1, vl);
}
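
[Editor's note, not part of the patch: vfneg has no instruction of its own; the CHECK lines above show it lowering to vfsgnjn with op1 supplied twice, once as the magnitude and once as the sign source. At the C level the only visible change is the dropped maskedoff argument. A hedged caller-side sketch, assuming <riscv_vector.h> as patched here:

#include <riscv_vector.h>

/* Negate the active lanes of v. Masked-off and tail lanes of the
 * result are now unspecified (tama) instead of being copied from a
 * maskedoff operand. */
vfloat32m1_t negate_active(vbool32_t m, vfloat32m1_t v, size_t vl) {
  /* Before this patch the call was vfneg_v_f32m1_m(m, maskedoff, v, vl). */
  return vfneg_v_f32m1_m(m, v, vl);
}

Callers that depended on the old undisturbed lanes would need one of the explicit policy spellings (e.g. a _tumu variant); those builtins are outside this diff, so treat that name as an assumption.]
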
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmacc.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmacc.c
@@ -9,7 +9,7 @@
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16mf4_t test_vfnmacc_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -18,7 +18,7 @@
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16mf4_t test_vfnmacc_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
@@ -27,7 +27,7 @@
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16mf2_t test_vfnmacc_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
@@ -36,7 +36,7 @@
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16mf2_t test_vfnmacc_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
@@ -45,7 +45,7 @@
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m1_t test_vfnmacc_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
@@ -54,7 +54,7 @@
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m1_t test_vfnmacc_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
@@ -63,7 +63,7 @@
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m2_t test_vfnmacc_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
@@ -72,7 +72,7 @@
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m2_t test_vfnmacc_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
@@ -81,7 +81,7 @@
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m4_t test_vfnmacc_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
@@ -90,7 +90,7 @@
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vfnmacc.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmacc_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmacc.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmacc_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmacc_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmacc_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmacc_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmacc_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmacc_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmacc_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmacc_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmacc_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmacc_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t 
test_vfnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
@@ -504,7 +504,7 @@
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vfnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
@@ -513,7 +513,7 @@
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vfnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
@@ -522,7 +522,7 @@
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vfnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
@@ -531,7 +531,7 @@
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
@@ -540,7 +540,7 @@
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
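
[Editor's note, not part of the patch: in the multiply-accumulate files the destination is also an input, so vd stays in both the unmasked and masked signatures; only the policy immediate moves, 1 -> 3 for unmasked forms and 0 -> 3 for masked ones. With no mask in play the mask-agnostic bit is a don't-care, so 1 -> 3 merely canonicalizes; the masked 0 -> 3 is the real default-semantics change. A hedged usage sketch matching the masked signature tested above:

#include <riscv_vector.h>

/* Masked vfnmacc keeps vd as the accumulator. For active lanes the RVV
 * spec defines vd[i] = -(vs1[i] * vs2[i]) - vd[i]; under the new tama
 * default, inactive and tail lanes of the result are unspecified
 * rather than preserved from vd. */
vfloat64m1_t fused_nmacc(vbool64_t m, vfloat64m1_t vd, vfloat64m1_t vs1,
                         vfloat64m1_t vs2, size_t vl) {
  return vfnmacc_vv_f64m1_m(m, vd, vs1, vs2, vl);
}

The vfnmadd file that follows receives the identical mechanical change.]
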
[[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmadd_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmadd.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmadd_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // 
CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vf_f64m4(vfloat64m4_t vd, double rs1, 
vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16.i64( 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmadd.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vf